Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- llava_next/share/terminfo/l/la120 +0 -0
- llava_next/share/terminfo/l/lft +0 -0
- llava_next/share/terminfo/l/linux +0 -0
- llava_next/share/terminfo/l/linux-basic +0 -0
- llava_next/share/terminfo/l/linux-c +0 -0
- llava_next/share/terminfo/l/linux-vt +0 -0
- llava_next/share/terminfo/l/linux2.2 +0 -0
- llava_next/share/terminfo/l/linux3.0 +0 -0
- llava_next/share/terminfo/l/lisaterm +0 -0
- llava_next/share/terminfo/l/liswb +0 -0
- llava_next/share/terminfo/l/ln03 +0 -0
- llava_next/share/terminfo/l/ln03-w +0 -0
- llava_next/share/terminfo/l/luna +0 -0
- llava_next/share/terminfo/p/pc3r-m +0 -0
- llava_next/share/terminfo/p/pccon+keys +0 -0
- llava_next/share/terminfo/p/pccons +0 -0
- llava_next/share/terminfo/p/putty-vt100 +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so +3 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/debug/__init__.py +71 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/debug/lib/debug_events_monitors.py +307 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/debug/lib/grpc_debug_server.py +492 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/__init__.py +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/__pycache__/__init__.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__init__.py +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__pycache__/__init__.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__pycache__/bias_op_base.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__pycache__/cudnn_deterministic_base.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__pycache__/depthwise_conv_op_base.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__pycache__/xent_op_test_base.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/bias_op_base.py +322 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/cudnn_deterministic_base.py +292 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/depthwise_conv_op_base.py +1172 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/xent_op_test_base.py +293 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/random/__init__.py +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/random/__pycache__/__init__.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/random/__pycache__/util.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/random/util.py +164 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/signal/__init__.py +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/signal/__pycache__/__init__.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/signal/__pycache__/test_util.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/signal/test_util.py +96 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/sparse_ops/__init__.py +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/sparse_ops/__pycache__/__init__.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/sparse_ops/__pycache__/sparse_xent_op_test_base.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/sparse_ops/sparse_xent_op_test_base.py +318 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/profiler/internal/_pywrap_traceme.so +3 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__pycache__/__init__.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__pycache__/api.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__pycache__/bfloat16.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -817,3 +817,5 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_mlir.so filter
|
|
| 817 |
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_parallel_device.so filter=lfs diff=lfs merge=lfs -text
|
| 818 |
pllava/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9 filter=lfs diff=lfs merge=lfs -text
|
| 819 |
videochat2/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 817 |
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_parallel_device.so filter=lfs diff=lfs merge=lfs -text
|
| 818 |
pllava/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9 filter=lfs diff=lfs merge=lfs -text
|
| 819 |
videochat2/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so filter=lfs diff=lfs merge=lfs -text
|
| 820 |
+
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so filter=lfs diff=lfs merge=lfs -text
|
| 821 |
+
videochat2/lib/python3.10/site-packages/tensorflow/python/profiler/internal/_pywrap_traceme.so filter=lfs diff=lfs merge=lfs -text
|
llava_next/share/terminfo/l/la120
ADDED
|
Binary file (473 Bytes). View file
|
|
|
llava_next/share/terminfo/l/lft
ADDED
|
Binary file (1.3 kB). View file
|
|
|
llava_next/share/terminfo/l/linux
ADDED
|
Binary file (1.74 kB). View file
|
|
|
llava_next/share/terminfo/l/linux-basic
ADDED
|
Binary file (1.61 kB). View file
|
|
|
llava_next/share/terminfo/l/linux-c
ADDED
|
Binary file (2.06 kB). View file
|
|
|
llava_next/share/terminfo/l/linux-vt
ADDED
|
Binary file (1.67 kB). View file
|
|
|
llava_next/share/terminfo/l/linux2.2
ADDED
|
Binary file (1.73 kB). View file
|
|
|
llava_next/share/terminfo/l/linux3.0
ADDED
|
Binary file (1.75 kB). View file
|
|
|
llava_next/share/terminfo/l/lisaterm
ADDED
|
Binary file (732 Bytes). View file
|
|
|
llava_next/share/terminfo/l/liswb
ADDED
|
Binary file (563 Bytes). View file
|
|
|
llava_next/share/terminfo/l/ln03
ADDED
|
Binary file (375 Bytes). View file
|
|
|
llava_next/share/terminfo/l/ln03-w
ADDED
|
Binary file (393 Bytes). View file
|
|
|
llava_next/share/terminfo/l/luna
ADDED
|
Binary file (396 Bytes). View file
|
|
|
llava_next/share/terminfo/p/pc3r-m
ADDED
|
Binary file (1.52 kB). View file
|
|
|
llava_next/share/terminfo/p/pccon+keys
ADDED
|
Binary file (704 Bytes). View file
|
|
|
llava_next/share/terminfo/p/pccons
ADDED
|
Binary file (486 Bytes). View file
|
|
|
llava_next/share/terminfo/p/putty-vt100
ADDED
|
Binary file (2.43 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f70335777d9d8013cae9b48cd9ea2dec79cf4c18049bc41116c4fedcd5d47904
|
| 3 |
+
size 2149625
|
videochat2/lib/python3.10/site-packages/tensorflow/python/debug/__init__.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Public Python API of TensorFlow Debugger (tfdbg).
|
| 16 |
+
|
| 17 |
+
See the [TFDBG](https://www.tensorflow.org/guide/debugger) guide.
|
| 18 |
+
|
| 19 |
+
@@add_debug_tensor_watch
|
| 20 |
+
@@watch_graph
|
| 21 |
+
@@watch_graph_with_denylists
|
| 22 |
+
@@DebugTensorDatum
|
| 23 |
+
@@DebugDumpDir
|
| 24 |
+
@@load_tensor_from_event
|
| 25 |
+
@@load_tensor_from_event_file
|
| 26 |
+
@@has_inf_or_nan
|
| 27 |
+
@@DumpingDebugHook
|
| 28 |
+
@@DumpingDebugWrapperSession
|
| 29 |
+
@@GrpcDebugHook
|
| 30 |
+
@@GrpcDebugWrapperSession
|
| 31 |
+
@@LocalCLIDebugHook
|
| 32 |
+
@@LocalCLIDebugWrapperSession
|
| 33 |
+
@@TensorBoardDebugHook
|
| 34 |
+
@@TensorBoardDebugWrapperSession
|
| 35 |
+
@@WatchOptions
|
| 36 |
+
|
| 37 |
+
@@reconstruct_non_debug_graph_def
|
| 38 |
+
|
| 39 |
+
@@GradientsDebugger
|
| 40 |
+
@@clear_gradient_debuggers
|
| 41 |
+
"""
|
| 42 |
+
|
| 43 |
+
# pylint: disable=unused-imports
|
| 44 |
+
from tensorflow.python.debug.lib.debug_data import DebugDumpDir
|
| 45 |
+
from tensorflow.python.debug.lib.debug_data import DebugTensorDatum
|
| 46 |
+
from tensorflow.python.debug.lib.debug_data import has_inf_or_nan
|
| 47 |
+
from tensorflow.python.debug.lib.debug_data import load_tensor_from_event
|
| 48 |
+
from tensorflow.python.debug.lib.debug_data import load_tensor_from_event_file
|
| 49 |
+
|
| 50 |
+
from tensorflow.python.debug.lib.debug_gradients import GradientsDebugger
|
| 51 |
+
|
| 52 |
+
from tensorflow.python.debug.lib.debug_graphs import reconstruct_non_debug_graph_def
|
| 53 |
+
|
| 54 |
+
from tensorflow.python.debug.lib.debug_utils import add_debug_tensor_watch
|
| 55 |
+
from tensorflow.python.debug.lib.debug_utils import watch_graph
|
| 56 |
+
from tensorflow.python.debug.lib.debug_utils import watch_graph_with_denylists
|
| 57 |
+
|
| 58 |
+
from tensorflow.python.debug.wrappers.dumping_wrapper import DumpingDebugWrapperSession
|
| 59 |
+
from tensorflow.python.debug.wrappers.framework import WatchOptions
|
| 60 |
+
from tensorflow.python.debug.wrappers.grpc_wrapper import GrpcDebugWrapperSession
|
| 61 |
+
from tensorflow.python.debug.wrappers.grpc_wrapper import TensorBoardDebugWrapperSession
|
| 62 |
+
from tensorflow.python.debug.wrappers.hooks import DumpingDebugHook
|
| 63 |
+
from tensorflow.python.debug.wrappers.hooks import GrpcDebugHook
|
| 64 |
+
from tensorflow.python.debug.wrappers.hooks import LocalCLIDebugHook
|
| 65 |
+
from tensorflow.python.debug.wrappers.hooks import TensorBoardDebugHook
|
| 66 |
+
from tensorflow.python.debug.wrappers.local_cli_wrapper import LocalCLIDebugWrapperSession
|
| 67 |
+
|
| 68 |
+
from tensorflow.python.util import all_util as _all_util
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
_all_util.remove_undocumented(__name__)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/debug/lib/debug_events_monitors.py
ADDED
|
@@ -0,0 +1,307 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Monitors for Debug Events in the tfdbg2 format.
|
| 16 |
+
|
| 17 |
+
Monitors get access to graph-building- and execution-related data
|
| 18 |
+
objects as the DebugDataReader (see `debug_events_reader.py`) reads the
|
| 19 |
+
data in a continuous fashion, via a set of callbacks. This mechanism enables
|
| 20 |
+
hooking custom logic into the DebugEvent reading stream without the need for
|
| 21 |
+
any polling or iterating over the entire data held by DebugDataReader.
|
| 22 |
+
|
| 23 |
+
This module includes the following built-in hooks:
|
| 24 |
+
- InfNanMonitor: Monitors infinity and nan values in top-level execution and
|
| 25 |
+
intra-graph execution events.
|
| 26 |
+
|
| 27 |
+
When a monitor (subtype of `BaseMonitor`) is constructed with a DebugDataReader
|
| 28 |
+
as the first argument of the constructor call, the monitor is automatically
|
| 29 |
+
registered with the DebugDataReader. For example:
|
| 30 |
+
|
| 31 |
+
```py
|
| 32 |
+
debug_data_reader = debug_events_reader.DebugDataReader(dump_dir)
|
| 33 |
+
inf_nan_monitor = debug_events_monitors.InfNanMonitor(debug_data_reader)
|
| 34 |
+
|
| 35 |
+
debug_data_reader.update()
|
| 36 |
+
# `inf_nan_monitor`'s on_* methods will get called as the execution-related
|
| 37 |
+
# and other types of data are read by `debug_data_reader`.
|
| 38 |
+
```
|
| 39 |
+
"""
|
| 40 |
+
import numpy as np
|
| 41 |
+
|
| 42 |
+
from tensorflow.core.protobuf import debug_event_pb2
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class BaseMonitor(object):
|
| 46 |
+
"""Base class for debug event data monitors."""
|
| 47 |
+
|
| 48 |
+
def __init__(self, debug_events_reader):
|
| 49 |
+
self._debug_data_reader = debug_events_reader
|
| 50 |
+
debug_events_reader._add_monitor(self) # pylint:disable=protected-access
|
| 51 |
+
|
| 52 |
+
def on_execution(self, execution_index, execution):
|
| 53 |
+
"""Monitor method for top-level execution events.
|
| 54 |
+
|
| 55 |
+
Return values (if any) are ignored by the associated DebugDataReader.
|
| 56 |
+
|
| 57 |
+
Args:
|
| 58 |
+
execution_index: The index of the top-level execution event, as an int.
|
| 59 |
+
execution: An Execution data object, for a top-level op or function
|
| 60 |
+
execution event.
|
| 61 |
+
"""
|
| 62 |
+
|
| 63 |
+
def on_graph_execution_trace(self,
|
| 64 |
+
graph_execution_trace_index,
|
| 65 |
+
graph_execution_trace):
|
| 66 |
+
"""Monitor method for intra-graph execution events.
|
| 67 |
+
|
| 68 |
+
Return values (if any) are ignored by the associated DebugDataReader.
|
| 69 |
+
|
| 70 |
+
Args:
|
| 71 |
+
graph_execution_trace_index: The index of the intra-graph execution
|
| 72 |
+
event, as an int.
|
| 73 |
+
graph_execution_trace: A GraphExecutionTrace data object, for an
|
| 74 |
+
intra-graph tensor event.
|
| 75 |
+
"""
|
| 76 |
+
|
| 77 |
+
# TODO(cais): Add more monitor methods such as on_graph_op_creation().
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
class InfNanAlert(object):
|
| 81 |
+
"""Alert for Infinity and NaN values."""
|
| 82 |
+
|
| 83 |
+
def __init__(self,
|
| 84 |
+
wall_time,
|
| 85 |
+
op_type,
|
| 86 |
+
output_slot,
|
| 87 |
+
size=None,
|
| 88 |
+
num_neg_inf=None,
|
| 89 |
+
num_pos_inf=None,
|
| 90 |
+
num_nan=None,
|
| 91 |
+
execution_index=None,
|
| 92 |
+
graph_execution_trace_index=None):
|
| 93 |
+
self._wall_time = wall_time
|
| 94 |
+
self._op_type = op_type
|
| 95 |
+
self._output_slot = output_slot
|
| 96 |
+
self._size = size
|
| 97 |
+
self._num_neg_inf = num_neg_inf
|
| 98 |
+
self._num_pos_inf = num_pos_inf
|
| 99 |
+
self._num_nan = num_nan
|
| 100 |
+
self._execution_index = execution_index
|
| 101 |
+
self._graph_execution_trace_index = graph_execution_trace_index
|
| 102 |
+
|
| 103 |
+
@property
|
| 104 |
+
def wall_time(self):
|
| 105 |
+
return self._wall_time
|
| 106 |
+
|
| 107 |
+
@property
|
| 108 |
+
def op_type(self):
|
| 109 |
+
return self._op_type
|
| 110 |
+
|
| 111 |
+
@property
|
| 112 |
+
def output_slot(self):
|
| 113 |
+
return self._output_slot
|
| 114 |
+
|
| 115 |
+
@property
|
| 116 |
+
def size(self):
|
| 117 |
+
return self._size
|
| 118 |
+
|
| 119 |
+
@property
|
| 120 |
+
def num_neg_inf(self):
|
| 121 |
+
return self._num_neg_inf
|
| 122 |
+
|
| 123 |
+
@property
|
| 124 |
+
def num_pos_inf(self):
|
| 125 |
+
return self._num_pos_inf
|
| 126 |
+
|
| 127 |
+
@property
|
| 128 |
+
def num_nan(self):
|
| 129 |
+
return self._num_nan
|
| 130 |
+
|
| 131 |
+
@property
|
| 132 |
+
def execution_index(self):
|
| 133 |
+
return self._execution_index
|
| 134 |
+
|
| 135 |
+
@property
|
| 136 |
+
def graph_execution_trace_index(self):
|
| 137 |
+
return self._graph_execution_trace_index
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class InfNanMonitor(BaseMonitor):
|
| 141 |
+
"""Monitor for Infinity and NaN in tensor values."""
|
| 142 |
+
|
| 143 |
+
def __init__(self, debug_events_reader, limit=0):
|
| 144 |
+
super(InfNanMonitor, self).__init__(debug_events_reader)
|
| 145 |
+
self._limit = limit # Track only the first _ alert events, for efficiency.
|
| 146 |
+
self._alerts = []
|
| 147 |
+
|
| 148 |
+
def _check_full_tensor_value(self,
|
| 149 |
+
tensor_value,
|
| 150 |
+
wall_time,
|
| 151 |
+
op_type,
|
| 152 |
+
output_slot,
|
| 153 |
+
execution_index=None,
|
| 154 |
+
graph_execution_trace_index=None):
|
| 155 |
+
"""Check a full tensor value.
|
| 156 |
+
|
| 157 |
+
Appends to the list of alerts if any inf or nan is found in the full tensor
|
| 158 |
+
value.
|
| 159 |
+
|
| 160 |
+
Args:
|
| 161 |
+
tensor_value: The full tensor value as a `np.ndarray`.
|
| 162 |
+
wall_time: Wall timestamp for the execution event that generated the
|
| 163 |
+
tensor value.
|
| 164 |
+
op_type: Op type executed.
|
| 165 |
+
output_slot: The output slot of the op.
|
| 166 |
+
execution_index: Index to the top-level execution event.
|
| 167 |
+
graph_execution_trace_index: Index to the intra-graph execution trace
|
| 168 |
+
(if applicable.)
|
| 169 |
+
"""
|
| 170 |
+
size = np.size(tensor_value)
|
| 171 |
+
if not size or not np.issubdtype(tensor_value.dtype, np.floating):
|
| 172 |
+
return
|
| 173 |
+
is_inf = np.isinf(tensor_value)
|
| 174 |
+
num_neg_inf = np.count_nonzero(
|
| 175 |
+
np.logical_and(is_inf, np.less(tensor_value, 0.0)))
|
| 176 |
+
num_pos_inf = np.count_nonzero(
|
| 177 |
+
np.logical_and(is_inf, np.greater(tensor_value, 0.0)))
|
| 178 |
+
num_nan = np.count_nonzero(np.isnan(tensor_value))
|
| 179 |
+
if num_neg_inf or num_pos_inf or num_nan:
|
| 180 |
+
self._alerts.append(InfNanAlert(
|
| 181 |
+
wall_time,
|
| 182 |
+
op_type,
|
| 183 |
+
output_slot,
|
| 184 |
+
size=size,
|
| 185 |
+
num_neg_inf=num_neg_inf,
|
| 186 |
+
num_pos_inf=num_pos_inf,
|
| 187 |
+
num_nan=num_nan,
|
| 188 |
+
execution_index=execution_index,
|
| 189 |
+
graph_execution_trace_index=graph_execution_trace_index))
|
| 190 |
+
|
| 191 |
+
def _check_debug_tensor_value(self,
|
| 192 |
+
tensor_debug_mode,
|
| 193 |
+
debug_tensor_value,
|
| 194 |
+
wall_time,
|
| 195 |
+
op_type,
|
| 196 |
+
output_slot,
|
| 197 |
+
execution_index=None,
|
| 198 |
+
graph_execution_trace_index=None):
|
| 199 |
+
"""Check for bad numerical values based on debug summary of tensor value.
|
| 200 |
+
|
| 201 |
+
If tensor_debug_mode is one in which debug_tensor_value does not carry
|
| 202 |
+
information about the presence or count of inf / nan values (e.g., SHAPE),
|
| 203 |
+
this method is a no-op.
|
| 204 |
+
|
| 205 |
+
When infs and/or nans are found, `InfNanAlert` objects are created and
|
| 206 |
+
appended to `self._alerts`.
|
| 207 |
+
|
| 208 |
+
Args:
|
| 209 |
+
tensor_debug_mode: TensorDebugMode proto enum.
|
| 210 |
+
debug_tensor_value: Debug tensor value as a list of numbers.
|
| 211 |
+
wall_time: Wall timestamp for the tensor event.
|
| 212 |
+
op_type: Type of the op that generated the tensor (e.g., "Conv2D").
|
| 213 |
+
output_slot: Output slot index of the tensor for the op.
|
| 214 |
+
execution_index: Top-level execution index.
|
| 215 |
+
graph_execution_trace_index: Intra-graph execution index.
|
| 216 |
+
"""
|
| 217 |
+
# FULL_TENSOR mode is handled by a separate code path.
|
| 218 |
+
assert tensor_debug_mode != debug_event_pb2.TensorDebugMode.FULL_TENSOR
|
| 219 |
+
if not debug_tensor_value:
|
| 220 |
+
return
|
| 221 |
+
if tensor_debug_mode == debug_event_pb2.TensorDebugMode.CURT_HEALTH:
|
| 222 |
+
_, any_nan_inf = debug_tensor_value
|
| 223 |
+
if any_nan_inf:
|
| 224 |
+
self._alerts.append(InfNanAlert(
|
| 225 |
+
wall_time,
|
| 226 |
+
op_type,
|
| 227 |
+
output_slot,
|
| 228 |
+
execution_index=execution_index,
|
| 229 |
+
graph_execution_trace_index=graph_execution_trace_index))
|
| 230 |
+
elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.CONCISE_HEALTH:
|
| 231 |
+
_, size, num_neg_inf, num_pos_inf, num_nan = debug_tensor_value
|
| 232 |
+
if num_neg_inf or num_pos_inf or num_nan:
|
| 233 |
+
self._alerts.append(InfNanAlert(
|
| 234 |
+
wall_time,
|
| 235 |
+
op_type,
|
| 236 |
+
output_slot,
|
| 237 |
+
size=size,
|
| 238 |
+
num_neg_inf=num_neg_inf,
|
| 239 |
+
num_pos_inf=num_pos_inf,
|
| 240 |
+
num_nan=num_nan,
|
| 241 |
+
execution_index=execution_index,
|
| 242 |
+
graph_execution_trace_index=graph_execution_trace_index))
|
| 243 |
+
elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_HEALTH:
|
| 244 |
+
(_, _, _, _, size, num_neg_inf, num_pos_inf, num_nan,
|
| 245 |
+
_, _, _) = debug_tensor_value
|
| 246 |
+
if num_neg_inf or num_pos_inf or num_nan:
|
| 247 |
+
self._alerts.append(InfNanAlert(
|
| 248 |
+
wall_time,
|
| 249 |
+
op_type,
|
| 250 |
+
output_slot,
|
| 251 |
+
size=size,
|
| 252 |
+
num_neg_inf=num_neg_inf,
|
| 253 |
+
num_pos_inf=num_pos_inf,
|
| 254 |
+
num_nan=num_nan,
|
| 255 |
+
execution_index=execution_index,
|
| 256 |
+
graph_execution_trace_index=graph_execution_trace_index))
|
| 257 |
+
|
| 258 |
+
def on_execution(self,
|
| 259 |
+
execution_index,
|
| 260 |
+
execution):
|
| 261 |
+
if self._limit > 0 and len(self._alerts) >= self._limit:
|
| 262 |
+
return
|
| 263 |
+
if (execution.tensor_debug_mode ==
|
| 264 |
+
debug_event_pb2.TensorDebugMode.FULL_TENSOR):
|
| 265 |
+
tensor_values = self._debug_data_reader.execution_to_tensor_values(
|
| 266 |
+
execution)
|
| 267 |
+
for output_slot, tensor_value in enumerate(tensor_values):
|
| 268 |
+
self._check_full_tensor_value(
|
| 269 |
+
tensor_value, execution.wall_time, execution.op_type, output_slot,
|
| 270 |
+
execution_index=execution_index)
|
| 271 |
+
elif execution.debug_tensor_values:
|
| 272 |
+
for output_slot, debug_tensor_value in enumerate(
|
| 273 |
+
execution.debug_tensor_values):
|
| 274 |
+
self._check_debug_tensor_value(
|
| 275 |
+
execution.tensor_debug_mode,
|
| 276 |
+
debug_tensor_value,
|
| 277 |
+
execution.wall_time,
|
| 278 |
+
execution.op_type,
|
| 279 |
+
output_slot,
|
| 280 |
+
execution_index=execution_index)
|
| 281 |
+
|
| 282 |
+
def on_graph_execution_trace(self,
|
| 283 |
+
graph_execution_trace_index,
|
| 284 |
+
graph_execution_trace):
|
| 285 |
+
"""Monitor method for GraphExecutionTrace data object."""
|
| 286 |
+
if self._limit > 0 and len(self._alerts) >= self._limit:
|
| 287 |
+
return
|
| 288 |
+
if (graph_execution_trace.tensor_debug_mode ==
|
| 289 |
+
debug_event_pb2.TensorDebugMode.FULL_TENSOR):
|
| 290 |
+
tensor_value = (
|
| 291 |
+
self._debug_data_reader.graph_execution_trace_to_tensor_value(
|
| 292 |
+
graph_execution_trace))
|
| 293 |
+
self._check_full_tensor_value(
|
| 294 |
+
tensor_value, graph_execution_trace.wall_time,
|
| 295 |
+
graph_execution_trace.op_type, graph_execution_trace.output_slot,
|
| 296 |
+
graph_execution_trace_index=graph_execution_trace_index)
|
| 297 |
+
elif graph_execution_trace.debug_tensor_value:
|
| 298 |
+
self._check_debug_tensor_value(
|
| 299 |
+
graph_execution_trace.tensor_debug_mode,
|
| 300 |
+
graph_execution_trace.debug_tensor_value,
|
| 301 |
+
graph_execution_trace.wall_time,
|
| 302 |
+
graph_execution_trace.op_type,
|
| 303 |
+
graph_execution_trace.output_slot,
|
| 304 |
+
graph_execution_trace_index=graph_execution_trace_index)
|
| 305 |
+
|
| 306 |
+
def alerts(self):
|
| 307 |
+
return self._alerts
|
videochat2/lib/python3.10/site-packages/tensorflow/python/debug/lib/grpc_debug_server.py
ADDED
|
@@ -0,0 +1,492 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""gRPC debug server in Python."""
|
| 16 |
+
# pylint: disable=g-bad-import-order
|
| 17 |
+
import collections
|
| 18 |
+
import json
|
| 19 |
+
import queue
|
| 20 |
+
import threading
|
| 21 |
+
import time
|
| 22 |
+
|
| 23 |
+
from concurrent import futures
|
| 24 |
+
import grpc
|
| 25 |
+
|
| 26 |
+
from tensorflow.core.debug import debug_service_pb2
|
| 27 |
+
from tensorflow.core.framework import graph_pb2
|
| 28 |
+
from tensorflow.python.debug.lib import debug_graphs
|
| 29 |
+
from tensorflow.python.debug.lib import debug_service_pb2_grpc
|
| 30 |
+
from tensorflow.python.platform import tf_logging as logging
|
| 31 |
+
from tensorflow.python.util import compat
|
| 32 |
+
|
| 33 |
+
DebugWatch = collections.namedtuple("DebugWatch",
|
| 34 |
+
["node_name", "output_slot", "debug_op"])
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _state_change(new_state, node_name, output_slot, debug_op):
|
| 38 |
+
state_change = debug_service_pb2.EventReply.DebugOpStateChange()
|
| 39 |
+
state_change.state = new_state
|
| 40 |
+
state_change.node_name = node_name
|
| 41 |
+
state_change.output_slot = output_slot
|
| 42 |
+
state_change.debug_op = debug_op
|
| 43 |
+
return state_change
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class EventListenerBaseStreamHandler:
|
| 47 |
+
"""Per-stream handler of EventListener gRPC streams."""
|
| 48 |
+
|
| 49 |
+
def __init__(self):
|
| 50 |
+
"""Constructor of EventListenerBaseStreamHandler."""
|
| 51 |
+
|
| 52 |
+
def on_core_metadata_event(self, event):
|
| 53 |
+
"""Callback for core metadata.
|
| 54 |
+
|
| 55 |
+
Args:
|
| 56 |
+
event: The Event proto that carries a JSON string in its
|
| 57 |
+
`log_message.message` field.
|
| 58 |
+
|
| 59 |
+
Returns:
|
| 60 |
+
`None` or an `EventReply` proto to be sent back to the client. If `None`,
|
| 61 |
+
an `EventReply` proto construct with the default no-arg constructor will
|
| 62 |
+
be sent back to the client.
|
| 63 |
+
"""
|
| 64 |
+
raise NotImplementedError(
|
| 65 |
+
"on_core_metadata_event() is not implemented in the base servicer "
|
| 66 |
+
"class")
|
| 67 |
+
|
| 68 |
+
def on_graph_def(self, graph_def, device_name, wall_time):
|
| 69 |
+
"""Callback for Event proto received through the gRPC stream.
|
| 70 |
+
|
| 71 |
+
This Event proto carries a GraphDef, encoded as bytes, in its graph_def
|
| 72 |
+
field.
|
| 73 |
+
|
| 74 |
+
Args:
|
| 75 |
+
graph_def: A GraphDef object.
|
| 76 |
+
device_name: Name of the device on which the graph was created.
|
| 77 |
+
wall_time: An epoch timestamp (in microseconds) for the graph.
|
| 78 |
+
|
| 79 |
+
Returns:
|
| 80 |
+
`None` or an `EventReply` proto to be sent back to the client. If `None`,
|
| 81 |
+
an `EventReply` proto construct with the default no-arg constructor will
|
| 82 |
+
be sent back to the client.
|
| 83 |
+
"""
|
| 84 |
+
raise NotImplementedError(
|
| 85 |
+
"on_graph_def() is not implemented in the base servicer class")
|
| 86 |
+
|
| 87 |
+
def on_value_event(self, event):
|
| 88 |
+
"""Callback for Event proto received through the gRPC stream.
|
| 89 |
+
|
| 90 |
+
This Event proto carries a Tensor in its summary.value[0] field.
|
| 91 |
+
|
| 92 |
+
Args:
|
| 93 |
+
event: The Event proto from the stream to be processed.
|
| 94 |
+
"""
|
| 95 |
+
raise NotImplementedError(
|
| 96 |
+
"on_value_event() is not implemented in the base servicer class")
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class EventListenerBaseServicer(debug_service_pb2_grpc.EventListenerServicer):
|
| 100 |
+
"""Base Python class for gRPC debug server."""
|
| 101 |
+
|
| 102 |
+
def __init__(self, server_port, stream_handler_class):
|
| 103 |
+
"""Constructor.
|
| 104 |
+
|
| 105 |
+
Args:
|
| 106 |
+
server_port: (int) Port number to bind to.
|
| 107 |
+
stream_handler_class: A class of the base class
|
| 108 |
+
`EventListenerBaseStreamHandler` that will be used to constructor
|
| 109 |
+
stream handler objects during `SendEvents` calls.
|
| 110 |
+
"""
|
| 111 |
+
|
| 112 |
+
self._server_port = server_port
|
| 113 |
+
self._stream_handler_class = stream_handler_class
|
| 114 |
+
|
| 115 |
+
self._server_lock = threading.Lock()
|
| 116 |
+
self._server_started = False
|
| 117 |
+
self._stop_requested = False
|
| 118 |
+
|
| 119 |
+
self._debug_ops_state_change_queue = queue.Queue()
|
| 120 |
+
self._gated_grpc_debug_watches = set()
|
| 121 |
+
self._breakpoints = set()
|
| 122 |
+
|
| 123 |
+
def SendEvents(self, request_iterator, context):
|
| 124 |
+
"""Implementation of the SendEvents service method.
|
| 125 |
+
|
| 126 |
+
This method receives streams of Event protos from the client, and processes
|
| 127 |
+
them in ways specified in the on_event() callback. The stream is
|
| 128 |
+
bi-directional, but currently only the client-to-server stream (i.e., the
|
| 129 |
+
stream from the debug ops to the server) is used.
|
| 130 |
+
|
| 131 |
+
Args:
|
| 132 |
+
request_iterator: The incoming stream of Event protos.
|
| 133 |
+
context: Server context.
|
| 134 |
+
|
| 135 |
+
Raises:
|
| 136 |
+
ValueError: If there are more than one core metadata events.
|
| 137 |
+
|
| 138 |
+
Yields:
|
| 139 |
+
An empty stream of responses.
|
| 140 |
+
"""
|
| 141 |
+
core_metadata_count = 0
|
| 142 |
+
|
| 143 |
+
# A map from GraphDef hash to a list of received chunks.
|
| 144 |
+
graph_def_chunks = {}
|
| 145 |
+
tensor_chunks = {}
|
| 146 |
+
|
| 147 |
+
stream_handler = None
|
| 148 |
+
for event in request_iterator:
|
| 149 |
+
if not stream_handler:
|
| 150 |
+
stream_handler = self._stream_handler_class()
|
| 151 |
+
|
| 152 |
+
if event.summary and event.summary.value:
|
| 153 |
+
# An Event proto carrying a tensor value.
|
| 154 |
+
maybe_tensor_event = self._process_tensor_event_in_chunks(
|
| 155 |
+
event, tensor_chunks)
|
| 156 |
+
if maybe_tensor_event:
|
| 157 |
+
event_reply = stream_handler.on_value_event(maybe_tensor_event)
|
| 158 |
+
if event_reply is not None:
|
| 159 |
+
yield self._process_debug_op_state_changes(event_reply)
|
| 160 |
+
else:
|
| 161 |
+
# Non-tensor-value Event.
|
| 162 |
+
if event.graph_def:
|
| 163 |
+
# GraphDef-carrying Event.
|
| 164 |
+
maybe_graph_def, maybe_device_name, maybe_wall_time = (
|
| 165 |
+
self._process_encoded_graph_def_in_chunks(
|
| 166 |
+
event, graph_def_chunks))
|
| 167 |
+
if maybe_graph_def:
|
| 168 |
+
reply = stream_handler.on_graph_def(
|
| 169 |
+
maybe_graph_def, maybe_device_name, maybe_wall_time)
|
| 170 |
+
yield self._process_debug_op_state_changes(reply)
|
| 171 |
+
elif event.log_message.message:
|
| 172 |
+
# Core metadata-carrying Event.
|
| 173 |
+
core_metadata_count += 1
|
| 174 |
+
if core_metadata_count > 1:
|
| 175 |
+
raise ValueError(
|
| 176 |
+
"Expected one core metadata event; received multiple")
|
| 177 |
+
reply = stream_handler.on_core_metadata_event(event)
|
| 178 |
+
yield self._process_debug_op_state_changes(reply)
|
| 179 |
+
|
| 180 |
+
def _process_debug_op_state_changes(self, event_reply=None):
|
| 181 |
+
"""Dequeue and process all the queued debug-op state change protos.
|
| 182 |
+
|
| 183 |
+
Include all the debug-op state change protos in a `EventReply` proto.
|
| 184 |
+
|
| 185 |
+
Args:
|
| 186 |
+
event_reply: An `EventReply` to add the `DebugOpStateChange` protos to,
|
| 187 |
+
or `None`.
|
| 188 |
+
|
| 189 |
+
Returns:
|
| 190 |
+
An `EventReply` proto with the dequeued `DebugOpStateChange` protos (if
|
| 191 |
+
any) added.
|
| 192 |
+
"""
|
| 193 |
+
if event_reply is None:
|
| 194 |
+
event_reply = debug_service_pb2.EventReply()
|
| 195 |
+
while not self._debug_ops_state_change_queue.empty():
|
| 196 |
+
state_change = self._debug_ops_state_change_queue.get()
|
| 197 |
+
debug_node_key = (state_change.node_name, state_change.output_slot,
|
| 198 |
+
state_change.debug_op)
|
| 199 |
+
if (state_change.state ==
|
| 200 |
+
debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE):
|
| 201 |
+
logging.info("Adding breakpoint %s:%d:%s", state_change.node_name,
|
| 202 |
+
state_change.output_slot, state_change.debug_op)
|
| 203 |
+
self._breakpoints.add(debug_node_key)
|
| 204 |
+
elif (state_change.state ==
|
| 205 |
+
debug_service_pb2.EventReply.DebugOpStateChange.READ_ONLY):
|
| 206 |
+
logging.info("Adding watchpoint %s:%d:%s", state_change.node_name,
|
| 207 |
+
state_change.output_slot, state_change.debug_op)
|
| 208 |
+
if debug_node_key in self._breakpoints:
|
| 209 |
+
self._breakpoints.discard(debug_node_key)
|
| 210 |
+
elif (state_change.state ==
|
| 211 |
+
debug_service_pb2.EventReply.DebugOpStateChange.DISABLED):
|
| 212 |
+
logging.info("Removing watchpoint or breakpoint: %s:%d:%s",
|
| 213 |
+
state_change.node_name, state_change.output_slot,
|
| 214 |
+
state_change.debug_op)
|
| 215 |
+
if debug_node_key in self._breakpoints:
|
| 216 |
+
self._breakpoints.discard(debug_node_key)
|
| 217 |
+
else:
|
| 218 |
+
logging.warn(
|
| 219 |
+
"Attempting to remove a non-existent debug node key: %s",
|
| 220 |
+
debug_node_key)
|
| 221 |
+
new_state_change = event_reply.debug_op_state_changes.add()
|
| 222 |
+
new_state_change.CopyFrom(state_change)
|
| 223 |
+
return event_reply
|
| 224 |
+
|
| 225 |
+
def _process_tensor_event_in_chunks(self, event, tensor_chunks):
|
| 226 |
+
"""Possibly reassemble event chunks.
|
| 227 |
+
|
| 228 |
+
Due to gRPC's message size limit, a large tensor can be encapsulated in
|
| 229 |
+
multiple Event proto chunks to be sent through the debugger stream. This
|
| 230 |
+
method keeps track of the chunks that have arrived, reassemble all chunks
|
| 231 |
+
corresponding to a tensor when they have arrived and return the reassembled
|
| 232 |
+
Event proto.
|
| 233 |
+
|
| 234 |
+
Args:
|
| 235 |
+
event: The single Event proto that has arrived.
|
| 236 |
+
tensor_chunks: A dict used to keep track of the Event protos that have
|
| 237 |
+
arrived but haven't been reassembled.
|
| 238 |
+
|
| 239 |
+
Returns:
|
| 240 |
+
If all Event protos corresponding to a tensor have arrived, returns the
|
| 241 |
+
reassembled Event proto. Otherwise, return None.
|
| 242 |
+
"""
|
| 243 |
+
|
| 244 |
+
value = event.summary.value[0]
|
| 245 |
+
debugger_plugin_metadata = json.loads(
|
| 246 |
+
compat.as_text(value.metadata.plugin_data.content))
|
| 247 |
+
device_name = debugger_plugin_metadata["device"]
|
| 248 |
+
num_chunks = debugger_plugin_metadata["numChunks"]
|
| 249 |
+
chunk_index = debugger_plugin_metadata["chunkIndex"]
|
| 250 |
+
|
| 251 |
+
if num_chunks <= 1:
|
| 252 |
+
return event
|
| 253 |
+
|
| 254 |
+
debug_node_name = value.node_name
|
| 255 |
+
timestamp = int(event.wall_time)
|
| 256 |
+
tensor_key = "%s_%s_%d" % (device_name, debug_node_name, timestamp)
|
| 257 |
+
|
| 258 |
+
if tensor_key not in tensor_chunks:
|
| 259 |
+
tensor_chunks[tensor_key] = [None] * num_chunks
|
| 260 |
+
|
| 261 |
+
chunks = tensor_chunks[tensor_key]
|
| 262 |
+
if value.tensor.tensor_content:
|
| 263 |
+
chunks[chunk_index] = value.tensor
|
| 264 |
+
elif value.tensor.string_val:
|
| 265 |
+
chunks[chunk_index] = event
|
| 266 |
+
|
| 267 |
+
if None not in chunks:
|
| 268 |
+
if value.tensor.tensor_content:
|
| 269 |
+
event.summary.value[0].tensor.tensor_content = b"".join(
|
| 270 |
+
chunk.tensor_content for chunk in chunks)
|
| 271 |
+
del tensor_chunks[tensor_key]
|
| 272 |
+
return event
|
| 273 |
+
elif value.tensor.string_val:
|
| 274 |
+
merged_event = chunks[0]
|
| 275 |
+
for chunk in chunks[1:]:
|
| 276 |
+
merged_event.summary.value[0].tensor.string_val.extend(
|
| 277 |
+
list(chunk.summary.value[0].tensor.string_val))
|
| 278 |
+
return merged_event
|
| 279 |
+
|
| 280 |
+
def _process_encoded_graph_def_in_chunks(self,
|
| 281 |
+
event,
|
| 282 |
+
graph_def_chunks):
|
| 283 |
+
"""Process an Event proto containing a chunk of encoded GraphDef.
|
| 284 |
+
|
| 285 |
+
Args:
|
| 286 |
+
event: the Event proto containing the chunk of encoded GraphDef.
|
| 287 |
+
graph_def_chunks: A dict mapping keys for GraphDefs (i.e.,
|
| 288 |
+
"<graph_def_hash>,<device_name>,<wall_time>") to a list of chunks of
|
| 289 |
+
encoded GraphDefs.
|
| 290 |
+
|
| 291 |
+
Returns:
|
| 292 |
+
If all chunks of the GraphDef have arrived,
|
| 293 |
+
return decoded GraphDef proto, device name, wall_time.
|
| 294 |
+
Otherwise,
|
| 295 |
+
return None, None, None.
|
| 296 |
+
"""
|
| 297 |
+
graph_def = graph_pb2.GraphDef()
|
| 298 |
+
index_bar_0 = event.graph_def.find(b"|")
|
| 299 |
+
index_bar_1 = event.graph_def.find(b"|", index_bar_0 + 1)
|
| 300 |
+
index_bar_2 = event.graph_def.find(b"|", index_bar_1 + 1)
|
| 301 |
+
graph_def_hash_device_timestamp = event.graph_def[:index_bar_0]
|
| 302 |
+
chunk_index = int(event.graph_def[index_bar_0 + 1 : index_bar_1])
|
| 303 |
+
num_chunks = int(event.graph_def[index_bar_1 + 1 : index_bar_2])
|
| 304 |
+
if graph_def_hash_device_timestamp not in graph_def_chunks:
|
| 305 |
+
graph_def_chunks[graph_def_hash_device_timestamp] = [None] * num_chunks
|
| 306 |
+
graph_def_chunks[graph_def_hash_device_timestamp][
|
| 307 |
+
chunk_index] = event.graph_def[index_bar_2 + 1:]
|
| 308 |
+
if all(graph_def_chunks[graph_def_hash_device_timestamp]):
|
| 309 |
+
device_name = graph_def_hash_device_timestamp.split(b",")[1]
|
| 310 |
+
wall_time = int(graph_def_hash_device_timestamp.split(b",")[2])
|
| 311 |
+
graph_def.ParseFromString(
|
| 312 |
+
b"".join(graph_def_chunks[graph_def_hash_device_timestamp]))
|
| 313 |
+
del graph_def_chunks[graph_def_hash_device_timestamp]
|
| 314 |
+
self._process_graph_def(graph_def)
|
| 315 |
+
return graph_def, device_name, wall_time
|
| 316 |
+
else:
|
| 317 |
+
return None, None, None
|
| 318 |
+
|
| 319 |
+
def _process_graph_def(self, graph_def):
|
| 320 |
+
for node_def in graph_def.node:
|
| 321 |
+
if (debug_graphs.is_debug_node(node_def.name) and
|
| 322 |
+
node_def.attr["gated_grpc"].b):
|
| 323 |
+
node_name, output_slot, _, debug_op = (
|
| 324 |
+
debug_graphs.parse_debug_node_name(node_def.name))
|
| 325 |
+
self._gated_grpc_debug_watches.add(
|
| 326 |
+
DebugWatch(node_name, output_slot, debug_op))
|
| 327 |
+
|
| 328 |
+
def run_server(self, blocking=True):
|
| 329 |
+
"""Start running the server.
|
| 330 |
+
|
| 331 |
+
Args:
|
| 332 |
+
blocking: If `True`, block until `stop_server()` is invoked.
|
| 333 |
+
|
| 334 |
+
Raises:
|
| 335 |
+
ValueError: If server stop has already been requested, or if the server
|
| 336 |
+
has already started running.
|
| 337 |
+
"""
|
| 338 |
+
self._server_lock.acquire()
|
| 339 |
+
try:
|
| 340 |
+
if self._stop_requested:
|
| 341 |
+
raise ValueError("Server has already stopped")
|
| 342 |
+
if self._server_started:
|
| 343 |
+
raise ValueError("Server has already started running")
|
| 344 |
+
|
| 345 |
+
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
|
| 346 |
+
("grpc.max_send_message_length", -1)]
|
| 347 |
+
self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
|
| 348 |
+
options=no_max_message_sizes)
|
| 349 |
+
debug_service_pb2_grpc.add_EventListenerServicer_to_server(self,
|
| 350 |
+
self.server)
|
| 351 |
+
self.server.add_insecure_port("[::]:%d" % self._server_port)
|
| 352 |
+
self.server.start()
|
| 353 |
+
self._server_started = True
|
| 354 |
+
finally:
|
| 355 |
+
self._server_lock.release()
|
| 356 |
+
|
| 357 |
+
if blocking:
|
| 358 |
+
while not self._stop_requested:
|
| 359 |
+
time.sleep(1.0)
|
| 360 |
+
|
| 361 |
+
def stop_server(self, grace=1.0):
|
| 362 |
+
"""Request server stopping.
|
| 363 |
+
|
| 364 |
+
Once stopped, server cannot be stopped or started again. This method is
|
| 365 |
+
non-blocking. Call `wait()` on the returned event to block until the server
|
| 366 |
+
has completely stopped.
|
| 367 |
+
|
| 368 |
+
Args:
|
| 369 |
+
grace: Grace period in seconds to be used when calling `server.stop()`.
|
| 370 |
+
|
| 371 |
+
Raises:
|
| 372 |
+
ValueError: If server stop has already been requested, or if the server
|
| 373 |
+
has not started running yet.
|
| 374 |
+
|
| 375 |
+
Returns:
|
| 376 |
+
A threading.Event that will be set when the server has completely stopped.
|
| 377 |
+
"""
|
| 378 |
+
self._server_lock.acquire()
|
| 379 |
+
try:
|
| 380 |
+
if not self._server_started:
|
| 381 |
+
raise ValueError("Server has not started running")
|
| 382 |
+
if self._stop_requested:
|
| 383 |
+
raise ValueError("Server has already stopped")
|
| 384 |
+
|
| 385 |
+
self._stop_requested = True
|
| 386 |
+
return self.server.stop(grace=grace)
|
| 387 |
+
finally:
|
| 388 |
+
self._server_lock.release()
|
| 389 |
+
|
| 390 |
+
def request_watch(self, node_name, output_slot, debug_op, breakpoint=False): # pylint: disable=redefined-builtin
|
| 391 |
+
"""Request enabling a debug tensor watchpoint or breakpoint.
|
| 392 |
+
|
| 393 |
+
This will let the server send a EventReply to the client side
|
| 394 |
+
(i.e., the debugged TensorFlow runtime process) to request adding a watch
|
| 395 |
+
key (i.e., <node_name>:<output_slot>:<debug_op>) to the list of enabled
|
| 396 |
+
watch keys. The list applies only to debug ops with the attribute
|
| 397 |
+
gated_grpc=True.
|
| 398 |
+
|
| 399 |
+
To disable the watch, use `request_unwatch()`.
|
| 400 |
+
|
| 401 |
+
Args:
|
| 402 |
+
node_name: (`str`) name of the node that the to-be-watched tensor belongs
|
| 403 |
+
to, e.g., "hidden/Weights".
|
| 404 |
+
output_slot: (`int`) output slot index of the tensor to watch.
|
| 405 |
+
debug_op: (`str`) name of the debug op to enable. This should not include
|
| 406 |
+
any attribute substrings.
|
| 407 |
+
breakpoint: (`bool`) Iff `True`, the debug op will block and wait until it
|
| 408 |
+
receives an `EventReply` response from the server. The `EventReply`
|
| 409 |
+
proto may carry a TensorProto that modifies the value of the debug op's
|
| 410 |
+
output tensor.
|
| 411 |
+
"""
|
| 412 |
+
self._debug_ops_state_change_queue.put(
|
| 413 |
+
_state_change(
|
| 414 |
+
debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE
|
| 415 |
+
if breakpoint
|
| 416 |
+
else debug_service_pb2.EventReply.DebugOpStateChange.READ_ONLY,
|
| 417 |
+
node_name, output_slot, debug_op))
|
| 418 |
+
|
| 419 |
+
def request_unwatch(self, node_name, output_slot, debug_op):
|
| 420 |
+
"""Request disabling a debug tensor watchpoint or breakpoint.
|
| 421 |
+
|
| 422 |
+
This is the opposite of `request_watch()`.
|
| 423 |
+
|
| 424 |
+
Args:
|
| 425 |
+
node_name: (`str`) name of the node that the to-be-watched tensor belongs
|
| 426 |
+
to, e.g., "hidden/Weights".
|
| 427 |
+
output_slot: (`int`) output slot index of the tensor to watch.
|
| 428 |
+
debug_op: (`str`) name of the debug op to enable. This should not include
|
| 429 |
+
any attribute substrings.
|
| 430 |
+
"""
|
| 431 |
+
self._debug_ops_state_change_queue.put(
|
| 432 |
+
_state_change(
|
| 433 |
+
debug_service_pb2.EventReply.DebugOpStateChange.DISABLED, node_name,
|
| 434 |
+
output_slot, debug_op))
|
| 435 |
+
|
| 436 |
+
@property
|
| 437 |
+
def breakpoints(self):
|
| 438 |
+
"""Get a set of the currently-activated breakpoints.
|
| 439 |
+
|
| 440 |
+
Returns:
|
| 441 |
+
A `set` of 3-tuples: (node_name, output_slot, debug_op), e.g.,
|
| 442 |
+
{("MatMul", 0, "DebugIdentity")}.
|
| 443 |
+
"""
|
| 444 |
+
return self._breakpoints
|
| 445 |
+
|
| 446 |
+
def gated_grpc_debug_watches(self):
|
| 447 |
+
"""Get the list of debug watches with attribute gated_grpc=True.
|
| 448 |
+
|
| 449 |
+
Since the server receives `GraphDef` from the debugged runtime, it can only
|
| 450 |
+
return such debug watches that it has received so far.
|
| 451 |
+
|
| 452 |
+
Returns:
|
| 453 |
+
A `list` of `DebugWatch` `namedtuples` representing the debug watches with
|
| 454 |
+
gated_grpc=True. Each `namedtuple` element has the attributes:
|
| 455 |
+
`node_name` as a `str`,
|
| 456 |
+
`output_slot` as an `int`,
|
| 457 |
+
`debug_op` as a `str`.
|
| 458 |
+
"""
|
| 459 |
+
return list(self._gated_grpc_debug_watches)
|
| 460 |
+
|
| 461 |
+
def SendTracebacks(self, request, context):
|
| 462 |
+
"""Base implementation of the handling of SendTracebacks calls.
|
| 463 |
+
|
| 464 |
+
The base implementation does nothing with the incoming request.
|
| 465 |
+
Override in an implementation of the server if necessary.
|
| 466 |
+
|
| 467 |
+
Args:
|
| 468 |
+
request: A `CallTraceback` proto, containing information about the
|
| 469 |
+
type (e.g., graph vs. eager execution) and source-code traceback of the
|
| 470 |
+
call and (any) associated `tf.Graph`s.
|
| 471 |
+
context: Server context.
|
| 472 |
+
|
| 473 |
+
Returns:
|
| 474 |
+
A `EventReply` proto.
|
| 475 |
+
"""
|
| 476 |
+
return debug_service_pb2.EventReply()
|
| 477 |
+
|
| 478 |
+
def SendSourceFiles(self, request, context):
|
| 479 |
+
"""Base implementation of the handling of SendSourceFiles calls.
|
| 480 |
+
|
| 481 |
+
The base implementation does nothing with the incoming request.
|
| 482 |
+
Override in an implementation of the server if necessary.
|
| 483 |
+
|
| 484 |
+
Args:
|
| 485 |
+
request: A `DebuggedSourceFiles` proto, containing the path, content, size
|
| 486 |
+
and last-modified timestamp of source files.
|
| 487 |
+
context: Server context.
|
| 488 |
+
|
| 489 |
+
Returns:
|
| 490 |
+
A `EventReply` proto.
|
| 491 |
+
"""
|
| 492 |
+
return debug_service_pb2.EventReply()
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/__init__.py
ADDED
|
File without changes
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (186 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__init__.py
ADDED
|
File without changes
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (193 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__pycache__/bias_op_base.cpython-310.pyc
ADDED
|
Binary file (11.1 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__pycache__/cudnn_deterministic_base.cpython-310.pyc
ADDED
|
Binary file (7.62 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__pycache__/depthwise_conv_op_base.cpython-310.pyc
ADDED
|
Binary file (27.7 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/__pycache__/xent_op_test_base.cpython-310.pyc
ADDED
|
Binary file (9.26 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/bias_op_base.py
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Functional tests for BiasAdd."""
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
from tensorflow.python.eager import backprop
|
| 20 |
+
from tensorflow.python.eager import context
|
| 21 |
+
from tensorflow.python.framework import constant_op
|
| 22 |
+
from tensorflow.python.framework import dtypes
|
| 23 |
+
from tensorflow.python.framework import errors_impl
|
| 24 |
+
from tensorflow.python.framework import test_util
|
| 25 |
+
from tensorflow.python.ops import array_ops
|
| 26 |
+
from tensorflow.python.ops import gradient_checker
|
| 27 |
+
from tensorflow.python.ops import gradient_checker_v2
|
| 28 |
+
from tensorflow.python.ops import gradients_impl
|
| 29 |
+
from tensorflow.python.ops import math_ops
|
| 30 |
+
from tensorflow.python.ops import nn_ops
|
| 31 |
+
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
|
| 32 |
+
from tensorflow.python.platform import test
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@test_util.run_all_in_graph_and_eager_modes
|
| 36 |
+
class BiasAddTestBase(test.TestCase):
|
| 37 |
+
|
| 38 |
+
def _npBias(self, inputs, bias):
|
| 39 |
+
assert len(bias.shape) == 1
|
| 40 |
+
assert inputs.shape[-1] == bias.shape[0]
|
| 41 |
+
return inputs + bias.reshape(([1] *
|
| 42 |
+
(len(inputs.shape) - 1)) + [bias.shape[0]])
|
| 43 |
+
|
| 44 |
+
def testNpBias(self):
|
| 45 |
+
self.assertAllClose(
|
| 46 |
+
np.array([[11, 22, 33], [41, 52, 63]]),
|
| 47 |
+
self._npBias(
|
| 48 |
+
np.array([[10, 20, 30], [40, 50, 60]]), np.array([1, 2, 3])))
|
| 49 |
+
|
| 50 |
+
def _testBias(self, np_inputs, np_bias, use_gpu=False):
|
| 51 |
+
np_val = self._npBias(np_inputs, np_bias)
|
| 52 |
+
with self.cached_session(use_gpu=use_gpu):
|
| 53 |
+
tf_val = self.evaluate(nn_ops.bias_add(np_inputs, np_bias))
|
| 54 |
+
self.assertAllCloseAccordingToType(np_val, tf_val)
|
| 55 |
+
|
| 56 |
+
def _AtLeast3d(self, np_value):
|
| 57 |
+
# fill the input value to at least 3-dimension
|
| 58 |
+
if np_value.ndim < 3:
|
| 59 |
+
return np.reshape(np_value, (1,) * (3 - np_value.ndim) + np_value.shape)
|
| 60 |
+
return np_value
|
| 61 |
+
|
| 62 |
+
def _NHWCToNCHW(self, np_value):
|
| 63 |
+
# fill the input value to at least 3-dimension
|
| 64 |
+
np_value = self._AtLeast3d(np_value)
|
| 65 |
+
# move the last dimension to second
|
| 66 |
+
np_dim = list(range(np_value.ndim))
|
| 67 |
+
np_dim_new = list(np_dim[0:1]) + list(np_dim[-1:]) + list(np_dim[1:-1])
|
| 68 |
+
return np.transpose(np_value, np_dim_new)
|
| 69 |
+
|
| 70 |
+
def _NCHWToNHWC(self, np_value):
|
| 71 |
+
assert len(np_value.shape) >= 3
|
| 72 |
+
np_dim = list(range(np_value.ndim))
|
| 73 |
+
# move the second dimension to the last
|
| 74 |
+
np_dim_new = list(np_dim[0:1]) + list(np_dim[2:]) + list(np_dim[1:2])
|
| 75 |
+
return np.transpose(np_value, np_dim_new)
|
| 76 |
+
|
| 77 |
+
def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
|
| 78 |
+
np_val = self._npBias(np_inputs, np_bias)
|
| 79 |
+
np_inputs = self._NHWCToNCHW(np_inputs)
|
| 80 |
+
with self.cached_session(use_gpu=use_gpu):
|
| 81 |
+
tf_val = self.evaluate(
|
| 82 |
+
nn_ops.bias_add(np_inputs, np_bias, data_format="NCHW"))
|
| 83 |
+
tf_val = self._NCHWToNHWC(tf_val)
|
| 84 |
+
self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)
|
| 85 |
+
|
| 86 |
+
def _testAll(self, np_inputs, np_bias):
|
| 87 |
+
self._testBias(np_inputs, np_bias, use_gpu=False)
|
| 88 |
+
self._testBiasNCHW(np_inputs, np_bias, use_gpu=False)
|
| 89 |
+
if np_inputs.dtype in [np.float16, np.float32, np.float64, np.int32]:
|
| 90 |
+
self._testBias(np_inputs, np_bias, use_gpu=True)
|
| 91 |
+
self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)
|
| 92 |
+
|
| 93 |
+
def _expectedException(self):
|
| 94 |
+
if context.executing_eagerly():
|
| 95 |
+
return errors_impl.InvalidArgumentError
|
| 96 |
+
else:
|
| 97 |
+
return ValueError
|
| 98 |
+
|
| 99 |
+
def testInputDims(self):
|
| 100 |
+
with self.assertRaises(self._expectedException()):
|
| 101 |
+
nn_ops.bias_add([1, 2], [1])
|
| 102 |
+
|
| 103 |
+
def testBiasVec(self):
|
| 104 |
+
with self.assertRaises(self._expectedException()):
|
| 105 |
+
nn_ops.bias_add(
|
| 106 |
+
array_ops.reshape([1, 2], shape=[1, 2]),
|
| 107 |
+
array_ops.reshape([1, 2], shape=[1, 2]))
|
| 108 |
+
|
| 109 |
+
def testBiasInputsMatch(self):
|
| 110 |
+
with self.assertRaises(self._expectedException()):
|
| 111 |
+
nn_ops.bias_add(
|
| 112 |
+
array_ops.reshape([1, 2], shape=[1, 2]),
|
| 113 |
+
array_ops.reshape([1], shape=[1]))
|
| 114 |
+
|
| 115 |
+
def testIntTypes(self):
|
| 116 |
+
for t in [np.int8, np.int16, np.int32, np.int64]:
|
| 117 |
+
self._testAll(
|
| 118 |
+
np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
|
| 119 |
+
np.array([1, 2, 3]).astype(t))
|
| 120 |
+
|
| 121 |
+
def testFloatTypes(self):
|
| 122 |
+
for t in [
|
| 123 |
+
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
|
| 124 |
+
]:
|
| 125 |
+
self._testAll(
|
| 126 |
+
np.random.rand(4, 3, 3).astype(t),
|
| 127 |
+
np.random.rand(3).astype(t))
|
| 128 |
+
|
| 129 |
+
def test4DFloatTypes(self):
|
| 130 |
+
for t in [
|
| 131 |
+
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
|
| 132 |
+
]:
|
| 133 |
+
self._testAll(
|
| 134 |
+
np.random.rand(4, 3, 2, 3).astype(t),
|
| 135 |
+
np.random.rand(3).astype(t))
|
| 136 |
+
self._testAll(
|
| 137 |
+
np.random.rand(2048, 4, 4, 4).astype(t),
|
| 138 |
+
np.random.rand(4).astype(t))
|
| 139 |
+
self._testAll(
|
| 140 |
+
np.random.rand(4, 4, 4, 2048).astype(t),
|
| 141 |
+
np.random.rand(2048).astype(t))
|
| 142 |
+
|
| 143 |
+
def test5DFloatTypes(self):
|
| 144 |
+
for t in [
|
| 145 |
+
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
|
| 146 |
+
]:
|
| 147 |
+
self._testAll(
|
| 148 |
+
np.random.rand(4, 3, 2, 3, 4).astype(t),
|
| 149 |
+
np.random.rand(4).astype(t))
|
| 150 |
+
|
| 151 |
+
def _random_tensor(self, shape, dtype):
|
| 152 |
+
return constant_op.constant(2 * np.random.rand(*shape) - 1, dtype=dtype)
|
| 153 |
+
|
| 154 |
+
  def _computeGradient(self, np_input, bias, dtype, data_format):
    """Computes analytical and numerical Jacobians for bias_add and its grad.

    Covers three gradients: d(output)/d(input), d(output)/d(bias), and the
    gradient of BiasAddGrad itself (via an injected upstream gradient).

    Args:
      np_input: Numpy array used as the bias_add input.
      bias: Numpy array used as the bias.
      dtype: TensorFlow dtype for both tensors.
      data_format: "NHWC" or "NCHW".

    Returns:
      A pair of 3-tuples: ((input_jacob_a, bias_jacob_a, grad_jacob_a),
      (input_jacob_n, bias_jacob_n, grad_jacob_n)) holding the analytical
      and numerical Jacobians, respectively.
    """
    # bias_add is element-wise over the input, so output shape == input shape.
    input_shape = output_shape = np_input.shape
    bias_shape = bias.shape
    input_tensor = constant_op.constant(
        np_input, shape=input_shape, dtype=dtype)
    bias_tensor = constant_op.constant(bias, shape=bias_shape, dtype=dtype)

    if context.executing_eagerly():
      # Eager path uses gradient_checker_v2, which takes a callable.

      def bias_add(input_tensor, bias_tensor):
        return nn_ops.bias_add(
            input_tensor, bias_tensor, data_format=data_format)

      # The following is a work-around for TF issue 33660. Instead of
      # calculating the analytical and numerical gradients for both
      # inputs in a single call to compute_gradient, compute_gradient
      # is called for each input separately.
      def bias_add_1(input_tensor):
        return bias_add(input_tensor, bias_tensor)

      def bias_add_2(bias_tensor):
        return bias_add(input_tensor, bias_tensor)

      input_jacob_a, input_jacob_n = gradient_checker_v2.compute_gradient(
          bias_add_1, [input_tensor])
      bias_jacob_a, bias_jacob_n = gradient_checker_v2.compute_gradient(
          bias_add_2, [bias_tensor])

      # Test gradient of BiasAddGrad: multiply the forward output by an
      # injected upstream gradient and differentiate w.r.t. the bias.
      def bias_add_grad_function(upstream_gradients):
        with backprop.GradientTape() as tape:
          tape.watch(bias_tensor)
          bias_add_output = bias_add(input_tensor, bias_tensor)
          gradient_injector_output = bias_add_output * upstream_gradients
          return tape.gradient(gradient_injector_output, bias_tensor)

      upstream_tensor = self._random_tensor(output_shape, dtype)
      grad_jacob_a, grad_jacob_n = gradient_checker_v2.compute_gradient(
          bias_add_grad_function, [upstream_tensor])
    else:
      # Graph-mode path uses the v1 gradient_checker on a built graph.
      output_tensor = nn_ops.bias_add(
          input_tensor, bias_tensor, data_format=data_format)
      jacobians = gradient_checker.compute_gradient([input_tensor, bias_tensor],
                                                    [input_shape, bias_shape],
                                                    output_tensor, output_shape)
      (input_jacob_a, input_jacob_n), (bias_jacob_a, bias_jacob_n) = jacobians
      # Test gradient of BiasAddGrad
      if dtype == dtypes.bfloat16:
        # L2Loss is not supported for bfloat16 on CPU.
        output_tensor = math_ops.cast(output_tensor, dtype=dtypes.float32)
      bias_add_grad = gradients_impl.gradients(
          nn_ops.l2_loss(output_tensor), bias_tensor)[0]
      grad_jacob_a, grad_jacob_n = gradient_checker.compute_gradient(
          output_tensor, output_shape, bias_add_grad, bias_shape)

    return ((input_jacob_a, bias_jacob_a, grad_jacob_a),
            (input_jacob_n, bias_jacob_n, grad_jacob_n))
|
| 211 |
+
|
| 212 |
+
  def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
    """Checks analytical vs. numerical bias_add Jacobians within a threshold.

    The tolerance depends on dtype and input size; for low-precision dtypes
    the numerical Jacobians are recomputed in float32 (see comment below).

    Args:
      np_input: Numpy input array (NHWC layout; transposed here for NCHW).
      bias: Numpy bias array.
      dtype: TensorFlow dtype under test.
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to run the session on GPU.
    """
    with self.cached_session(use_gpu=use_gpu):
      if data_format == "NCHW":
        np_input = self._NHWCToNCHW(np_input)
      jacob_a, jacob_n = self._computeGradient(np_input, bias, dtype,
                                               data_format)
      input_jacob_a, bias_jacob_a, grad_jacob_a = jacob_a
      input_jacob_n, bias_jacob_n, grad_jacob_n = jacob_n

      if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
        # Compare fp16/bf16 analytical gradients to fp32 numerical gradients,
        # since fp16/bf16 numerical gradients are too imprecise unless great
        # care is taken with choosing the inputs and the delta. This is
        # a weaker, but pragmatic, check (in particular, it does not test
        # the op itself, only its gradient).
        _, jacob_n = self._computeGradient(np_input, bias, np.float32,
                                           data_format)
        input_jacob_n, bias_jacob_n, grad_jacob_n = jacob_n

      if dtype == dtypes.float64:
        threshold = 1e-10
      elif np_input.size >= 512:
        # The 5e-3 threshold seems to have been marginal in these cases, and
        # small changes in the test were pushing it over the limit.
        threshold = 5e-2
      else:
        threshold = 5e-3
      self.assertAllClose(input_jacob_a, input_jacob_n, threshold, threshold)
      self.assertAllClose(bias_jacob_a, bias_jacob_n, threshold, threshold)
      self.assertAllClose(grad_jacob_a, grad_jacob_n, threshold, threshold)
|
| 242 |
+
|
| 243 |
+
def testGradientTensor2D(self):
|
| 244 |
+
for (data_format, use_gpu) in ("NHWC", False), ("NHWC", True):
|
| 245 |
+
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
|
| 246 |
+
dtypes.bfloat16):
|
| 247 |
+
np_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
|
| 248 |
+
dtype=dtype.as_numpy_dtype).reshape(3, 2)
|
| 249 |
+
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
|
| 250 |
+
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
|
| 251 |
+
|
| 252 |
+
def testGradientTensor3D(self):
|
| 253 |
+
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
|
| 254 |
+
("NCHW", False), ("NCHW", True)]:
|
| 255 |
+
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
|
| 256 |
+
dtypes.bfloat16):
|
| 257 |
+
# pylint: disable=too-many-function-args
|
| 258 |
+
np_input = np.array(
|
| 259 |
+
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
|
| 260 |
+
dtype=dtype.as_numpy_dtype).reshape(1, 3, 2)
|
| 261 |
+
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
|
| 262 |
+
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
|
| 263 |
+
|
| 264 |
+
  def testGradientTensor4D(self):
    """Checks bias_add gradients for several 4-D input shapes (NHWC/CPU)."""
    for (data_format, use_gpu) in [("NHWC", False)]:
      for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
                    dtypes.bfloat16):
        # NOTE(review): the trailing .astype(np.float32) overrides the
        # dtype-specific arange, so np_input is always float32 regardless of
        # `dtype` — presumably intentional (values only), but worth confirming.
        np_input = np.arange(
            1.0, 49.0,
            dtype=dtype.as_numpy_dtype).reshape([2, 3, 4, 2]).astype(np.float32)
        bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
        self._testGradient(np_input, bias, dtype, data_format, use_gpu)
        # Larger batch dimension (size >= 512 triggers the looser threshold
        # in _testGradient).
        np_input = np.arange(
            1.0, 513.0,
            dtype=dtype.as_numpy_dtype).reshape([64, 2, 2,
                                                 2]).astype(np.float32)
        self._testGradient(np_input, bias, dtype, data_format, use_gpu)
        # Larger channel dimension, with a matching 64-element bias.
        np_input = np.arange(
            1.0, 513.0,
            dtype=dtype.as_numpy_dtype).reshape([2, 2, 2,
                                                 64]).astype(np.float32)
        self._testGradient(np_input,
                           np.random.rand(64).astype(dtype.as_numpy_dtype),
                           dtype, data_format, use_gpu)
|
| 285 |
+
|
| 286 |
+
def testGradientTensor5D(self):
|
| 287 |
+
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
|
| 288 |
+
("NCHW", False), ("NCHW", True)]:
|
| 289 |
+
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
|
| 290 |
+
dtypes.bfloat16):
|
| 291 |
+
np_input = np.arange(
|
| 292 |
+
1.0, 49.0,
|
| 293 |
+
dtype=dtype.as_numpy_dtype).reshape([1, 2, 3, 4,
|
| 294 |
+
2]).astype(np.float32)
|
| 295 |
+
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
|
| 296 |
+
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
|
| 297 |
+
|
| 298 |
+
def test1x1Image(self):
|
| 299 |
+
for (data_format, use_gpu) in [("NHWC", False), ("NCHW", False)]:
|
| 300 |
+
np_input = np.arange(1.0, 129.0).reshape([4, 1, 1, 32]).astype(np.float32)
|
| 301 |
+
self._testGradient(np_input,
|
| 302 |
+
np.random.rand(32).astype(np.float32), dtypes.float32,
|
| 303 |
+
data_format, use_gpu)
|
| 304 |
+
|
| 305 |
+
def testEmpty(self):
|
| 306 |
+
np.random.seed(7)
|
| 307 |
+
for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
|
| 308 |
+
self._testAll(np.random.randn(*shape), np.random.randn(shape[-1]))
|
| 309 |
+
|
| 310 |
+
def testEmptyGradient(self):
|
| 311 |
+
for (data_format, use_gpu) in ("NHWC", False), ("NHWC", True):
|
| 312 |
+
for shape in (0, 0), (2, 0), (0, 2):
|
| 313 |
+
self._testGradient(
|
| 314 |
+
np.random.randn(*shape), np.random.randn(shape[-1]), dtypes.float64,
|
| 315 |
+
data_format, use_gpu)
|
| 316 |
+
|
| 317 |
+
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
|
| 318 |
+
("NCHW", False), ("NCHW", True)]:
|
| 319 |
+
for shape in (4, 3, 0), (4, 0, 3), (0, 4, 3):
|
| 320 |
+
self._testGradient(
|
| 321 |
+
np.random.randn(*shape), np.random.randn(shape[-1]), dtypes.float64,
|
| 322 |
+
data_format, use_gpu)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/cudnn_deterministic_base.py
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for deterministic cuDNN functionality."""
|
| 16 |
+
|
| 17 |
+
import collections
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
|
| 21 |
+
from tensorflow.python.eager import backprop
|
| 22 |
+
from tensorflow.python.framework import constant_op
|
| 23 |
+
from tensorflow.python.framework import dtypes
|
| 24 |
+
from tensorflow.python.framework import test_util
|
| 25 |
+
from tensorflow.python.ops import nn_ops
|
| 26 |
+
from tensorflow.python.platform import test
|
| 27 |
+
|
| 28 |
+
# Notes:
|
| 29 |
+
#
|
| 30 |
+
# TensorFlow makes cuDNN run deterministically when op determinism is enabled
|
| 31 |
+
# via tf.config.experimental.enable_op_determinism(). Additionally, setting the
|
| 32 |
+
# environmental variable TF_CUDNN_DETERMINISTIC to 'true' or '1' makes cuDNN run
|
| 33 |
+
# deterministically, although this environmental variable is deprecated and will
|
| 34 |
+
# be removed in a future TensorFlow version. Unlike the enable_op_determinism()
|
| 35 |
+
# function, the environmental variable only makes ops using cuDNN deterministic,
|
| 36 |
+
# not all TensorFlow ops.
|
| 37 |
+
#
|
| 38 |
+
# Where both deterministic and non-deterministic cuDNN algorithms are available,
|
| 39 |
+
# selecting deterministic operation will lead to only the deterministic
|
| 40 |
+
# algorithms being chosen. Additionally, selecting deterministic operation will
|
| 41 |
+
# result in a deterministic, or reproducible, selection of algorithms (for any
|
| 42 |
+
# given layer configuration) for each of the forward and the two backward paths.
|
| 43 |
+
#
|
| 44 |
+
# These tests intend to confirm that deterministic algorithms are chosen (for
|
| 45 |
+
# the back-prop paths) when deterministic operation is selected. The tested
|
| 46 |
+
# configurations were first confirmed to produce non-deterministic results when
|
| 47 |
+
# the above-mentioned environment variables are not set.
|
| 48 |
+
#
|
| 49 |
+
# Even though selecting deterministic operation should ensure that the same
|
| 50 |
+
# algorithms, for a given layer configuration, are always used (i.e. that
|
| 51 |
+
# algorithm selection is deterministic / reproducible), this is not tested.
|
| 52 |
+
|
| 53 |
+
# TODO(duncanriach): Add test for deterministic cuDNN max-pooling
|
| 54 |
+
|
| 55 |
+
# Shape tuples used throughout the tests below. The NHWC/NCDHW names encode
# the tensor layout; the transpose filter shape swaps in/out channel order to
# match conv2d_transpose's filter convention.
LayerShapeNHWC = collections.namedtuple('LayerShapeNHWC',
                                        'batch, height, width, channels')
FilterShape2D = collections.namedtuple(
    'FilterShape2D', 'height, width, in_channels, out_channels')
FilterShape2DTranspose = collections.namedtuple(
    'FilterShape2DTranspose', 'height, width, out_channels, in_channels')

LayerShapeNCDHW = collections.namedtuple(
    'LayerShapeNCDHW', 'batch, channels, depth, height, width')
FilterShape3D = collections.namedtuple(
    'FilterShape3D', 'depth, height, width, in_channels, out_channels')
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class ConvolutionTest(test.TestCase):
  """Tests for deterministic cuDNN functionality."""

  def _random_data_op(self, shape):
    """Returns a float32 constant of `shape` with values uniform in [-1, 1)."""
    # np.random.random_sample can properly interpret either tf.TensorShape or
    # namedtuple as a list.
    return constant_op.constant(
        2 * np.random.random_sample(shape) - 1, dtype=dtypes.float32)

  def _random_out_op(self, in_shape, filter_shape, strides, padding, dilations):
    """Returns a random tensor shaped like the forward conv2d output."""
    # Choosing not to use array_op.zeros() to prevent possible removal by
    # optimization
    in_op = self._random_data_op(in_shape)
    filter_op = self._random_data_op(filter_shape)
    # Use the forward op's shape-inference
    conv_op = nn_ops.conv2d(
        in_op, filter_op, strides=strides, padding=padding, dilations=dilations)
    out_shape = conv_op.get_shape()
    out_op = self._random_data_op(out_shape)
    return out_op

  def _assert_reproducible(self, operation):
    """Runs `operation` twice on GPU and asserts identical results."""
    with test_util.force_gpu():
      result_1 = operation()
      result_2 = operation()
      self.assertAllEqual(result_1, result_2)

  # The default forward algorithm choice, when using cuDNN 7, does not support
  # the following layer configuration. This test case intends to confirm that
  # an alternative algorithm is selected. Note that, in cuDNN 7, all forward
  # algorithms are deterministic.
  @test_util.run_cuda_only
  def testConvForwardDefaultAlgorithmChoice(self):
    in_shape = LayerShapeNCDHW(batch=2, channels=3, depth=5, height=7, width=6)
    filter_shape = FilterShape3D(
        depth=3, height=3, width=3, in_channels=3, out_channels=2)
    in_op = self._random_data_op(in_shape)
    filter_op = self._random_data_op(filter_shape)
    self._assert_reproducible(lambda: nn_ops.conv3d(
        in_op,
        filter_op,
        strides=[1, 1, 1, 1, 1],
        padding='VALID',
        data_format='NCDHW',
        dilations=[1, 1, 2, 2, 2]))

  # This test is primarily testing XLA since cuDNN forward convolutions are
  # always deterministic, even when determinism is not enabled. The convolution
  # configuration tested is nondeterministic with XLA when determinism is not
  # enabled.
  @test_util.run_cuda_only
  def testConvForwardXLA(self):
    in_shape = LayerShapeNCDHW(
        batch=2, channels=8, depth=5, height=12, width=15)
    filter_shape = FilterShape3D(
        depth=3, height=3, width=3, in_channels=8, out_channels=1)
    in_op = self._random_data_op(in_shape)
    filter_op = self._random_data_op(filter_shape)
    self._assert_reproducible(lambda: nn_ops.conv3d(
        in_op,
        filter_op,
        strides=[1, 1, 1, 1, 1],
        padding='VALID',
        data_format='NCDHW',
        dilations=[1, 1, 2, 2, 2]))

  # `rate` parameterizes dilation so the ...WithDilations variant below can
  # reuse this body.
  @test_util.run_cuda_only
  def testConvBackwardFilterGradient(self, rate=1):
    in_shape = LayerShapeNHWC(batch=8, height=64, width=64, channels=8)
    filter_shape = FilterShape2D(
        height=3, width=3, in_channels=8, out_channels=8)
    in_op = self._random_data_op(in_shape)
    strides = [1, 1, 1, 1]
    padding = 'SAME'
    dilations = [1, rate, rate, 1]
    out_op = self._random_out_op(in_shape, filter_shape, strides, padding,
                                 dilations)
    self._assert_reproducible(lambda: nn_ops.conv2d_backprop_filter(
        in_op,
        filter_shape,
        out_op,
        strides=strides,
        padding=padding,
        dilations=dilations))

  # A configuration for this test could not be found that exercises
  # nondeterminism when using XLA with determinism not enabled.
  @test_util.run_cuda_only
  def testConvBackwardFilterGradientWithDilations(self):
    self.testConvBackwardFilterGradient(rate=2)

  @test_util.run_cuda_only
  def testConvBackwardInputGradient(self, rate=1):
    in_shape = LayerShapeNHWC(batch=1, height=16, width=16, channels=1)
    filter_shape = FilterShape2D(
        height=7, width=7, in_channels=1, out_channels=3)
    filter_op = self._random_data_op(filter_shape)
    strides = [1, 1, 1, 1]
    padding = 'SAME'
    dilations = [1, rate, rate, 1]
    out_op = self._random_out_op(in_shape, filter_shape, strides, padding,
                                 dilations)
    self._assert_reproducible(lambda: nn_ops.conv2d_backprop_input(
        in_shape,
        filter_op,
        out_op,
        strides=strides,
        padding=padding,
        dilations=dilations))

  # A configuration for this test could not be found that exercises
  # nondeterminism when using XLA with determinism not enabled.
  @test_util.run_cuda_only
  def testConvBackwardInputGradientWithDilations(self):
    self.testConvBackwardInputGradient(rate=2)

  @test_util.run_cuda_only
  def testConvTransposeForward(self, rate=1):
    in_channels = 3
    out_channels = 1
    in_shape = LayerShapeNHWC(
        batch=1, height=16, width=16, channels=in_channels)
    filter_shape = FilterShape2DTranspose(
        height=7, width=7, out_channels=out_channels, in_channels=in_channels)
    in_op = self._random_data_op(in_shape)
    filter_op = self._random_data_op(filter_shape)
    # SAME padding with stride 1 keeps the spatial dims; only channels change.
    out_shape = LayerShapeNHWC(
        batch=in_shape.batch,
        height=in_shape.height,
        width=in_shape.width,
        channels=out_channels)
    self._assert_reproducible(lambda: nn_ops.conv2d_transpose_v2(
        in_op,
        filter_op,
        out_shape,
        strides=1,
        padding='SAME',
        data_format='NHWC',
        dilations=[1, rate, rate, 1]))

  # A configuration for this test could not be found that exercises
  # nondeterminism when using XLA with determinism not enabled.
  @test_util.run_cuda_only
  def testConvTransposeForwardWithDilations(self):
    self.testConvTransposeForward(rate=2)

  @test_util.run_cuda_only
  def testConvTransposeBackwardFilterGradient(self, rate=1):
    in_channels = 8
    out_channels = 8
    in_shape = LayerShapeNHWC(
        batch=8, height=64, width=64, channels=in_channels)
    filter_shape = FilterShape2DTranspose(
        height=3, width=3, out_channels=out_channels, in_channels=in_channels)
    in_op = self._random_data_op(in_shape)
    filter_op = self._random_data_op(filter_shape)
    out_shape = LayerShapeNHWC(
        batch=in_shape.batch,
        height=in_shape.height,
        width=in_shape.width,
        channels=out_channels)
    upstream_gradients = self._random_data_op(out_shape)

    def gradient():
      # Inject a fixed upstream gradient so the filter gradient itself is the
      # quantity whose reproducibility is being checked.
      with backprop.GradientTape() as tape:
        tape.watch(filter_op)
        op_output = nn_ops.conv2d_transpose_v2(
            in_op,
            filter_op,
            out_shape,
            strides=1,
            padding='SAME',
            data_format='NHWC',
            dilations=[1, rate, rate, 1])
        gradient_injector_output = op_output * upstream_gradients
        return tape.gradient(gradient_injector_output, [filter_op])[0]

    self._assert_reproducible(gradient)

  # A configuration for this test could not be found that exercises
  # nondeterminism when using XLA with determinism not enabled.
  @test_util.run_cuda_only
  def testConvTransposeBackwardFilterGradientWithDilations(self):
    self.testConvTransposeBackwardFilterGradient(rate=2)

  # A configuration for this test could not be found that exercises
  # nondeterminism when determinism is not enabled (for either XLA or non-XLA).
  @test_util.run_cuda_only
  def testConvTransposeBackwardInputGradient(self, rate=1):
    in_channels = 1
    out_channels = 3
    in_shape = LayerShapeNHWC(
        batch=1, height=16, width=16, channels=in_channels)
    filter_shape = FilterShape2DTranspose(
        height=7, width=7, out_channels=out_channels, in_channels=in_channels)
    in_op = self._random_data_op(in_shape)
    filter_op = self._random_data_op(filter_shape)
    out_shape = LayerShapeNHWC(
        batch=in_shape.batch,
        height=in_shape.height,
        width=in_shape.width,
        channels=out_channels)
    upstream_gradients = self._random_data_op(out_shape)

    def gradient():
      # Same injection pattern as above, but differentiating w.r.t. the input.
      with backprop.GradientTape() as tape:
        tape.watch(in_op)
        op_output = nn_ops.conv2d_transpose_v2(
            in_op,
            filter_op,
            out_shape,
            strides=1,
            padding='SAME',
            data_format='NHWC',
            dilations=[1, rate, rate, 1])
        gradient_injector_output = op_output * upstream_gradients
        return tape.gradient(gradient_injector_output, [in_op])[0]

    self._assert_reproducible(gradient)

  # A configuration for this test could not be found that exercises
  # nondeterminism when determinism is not enabled (for either XLA or non-XLA).
  @test_util.run_cuda_only
  def testConvTransposeBackwardInputGradientWithDilations(self):
    self.testConvTransposeBackwardInputGradient(rate=2)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/depthwise_conv_op_base.py
ADDED
|
@@ -0,0 +1,1172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Functional tests for depthwise convolutional operations."""
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
from tensorflow.python.framework import constant_op
|
| 20 |
+
from tensorflow.python.framework import dtypes
|
| 21 |
+
from tensorflow.python.framework import errors
|
| 22 |
+
from tensorflow.python.framework import ops
|
| 23 |
+
from tensorflow.python.framework import test_util
|
| 24 |
+
from tensorflow.python.ops import array_ops
|
| 25 |
+
from tensorflow.python.ops import gradient_checker
|
| 26 |
+
from tensorflow.python.ops import nn_impl
|
| 27 |
+
from tensorflow.python.ops import nn_ops
|
| 28 |
+
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
|
| 29 |
+
from tensorflow.python.platform import test
|
| 30 |
+
from tensorflow.python.platform import tf_logging
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _DepthwiseConv2dNumpyBasic(x1, x2, strides):
|
| 34 |
+
"""Compute depthwise_conv2d using Numpy.
|
| 35 |
+
|
| 36 |
+
This allows use to test TensorFlow's depthwise_conv2d by comparing to the
|
| 37 |
+
Numpy version.
|
| 38 |
+
|
| 39 |
+
Args:
|
| 40 |
+
x1: The input Numpy array, in NHWC format.
|
| 41 |
+
x2: The filter Numpy array.
|
| 42 |
+
strides: A Python list of 4 elements representing the strides.
|
| 43 |
+
|
| 44 |
+
Returns:
|
| 45 |
+
The depthwise conv2d output as a Numpy array.
|
| 46 |
+
"""
|
| 47 |
+
n, h, w, c = x1.shape
|
| 48 |
+
fh, fw, c2, o = x2.shape
|
| 49 |
+
assert c == c2
|
| 50 |
+
_, sh, sw, _ = strides
|
| 51 |
+
out_rows = (h - fh + sh) // sh
|
| 52 |
+
out_cols = (w - fw + sw) // sw
|
| 53 |
+
out = np.zeros([n, out_rows, out_cols, c * o])
|
| 54 |
+
for i in range(out_rows):
|
| 55 |
+
for j in range(out_cols):
|
| 56 |
+
for k in range(c):
|
| 57 |
+
start_height = i * sh
|
| 58 |
+
end_height = start_height + fh
|
| 59 |
+
start_width = j * sw
|
| 60 |
+
end_width = start_width + fw
|
| 61 |
+
# multiplied_slice.shape: (b, fh, fw, o)
|
| 62 |
+
multiplied_slice = (
|
| 63 |
+
x1[:, start_height:end_height, start_width:end_width, k, np.newaxis]
|
| 64 |
+
* x2[:, :, k, :])
|
| 65 |
+
# Set a slice of b * o elements of 'out'.
|
| 66 |
+
out[:, i, j, k * o:(k + 1) * o] = np.sum(multiplied_slice, axis=(1, 2))
|
| 67 |
+
return out
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def _DepthwiseConv2dNumpy(x1, x2, strides, padding, data_format, dilations):
  """Compute depthwise_conv2d using Numpy.

  This lets us test TensorFlow's depthwise_conv2d by comparing against the
  Numpy version.

  Unlike `_DepthwiseConv2dNumpyBasic`, this supports more advanced features
  like padding, dilations and the NCHW data format.

  Args:
    x1: The input Numpy array.
    x2: The filter Numpy array.
    strides: A Python list of 4 elements representing the strides.
    padding: The padding. "SAME", "VALID", or a list of explicit paddings.
    data_format: "NHWC" or "NCHW".
    dilations: A list of 2 elements, representing the spatial dilations.

  Returns:
    The depthwise conv2d as a Numpy array, in `data_format` layout.
  """
  if data_format == "NCHW":
    # Transpose arguments to NHWC format. NCHW -> NHWC is the permutation
    # (0, 2, 3, 1). BUG FIX: the previous code used (0, 3, 1, 2), which is
    # the inverse permutation (NHWC -> NCHW) and produced an NWCH array.
    x1 = np.transpose(x1, (0, 2, 3, 1))
    strides = [strides[0], strides[2], strides[3], strides[1]]
    if isinstance(padding, list):
      # Explicit paddings arrive in the input's layout; reorder NCHW -> NHWC.
      padding = [padding[0], padding[2], padding[3], padding[1]]
    # `dilations` holds only the two spatial factors (see docstring), so it
    # is layout independent and needs no reordering. The previous code
    # indexed dilations[3] here, which raised IndexError on a 2-element list.

  if dilations:
    # Dilate the filter so _DepthwiseConv2dNumpyBasic doesn't have to deal
    # with dilations: spread the taps out and zero-fill between them.
    fh, fw, c, o = x2.shape
    new_fh = (fh - 1) * dilations[0] + 1
    new_fw = (fw - 1) * dilations[1] + 1
    new_x2 = np.zeros((new_fh, new_fw, c, o))
    for i in range(fh):
      for j in range(fw):
        new_x2[i * dilations[0], j * dilations[1], ::] = x2[i, j, :, :]
    x2 = new_x2

  # Pad input so _DepthwiseConv2dNumpyBasic doesn't have to deal with padding.
  if padding == "SAME":

    def PaddingsForDim(input_dim, filter_dim, stride):
      """Computes SAME paddings for a single spatial dimension."""
      if input_dim % stride == 0:
        total_padding = max(filter_dim - stride, 0)
      else:
        total_padding = max(filter_dim - (input_dim % stride), 0)
      pad_before = total_padding // 2
      pad_after = total_padding - pad_before
      return pad_before, pad_after

    padding = [(0, 0),
               PaddingsForDim(x1.shape[1], x2.shape[0], strides[1]),
               PaddingsForDim(x1.shape[2], x2.shape[1], strides[2]), (0, 0)]
  elif padding == "VALID":
    padding = [(0, 0)] * 4
  x1 = np.pad(x1, padding, "constant")

  y = _DepthwiseConv2dNumpyBasic(x1, x2, strides)

  if data_format == "NCHW":
    # Transpose back to NCHW format: NHWC -> NCHW is (0, 3, 1, 2).
    y = np.transpose(y, (0, 3, 1, 2))

  return y
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def ConfigsToTest():
  """Iterator for different convolution shapes, strides and paddings.

  Returns:
    List of tuples (input_size, filter_size, out_size, stride, padding,
    dilations), the depthwise convolution parameters.
  """

  def _cfg(input_size, filter_size, out_size, stride=1, padding="SAME",
           dilations=None):
    # Package one test case as a plain tuple.
    return (input_size, filter_size, out_size, stride, padding, dilations)

  configs = [
      _cfg([4, 5, 5, 48], [1, 1, 48, 2], [4, 5, 5, 96]),
      _cfg([4, 8, 8, 84], [1, 3, 84, 1], [4, 8, 8, 84]),
      _cfg([4, 17, 17, 48], [3, 1, 48, 4], [4, 17, 17, 192]),
      _cfg([4, 9, 27, 8], [3, 3, 8, 1], [4, 9, 27, 8]),
      _cfg([4, 31, 31, 7], [3, 3, 7, 1], [4, 31, 31, 7]),
      _cfg([4, 35, 35, 2], [5, 5, 2, 1], [4, 35, 35, 2]),
      _cfg([4, 147, 147, 2], [3, 3, 2, 8], [4, 49, 49, 16], 3,
           padding="VALID"),
      _cfg([3, 299, 299, 3], [3, 2, 3, 8], [3, 150, 150, 24], 2),
      _cfg([5, 183, 183, 1], [5, 5, 1, 2], [5, 92, 92, 2], 2),
      _cfg([5, 183, 183, 1], [5, 5, 1, 2], [5, 183, 183, 2],
           dilations=[2, 2]),
      _cfg([5, 41, 35, 2], [4, 7, 2, 2], [5, 32, 23, 4], padding="VALID",
           dilations=[3, 2]),
  ]
  return configs
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def ConfigsToTestExplicit():
  """Iterator for different convolution shapes, strides and explicit paddings.

  Returns:
    List of tuples (input_size, filter_size, out_size, stride, padding,
    dilations), the depthwise convolution parameters.
  """

  def _cfg(input_size, filter_size, out_size, stride=1, padding=None,
           dilations=None):
    # Package one test case as a plain tuple.
    return (input_size, filter_size, out_size, stride, padding, dilations)

  configs = [
      _cfg([4, 5, 5, 48], [1, 1, 48, 2], [4, 8, 12, 96],
           padding=[[1, 2], [3, 4]]),
      _cfg([4, 1, 1, 3], [3, 3, 3, 2], [4, 29, 39, 6],
           padding=[[10, 20], [15, 25]]),
      _cfg([4, 9, 27, 8], [3, 3, 8, 1], [4, 14, 31, 8],
           padding=[[3, 4], [4, 2]]),
      _cfg([4, 31, 31, 7], [3, 3, 7, 1], [4, 29, 29, 7],
           padding=[[0, 0], [0, 0]]),
      _cfg([3, 299, 299, 3], [3, 2, 3, 8], [3, 150, 153, 24], 2,
           padding=[[1, 2], [3, 5]]),
      _cfg([5, 183, 183, 1], [5, 5, 1, 2], [5, 62, 60, 2], 3,
           padding=[[3, 2], [1, 0]]),
      _cfg([5, 29, 31, 1], [5, 4, 1, 2], [5, 26, 23, 2],
           padding=[[3, 2], [1, 0]], dilations=[2, 3]),
      # These cases test the kernels in depthwise_conv_op_gpu.h which are used
      # if the input size is small.
      _cfg([4, 5, 5, 48], [3, 3, 48, 1], [4, 5, 5, 48],
           padding=[[0, 2], [0, 2]]),
      _cfg([1, 8, 7, 2], [8, 7, 2, 1], [1, 8, 7, 2],
           padding=[[0, 7], [3, 3]]),
      _cfg([2, 4, 3, 2], [3, 2, 2, 1], [2, 4, 3, 2],
           padding=[[2, 0], [1, 0]]),
  ]
  return configs
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def CheckGradConfigsToTest():
  """Iterator for different convolution shapes, strides and paddings.

  compute_gradient_error() is very expensive. So the configs should be
  relatively small.

  Returns:
    List of tuples (input_size, filter_size, out_size, stride, padding,
    dilations), the depthwise convolution parameters.
  """

  def _cfg(input_size, filter_size, out_size, stride=1, padding="SAME",
           dilations=None):
    # Package one test case as a plain tuple.
    return (input_size, filter_size, out_size, stride, padding, dilations)

  configs = [
      _cfg([2, 5, 8, 1], [4, 4, 1, 2], [2, 5, 8, 2]),
      _cfg([4, 5, 5, 1], [2, 2, 1, 2], [4, 2, 2, 2], 2, padding="VALID"),
      _cfg([2, 4, 4, 2], [3, 1, 2, 2], [2, 4, 4, 4]),
      _cfg([1, 15, 15, 2], [1, 3, 2, 1], [1, 15, 15, 2]),
      _cfg([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 5, 2], 3, padding="VALID"),
      _cfg([2, 5, 8, 1], [4, 3, 1, 2], [2, 5, 8, 2], dilations=[1, 2]),
      # These cases test the kernels in depthwise_conv_op_gpu.h which are used
      # if the input size is small.
      _cfg([1, 3, 1, 2], [2, 1, 2, 1], [1, 3, 1, 2]),
      _cfg([2, 2, 3, 2], [2, 1, 2, 1], [2, 2, 3, 2]),
      _cfg([2, 2, 3, 1], [2, 2, 1, 1], [2, 2, 3, 1]),
  ]
  return configs
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def CheckGradConfigsToTestExplicit():
  """Iterator for different convolution shapes, strides and explicit paddings.

  compute_gradient_error() is very expensive. So the configs should be
  relatively small.

  Returns:
    List of tuples (input_size, filter_size, out_size, stride, padding,
    dilations), the depthwise convolution parameters.
  """

  def _cfg(input_size, filter_size, out_size, stride=1, padding=None,
           dilations=None):
    # Package one test case as a plain tuple.
    return (input_size, filter_size, out_size, stride, padding, dilations)

  configs = [
      _cfg([2, 5, 8, 1], [4, 4, 1, 2], [2, 3, 10, 2],
           padding=[[0, 1], [2, 3]]),
      _cfg([4, 5, 5, 1], [2, 2, 1, 2], [4, 4, 5, 2], 2,
           padding=[[3, 1], [5, 0]]),
      _cfg([2, 4, 4, 2], [3, 1, 2, 2], [2, 7, 11, 4],
           padding=[[4, 1], [3, 4]]),
      _cfg([1, 15, 15, 2], [1, 3, 2, 1], [1, 18, 23, 2],
           padding=[[3, 0], [2, 8]]),
      _cfg([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 8, 2], 3,
           padding=[[0, 0], [10, 0]]),
      _cfg([2, 5, 8, 1], [3, 4, 1, 2], [2, 5, 10, 2],
           padding=[[3, 1], [2, 3]], dilations=[2, 1]),
      # These cases test the kernels in depthwise_conv_op_gpu.h which are used
      # if the input size is small.
      _cfg([2, 4, 3, 2], [3, 2, 2, 1], [2, 4, 3, 2],
           padding=[[2, 0], [1, 0]]),
  ]
  return configs
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
class DepthwiseConv2DBase(test.TestCase):
|
| 296 |
+
"""Base test class for depthwise Conv2D tests."""
|
| 297 |
+
|
| 298 |
+
# This tests depthwise_conv2d and depthwise_conv2d_native
|
| 299 |
+
  def _VerifyValues(self,
                    tensor_in_sizes,
                    filter_in_sizes,
                    stride,
                    padding,
                    data_type,
                    use_gpu,
                    grouped_conv=False,
                    data_format="NHWC",
                    dilations=None,
                    tolerance=None):
    """Verifies the output values of the convolution function.

    Builds deterministic incrementing input/filter tensors, computes a
    reference result with `_DepthwiseConv2dNumpy` (always in NHWC), then runs
    `nn_ops.depthwise_conv2d_native` (skipped when `dilations` is set) and
    `nn_impl.depthwise_conv2d`, comparing both against the reference.

    Args:
      tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
        input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols,
        input_depth, depth_multiplier].
      stride: Stride.
      padding: Padding type, "SAME"/"VALID" or a list of explicit spatial
        paddings [[top, bottom], [left, right]].
      data_type: The data type to use.
      use_gpu: Whether to use GPU.
      grouped_conv: Whether to use cuDNN 7's grouped convolution.
      data_format: The data_format of the input. "NHWC" or "NCHW".
      dilations: A list of 2 elements, representing the dilations.
      tolerance: The absolute and relative tolerance when verifying the
        output; when None a per-dtype default is used.
    """
    input_size = 1
    filter_size = 1
    for s in tensor_in_sizes:
      input_size *= s
    for s in filter_in_sizes:
      filter_size *= s
    # Initializes the input and filter tensor with numbers incrementing to 1.0.
    x1 = [f * 1.0 / input_size for f in range(1, input_size + 1)]
    x1 = np.array(x1).reshape(tensor_in_sizes)
    x2 = [f * 1.0 / filter_size for f in range(1, filter_size + 1)]
    x2 = np.array(x2).reshape(filter_in_sizes)
    # Compute reference result. Explicit paddings get (0, 0) batch/depth
    # entries prepended/appended to match the 4-D NHWC padding layout.
    strides = [1, stride, stride, 1]
    if isinstance(padding, list):
      padding = [(0, 0)] + padding + [(0, 0)]
    np_result = _DepthwiseConv2dNumpy(x1, x2, strides, padding, "NHWC",
                                      dilations)

    # Fresh graph per call so kernel label maps don't leak across configs.
    ops.reset_default_graph()
    graph = ops.get_default_graph()
    with self.session(graph=graph, use_gpu=use_gpu) as sess:
      # Per-dtype default tolerances, used only when the caller passed None.
      tolerance = tolerance or {
          dtypes.float16: 4e-2,
          dtypes.float32: 1e-5,
          dtypes.float64: 1e-12,
          dtypes.bfloat16: 1e-2,
      }[data_type]

      t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=data_type)
      t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=data_type)

      if data_format == "NCHW":
        # Transpose from NHWC input to NCHW
        # Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
        t1 = array_ops.transpose(t1, [0, 3, 1, 2])
        strides = [1, 1, stride, stride]
        if isinstance(padding, list):
          padding = [padding[0], padding[3], padding[1], padding[2]]

      # depthwise_conv2d_native does not support dilations except on TPUs.
      if dilations is None:
        # The kernel label map forces the cuDNN grouped-convolution kernel
        # when grouped_conv is requested.
        with sess.graph._kernel_label_map(  # pylint: disable=protected-access
            {"DepthwiseConv2dNative": "cudnn_grouped_convolution"}
            if grouped_conv else {}):
          conv_native = nn_ops.depthwise_conv2d_native(
              t1, t2, strides=strides, data_format=data_format,
              padding=padding)

        if data_format == "NCHW":
          # Transpose back from NCHW to NHWC
          conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])

        try:
          # The Numpy array from calling depthwise_conv2d_native
          native_result = self.evaluate(conv_native)
        except errors.InvalidArgumentError as e:
          # Grouped convolution kernel is only registered for cuDNN 7. Silently
          # return when we are running on an earlier version or without GPU.
          if ("No OpKernel was registered to support Op "
              "'DepthwiseConv2dNative'") in e.message:
            tf_logging.warn("Skipping grouped convolution test")
            return
          raise e

      conv_interface = nn_impl.depthwise_conv2d(
          t1,
          t2,
          strides=strides,
          padding=padding,
          data_format=data_format,
          dilations=dilations)
      if data_format == "NCHW":
        # Transpose back from NCHW to NHWC
        conv_interface = array_ops.transpose(conv_interface, [0, 2, 3, 1])

      # The Numpy array from calling depthwise_conv2d
      interface_result = self.evaluate(conv_interface)

      # native_result only exists when dilations is None (see guard above).
      if dilations is None:
        self.assertAllClose(
            native_result, np_result, atol=tolerance, rtol=tolerance)
      self.assertAllClose(
          interface_result, np_result, atol=tolerance, rtol=tolerance)
|
| 408 |
+
|
| 409 |
+
@test_util.run_v1_only("b/120545219")
|
| 410 |
+
@test_util.run_gpu_only
|
| 411 |
+
def testDepthwiseConv2DCudnn(self):
|
| 412 |
+
for index, (input_size, filter_size, _, stride, padding,
|
| 413 |
+
dilations) in enumerate(ConfigsToTest()):
|
| 414 |
+
# The CuDNN depthwise conv is turned on only when input/output is NCHW and
|
| 415 |
+
# float16(half). See cudnn release note 7.6.3.
|
| 416 |
+
tf_logging.info(
|
| 417 |
+
"Testing DepthwiseConv2DCudnn, %dth config: %r * %r, stride: %d, "
|
| 418 |
+
"padding: %s", index, input_size, filter_size, stride, padding)
|
| 419 |
+
data_types = [dtypes.float16, dtypes.bfloat16]
|
| 420 |
+
for data_type in data_types:
|
| 421 |
+
self._VerifyValues(
|
| 422 |
+
input_size,
|
| 423 |
+
filter_size,
|
| 424 |
+
stride,
|
| 425 |
+
padding,
|
| 426 |
+
data_type,
|
| 427 |
+
use_gpu=True,
|
| 428 |
+
data_format="NCHW",
|
| 429 |
+
dilations=dilations)
|
| 430 |
+
|
| 431 |
+
@test_util.run_v1_only("b/120545219")
|
| 432 |
+
def testDepthwiseConv2D(self):
|
| 433 |
+
for index, (input_size, filter_size, _, stride, padding,
|
| 434 |
+
dilations) in enumerate(ConfigsToTest()):
|
| 435 |
+
tf_logging.info(
|
| 436 |
+
"Testing DepthwiseConv2D, %dth config: %r * %r, stride: %d, padding: "
|
| 437 |
+
"%s", index, input_size, filter_size, stride, padding)
|
| 438 |
+
# double datatype is currently not supported for convolution ops
|
| 439 |
+
# on the ROCm platform
|
| 440 |
+
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
|
| 441 |
+
for data_type in ([dtypes.float32] + optional_float64):
|
| 442 |
+
tf_logging.info("Testing without grouped_conv")
|
| 443 |
+
tolerance = 1e-4 if data_type == dtypes.float32 else 1e-12
|
| 444 |
+
self._VerifyValues(
|
| 445 |
+
input_size,
|
| 446 |
+
filter_size,
|
| 447 |
+
stride,
|
| 448 |
+
padding,
|
| 449 |
+
data_type,
|
| 450 |
+
use_gpu=True,
|
| 451 |
+
dilations=dilations,
|
| 452 |
+
tolerance=tolerance)
|
| 453 |
+
tf_logging.info("Testing with grouped_conv")
|
| 454 |
+
self._VerifyValues(
|
| 455 |
+
input_size,
|
| 456 |
+
filter_size,
|
| 457 |
+
stride,
|
| 458 |
+
padding,
|
| 459 |
+
data_type,
|
| 460 |
+
use_gpu=True,
|
| 461 |
+
grouped_conv=True,
|
| 462 |
+
dilations=dilations,
|
| 463 |
+
tolerance=tolerance)
|
| 464 |
+
|
| 465 |
+
@test_util.run_v1_only("b/120545219")
|
| 466 |
+
def testDepthwiseConv2DWithUnknownShape(self):
|
| 467 |
+
# GitHub issue 22110.
|
| 468 |
+
if not test.is_gpu_available():
|
| 469 |
+
return
|
| 470 |
+
with self.session():
|
| 471 |
+
x = array_ops.placeholder(dtypes.float32)
|
| 472 |
+
f = np.ones([1, 1, 1, 1], np.float32)
|
| 473 |
+
v = nn_impl.depthwise_conv2d(
|
| 474 |
+
x, f, [1, 1, 1, 1], "VALID", rate=[2, 1], data_format="NCHW")
|
| 475 |
+
self.assertAllEqual(
|
| 476 |
+
np.ones([1, 1, 1, 1], np.float32),
|
| 477 |
+
v.eval(feed_dict={x: np.ones([1, 1, 1, 1], np.float32)}))
|
| 478 |
+
|
| 479 |
+
@test_util.run_v1_only("b/120545219")
|
| 480 |
+
def testDepthwiseConv2DFormat(self):
|
| 481 |
+
if not test.is_gpu_available():
|
| 482 |
+
return
|
| 483 |
+
|
| 484 |
+
for index, (input_size, filter_size, _, stride, padding,
|
| 485 |
+
dilations) in enumerate(ConfigsToTest()):
|
| 486 |
+
tf_logging.info(
|
| 487 |
+
"Testing DepthwiseConv2DFormat, %dth config: %r * %r, stride: %d, "
|
| 488 |
+
"padding: %s", index, input_size, filter_size, stride, padding)
|
| 489 |
+
# double datatype is currently not supported for convolution ops
|
| 490 |
+
# on the ROCm platform
|
| 491 |
+
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
|
| 492 |
+
for data_type in ([dtypes.float32] + optional_float64):
|
| 493 |
+
tolerance = 1e-4 if data_type == dtypes.float32 else 1e-12
|
| 494 |
+
self._VerifyValues(
|
| 495 |
+
input_size,
|
| 496 |
+
filter_size,
|
| 497 |
+
stride,
|
| 498 |
+
padding,
|
| 499 |
+
data_type,
|
| 500 |
+
use_gpu=True,
|
| 501 |
+
data_format="NCHW",
|
| 502 |
+
dilations=dilations,
|
| 503 |
+
tolerance=tolerance)
|
| 504 |
+
|
| 505 |
+
@test_util.run_v1_only("b/120545219")
|
| 506 |
+
def testDepthwiseConv2DExplicit(self):
|
| 507 |
+
for index, (input_size, filter_size, _, stride, padding,
|
| 508 |
+
dilations) in enumerate(ConfigsToTestExplicit()):
|
| 509 |
+
tf_logging.info(
|
| 510 |
+
"Testing DepthwiseConv2D, %dth config: %r * %r, stride: %d, padding: "
|
| 511 |
+
"%s", index, input_size, filter_size, stride, padding)
|
| 512 |
+
# double datatype is currently not supported for convolution ops
|
| 513 |
+
# on the ROCm platform
|
| 514 |
+
data_types = [dtypes.float16, dtypes.float32, dtypes.bfloat16]
|
| 515 |
+
if not test.is_built_with_rocm():
|
| 516 |
+
data_types.extend([dtypes.float64])
|
| 517 |
+
data_formats = ["NHWC", "NCHW"] if test.is_gpu_available() else ["NHWC"]
|
| 518 |
+
for data_type in data_types:
|
| 519 |
+
for data_format in data_formats:
|
| 520 |
+
tolerance = 2e-2 if data_type == dtypes.bfloat16 else None
|
| 521 |
+
self._VerifyValues(
|
| 522 |
+
input_size,
|
| 523 |
+
filter_size,
|
| 524 |
+
stride,
|
| 525 |
+
padding,
|
| 526 |
+
data_type,
|
| 527 |
+
use_gpu=True,
|
| 528 |
+
data_format=data_format,
|
| 529 |
+
dilations=dilations,
|
| 530 |
+
tolerance=tolerance)
|
| 531 |
+
|
| 532 |
+
# This is testing against hand calculated results.
|
| 533 |
+
|
| 534 |
+
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
|
| 535 |
+
expected, use_gpu):
|
| 536 |
+
"""Verifies the output values of the depthwise convolution function.
|
| 537 |
+
|
| 538 |
+
Args:
|
| 539 |
+
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
|
| 540 |
+
input_cols, input_depth].
|
| 541 |
+
filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols,
|
| 542 |
+
input_depth, depth_multiplier].
|
| 543 |
+
stride: Stride.
|
| 544 |
+
padding: Padding type.
|
| 545 |
+
expected: An array containing the expected operation outputs.
|
| 546 |
+
use_gpu: Whether to use GPU.
|
| 547 |
+
"""
|
| 548 |
+
total_size_1 = 1
|
| 549 |
+
total_size_2 = 1
|
| 550 |
+
for s in tensor_in_sizes:
|
| 551 |
+
total_size_1 *= s
|
| 552 |
+
for s in filter_in_sizes:
|
| 553 |
+
total_size_2 *= s
|
| 554 |
+
# Initializes the input tensor with array containing incrementing
|
| 555 |
+
# numbers from 1.
|
| 556 |
+
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
|
| 557 |
+
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
|
| 558 |
+
with self.cached_session(use_gpu=use_gpu) as sess:
|
| 559 |
+
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
|
| 560 |
+
t1.set_shape(tensor_in_sizes)
|
| 561 |
+
t2 = constant_op.constant(x2, shape=filter_in_sizes)
|
| 562 |
+
conv = nn_ops.depthwise_conv2d_native(
|
| 563 |
+
t1, t2, strides=[1, stride, stride, 1], padding=padding)
|
| 564 |
+
value = self.evaluate(conv)
|
| 565 |
+
tf_logging.info("value = %r", value)
|
| 566 |
+
self.assertArrayNear(expected, np.ravel(value), 1e-5)
|
| 567 |
+
self.assertShapeEqual(value, conv)
|
| 568 |
+
|
| 569 |
+
def testConv2D2x2Filter(self):
|
| 570 |
+
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
|
| 571 |
+
#
|
| 572 |
+
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
|
| 573 |
+
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
|
| 574 |
+
# We can view this as two inputs
|
| 575 |
+
#
|
| 576 |
+
# input depth 0:
|
| 577 |
+
#
|
| 578 |
+
# [ 1.0, 3.0, 5.0 ]
|
| 579 |
+
# [ 7.0, 9.0, 11.0 ]
|
| 580 |
+
#
|
| 581 |
+
# input depth 1:
|
| 582 |
+
#
|
| 583 |
+
# [ 2.0, 4.0, 6.0 ]
|
| 584 |
+
# [ 8.0, 10.0, 12.0 ]
|
| 585 |
+
#
|
| 586 |
+
# The filter looks like this (it has two 2 x 2 patches, each generating 2
|
| 587 |
+
# depths):
|
| 588 |
+
#
|
| 589 |
+
# filter #0:
|
| 590 |
+
#
|
| 591 |
+
# [ (1.0, 3.0), ( 5.0, 7.0)]
|
| 592 |
+
# [ (9.0, 11.0), (13.0, 15.0)]
|
| 593 |
+
#
|
| 594 |
+
# filter #1:
|
| 595 |
+
#
|
| 596 |
+
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
|
| 597 |
+
# [ (10.0, 12.0), (14.0, 16.0)]
|
| 598 |
+
#
|
| 599 |
+
# So the outputs are:
|
| 600 |
+
#
|
| 601 |
+
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
|
| 602 |
+
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
|
| 603 |
+
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
|
| 604 |
+
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
|
| 605 |
+
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
|
| 606 |
+
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
|
| 607 |
+
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
|
| 608 |
+
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
|
| 609 |
+
#
|
| 610 |
+
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
|
| 611 |
+
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
|
| 612 |
+
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
|
| 613 |
+
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
|
| 614 |
+
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
|
| 615 |
+
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
|
| 616 |
+
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
|
| 617 |
+
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
|
| 618 |
+
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
|
| 619 |
+
self._VerifyHandValues(
|
| 620 |
+
tensor_in_sizes=[1, 2, 3, 2],
|
| 621 |
+
filter_in_sizes=[2, 2, 2, 2],
|
| 622 |
+
stride=1,
|
| 623 |
+
padding="VALID",
|
| 624 |
+
expected=expected_output,
|
| 625 |
+
use_gpu=False)
|
| 626 |
+
|
| 627 |
+
self._VerifyHandValues(
|
| 628 |
+
tensor_in_sizes=[1, 2, 3, 2],
|
| 629 |
+
filter_in_sizes=[2, 2, 2, 2],
|
| 630 |
+
stride=1,
|
| 631 |
+
padding="VALID",
|
| 632 |
+
expected=expected_output,
|
| 633 |
+
use_gpu=True)
|
| 634 |
+
|
| 635 |
+
# Gradient checkers. This tests depthwise gradient computations for both
|
| 636 |
+
# BackpropFilter and BackpropInput by comparing gradients computed by the
|
| 637 |
+
# depthwise gradient ops with the gradients computed numerically (details can
|
| 638 |
+
# be found in the compute_gradient_error().
|
| 639 |
+
# Note this check is very expensive so the input should not be too big.
|
| 640 |
+
  def _ConstructAndTestGradient(self,
                                input_shape,
                                filter_shape,
                                output_shape,
                                stride,
                                padding,
                                data_type,
                                test_input,
                                use_gpu,
                                grouped_conv=False,
                                data_format="NHWC",
                                dilations=None):
    """Checks analytic vs numeric gradients of depthwise_conv2d.

    Args:
      input_shape: NHWC input tensor dimensions.
      filter_shape: Filter dimensions [filter_rows, filter_cols, input_depth,
        depth_multiplier].
      output_shape: Expected NHWC output dimensions.
      stride: Stride.
      padding: Padding type, "SAME"/"VALID" or a list of explicit spatial
        paddings [[top, bottom], [left, right]].
      data_type: The data type to use.
      test_input: If True, check gradients w.r.t. the input; otherwise w.r.t.
        the filter.
      use_gpu: Whether to use GPU.
      grouped_conv: Whether to force cuDNN 7's grouped convolution kernels.
      data_format: "NHWC" or "NCHW".
      dilations: A list of 2 elements, representing the dilations.
    """
    input_size = 1
    for x in input_shape:
      input_size *= x
    filter_size = 1
    for x in filter_shape:
      filter_size *= x
    # Deterministic inputs in [0, 1) so gradient checking is reproducible.
    input_data = [x * 1.0 / input_size for x in range(0, input_size)]
    input_np = np.array(input_data).reshape(input_shape)
    filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
    filter_np = np.array(filter_data).reshape(filter_shape)
    # Fresh graph per call so kernel label maps don't leak across configs.
    ops.reset_default_graph()
    graph = ops.get_default_graph()
    with self.session(graph=graph, use_gpu=use_gpu) as sess:
      # Per-dtype gradient-error tolerances (low-precision types are loose).
      tolerance = {
          dtypes.float16: 4e-0,
          dtypes.float32: 8e-4,
          dtypes.float64: 1e-12,
          dtypes.bfloat16: 1e-0,
      }[data_type]

      input_tensor = constant_op.constant(
          input_np, shape=input_shape, dtype=data_type, name="input")
      filter_tensor = constant_op.constant(
          filter_np, shape=filter_shape, dtype=data_type, name="filter")

      native_input = input_tensor
      strides = [1, stride, stride, 1]
      # Explicit paddings get (0, 0) batch/depth entries to form NHWC layout.
      if isinstance(padding, list):
        padding = [(0, 0)] + padding + [(0, 0)]
      if data_format == "NCHW":
        # Transpose from NHWC input to NCHW
        # Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
        native_input = array_ops.transpose(input_tensor, [0, 3, 1, 2])
        input_shape = [
            input_shape[0], input_shape[3], input_shape[1], input_shape[2]
        ]
        output_shape = [
            output_shape[0], output_shape[3], output_shape[1], output_shape[2]
        ]
        strides = [1, 1, stride, stride]
        if isinstance(padding, list):
          padding = [padding[0], padding[3], padding[1], padding[2]]

      # Force the cuDNN grouped-convolution kernels (forward and both
      # backprop ops) when grouped_conv is requested.
      with sess.graph._kernel_label_map({  # pylint: disable=protected-access,g-long-ternary
          "DepthwiseConv2dNative": "cudnn_grouped_convolution",
          "DepthwiseConv2dNativeBackpropInput": "cudnn_grouped_convolution",
          "DepthwiseConv2dNativeBackpropFilter": "cudnn_grouped_convolution",
      } if grouped_conv else {}):
        depthwise_conv2d = nn_impl.depthwise_conv2d(
            native_input,
            filter_tensor,
            strides,
            padding,
            data_format=data_format,
            dilations=dilations,
            name="depthwise_conv2d")

      self.assertEqual(output_shape, depthwise_conv2d.get_shape())

      try:
        if test_input:
          err = gradient_checker.compute_gradient_error(native_input,
                                                        input_shape,
                                                        depthwise_conv2d,
                                                        output_shape)
        else:
          err = gradient_checker.compute_gradient_error(filter_tensor,
                                                        filter_shape,
                                                        depthwise_conv2d,
                                                        output_shape)
      except errors.InvalidArgumentError as e:
        # TODO(xjun): Tests depend on error messages could be brittle.
        # Grouped convolution kernel is only registered for cuDNN 7. Silently
        # return when we are running on an earlier version or without GPU.
        if grouped_conv and ("No OpKernel was registered to support Op "
                             "'DepthwiseConv2dNative'") in e.message:
          tf_logging.warn("Skipping grouped convolution test")
          return
        raise e

      tf_logging.info(
          "data_type: %r, use_gpu: %r, grouped_conv: %r, error = %f", data_type,
          use_gpu, grouped_conv, err)
      self.assertLess(err, tolerance)
|
| 736 |
+
|
| 737 |
+
@test_util.run_v1_only("b/120545219")
|
| 738 |
+
@test_util.run_gpu_only
|
| 739 |
+
def testDepthwiseConv2DInputGradCudnn(self):
|
| 740 |
+
for index, (input_size, filter_size, output_size, stride, padding,
|
| 741 |
+
dilations) in enumerate(CheckGradConfigsToTest()):
|
| 742 |
+
# The CuDNN depthwise conv (input gradient) is turned on only when
|
| 743 |
+
# stride = 1, input/output is NCHW and float16(half). See cudnn release
|
| 744 |
+
# note 7.6.3.
|
| 745 |
+
if stride != 1:
|
| 746 |
+
continue
|
| 747 |
+
tf_logging.info(
|
| 748 |
+
"Testing DepthwiseConv2DInputGradCudnn, %dth config: %r * %r, "
|
| 749 |
+
"stride: %d, padding: %s", index, input_size, filter_size, stride,
|
| 750 |
+
padding)
|
| 751 |
+
data_types = [dtypes.float16, dtypes.bfloat16]
|
| 752 |
+
for data_type in data_types:
|
| 753 |
+
self._ConstructAndTestGradient(
|
| 754 |
+
input_size,
|
| 755 |
+
filter_size,
|
| 756 |
+
output_size,
|
| 757 |
+
stride,
|
| 758 |
+
padding,
|
| 759 |
+
data_type,
|
| 760 |
+
test_input=True,
|
| 761 |
+
use_gpu=True,
|
| 762 |
+
data_format="NCHW",
|
| 763 |
+
dilations=dilations)
|
| 764 |
+
|
| 765 |
+
@test_util.run_v1_only("b/120545219")
|
| 766 |
+
def testDepthwiseConv2DInputGrad(self):
|
| 767 |
+
for index, (input_size, filter_size, output_size, stride, padding,
|
| 768 |
+
dilations) in enumerate(CheckGradConfigsToTest()):
|
| 769 |
+
tf_logging.info(
|
| 770 |
+
"Testing DepthwiseConv2DInputGrad, %dth config: %r * %r, stride: %d, "
|
| 771 |
+
"padding: %s", index, input_size, filter_size, stride, padding)
|
| 772 |
+
# double datatype is currently not supported for convolution ops
|
| 773 |
+
# on the ROCm platform
|
| 774 |
+
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
|
| 775 |
+
for data_type in ([dtypes.float32] + optional_float64):
|
| 776 |
+
self._ConstructAndTestGradient(
|
| 777 |
+
input_size,
|
| 778 |
+
filter_size,
|
| 779 |
+
output_size,
|
| 780 |
+
stride,
|
| 781 |
+
padding,
|
| 782 |
+
data_type,
|
| 783 |
+
test_input=True,
|
| 784 |
+
use_gpu=True,
|
| 785 |
+
dilations=dilations)
|
| 786 |
+
self._ConstructAndTestGradient(
|
| 787 |
+
input_size,
|
| 788 |
+
filter_size,
|
| 789 |
+
output_size,
|
| 790 |
+
stride,
|
| 791 |
+
padding,
|
| 792 |
+
data_type,
|
| 793 |
+
test_input=True,
|
| 794 |
+
use_gpu=True,
|
| 795 |
+
grouped_conv=True,
|
| 796 |
+
dilations=dilations)
|
| 797 |
+
|
| 798 |
+
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DInputGradFormat(self):
    """Gradient-checks the input gradient with NCHW data format (GPU only)."""
    if not test.is_gpu_available():
      return

    for index, (input_size, filter_size, output_size, stride, padding,
                dilations) in enumerate(CheckGradConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2DInputGradFormat, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      # double datatype is currently not supported for convolution ops
      # on the ROCm platform
      optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
      for data_type in ([dtypes.float32] + optional_float64):
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            data_type,
            test_input=True,
            use_gpu=True,
            data_format="NCHW",
            dilations=dilations)
|
| 824 |
+
|
| 825 |
+
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DInputGradExplicit(self):
    """Gradient-checks the input gradient over explicit-padding configs."""
    for index, (input_size, filter_size, output_size, stride, padding,
                dilations) in enumerate(CheckGradConfigsToTestExplicit()):
      tf_logging.info(
          "Testing DepthwiseConv2DInputGradExplicit, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      # double datatype is currently not supported for convolution ops
      # on the ROCm platform
      data_types = [dtypes.float16, dtypes.float32, dtypes.bfloat16]
      if not test.is_built_with_rocm():
        data_types.extend([dtypes.float64])
      # NCHW is only exercised when a GPU is present.
      data_formats = ["NHWC", "NCHW"] if test.is_gpu_available() else ["NHWC"]
      for data_type in data_types:
        for data_format in data_formats:
          self._ConstructAndTestGradient(
              input_size,
              filter_size,
              output_size,
              stride,
              padding,
              data_type,
              test_input=True,
              use_gpu=True,
              data_format=data_format,
              dilations=dilations)
|
| 852 |
+
|
| 853 |
+
  @test_util.run_v1_only("b/120545219")
  @test_util.run_gpu_only
  def testDepthwiseConv2DFilterGradCudnn(self):
    """Gradient-checks the filter gradient through the CuDNN path.

    Runs each config in both NCHW and NHWC layouts for half/bfloat16 types.
    """
    for index, (input_size, filter_size, output_size, stride, padding,
                dilations) in enumerate(CheckGradConfigsToTest()):
      # The CuDNN depthwise conv (filter gradient) is turned on only when
      # input/output is float16(half). See cudnn release note 7.6.3.
      tf_logging.info(
          "Testing DepthwiseConv2DFilterGradCudnn, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      data_types = [dtypes.float16, dtypes.bfloat16]
      for data_type in data_types:
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            data_type,
            test_input=False,
            use_gpu=True,
            data_format="NCHW",
            dilations=dilations)
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            data_type,
            test_input=False,
            use_gpu=True,
            data_format="NHWC",
            dilations=dilations)
|
| 888 |
+
|
| 889 |
+
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DFilterGrad(self):
    """Gradient-checks the filter gradient of depthwise conv2d."""
    for index, (input_size, filter_size, output_size, stride, padding,
                dilations) in enumerate(CheckGradConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2DFilterGrad, %dth config: %r * %r, stride: "
          "%d, padding: %s", index, input_size, filter_size, stride, padding)
      # double datatype is currently not supported for convolution ops
      # on the ROCm platform
      optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
      for data_type in ([dtypes.float16, dtypes.float32] + optional_float64):
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            data_type,
            test_input=False,
            use_gpu=True,
            dilations=dilations)
|
| 910 |
+
|
| 911 |
+
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DFilterGradFormat(self):
    """Gradient-checks the filter gradient with NCHW data format (GPU only)."""
    if not test.is_gpu_available():
      return

    for index, (input_size, filter_size, output_size, stride, padding,
                dilations) in enumerate(CheckGradConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2DFilterGradFormat, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      # double datatype is currently not supported for convolution ops
      # on the ROCm platform
      optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
      for data_type in ([dtypes.float32] + optional_float64):
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            data_type,
            test_input=False,
            use_gpu=True,
            data_format="NCHW",
            dilations=dilations)
|
| 937 |
+
|
| 938 |
+
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DFilterGradExplicit(self):
    """Gradient-checks the filter gradient over explicit-padding configs."""
    for index, (input_size, filter_size, output_size, stride, padding,
                dilations) in enumerate(CheckGradConfigsToTestExplicit()):
      tf_logging.info(
          "Testing DepthwiseConv2DFilterGradExplicit, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      # double datatype is currently not supported for convolution ops
      # on the ROCm platform
      data_types = [dtypes.float16, dtypes.float32, dtypes.bfloat16]
      if not test.is_built_with_rocm():
        data_types.extend([dtypes.float64])
      # NCHW is only exercised when a GPU is present.
      data_formats = ["NHWC", "NCHW"] if test.is_gpu_available() else ["NHWC"]
      for data_type in data_types:
        for data_format in data_formats:
          self._ConstructAndTestGradient(
              input_size,
              filter_size,
              output_size,
              stride,
              padding,
              data_type,
              test_input=False,
              use_gpu=True,
              data_format=data_format,
              dilations=dilations)
|
| 965 |
+
|
| 966 |
+
  def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
                            stride, padding, dtype):
    """Compares CPU and GPU results of depthwise_conv2d backprop-to-input.

    Args:
      input_sizes: Input tensor dimensions (passed to the op as a shape).
      filter_sizes: Filter tensor dimensions.
      output_sizes: Output-gradient tensor dimensions.
      stride: Spatial stride (applied to both H and W).
      padding: Padding spec; a string, or a list of (low, high) pairs for
        explicit padding (batch/channel pads of (0, 0) are prepended/appended
        here).
      dtype: Data type as a string, e.g. "float32" or "bfloat16".
    """
    x1 = np.random.rand(*filter_sizes)
    x2 = np.random.rand(*output_sizes)
    if isinstance(padding, list):
      padding = [(0, 0)] + padding + [(0, 0)]

    def _GetVal(use_gpu, dtype):
      # Evaluates the backprop op on the requested device and returns it.
      with self.cached_session(use_gpu=use_gpu):
        t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
        t1 = constant_op.constant(x1, shape=filter_sizes, dtype=dtype)
        t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
        backprop = nn_ops.depthwise_conv2d_native_backprop_input(
            t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = self.evaluate(backprop)
        self.assertShapeEqual(ret, backprop)
        return ret

    # bfloat16 has few mantissa bits, so use much looser tolerances for it.
    rtol, atol = (1e-1, 1e-1) if dtype == "bfloat16" else (1e-4, 1e-4)
    gpu_value = _GetVal(use_gpu=True, dtype=dtype)
    cpu_value = _GetVal(use_gpu=False, dtype=dtype)
    self.assertAllClose(cpu_value, gpu_value, rtol=rtol, atol=atol)
|
| 988 |
+
|
| 989 |
+
  @test_util.run_gpu_only
  def testDepthwiseConv2DInputGradCompare(self):
    """Compares CPU vs GPU input gradients over the standard configs."""
    for index, (input_size, filter_size, output_size, stride, padding,
                dilations) in enumerate(ConfigsToTest()):
      # Dilated configs are skipped here; this test only compares the
      # undilated backprop kernels.
      if dilations:
        continue
      tf_logging.info(
          "Testing DepthwiseConv2DInputGradCompare, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      self._CompareBackpropInput(input_size, filter_size, output_size, stride,
                                 padding, "float32")
      self._CompareBackpropInput(input_size, filter_size, output_size, stride,
                                 padding, "bfloat16")
      # Convolutions on the ROCm platform don't support double dtype.
      # So, we skip these tests.
      if not test.is_built_with_rocm():
        self._CompareBackpropInput(
            input_size, filter_size, output_size, stride, padding, "float64"
        )
|
| 1009 |
+
|
| 1010 |
+
@test_util.run_gpu_only
|
| 1011 |
+
def testDepthwiseConv2DInputGradExplicitCompare(self):
|
| 1012 |
+
for index, (input_size, filter_size, output_size, stride, padding,
|
| 1013 |
+
dilations) in enumerate(ConfigsToTestExplicit()):
|
| 1014 |
+
if dilations:
|
| 1015 |
+
continue
|
| 1016 |
+
tf_logging.info(
|
| 1017 |
+
"Testing DepthwiseConv2DInputGradCompare, %dth config: %r * %r, "
|
| 1018 |
+
"stride: %d, padding: %s", index, input_size, filter_size, stride,
|
| 1019 |
+
padding)
|
| 1020 |
+
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
|
| 1021 |
+
padding, "float32")
|
| 1022 |
+
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
|
| 1023 |
+
padding, "bfloat16")
|
| 1024 |
+
# Convolutions on the ROCm platform don't support double dtype.
|
| 1025 |
+
if not test.is_built_with_rocm():
|
| 1026 |
+
self._CompareBackpropInput(
|
| 1027 |
+
input_size, filter_size, output_size, stride, padding, "float64"
|
| 1028 |
+
)
|
| 1029 |
+
|
| 1030 |
+
  def _CompareBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
                             stride, padding, dtype):
    """Compares CPU and GPU results of depthwise_conv2d backprop-to-filter.

    The CPU reference is computed in NHWC; the GPU value is checked in both
    NHWC and NCHW layouts (inputs are transposed for NCHW).

    Args:
      input_sizes: Input tensor dimensions (NHWC).
      filter_sizes: Filter tensor dimensions (passed to the op as a shape).
      output_sizes: Output-gradient tensor dimensions (NHWC).
      stride: Spatial stride (applied to both H and W).
      padding: Padding spec; a string, or a list of (low, high) spatial pairs
        for explicit padding.
      dtype: Data type as a string, e.g. "float32" or "bfloat16".
    """
    x0 = np.random.rand(*input_sizes)
    x2 = np.random.rand(*output_sizes)
    # Explicit padding must be laid out per data format: batch/channel pads
    # of (0, 0) go last for NHWC, but both lead for NCHW.
    padding_nhwc = padding
    padding_nchw = padding
    if isinstance(padding, list):
      padding_nhwc = [(0, 0)] + padding + [(0, 0)]
      padding_nchw = [(0, 0)] + [(0, 0)] + padding

    def _GetVal(use_gpu, dtype, data_format="NHWC"):
      # Evaluates the filter-backprop op on the requested device/layout.
      with self.cached_session(use_gpu=use_gpu):
        t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
        strides = [1, stride, stride, 1]
        padding = padding_nhwc
        if data_format == "NCHW":
          # NHWC -> NCHW for both the input and the output gradient.
          t0 = array_ops.transpose(t0, [0, 3, 1, 2])
          t2 = array_ops.transpose(t2, [0, 3, 1, 2])
          strides = [1, 1, stride, stride]
          padding = padding_nchw
        backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
            t0,
            t1,
            t2,
            strides=strides,
            padding=padding,
            data_format=data_format)
        ret = self.evaluate(backprop)
        self.assertShapeEqual(ret, backprop)
        return ret

    cpu_value = _GetVal(use_gpu=False, dtype=dtype)
    for data_format in ["NHWC", "NCHW"]:
      gpu_value = _GetVal(use_gpu=True, dtype=dtype, data_format=data_format)
      self.assertAllCloseAccordingToType(
          cpu_value, gpu_value, rtol=1e-4, atol=1e-4, bfloat16_rtol=1e-0)
|
| 1068 |
+
|
| 1069 |
+
  @test_util.run_gpu_only
  def testDepthwiseConv2DFilterGradCompare(self):
    """Compares CPU vs GPU filter gradients over the standard configs."""
    for index, (input_size, filter_size, output_size, stride, padding,
                dilations) in enumerate(ConfigsToTest()):
      # Dilated configs are skipped here; this test only compares the
      # undilated backprop kernels.
      if dilations:
        continue
      tf_logging.info(
          "Testing DepthwiseConv2DFilterGradCompare, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      self._CompareBackpropFilter(input_size, filter_size, output_size, stride,
                                  padding, "float32")
      self._CompareBackpropFilter(input_size, filter_size, output_size, stride,
                                  padding, "bfloat16")
      # Convolutions on the ROCm platform don't support double dtype.
      if not test.is_built_with_rocm():
        self._CompareBackpropFilter(
            input_size, filter_size, output_size, stride, padding, "float64"
        )
|
| 1088 |
+
|
| 1089 |
+
@test_util.run_gpu_only
|
| 1090 |
+
def testDepthwiseConv2DFilterGradExplicitCompare(self):
|
| 1091 |
+
for index, (input_size, filter_size, output_size, stride, padding,
|
| 1092 |
+
dilations) in enumerate(ConfigsToTestExplicit()):
|
| 1093 |
+
if dilations:
|
| 1094 |
+
continue
|
| 1095 |
+
tf_logging.info(
|
| 1096 |
+
"Testing DepthwiseConv2DFilterGradCompare, %dth config: %r * %r, "
|
| 1097 |
+
"stride: %d, padding: %s", index, input_size, filter_size, stride,
|
| 1098 |
+
padding)
|
| 1099 |
+
self._CompareBackpropFilter(input_size, filter_size, output_size, stride,
|
| 1100 |
+
padding, "float32")
|
| 1101 |
+
self._CompareBackpropFilter(input_size, filter_size, output_size, stride,
|
| 1102 |
+
padding, "bfloat16")
|
| 1103 |
+
# Convolutions on the ROCm platform don't support double dtype.
|
| 1104 |
+
if not test.is_built_with_rocm():
|
| 1105 |
+
self._CompareBackpropFilter(
|
| 1106 |
+
input_size, filter_size, output_size, stride, padding, "float64"
|
| 1107 |
+
)
|
| 1108 |
+
|
| 1109 |
+
  def _CompareForward(self, input_sizes, filter_sizes, output_sizes, stride,
                      padding, dtype):
    """Compares CPU and GPU results of the forward depthwise conv2d.

    Args:
      input_sizes: Input tensor dimensions.
      filter_sizes: Filter tensor dimensions.
      output_sizes: Expected output dimensions (unused here beyond the config
        tuple shape; the op derives the output shape itself).
      stride: Spatial stride (applied to both H and W).
      padding: Padding spec; a string, or a list of (low, high) pairs for
        explicit padding (batch/channel pads of (0, 0) are added here).
      dtype: Data type as a string, e.g. "float32" or "bfloat16".
    """
    x1 = np.random.rand(*input_sizes)
    x2 = np.random.rand(*filter_sizes)
    if isinstance(padding, list):
      padding = [(0, 0)] + padding + [(0, 0)]

    def _GetVal(use_gpu, dtype):
      # Evaluates the forward op on the requested device and returns it.
      with self.cached_session(use_gpu=use_gpu):
        t1 = constant_op.constant(x1, shape=input_sizes, dtype=dtype)
        t2 = constant_op.constant(x2, shape=filter_sizes, dtype=dtype)
        output = nn_ops.depthwise_conv2d_native(
            t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = self.evaluate(output)
        self.assertShapeEqual(ret, output)
        return ret

    gpu_value = _GetVal(use_gpu=True, dtype=dtype)
    cpu_value = _GetVal(use_gpu=False, dtype=dtype)
    self.assertAllCloseAccordingToType(
        cpu_value, gpu_value, rtol=1e-4, atol=1e-4, bfloat16_rtol=1e-1)
|
| 1130 |
+
|
| 1131 |
+
  @test_util.run_gpu_only
  def testDepthwiseConv2DForwardCompare(self):
    """Compares CPU vs GPU forward results over the standard configs."""
    for index, (input_size, filter_size, output_size, stride, padding,
                dilations) in enumerate(ConfigsToTest()):
      # Dilated configs are skipped here; this test only compares the
      # undilated forward kernels.
      if dilations:
        continue
      tf_logging.info(
          "Testing DepthwiseConv2DForwardCompare, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      self._CompareForward(input_size, filter_size, output_size, stride,
                           padding, "float32")
      self._CompareForward(input_size, filter_size, output_size, stride,
                           padding, "bfloat16")

      # Convolutions on the ROCm platform don't support double dtype.
      if not test.is_built_with_rocm():
        self._CompareForward(
            input_size, filter_size, output_size, stride, padding, "float64"
        )
|
| 1151 |
+
|
| 1152 |
+
@test_util.run_gpu_only
|
| 1153 |
+
def testDepthwiseConv2DForwardExplicitCompare(self):
|
| 1154 |
+
for index, (input_size, filter_size, output_size, stride, padding,
|
| 1155 |
+
dilations) in enumerate(ConfigsToTestExplicit()):
|
| 1156 |
+
if dilations:
|
| 1157 |
+
continue
|
| 1158 |
+
tf_logging.info(
|
| 1159 |
+
"Testing DepthwiseConv2DForwardCompare, %dth config: %r * %r, "
|
| 1160 |
+
"stride: %d, padding: %s", index, input_size, filter_size, stride,
|
| 1161 |
+
padding)
|
| 1162 |
+
|
| 1163 |
+
self._CompareForward(input_size, filter_size, output_size, stride,
|
| 1164 |
+
padding, "float32")
|
| 1165 |
+
self._CompareForward(input_size, filter_size, output_size, stride,
|
| 1166 |
+
padding, "bfloat16")
|
| 1167 |
+
|
| 1168 |
+
# Convolutions on the ROCm platform don't support double dtype.
|
| 1169 |
+
if not test.is_built_with_rocm():
|
| 1170 |
+
self._CompareForward(
|
| 1171 |
+
input_size, filter_size, output_size, stride, padding, "float64"
|
| 1172 |
+
)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/nn_ops/xent_op_test_base.py
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015-2021 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for SoftmaxCrossEntropyWithLogits op."""
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
from tensorflow.python.eager import backprop
|
| 20 |
+
from tensorflow.python.framework import config
|
| 21 |
+
from tensorflow.python.framework import constant_op
|
| 22 |
+
from tensorflow.python.framework import dtypes
|
| 23 |
+
from tensorflow.python.framework import ops
|
| 24 |
+
from tensorflow.python.framework import test_util
|
| 25 |
+
from tensorflow.python.ops import array_ops
|
| 26 |
+
from tensorflow.python.ops import gradient_checker
|
| 27 |
+
from tensorflow.python.ops import gradients_impl
|
| 28 |
+
from tensorflow.python.ops import math_ops
|
| 29 |
+
from tensorflow.python.ops import nn_ops
|
| 30 |
+
# The following import is required to register the gradient function.
|
| 31 |
+
from tensorflow.python.ops.nn_grad import _SoftmaxCrossEntropyWithLogitsGrad # pylint: disable=unused-import
|
| 32 |
+
from tensorflow.python.platform import test
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class XentOpTestBase(test.TestCase):
|
| 36 |
+
|
| 37 |
+
def _opFwdBwd(self, labels, logits, axis=-1):
|
| 38 |
+
""" Runs the op-under-test both forwards and backwards."""
|
| 39 |
+
logits = ops.convert_to_tensor(logits) # needed for the gradient tape
|
| 40 |
+
with backprop.GradientTape() as tape:
|
| 41 |
+
tape.watch(logits)
|
| 42 |
+
loss = nn_ops.softmax_cross_entropy_with_logits(
|
| 43 |
+
labels=labels, logits=logits, dim=axis)
|
| 44 |
+
return loss, tape.gradient(loss, logits)
|
| 45 |
+
|
| 46 |
+
def _npXent(self, labels, logits, dim=-1):
|
| 47 |
+
if dim == -1:
|
| 48 |
+
dim = len(logits.shape) - 1
|
| 49 |
+
one_only_on_dim = list(logits.shape)
|
| 50 |
+
one_only_on_dim[dim] = 1
|
| 51 |
+
e = np.exp(logits - np.reshape(np.amax(logits, axis=dim), one_only_on_dim))
|
| 52 |
+
probs = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
|
| 53 |
+
bp = (probs - labels)
|
| 54 |
+
l = -np.sum(labels * np.log(probs + 1.0e-20), axis=dim)
|
| 55 |
+
return l, bp
|
| 56 |
+
|
| 57 |
+
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
|
| 58 |
+
# are needed.
|
| 59 |
+
def _testXent2D(self,
|
| 60 |
+
np_labels,
|
| 61 |
+
np_logits,
|
| 62 |
+
with_placeholders=False,
|
| 63 |
+
expected_gradient=None):
|
| 64 |
+
np_loss, np_gradient = self._npXent(labels=np_labels, logits=np_logits)
|
| 65 |
+
if expected_gradient is not None:
|
| 66 |
+
np_gradient = expected_gradient
|
| 67 |
+
with self.cached_session() as sess:
|
| 68 |
+
if with_placeholders:
|
| 69 |
+
logits_placeholder = array_ops.placeholder(np_logits.dtype)
|
| 70 |
+
labels_placeholder = array_ops.placeholder(np_labels.dtype)
|
| 71 |
+
loss, gradient = self._opFwdBwd(labels_placeholder, logits_placeholder)
|
| 72 |
+
tf_loss, tf_gradient = sess.run([loss, gradient],
|
| 73 |
+
feed_dict={
|
| 74 |
+
labels_placeholder: np_labels,
|
| 75 |
+
logits_placeholder: np_logits
|
| 76 |
+
})
|
| 77 |
+
else:
|
| 78 |
+
loss, gradient = self._opFwdBwd(np_labels, np_logits)
|
| 79 |
+
tf_loss, tf_gradient = self.evaluate([loss, gradient])
|
| 80 |
+
self.assertAllCloseAccordingToType(np_loss, tf_loss, half_rtol=1e-2)
|
| 81 |
+
self.assertAllCloseAccordingToType(np_gradient, tf_gradient)
|
| 82 |
+
|
| 83 |
+
def _testXentND(self, np_labels, np_logits, dim=-1):
|
| 84 |
+
np_loss, _ = self._npXent(np_labels, np_logits, dim=dim)
|
| 85 |
+
loss = nn_ops.softmax_cross_entropy_with_logits(
|
| 86 |
+
labels=np_labels, logits=np_logits, dim=dim)
|
| 87 |
+
tf_loss = self.evaluate(loss)
|
| 88 |
+
self.assertAllCloseAccordingToType(np_loss, tf_loss)
|
| 89 |
+
|
| 90 |
+
def _testSingleClass(self, expected_gradient=[[2.0], [1.0], [0.0], [0.0]]):
|
| 91 |
+
for dtype in np.float16, np.float32, dtypes.bfloat16.as_numpy_dtype:
|
| 92 |
+
loss, gradient = self._opFwdBwd(
|
| 93 |
+
labels=np.array([[-1.], [0.], [1.], [1.]]).astype(dtype),
|
| 94 |
+
logits=np.array([[1.], [-1.], [0.], [1.]]).astype(dtype))
|
| 95 |
+
self.assertAllClose([0.0, 0.0, 0.0, 0.0], loss)
|
| 96 |
+
self.assertAllClose(expected_gradient, gradient)
|
| 97 |
+
|
| 98 |
+
def testSingleClass(self):
|
| 99 |
+
"""This method is structured to be easily overridden by a child class."""
|
| 100 |
+
self._testSingleClass()
|
| 101 |
+
|
| 102 |
+
def testNpXent(self):
|
| 103 |
+
# We create 2 batches of logits for testing.
|
| 104 |
+
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
|
| 105 |
+
# batch 1 has a bit of difference: 1, 2, 3, 4, with soft targets (1, 2).
|
| 106 |
+
logits = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
|
| 107 |
+
labels = [[0., 0., 0., 1.], [0., .5, .5, 0.]]
|
| 108 |
+
|
| 109 |
+
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
|
| 110 |
+
# With a hard target 3, the gradient is [0.25, 0.25, 0.25, -0.75]
|
| 111 |
+
# The loss for this batch is -log(0.25) = 1.386
|
| 112 |
+
#
|
| 113 |
+
# For batch 1, we have:
|
| 114 |
+
# exp(0) = 1
|
| 115 |
+
# exp(1) = 2.718
|
| 116 |
+
# exp(2) = 7.389
|
| 117 |
+
# exp(3) = 20.085
|
| 118 |
+
# SUM = 31.192
|
| 119 |
+
# So we have as probabilities:
|
| 120 |
+
# exp(0) / SUM = 0.032
|
| 121 |
+
# exp(1) / SUM = 0.087
|
| 122 |
+
# exp(2) / SUM = 0.237
|
| 123 |
+
# exp(3) / SUM = 0.644
|
| 124 |
+
# With a soft target (1, 2), the gradient is
|
| 125 |
+
# [0.032, 0.087 - 0.5 = -0.413, 0.237 - 0.5 = -0.263, 0.644]
|
| 126 |
+
# The loss for this batch is [0.5 * -log(0.087), 0.5 * -log(0.237)]
|
| 127 |
+
# = [1.3862, 1.9401]
|
| 128 |
+
np_loss, np_gradient = self._npXent(np.array(labels), np.array(logits))
|
| 129 |
+
self.assertAllClose(
|
| 130 |
+
np.array([[0.25, 0.25, 0.25, -0.75], [0.0321, -0.4129, -0.2632,
|
| 131 |
+
0.6439]]),
|
| 132 |
+
np_gradient,
|
| 133 |
+
rtol=1.e-3,
|
| 134 |
+
atol=1.e-3)
|
| 135 |
+
self.assertAllClose(
|
| 136 |
+
np.array([1.3862, 1.9401]), np_loss, rtol=1.e-3, atol=1.e-3)
|
| 137 |
+
|
| 138 |
+
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
|
| 139 |
+
# are needed.
|
| 140 |
+
@test_util.run_deprecated_v1
|
| 141 |
+
def _testLabelsBroadcast(self, uniform_labels_gradient):
|
| 142 |
+
labels = np.array([[0., 0., 0., 1.]]).astype(np.float16)
|
| 143 |
+
logits = np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16)
|
| 144 |
+
self._testXent2D(labels, logits, with_placeholders=True)
|
| 145 |
+
labels = np.array([[1.]]).astype(np.float16)
|
| 146 |
+
logits = np.array([[1.], [2.]]).astype(np.float16)
|
| 147 |
+
self._testXent2D(labels, logits, with_placeholders=True)
|
| 148 |
+
labels = np.array([[0.], [2.], [0.25]]).astype(np.float16)
|
| 149 |
+
logits = np.array([[1., 1., 1., 1.], [1., 2., 3., 4.],
|
| 150 |
+
[1., 2., 3., 4.]]).astype(np.float16)
|
| 151 |
+
self._testXent2D(
|
| 152 |
+
labels,
|
| 153 |
+
logits,
|
| 154 |
+
with_placeholders=True,
|
| 155 |
+
expected_gradient=uniform_labels_gradient)
|
| 156 |
+
|
| 157 |
+
def testLabelsBroadcast(self):
|
| 158 |
+
"""This method is structured to be easily overridden by a child class."""
|
| 159 |
+
self._testLabelsBroadcast(uniform_labels_gradient=[[
|
| 160 |
+
0.25, 0.25, 0.25, 0.25
|
| 161 |
+
], [-1.968, -1.913, -1.763, -1.355], [-0.218, -0.163, -0.013, 0.394]])
|
| 162 |
+
|
| 163 |
+
@test_util.run_deprecated_v1
|
| 164 |
+
def testShapeMismatch(self):
|
| 165 |
+
with self.cached_session():
|
| 166 |
+
with self.assertRaises(ValueError):
|
| 167 |
+
self._opFwdBwd(
|
| 168 |
+
labels=[[0., 1., 0.], [1., 0., 0.]], logits=[[0., 1.], [2., 3.]])
|
| 169 |
+
|
| 170 |
+
def testHalf(self):
|
| 171 |
+
labels = np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float16)
|
| 172 |
+
logits = np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16)
|
| 173 |
+
self._testXent2D(labels, logits)
|
| 174 |
+
|
| 175 |
+
def testBfloat16(self):
|
| 176 |
+
labels = np.array([[0., 0., 0., 1.],
|
| 177 |
+
[0., .5, .5, 0.]]).astype(dtypes.bfloat16.as_numpy_dtype)
|
| 178 |
+
logits = np.array([[1., 1., 1., 1.],
|
| 179 |
+
[1., 2., 3., 4.]]).astype(dtypes.bfloat16.as_numpy_dtype)
|
| 180 |
+
self._testXent2D(labels, logits)
|
| 181 |
+
|
| 182 |
+
def testFloat(self):
|
| 183 |
+
labels = np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32)
|
| 184 |
+
logits = np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32)
|
| 185 |
+
self._testXent2D(labels, logits)
|
| 186 |
+
|
| 187 |
+
def testDouble(self):
|
| 188 |
+
labels = np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float64)
|
| 189 |
+
logits = np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64)
|
| 190 |
+
self._testXent2D(labels, logits)
|
| 191 |
+
|
| 192 |
+
@test_util.run_deprecated_v1
|
| 193 |
+
def testGradient(self):
|
| 194 |
+
with self.cached_session() as sess:
|
| 195 |
+
labels = constant_op.constant(
|
| 196 |
+
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
|
| 197 |
+
shape=[3, 4],
|
| 198 |
+
dtype=dtypes.float64,
|
| 199 |
+
name="labels")
|
| 200 |
+
logits = constant_op.constant(
|
| 201 |
+
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
|
| 202 |
+
shape=[3, 4],
|
| 203 |
+
dtype=dtypes.float64,
|
| 204 |
+
name="logits")
|
| 205 |
+
x = nn_ops.softmax_cross_entropy_with_logits(
|
| 206 |
+
labels=labels, logits=logits, name="xent")
|
| 207 |
+
err = gradient_checker.compute_gradient_error(logits, [3, 4], x, [3])
|
| 208 |
+
|
| 209 |
+
# Check that no extra computation gets performed. When only the first
|
| 210 |
+
# derivative is requested, the second derivative must not be computed.
|
| 211 |
+
# So when there is no second derivative, there is no `BatchMatMul` op
|
| 212 |
+
# in the graph.
|
| 213 |
+
op_names = [
|
| 214 |
+
op.op_def.name for op in sess.graph.get_operations() if op.op_def
|
| 215 |
+
]
|
| 216 |
+
self.assertNotIn("BatchMatMul", op_names)
|
| 217 |
+
self.assertNotIn("BatchMatMulV2", op_names)
|
| 218 |
+
|
| 219 |
+
self.assertLess(err, 5e-8)
|
| 220 |
+
|
| 221 |
+
@test_util.run_deprecated_v1
|
| 222 |
+
def testGradientLabelWithV2(self):
|
| 223 |
+
with self.cached_session():
|
| 224 |
+
labels = constant_op.constant(
|
| 225 |
+
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
|
| 226 |
+
shape=[3, 4],
|
| 227 |
+
dtype=dtypes.float64,
|
| 228 |
+
name="labels")
|
| 229 |
+
logits = constant_op.constant(
|
| 230 |
+
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
|
| 231 |
+
shape=[3, 4],
|
| 232 |
+
dtype=dtypes.float64,
|
| 233 |
+
name="logits")
|
| 234 |
+
x = nn_ops.softmax_cross_entropy_with_logits_v2(
|
| 235 |
+
labels=labels, logits=logits, name="xent")
|
| 236 |
+
err = gradient_checker.compute_gradient_error(labels, [3, 4], x, [3])
|
| 237 |
+
|
| 238 |
+
self.assertLess(err, 5e-8)
|
| 239 |
+
|
| 240 |
+
@test_util.run_deprecated_v1
|
| 241 |
+
def testSecondGradient(self):
|
| 242 |
+
with self.cached_session() as sess:
|
| 243 |
+
labels = constant_op.constant([
|
| 244 |
+
0.0, 0.0, 1.0 / 3, 0.0, 1.0 / 3, 0.0, 0.0, 0.0, 0.0, 0.5 / 3, 0.0,
|
| 245 |
+
0.5 / 3
|
| 246 |
+
],
|
| 247 |
+
shape=[12],
|
| 248 |
+
dtype=dtypes.float64,
|
| 249 |
+
name="labels")
|
| 250 |
+
logits = constant_op.constant(
|
| 251 |
+
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
|
| 252 |
+
shape=[12],
|
| 253 |
+
dtype=dtypes.float64,
|
| 254 |
+
name="logits")
|
| 255 |
+
x = nn_ops.softmax_cross_entropy_with_logits(
|
| 256 |
+
labels=labels, logits=logits, name="xent")
|
| 257 |
+
loss = math_ops.reduce_sum(x)
|
| 258 |
+
|
| 259 |
+
gradients = gradients_impl.gradients(loss, [logits])[0]
|
| 260 |
+
|
| 261 |
+
err = gradient_checker.compute_gradient_error(logits, [12], gradients,
|
| 262 |
+
[12])
|
| 263 |
+
|
| 264 |
+
if not config.is_op_determinism_enabled():
|
| 265 |
+
# Check how second derivative is calculated.
|
| 266 |
+
# (it is equivalent to a `BatchMatMul` op being in the graph because of
|
| 267 |
+
# the implementation in SoftmaxCrossEntropyWithLogitsGrad)
|
| 268 |
+
op_names = [
|
| 269 |
+
op.op_def.name for op in sess.graph.get_operations() if op.op_def
|
| 270 |
+
]
|
| 271 |
+
self.assertIn("BatchMatMulV2", op_names)
|
| 272 |
+
|
| 273 |
+
self.assertLess(err, 5e-8)
|
| 274 |
+
|
| 275 |
+
  def test3D(self):
    """Exercises rank-3 labels/logits for each candidate class dimension."""
    # 3 x 2 batch of distributions over 4 classes; mixes hard (one-hot) and
    # soft label rows.
    labels = np.array([[[0., 0., 0., 1.], [0., 1., 0., 0.]],
                       [[0., 0.5, 0.5, 0.], [0.5, 0.5, 0., 0.]],
                       [[0., 1., 0., 0.], [0., 0., 1., 0.]]]).astype(np.float32)
    logits = np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
                       [[2., 3., 4., 5.], [6., 7., 8., 9.]],
                       [[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32)
    # Run the N-D helper with every axis choice, including negative indexing.
    self._testXentND(labels, logits, dim=0)
    self._testXentND(labels, logits, dim=1)
    self._testXentND(labels, logits, dim=-1)
|
| 285 |
+
|
| 286 |
+
  def testZeroDimension(self):
    """Zero-sized batch: op output must match the NumPy reference exactly."""
    labels = np.zeros([0, 2, 4]).astype(np.float32)
    logits = np.zeros([0, 2, 4]).astype(np.float32)
    # Reference loss computed by the class's NumPy implementation.
    np_loss, _ = self._npXent(labels=labels, logits=logits)
    loss = nn_ops.softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)
    tf_loss = self.evaluate(loss)
    self.assertAllEqual(np_loss, tf_loss)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/random/__init__.py
ADDED
|
File without changes
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/random/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (193 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/random/__pycache__/util.cpython-310.pyc
ADDED
|
Binary file (4.22 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/random/util.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Utilities for testing random variables."""
|
| 16 |
+
|
| 17 |
+
import math
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
|
| 21 |
+
from tensorflow.python.ops.distributions import special_math
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def test_moment_matching(
    samples,
    number_moments,
    dist,
    stride=0):
  """Return z-test scores comparing sample moments to analytic moments.

  For each moment order ``i`` in ``1..number_moments``, the i-th empirical
  moment of a strided subset of `samples` is compared against
  ``dist.moment(i)``; the discrepancy is normalized by the standard error of
  the moment estimator, giving an approximately standard-normal z-score.

  Args:
    samples: Samples drawn from the target distribution.
    number_moments: Python `int`, how many sample moments to check.
    dist: SciPy-style distribution object providing analytic `moment(i)`.
    stride: Spacing between the samples used for moment `i` (scaled by
      `i - 1`). 0 uses every sample; larger strides probe spatial
      correlation.

  Returns:
    List of z-test scores, one per moment order.
  """
  finfo = np.finfo(samples.dtype)
  z_scores = []
  for order in range(1, number_moments + 1):
    step = (order - 1) * stride + 1
    if len(samples.shape) == 2:
      selected = samples.flat[::step]
    else:
      selected = samples[::step, ...]
    empirical = np.mean(selected**order, axis=0)
    analytic = dist.moment(order)
    # Variance of the moment estimator: Var(X^order) / sample count.
    estimator_variance = (
        (dist.moment(2 * order) - analytic**2) / len(selected))
    # Allow a small numerical-error budget: order multiplications per term.
    total_variance = estimator_variance + order * finfo.eps
    assert np.all(total_variance > 0)
    # Clamp subnormal variances so the normalization below stays finite.
    total_variance = np.where(total_variance < finfo.tiny, finfo.tiny,
                              total_variance)
    z_scores.append(
        abs((empirical - analytic) / np.sqrt(total_variance)))
  return z_scores
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def chi_squared(x, bins):
  """Pearson's Chi-squared statistic for `x` against Uniform(0, 1).

  Histograms the flattened input into `bins` equal-width bins on [0, 1] and
  sums the squared deviations from the uniform expectation, normalized per
  bin.
  """
  flat = np.ravel(x)
  counts, _ = np.histogram(flat, bins=bins, range=(0, 1))
  per_bin = len(flat) / float(bins)
  deviations = counts - per_bin
  return np.sum(deviations * deviations / per_bin)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def normal_cdf(x):
  """Standard normal CDF of `x`, evaluated elementwise via math.erf."""
  erf_elementwise = np.vectorize(math.erf)
  scaled = x / math.sqrt(2)
  return 0.5 + 0.5 * erf_elementwise(scaled)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def anderson_darling(x):
  """Anderson-Darling statistic testing `x` against a standard normal."""
  ordered = np.sort(np.ravel(x))
  n = len(ordered)
  ranks = np.linspace(1, n, n)
  cdf_vals = normal_cdf(ordered)
  weighted = ((2 * ranks - 1) * np.log(cdf_vals) +
              (2 * (n - ranks) + 1) * np.log(1 - cdf_vals))
  return -n - np.sum(weighted) / n
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def test_truncated_normal(assert_equal,
                          assert_all_close,
                          n,
                          y,
                          means=None,
                          stddevs=None,
                          minvals=None,
                          maxvals=None,
                          mean_atol=5e-4,
                          median_atol=8e-4,
                          variance_rtol=1e-3):
  """Tests truncated normal distribution's statistics.

  Checks that every sample in `y` lies inside the truncation bounds, and that
  the sample mean, median and variance agree with the analytic moments of the
  truncated normal distribution.

  Args:
    assert_equal: Callable with `assertEqual`-style semantics.
    assert_all_close: Callable with `assertAllClose`-style semantics; must
      accept `atol`/`rtol` keyword arguments.
    n: Expected number of samples in `y`.
    y: Array of samples to check.
    means: Optional distribution mean; defaults to 0.
    stddevs: Optional distribution standard deviation; defaults to 1.
    minvals: Optional lower truncation bound; defaults to -2.
    maxvals: Optional upper truncation bound; defaults to 2.
    mean_atol: Absolute tolerance for the sample mean check.
    median_atol: Absolute tolerance for the sample median check.
    variance_rtol: Relative tolerance for the sample variance check.
  """
  def _normal_cdf(x):
    # Standard normal CDF via the complementary error function.
    return .5 * math.erfc(-x / math.sqrt(2))

  def normal_pdf(x):
    # Standard normal density.
    return math.exp(-(x**2) / 2.) / math.sqrt(2 * math.pi)

  def probit(x):
    # Inverse of the standard normal CDF.
    return special_math.ndtri(x)

  # Defaults: standard normal truncated to [-2, 2].
  a = -2.
  b = 2.
  mu = 0.
  sigma = 1.

  if minvals is not None:
    a = minvals

  if maxvals is not None:
    b = maxvals

  if means is not None:
    mu = means

  if stddevs is not None:
    sigma = stddevs

  # Standardized truncation bounds and the probability mass between them.
  alpha = (a - mu) / sigma
  beta = (b - mu) / sigma
  z = _normal_cdf(beta) - _normal_cdf(alpha)

  # Every one of the n samples must fall inside [a, b].
  assert_equal((y >= a).sum(), n)
  assert_equal((y <= b).sum(), n)

  # For more information on these calculations, see:
  # Burkardt, John. "The Truncated Normal Distribution".
  # Department of Scientific Computing website. Florida State University.
  expected_mean = mu + (normal_pdf(alpha) - normal_pdf(beta)) / z * sigma
  y = y.astype(float)
  actual_mean = np.mean(y)
  assert_all_close(actual_mean, expected_mean, atol=mean_atol)

  expected_median = mu + probit(
      (_normal_cdf(alpha) + _normal_cdf(beta)) / 2.) * sigma
  actual_median = np.median(y)
  assert_all_close(actual_median, expected_median, atol=median_atol)

  expected_variance = sigma**2 * (1 + (
      (alpha * normal_pdf(alpha) - beta * normal_pdf(beta)) / z) - (
          (normal_pdf(alpha) - normal_pdf(beta)) / z)**2)
  actual_variance = np.var(y)
  assert_all_close(
      actual_variance,
      expected_variance,
      rtol=variance_rtol)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/signal/__init__.py
ADDED
|
File without changes
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/signal/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (193 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/signal/__pycache__/test_util.cpython-310.pyc
ADDED
|
Binary file (3.3 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/signal/test_util.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Test utilities for tf.signal."""
|
| 16 |
+
|
| 17 |
+
from tensorflow.core.protobuf import config_pb2
|
| 18 |
+
from tensorflow.lite.python import interpreter
|
| 19 |
+
from tensorflow.lite.python import lite
|
| 20 |
+
from tensorflow.python.eager import def_function
|
| 21 |
+
from tensorflow.python.grappler import tf_optimizer
|
| 22 |
+
from tensorflow.python.training import saver
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def grappler_optimize(graph, fetches=None, config_proto=None):
  """Tries to optimize the provided graph using grappler.

  Args:
    graph: A `tf.Graph` instance containing the graph to optimize.
    fetches: An optional list of `Tensor`s to fetch (i.e. not optimize away).
      Grappler uses the 'train_op' collection to look for fetches, so if not
      provided this collection should be non-empty.
    config_proto: An optional `tf.compat.v1.ConfigProto` to use when rewriting
      the graph.

  Returns:
    A `tf.compat.v1.GraphDef` containing the rewritten graph.
  """
  if config_proto is None:
    config_proto = config_pb2.ConfigProto()
    # NOTE(review): presumably forces grappler to rewrite even very small
    # graphs (the usual case in these tests) -- confirm against the
    # RewriterConfig documentation.
    config_proto.graph_options.rewrite_options.min_graph_nodes = -1
  if fetches is not None:
    for fetch in fetches:
      # Grappler reads the 'train_op' collection to decide which nodes must
      # be preserved (see the docstring above).
      graph.add_to_collection('train_op', fetch)
  metagraph = saver.export_meta_graph(graph_def=graph.as_graph_def())
  return tf_optimizer.OptimizeGraph(config_proto, metagraph)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def tflite_convert(fn, input_templates):
  """Traces `fn` and converts it to a serialized tf.lite model.

  Args:
    fn: A callable taking inputs shaped like `input_templates` and returning
      a tensor or structure of tensors.
    input_templates: A list of Tensors, ndarrays or TensorSpecs describing
      the inputs `fn` expects; only their specs matter, not their values.

  Returns:
    The serialized tf.lite model bytes.
  """
  traced = def_function.function(fn)
  concrete = traced.get_concrete_function(*input_templates)
  return lite.TFLiteConverterV2([concrete]).convert()
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def evaluate_tflite_model(tflite_model, input_ndarrays):
  """Runs a serialized tf.lite model on the given input arrays.

  Args:
    tflite_model: bytes. The serialized tf.lite model.
    input_ndarrays: A list of NumPy arrays to feed as input to the model.

  Returns:
    A list of ndarrays produced by the model.

  Raises:
    ValueError: If the number of input arrays does not match the number of
      inputs the model expects.
  """
  interp = interpreter.Interpreter(model_content=tflite_model)
  interp.allocate_tensors()

  input_details = interp.get_input_details()
  output_details = interp.get_output_details()

  if len(input_ndarrays) != len(input_details):
    raise ValueError('Wrong number of inputs: provided=%s, '
                     'input_details=%s output_details=%s' % (
                         input_ndarrays, input_details, output_details))

  for detail, value in zip(input_details, input_ndarrays):
    interp.set_tensor(detail['index'], value)
  interp.invoke()
  return [interp.get_tensor(detail['index']) for detail in output_details]
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/sparse_ops/__init__.py
ADDED
|
File without changes
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/sparse_ops/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (197 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/sparse_ops/__pycache__/sparse_xent_op_test_base.cpython-310.pyc
ADDED
|
Binary file (11.5 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/kernel_tests/sparse_ops/sparse_xent_op_test_base.py
ADDED
|
@@ -0,0 +1,318 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015-2021 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for SparseSoftmaxCrossEntropyWithLogits op."""
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
from tensorflow.python.eager import backprop as backprop_lib
|
| 20 |
+
from tensorflow.python.eager import context
|
| 21 |
+
from tensorflow.python.framework import config
|
| 22 |
+
from tensorflow.python.framework import constant_op
|
| 23 |
+
from tensorflow.python.framework import dtypes
|
| 24 |
+
from tensorflow.python.framework import errors_impl
|
| 25 |
+
from tensorflow.python.framework import ops as ops_lib
|
| 26 |
+
from tensorflow.python.framework import test_util
|
| 27 |
+
from tensorflow.python.ops import array_ops
|
| 28 |
+
from tensorflow.python.ops import gradient_checker_v2
|
| 29 |
+
from tensorflow.python.ops import nn_ops
|
| 30 |
+
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
|
| 31 |
+
from tensorflow.python.platform import test
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class SparseXentOpTestBase(test.TestCase):
  """Base tests for SparseSoftmaxCrossEntropyWithLogits.

  Several test* methods are thin wrappers around _test* helpers so child
  classes can override the expected messages/gradients (see the wrappers'
  docstrings).
  """

  def _opFwdBwd(self, labels, logits):
    """Runs the op-under-test both forwards and backwards."""
    logits = ops_lib.convert_to_tensor(logits)  # needed for the gradient tape
    with backprop_lib.GradientTape() as tape:
      tape.watch(logits)
      loss = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
          labels=labels, logits=logits)
    return loss, tape.gradient(loss, logits)

  def _npXent(self, labels, logits):
    """NumPy reference: returns (loss, gradient) for sparse labels."""
    # Flatten to a 2-D [batch, classes] problem.
    logits = np.reshape(logits, [-1, logits.shape[-1]])
    labels = np.reshape(labels, [-1])
    batch_dim = 0
    class_dim = 1
    batch_size = logits.shape[batch_dim]
    # Numerically stable softmax: subtract the per-row max before exp.
    e = np.exp(logits -
               np.reshape(np.amax(logits, axis=class_dim), [batch_size, 1]))
    probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
    labels_mat = np.zeros_like(probs).astype(probs.dtype)
    labels_mat[np.arange(batch_size), labels] = 1.0
    gradient = (probs - labels_mat)
    # 1e-20 guards log(0) for zero-probability classes.
    loss = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
    return loss, gradient

  def _testXent(self, np_labels, np_logits):
    """Compares op loss/gradient against the NumPy reference."""
    np_loss, np_gradient = self._npXent(labels=np_labels, logits=np_logits)
    tf_loss, tf_gradient = self._opFwdBwd(labels=np_labels, logits=np_logits)
    self.assertAllCloseAccordingToType(np_loss, tf_loss)
    self.assertAllCloseAccordingToType(np_gradient, tf_gradient)

  def testSingleClass(self):
    """With one class the loss and gradient are identically zero."""
    for label_dtype in np.int32, np.int64:
      tf_loss, tf_gradient = self._opFwdBwd(
          labels=np.array([0, 0, 0]).astype(label_dtype),
          logits=np.array([[1.], [-1.], [0.]]).astype(np.float32))
      self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
      self.assertAllClose([[0.0], [0.0], [0.0]], tf_gradient)

  @test_util.run_gpu_only
  def _testInvalidLabelGPU(self, invalid_label_gradient=np.nan):
    # Labels 4 and -1 are out of range for 4 classes; on GPU the op yields
    # NaN loss/gradient for those rows rather than raising.
    labels = [4, 3, 0, -1]
    logits = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
              [1., 2., 3., 4.]]
    loss, gradient = self._opFwdBwd(labels=labels, logits=logits)
    self.assertAllClose([np.nan, 1.3862, 3.4420, np.nan],
                        loss,
                        rtol=1e-3,
                        atol=1e-3)
    self.assertAllClose(
        [[invalid_label_gradient] * 4, [0.25, 0.25, 0.25, -0.75],
         [-0.968, 0.087, 0.237, 0.6439], [invalid_label_gradient] * 4],
        gradient,
        rtol=1e-3,
        atol=1e-3)

  def testInvalidLabelGPU(self):
    """This method is structured to be easily overridden by a child class."""
    self._testInvalidLabelGPU()

  @test_util.run_in_graph_and_eager_modes(use_gpu=False)
  @test_util.disable_xla("XLA cannot assert inside of a kernel.")
  def _testInvalidLabelCPU(self, expected_regex="Received a label value of"):
    # On CPU an out-of-range label raises instead of producing NaNs.
    labels = [4, 3, 0, -1]
    logits = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
              [1., 2., 3., 4.]]
    with self.assertRaisesRegex(
        (errors_impl.InvalidArgumentError, errors_impl.UnknownError),
        expected_regex):
      self.evaluate(
          nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
              labels=labels, logits=logits))

  def testInvalidLabelCPU(self):
    """This method is structured to be easily overridden by a child class."""
    self._testInvalidLabelCPU()

  def testNpXent(self):
    """Sanity-checks the NumPy reference implementation itself."""
    # We create 2 batches of logits for testing.
    # batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
    # batch 1 has a bit of difference: 1, 2, 3, 4, with target 0.
    labels = [3, 0]
    logits = [[1., 1., 1., 1.], [1., 2., 3., 4.]]

    # For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
    # With a hard target 3, the gradient is [0.25, 0.25, 0.25, -0.75]
    # The loss for this batch is -log(0.25) = 1.386
    #
    # For batch 1, we have:
    # exp(0) = 1
    # exp(1) = 2.718
    # exp(2) = 7.389
    # exp(3) = 20.085
    # SUM = 31.192
    # So we have as probabilities:
    # exp(0) / SUM = 0.032
    # exp(1) / SUM = 0.087
    # exp(2) / SUM = 0.237
    # exp(3) / SUM = 0.644
    # With a hard 1, the gradient is [0.032 - 1.0 = -0.968, 0.087, 0.237, 0.644]
    # The loss for this batch is [1.0 * -log(0.25), 1.0 * -log(0.032)]
    # = [1.3862, 3.4420]
    np_loss, np_gradient = self._npXent(
        labels=np.array(labels), logits=np.array(logits))
    self.assertAllClose(
        np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),
        np_gradient,
        rtol=1.e-3,
        atol=1.e-3)
    self.assertAllClose(
        np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)

  def testShapeMismatch(self):
    """Labels must have exactly one fewer dimension than logits."""
    with self.assertRaisesRegex(
        ValueError, "`labels.shape.rank` must equal `logits.shape.rank - 1`"):
      nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
          labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])

  def testScalar(self):
    """Scalar logits are rejected."""
    with self.assertRaisesRegex(ValueError, "`logits` cannot be a scalar"):
      nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
          labels=constant_op.constant(0), logits=constant_op.constant(1.0))

  def _testLabelsPlaceholderScalar(self, expected_error_message):
    # A placeholder hides the scalar shape until run time, so the failure
    # surfaces as an op error rather than a ValueError.
    with ops_lib.Graph().as_default(), self.session():
      labels = array_ops.placeholder(np.int32)
      y = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
          labels=labels, logits=[[7.]])
      with self.assertRaisesOpError(expected_error_message):
        y.eval(feed_dict={labels: 0})

  def testLabelsPlaceholderScalar(self):
    """This method is structured to be easily overridden by a child class."""
    self._testLabelsPlaceholderScalar(
        expected_error_message="labels must be 1-D")

  def testVector(self):
    """Scalar label with vector logits is accepted and gives zero loss."""
    loss = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
        labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
    self.assertAllClose(0.0, loss)

  def testFloat(self):
    for label_dtype in np.int32, np.int64:
      self._testXent(
          np_labels=np.array([3, 0]).astype(label_dtype),
          np_logits=np.array([[1., 1., 1., 1.], [1., 2., 3.,
                                                 4.]]).astype(np.float32))

  def testDouble(self):
    for label_dtype in np.int32, np.int64:
      self._testXent(
          np_labels=np.array([0, 3]).astype(label_dtype),
          np_logits=np.array([[1., 1., 1., 1.], [1., 2., 3.,
                                                 4.]]).astype(np.float64))

  def testHalf(self):
    for label_dtype in np.int32, np.int64:
      self._testXent(
          np_labels=np.array([3, 0]).astype(label_dtype),
          np_logits=np.array([[1., 1., 1., 1.], [1., 2., 3.,
                                                 4.]]).astype(np.float16))

  def testBfloat16(self):
    for label_dtype in np.int32, np.int64:
      self._testXent(
          np_labels=np.array([3, 0]).astype(label_dtype),
          np_logits=np.array([[1., 1., 1., 1.],
                              [1., 2., 3.,
                               4.]]).astype(dtypes.bfloat16.as_numpy_dtype))

  def testEmpty(self):
    self._testXent(
        np_labels=np.zeros((0,), dtype=np.int32), np_logits=np.zeros((0, 3)))

  @test_util.run_in_graph_and_eager_modes()
  def testGradient(self):
    """Numeric-vs-analytic first-derivative check (no second derivative)."""
    with self.session() as sess:
      labels = constant_op.constant([3, 0, 1], name="labels")
      logits = constant_op.constant(
          [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
          shape=[3, 4],
          dtype=dtypes.float64,
          name="logits")

      def xent(logits):
        # gradient_checker_v2.compute_gradient doesn't take int32/int64.
        # labels must be of type int32/int64, so passing them separately here.
        return nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
            labels=labels, logits=logits, name="xent")

      analytical, numerical = gradient_checker_v2.compute_gradient(
          xent, [logits])

      if not context.executing_eagerly():
        # Check that no extra computation performed. When only first derivative
        # is requested, second derivative must not be computed. So when there is
        # no second derivative, there is no `BatchMatMul` op in the graph.
        op_names = [
            op.op_def.name for op in sess.graph.get_operations() if op.op_def
        ]
        self.assertNotIn("BatchMatMul", op_names)
        self.assertNotIn("BatchMatMulV2", op_names)

      tol = 5e-8
      self.assertAllClose(analytical, numerical, atol=tol, rtol=tol)

  @test_util.run_in_graph_and_eager_modes()
  def testSecondGradient(self):
    """Checks the gradient of the gradient w.r.t. the logits."""
    with self.session() as sess:
      labels = constant_op.constant([3, 0, 1], name="labels")
      logits = constant_op.constant(
          [0.3, 0.4, 0.1, 1.2, 0.1, 1.9, 0.1, 0.7, 0.8, 0.2, 1.3, 1.3],
          shape=[3, 4],
          dtype=dtypes.float64,
          name="logits")

      def xent_grad(logits):
        with backprop_lib.GradientTape() as tape:
          tape.watch(logits)
          return tape.gradient(
              nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
                  labels=labels, logits=logits, name="xent"), [logits])[0]

      analytical, numerical = gradient_checker_v2.compute_gradient(
          xent_grad, [logits])

      if (not context.executing_eagerly() and
          not config.is_op_determinism_enabled()):
        # Check that second derivative is calculated.
        # (it is equivalent to being `BatchMatMul` op in the graph because of
        # implementation of xentropy grad)
        op_names = [
            op.op_def.name for op in sess.graph.get_operations() if op.op_def
        ]
        self.assertIn("BatchMatMulV2", op_names)

      tol = 5e-8
      self.assertAllClose(analytical, numerical, atol=tol, rtol=tol)

  @test_util.run_in_graph_and_eager_modes()
  def _testHighDim(self, labels, logits):
    np_loss, np_gradient = self._npXent(
        labels=np.array(labels), logits=np.array(logits))
    # manually reshape loss
    np_loss = np.reshape(np_loss, np.array(labels).shape)
    tf_loss = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
        labels=labels, logits=logits)
    with backprop_lib.GradientTape() as tape:
      logits = constant_op.constant(logits)
      tape.watch(logits)
      tf_gradient = tape.gradient(
          nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
              labels=labels, logits=logits), [logits])[0]
      tf_gradient = array_ops.reshape(tf_gradient, np_gradient.shape)

    self.assertAllCloseAccordingToType(np_loss, tf_loss)
    self.assertAllCloseAccordingToType(np_gradient, tf_gradient)

  def testHighDim(self):
    labels = [[3], [0]]
    logits = [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]
    self._testHighDim(labels, logits)

  def testHighDim2(self):
    labels = [[3, 2], [0, 3]]
    logits = [[[1., 1., 1., 1.], [2., 2., 2., 2.]],
              [[1., 2., 3., 4.], [5., 6., 7., 8.]]]
    self._testHighDim(labels, logits)

  def _testScalarHandling(self, expected_regex):
    with ops_lib.Graph().as_default(), self.session(use_gpu=False) as sess:
      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  expected_regex):
        labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
        logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
        ce = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
            labels=array_ops.squeeze(labels), logits=logits)
        labels_v2 = np.zeros((1, 1), dtype=np.int32)
        logits_v2 = np.random.randn(1, 3)
        sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})

  def testScalarHandling(self):
    """This method is structured to be easily overridden by a child class."""
    self._testScalarHandling(expected_regex=".*labels must be 1-D.*")
|
videochat2/lib/python3.10/site-packages/tensorflow/python/profiler/internal/_pywrap_traceme.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ef8fd8dd6ce0ba707eda4b604b05848c1c0f3d1b3541d094c53a1cb9a6e5d5ac
|
| 3 |
+
size 220304
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (297 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (752 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__pycache__/bfloat16.cpython-310.pyc
ADDED
|
Binary file (2.39 kB). View file
|
|
|