diff --git a/.gitattributes b/.gitattributes
index 218f87aabe9997857a0e671f344ac168fb0aea08..bfdc59e57505a9547ab87a342d566e28ee3f2f53 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -840,3 +840,5 @@ videochat2/lib/python3.10/site-packages/tensorflow/lite/python/metrics/_pywrap_t
 openflamingo/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
 openflamingo/lib/python3.10/site-packages/nvidia/cusparse/lib/libcusparse.so.11 filter=lfs diff=lfs merge=lfs -text
 videochat2/lib/python3.10/site-packages/tensorflow/python/platform/_pywrap_cpu_feature_guard.so filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/test_util.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/test_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/llava_next/share/terminfo/a/aaa+dec b/llava_next/share/terminfo/a/aaa+dec
new file mode 100644
index 0000000000000000000000000000000000000000..0316cdf46fced10a66c95bac5ca54b4b47f9ec46
Binary files /dev/null and b/llava_next/share/terminfo/a/aaa+dec differ
diff --git a/llava_next/share/terminfo/a/aaa-20 b/llava_next/share/terminfo/a/aaa-20
new file mode 100644
index 0000000000000000000000000000000000000000..c3f1590c48028fba01b9cde3f44491b4da3b91b9
Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-20 differ
diff --git a/llava_next/share/terminfo/a/aaa-30-rv-ctxt b/llava_next/share/terminfo/a/aaa-30-rv-ctxt
new file mode 100644
index 0000000000000000000000000000000000000000..fb8f018f418f862c0d8aa524e2806d7b08594339
Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-30-rv-ctxt differ
diff --git a/llava_next/share/terminfo/a/aaa-40 b/llava_next/share/terminfo/a/aaa-40
new file mode 100644
index 0000000000000000000000000000000000000000..c76c952e9e9dbd3dfbf9c7eab91a6f481b856992
Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-40 differ
diff --git a/llava_next/share/terminfo/a/aaa-60-s-rv b/llava_next/share/terminfo/a/aaa-60-s-rv
new file mode 100644
index 0000000000000000000000000000000000000000..41b6c222ebd2efd3269883fe3224e5e4c6fa0841
Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-60-s-rv differ
diff --git a/llava_next/share/terminfo/a/aaa-unk b/llava_next/share/terminfo/a/aaa-unk
new file mode 100644
index 0000000000000000000000000000000000000000..ba98f3abcd7ba27e2c216d00a0a28323534e9277
Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-unk differ
diff --git a/llava_next/share/terminfo/a/adm1178 b/llava_next/share/terminfo/a/adm1178
new file mode 100644
index 0000000000000000000000000000000000000000..a78743db03a01ccafca832f167635bc03d601fbb
Binary files /dev/null and b/llava_next/share/terminfo/a/adm1178 differ
diff --git a/llava_next/share/terminfo/a/adm3a b/llava_next/share/terminfo/a/adm3a
new file mode 100644
index 0000000000000000000000000000000000000000..a19b9889dcb2290401c3f2802d4616a5bb76481d
Binary files /dev/null and b/llava_next/share/terminfo/a/adm3a differ
diff --git a/llava_next/share/terminfo/a/aepro b/llava_next/share/terminfo/a/aepro
new file mode 100644
index 0000000000000000000000000000000000000000..d8b036ccbb216dec745ae7d7610db44b583c09b2
Binary files /dev/null and b/llava_next/share/terminfo/a/aepro differ
diff --git a/llava_next/share/terminfo/a/aixterm-m-old b/llava_next/share/terminfo/a/aixterm-m-old
new file mode 100644
index 0000000000000000000000000000000000000000..5b47369e480a4567433cb1bb9a957b31a18f4e0b
Binary files /dev/null and b/llava_next/share/terminfo/a/aixterm-m-old differ
diff --git a/llava_next/share/terminfo/a/alacritty b/llava_next/share/terminfo/a/alacritty
new file mode 100644
index 0000000000000000000000000000000000000000..3f8a12143b7b56e402d75eaa6694d05a6e6ac24f
Binary files /dev/null and b/llava_next/share/terminfo/a/alacritty differ
diff --git a/llava_next/share/terminfo/a/alacritty+common b/llava_next/share/terminfo/a/alacritty+common
new file mode 100644
index 0000000000000000000000000000000000000000..5b327d028f787dba425af372046268484c68edcd
Binary files /dev/null and b/llava_next/share/terminfo/a/alacritty+common differ
diff --git a/llava_next/share/terminfo/a/altos7pc b/llava_next/share/terminfo/a/altos7pc
new file mode 100644
index 0000000000000000000000000000000000000000..b8f9c5cb1a6e7645b743b9e662c986e40971d470
Binary files /dev/null and b/llava_next/share/terminfo/a/altos7pc differ
diff --git a/llava_next/share/terminfo/a/ansi+tabs b/llava_next/share/terminfo/a/ansi+tabs
new file mode 100644
index 0000000000000000000000000000000000000000..6ecec5c0565b983bdf58cfdeff0fa7edc49914be
Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+tabs differ
diff --git a/llava_next/share/terminfo/a/ansi-emx b/llava_next/share/terminfo/a/ansi-emx
new file mode 100644
index 0000000000000000000000000000000000000000..837380764870f95940191fe6add38e1cec3ef6be
Binary files /dev/null and b/llava_next/share/terminfo/a/ansi-emx differ
diff --git a/llava_next/share/terminfo/a/ansil b/llava_next/share/terminfo/a/ansil
new file mode 100644
index 0000000000000000000000000000000000000000..60c9bd0f233a4d972ca2c9d5fdce164f475e9423
Binary files /dev/null and b/llava_next/share/terminfo/a/ansil differ
diff --git a/llava_next/share/terminfo/a/ansis b/llava_next/share/terminfo/a/ansis
new file mode 100644
index 0000000000000000000000000000000000000000..4043099f37cef6decd107ceae59ad5c134b69d18
Binary files /dev/null and b/llava_next/share/terminfo/a/ansis differ
diff --git a/llava_next/share/terminfo/a/ansiw b/llava_next/share/terminfo/a/ansiw
new file mode 100644
index 0000000000000000000000000000000000000000..e3621f7c22fb8c7e28d708217db550269bb9d068
Binary files /dev/null and b/llava_next/share/terminfo/a/ansiw differ
diff --git a/llava_next/share/terminfo/a/apple-videx b/llava_next/share/terminfo/a/apple-videx
new file mode 100644
index 0000000000000000000000000000000000000000..9ab2869da1de5b91f62827534e2fffbebf116f35
Binary files /dev/null and b/llava_next/share/terminfo/a/apple-videx differ
diff --git a/llava_next/share/terminfo/a/appleIIc b/llava_next/share/terminfo/a/appleIIc
new file mode 100644
index 0000000000000000000000000000000000000000..1311b90c397a4ccf9d4c22a318cb6fa8b45c16c9
Binary files /dev/null and b/llava_next/share/terminfo/a/appleIIc differ
diff --git a/llava_next/share/terminfo/a/at386 b/llava_next/share/terminfo/a/at386
new file mode 100644
index 0000000000000000000000000000000000000000..3da9542721b9b610a74e7fe73a11d6b912ed5056
Binary files /dev/null and b/llava_next/share/terminfo/a/at386 differ
diff --git a/llava_next/share/terminfo/a/att4415-rv-nl b/llava_next/share/terminfo/a/att4415-rv-nl
new file mode 100644
index 0000000000000000000000000000000000000000..e6dff515d48a8154efc0d0c663f9397dfd195a67
Binary files /dev/null and b/llava_next/share/terminfo/a/att4415-rv-nl differ
diff --git a/llava_next/share/terminfo/a/att4424-1 b/llava_next/share/terminfo/a/att4424-1
new file mode 100644
index 0000000000000000000000000000000000000000..860c736e5b2dfe0e137afd9e5a26dfa69ce81683
Binary files /dev/null and b/llava_next/share/terminfo/a/att4424-1 differ
diff --git a/llava_next/share/terminfo/a/att505-24 b/llava_next/share/terminfo/a/att505-24
new file mode 100644
index 0000000000000000000000000000000000000000..ead33f945ac23d717bf22d18f000b965418d4631
Binary files /dev/null and b/llava_next/share/terminfo/a/att505-24 differ
diff --git a/llava_next/share/terminfo/a/att5320 b/llava_next/share/terminfo/a/att5320
new file mode 100644
index 0000000000000000000000000000000000000000..fd5f22e74a809659b0327acb15aa8a3863a55821
Binary files /dev/null and b/llava_next/share/terminfo/a/att5320 differ
diff --git a/llava_next/share/terminfo/a/att5420+nl b/llava_next/share/terminfo/a/att5420+nl
new file mode 100644
index 0000000000000000000000000000000000000000..b9a8de81d91b5e2ef564b80dcd47cf4ee40510f0
Binary files /dev/null and b/llava_next/share/terminfo/a/att5420+nl differ
diff --git a/llava_next/share/terminfo/a/att5420-w b/llava_next/share/terminfo/a/att5420-w
new file mode 100644
index 0000000000000000000000000000000000000000..dbed57e997eebc80c16a322d7d851b12ab50ba8c
Binary files /dev/null and b/llava_next/share/terminfo/a/att5420-w differ
diff --git a/llava_next/share/terminfo/a/att5420-w-rv-n b/llava_next/share/terminfo/a/att5420-w-rv-n
new file mode 100644
index 0000000000000000000000000000000000000000..3d13a44cf410a9c49ab6d579c0e733e4670c3091
Binary files /dev/null and b/llava_next/share/terminfo/a/att5420-w-rv-n differ
diff --git a/llava_next/share/terminfo/a/att5420_2 b/llava_next/share/terminfo/a/att5420_2
new file mode 100644
index 0000000000000000000000000000000000000000..f46c163524b85cb8bee09228b36659b0b90f6803
Binary files /dev/null and b/llava_next/share/terminfo/a/att5420_2 differ
diff --git a/llava_next/share/terminfo/a/avatar b/llava_next/share/terminfo/a/avatar
new file mode 100644
index 0000000000000000000000000000000000000000..15b4ae0edb0951c27572fb3644beec9b429a1d35
Binary files /dev/null and b/llava_next/share/terminfo/a/avatar differ
diff --git a/llava_next/share/terminfo/a/avt-ns b/llava_next/share/terminfo/a/avt-ns
new file mode 100644
index 0000000000000000000000000000000000000000..67312e8ab6bf43efae9fd535c71a6ede58b77505
Binary files /dev/null and b/llava_next/share/terminfo/a/avt-ns differ
diff --git a/llava_next/share/terminfo/a/avt-w-ns b/llava_next/share/terminfo/a/avt-w-ns
new file mode 100644
index 0000000000000000000000000000000000000000..98ba135d276a350df757a3fe19e73a16b8a3f4ce
Binary files /dev/null and b/llava_next/share/terminfo/a/avt-w-ns differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/test_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/test_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8b87938515df11ecf2e575d0740fc55c8be78ee
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/test_ops.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d91e9c57dba953195dff64d034e0001583166edb237e1a815a41caabce9667d1
+size 131839
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/test_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/test_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55d333c19a95ac0c1f1f6f528dac7a5bb4db20a4
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/test_util.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d850974f9573bed38ae533dce4a37a93aabfef17665ee646f2487407f7741371
+size 134177
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8365f2485eeb95eec0512770d1c747fcf367582d
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Core module for TensorFlow distribution objects and helpers."""
+from tensorflow.python.ops.distributions import distributions
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bernoulli.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bernoulli.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7831f2204e04a3be10a60bcff41aa993dac59fc
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bernoulli.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bijector_impl.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bijector_impl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ddc7104678870599de318ba3e83c828dad6c83f
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bijector_impl.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bijector_test_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bijector_test_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ba3733330a9b730d2f663f1e9aab427c4449704
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bijector_test_util.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/categorical.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/categorical.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..89df7bf1454235cb21528c7e9c0204abb1c1f04f
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/categorical.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84e32e0128f057fb2048ea41ad0a8a013340fabc
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet_multinomial.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet_multinomial.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eccd716c3f18e947ef89751ac0c54c6120ec3995
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet_multinomial.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/distribution.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/distribution.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de52436b1242d1268b7ac40c97f5fa3503ca5c46
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/distribution.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/exponential.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/exponential.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09571074efc52b56761773b37d2e6498eadd90f2
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/exponential.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/kullback_leibler.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/kullback_leibler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8efb67211c6162e86706799f3c72f4dbae4a8f1c
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/kullback_leibler.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/laplace.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/laplace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..00ad2305a2fdb68b830ba979ecffaf916d38eb2a
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/laplace.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/multinomial.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/multinomial.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fdb5095f260d4b83ef76e37446805dc887a2f0d7
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/multinomial.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/normal.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/normal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3e7bd65f142e589628a091adbd90853b5509a297
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/normal.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/special_math.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/special_math.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd30e35f70f41dc416a6e7a83995755f00e0b4f9
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/special_math.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/student_t.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/student_t.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f80b0e72385bc04b16d828793a0efe484c069418
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/student_t.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/uniform.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/uniform.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9565ea422182d161cdad4f152d156ebb31f15b85
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/uniform.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bernoulli.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bernoulli.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e5d875c8cf285f25071edf175b4ecfe1d8f5baa
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bernoulli.py
@@ -0,0 +1,183 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""The Bernoulli distribution class."""
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops.distributions import distribution
+from tensorflow.python.ops.distributions import kullback_leibler
+from tensorflow.python.ops.distributions import util as distribution_util
+from tensorflow.python.util import deprecation
+from tensorflow.python.util.tf_export import tf_export
+
+
+@tf_export(v1=["distributions.Bernoulli"])
+class Bernoulli(distribution.Distribution):
+  """Bernoulli distribution.
+
+  The Bernoulli distribution with `probs` parameter, i.e., the probability of a
+  `1` outcome (vs a `0` outcome).
+  """
+
+  @deprecation.deprecated(
+      "2019-01-01",
+      "The TensorFlow Distributions library has moved to "
+      "TensorFlow Probability "
+      "(https://github.com/tensorflow/probability). You "
+      "should update all references to use `tfp.distributions` "
+      "instead of `tf.distributions`.",
+      warn_once=True)
+  def __init__(self,
+               logits=None,
+               probs=None,
+               dtype=dtypes.int32,
+               validate_args=False,
+               allow_nan_stats=True,
+               name="Bernoulli"):
+    """Construct Bernoulli distributions.
+
+    Args:
+      logits: An N-D `Tensor` representing the log-odds of a `1` event. Each
+        entry in the `Tensor` parametrizes an independent Bernoulli distribution
+        where the probability of an event is sigmoid(logits). Only one of
+        `logits` or `probs` should be passed in.
+      probs: An N-D `Tensor` representing the probability of a `1`
+        event. Each entry in the `Tensor` parameterizes an independent
+        Bernoulli distribution. Only one of `logits` or `probs` should be passed
+        in.
+      dtype: The type of the event samples. Default: `int32`.
+      validate_args: Python `bool`, default `False`. When `True` distribution
+        parameters are checked for validity despite possibly degrading runtime
+        performance. When `False` invalid inputs may silently render incorrect
+        outputs.
+      allow_nan_stats: Python `bool`, default `True`. When `True`,
+        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
+        indicate the result is undefined. When `False`, an exception is raised
+        if one or more of the statistic's batch members are undefined.
+      name: Python `str` name prefixed to Ops created by this class.
+
+    Raises:
+      ValueError: If p and logits are passed, or if neither are passed.
+    """
+    parameters = dict(locals())
+    with ops.name_scope(name) as name:
+      self._logits, self._probs = distribution_util.get_logits_and_probs(
+          logits=logits,
+          probs=probs,
+          validate_args=validate_args,
+          name=name)
+    super(Bernoulli, self).__init__(
+        dtype=dtype,
+        reparameterization_type=distribution.NOT_REPARAMETERIZED,
+        validate_args=validate_args,
+        allow_nan_stats=allow_nan_stats,
+        parameters=parameters,
+        graph_parents=[self._logits, self._probs],
+        name=name)
+
+  @staticmethod
+  def _param_shapes(sample_shape):
+    return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
+
+  @property
+  def logits(self):
+    """Log-odds of a `1` outcome (vs `0`)."""
+    return self._logits
+
+  @property
+  def probs(self):
+    """Probability of a `1` outcome (vs `0`)."""
+    return self._probs
+
+  def _batch_shape_tensor(self):
+    return array_ops.shape(self._logits)
+
+  def _batch_shape(self):
+    return self._logits.get_shape()
+
+  def _event_shape_tensor(self):
+    return array_ops.constant([], dtype=dtypes.int32)
+
+  def _event_shape(self):
+    return tensor_shape.TensorShape([])
+
+  def _sample_n(self, n, seed=None):
+    new_shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
+    uniform = random_ops.random_uniform(
+        new_shape, seed=seed, dtype=self.probs.dtype)
+    sample = math_ops.less(uniform, self.probs)
+    return math_ops.cast(sample, self.dtype)
+
+  def _log_prob(self, event):
+    if self.validate_args:
+      event = distribution_util.embed_check_integer_casting_closed(
+          event, target_dtype=dtypes.bool)
+
+    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
+    # inconsistent behavior for logits = inf/-inf.
+    event = math_ops.cast(event, self.logits.dtype)
+    logits = self.logits
+    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
+    # so we do this here.
+
+    def _broadcast(logits, event):
+      return (array_ops.ones_like(event) * logits,
+              array_ops.ones_like(logits) * event)
+
+    if not (event.get_shape().is_fully_defined() and
+            logits.get_shape().is_fully_defined() and
+            event.get_shape() == logits.get_shape()):
+      logits, event = _broadcast(logits, event)
+    return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits)
+
+  def _entropy(self):
+    return (-self.logits * (math_ops.sigmoid(self.logits) - 1) +  # pylint: disable=invalid-unary-operand-type
+            nn.softplus(-self.logits))  # pylint: disable=invalid-unary-operand-type
+
+  def _mean(self):
+    return array_ops.identity(self.probs)
+
+  def _variance(self):
+    return self._mean() * (1. - self.probs)
+
+  def _mode(self):
+    """Returns `1` if `prob > 0.5` and `0` otherwise."""
+    return math_ops.cast(self.probs > 0.5, self.dtype)
+
+
+@kullback_leibler.RegisterKL(Bernoulli, Bernoulli)
+def _kl_bernoulli_bernoulli(a, b, name=None):
+  """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.
+
+  Args:
+    a: instance of a Bernoulli distribution object.
+    b: instance of a Bernoulli distribution object.
+    name: (optional) Name to use for created operations.
+      default is "kl_bernoulli_bernoulli".
+
+  Returns:
+    Batchwise KL(a || b)
+  """
+  with ops.name_scope(name, "kl_bernoulli_bernoulli",
+                      values=[a.logits, b.logits]):
+    delta_probs0 = nn.softplus(-b.logits) - nn.softplus(-a.logits)
+    delta_probs1 = nn.softplus(b.logits) - nn.softplus(a.logits)
+    return (math_ops.sigmoid(a.logits) * delta_probs0
+            + math_ops.sigmoid(-a.logits) * delta_probs1)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/beta.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/beta.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce89d662cb7792a6a81712f62e687e0a7fcba093
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/beta.py
@@ -0,0 +1,407 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""The Beta distribution class."""
+
+import numpy as np
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops.distributions import distribution
+from tensorflow.python.ops.distributions import kullback_leibler
+from tensorflow.python.ops.distributions import util as distribution_util
+from tensorflow.python.util import deprecation
+from tensorflow.python.util.tf_export import tf_export
+
+
+__all__ = [
+    "Beta",
+    "BetaWithSoftplusConcentration",
+]
+
+
+_beta_sample_note = """Note: `x` must have dtype `self.dtype` and be in
+`[0, 1].` It must have a shape compatible with `self.batch_shape()`."""
+
+
+@tf_export(v1=["distributions.Beta"])
+class Beta(distribution.Distribution):
+  """Beta distribution.
+
+  The Beta distribution is defined over the `(0, 1)` interval using parameters
+  `concentration1` (aka "alpha") and `concentration0` (aka "beta").
+
+  #### Mathematical Details
+
+  The probability density function (pdf) is,
+
+  ```none
+  pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
+  Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
+  ```
+
+  where:
+
+  * `concentration1 = alpha`,
+  * `concentration0 = beta`,
+  * `Z` is the normalization constant, and,
+  * `Gamma` is the [gamma function](
+    https://en.wikipedia.org/wiki/Gamma_function).
+
+  The concentration parameters represent mean total counts of a `1` or a `0`,
+  i.e.,
+
+  ```none
+  concentration1 = alpha = mean * total_concentration
+  concentration0 = beta = (1. - mean) * total_concentration
+  ```
+
+  where `mean` in `(0, 1)` and `total_concentration` is a positive real number
+  representing a mean `total_count = concentration1 + concentration0`.
+
+  Distribution parameters are automatically broadcast in all functions; see
+  examples for details.
+
+  Warning: The samples can be zero due to finite precision.
+  This happens more often when some of the concentrations are very small.
+  Make sure to round the samples to `np.finfo(dtype).tiny` before computing the
+  density.
+
+  Samples of this distribution are reparameterized (pathwise differentiable).
+  The derivatives are computed using the approach described in
+  (Figurnov et al., 2018).
+
+  #### Examples
+
+  ```python
+  import tensorflow_probability as tfp
+  tfd = tfp.distributions
+
+  # Create a batch of three Beta distributions.
+  alpha = [1, 2, 3]
+  beta = [1, 2, 3]
+  dist = tfd.Beta(alpha, beta)
+
+  dist.sample([4, 5])  # Shape [4, 5, 3]
+
+  # `x` has three batch entries, each with two samples.
+  x = [[.1, .4, .5],
+       [.2, .3, .5]]
+  # Calculate the probability of each pair of samples under the corresponding
+  # distribution in `dist`.
+  dist.prob(x)  # Shape [2, 3]
+  ```
+
+  ```python
+  # Create batch_shape=[2, 3] via parameter broadcast:
+  alpha = [[1.], [2]]  # Shape [2, 1]
+  beta = [3., 4, 5]    # Shape [3]
+  dist = tfd.Beta(alpha, beta)
+
+  # alpha broadcast as: [[1., 1, 1,],
+  #                      [2, 2, 2]]
+  # beta broadcast as:  [[3., 4, 5],
+  #                      [3, 4, 5]]
+  # batch_shape [2, 3]
+  dist.sample([4, 5])  # Shape [4, 5, 2, 3]
+
+  x = [.2, .3, .5]
+  # x will be broadcast as [[.2, .3, .5],
+  #                         [.2, .3, .5]],
+  # thus matching batch_shape [2, 3].
+  dist.prob(x)  # Shape [2, 3]
+  ```
+
+  Compute the gradients of samples w.r.t. the parameters:
+
+  ```python
+  alpha = tf.constant(1.0)
+  beta = tf.constant(2.0)
+  dist = tfd.Beta(alpha, beta)
+  samples = dist.sample(5)  # Shape [5]
+  loss = tf.reduce_mean(tf.square(samples))  # Arbitrary loss function
+  # Unbiased stochastic gradients of the loss function
+  grads = tf.gradients(loss, [alpha, beta])
+  ```
+
+  References:
+    Implicit Reparameterization Gradients:
+      [Figurnov et al., 2018]
+      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients)
+      ([pdf]
+      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))
+  """
+
+  @deprecation.deprecated(
+      "2019-01-01",
+      "The TensorFlow Distributions library has moved to "
+      "TensorFlow Probability "
+      "(https://github.com/tensorflow/probability). You "
+      "should update all references to use `tfp.distributions` "
+      "instead of `tf.distributions`.",
+      warn_once=True)
+  def __init__(self,
+               concentration1=None,
+               concentration0=None,
+               validate_args=False,
+               allow_nan_stats=True,
+               name="Beta"):
+    """Initialize a batch of Beta distributions.
+
+    Args:
+      concentration1: Positive floating-point `Tensor` indicating mean
+        number of successes; aka "alpha". Implies `self.dtype` and
+        `self.batch_shape`, i.e.,
+        `concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
+      concentration0: Positive floating-point `Tensor` indicating mean
+        number of failures; aka "beta". Otherwise has same semantics as
+        `concentration1`.
+      validate_args: Python `bool`, default `False`. When `True` distribution
+        parameters are checked for validity despite possibly degrading runtime
+        performance. When `False` invalid inputs may silently render incorrect
+        outputs.
+      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
+        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
+        result is undefined. When `False`, an exception is raised if one or
+        more of the statistic's batch members are undefined.
+      name: Python `str` name prefixed to Ops created by this class.
+    """
+    parameters = dict(locals())
+    with ops.name_scope(name, values=[concentration1, concentration0]) as name:
+      self._concentration1 = self._maybe_assert_valid_concentration(
+          ops.convert_to_tensor(concentration1, name="concentration1"),
+          validate_args)
+      self._concentration0 = self._maybe_assert_valid_concentration(
+          ops.convert_to_tensor(concentration0, name="concentration0"),
+          validate_args)
+      check_ops.assert_same_float_dtype([
+          self._concentration1, self._concentration0])
+      self._total_concentration = self._concentration1 + self._concentration0
+    super(Beta, self).__init__(
+        dtype=self._total_concentration.dtype,
+        validate_args=validate_args,
+        allow_nan_stats=allow_nan_stats,
+        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
+        parameters=parameters,
+        graph_parents=[self._concentration1,
+                       self._concentration0,
+                       self._total_concentration],
+        name=name)
+
+  @staticmethod
+  def _param_shapes(sample_shape):
+    return dict(zip(
+        ["concentration1", "concentration0"],
+        [ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2))
+
+  @property
+  def concentration1(self):
+    """Concentration parameter associated with a `1` outcome."""
+    return self._concentration1
+
+  @property
+  def concentration0(self):
+    """Concentration parameter associated with a `0` outcome."""
+    return self._concentration0
+
+  @property
+  def total_concentration(self):
+    """Sum of concentration parameters."""
+    return self._total_concentration
+
+  def _batch_shape_tensor(self):
+    return array_ops.shape(self.total_concentration)
+
+  def _batch_shape(self):
+    return self.total_concentration.get_shape()
+
+  def _event_shape_tensor(self):
+    return constant_op.constant([], dtype=dtypes.int32)
+
+  def _event_shape(self):
+    return tensor_shape.TensorShape([])
+
+  def _sample_n(self, n, seed=None):
+    expanded_concentration1 = array_ops.ones_like(
+        self.total_concentration, dtype=self.dtype) * self.concentration1
+    expanded_concentration0 = array_ops.ones_like(
+        self.total_concentration, dtype=self.dtype) * self.concentration0
+    gamma1_sample = random_ops.random_gamma(
+        shape=[n],
+        alpha=expanded_concentration1,
+        dtype=self.dtype,
+        seed=seed)
+    gamma2_sample = random_ops.random_gamma(
+        shape=[n],
+        alpha=expanded_concentration0,
+        dtype=self.dtype,
+        seed=distribution_util.gen_new_seed(seed, "beta"))
+    beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
+    return beta_sample
+
+  @distribution_util.AppendDocstring(_beta_sample_note)
+  def _log_prob(self, x):
+    return self._log_unnormalized_prob(x) - self._log_normalization()
+
+  @distribution_util.AppendDocstring(_beta_sample_note)
+  def _prob(self, x):
+    return math_ops.exp(self._log_prob(x))
+
+  @distribution_util.AppendDocstring(_beta_sample_note)
+  def _log_cdf(self, x):
+    return math_ops.log(self._cdf(x))
+
+  @distribution_util.AppendDocstring(_beta_sample_note)
+  def _cdf(self, x):
+    return math_ops.betainc(self.concentration1, self.concentration0, x)
+
+  def _log_unnormalized_prob(self, x):
+    x = self._maybe_assert_valid_sample(x)
+    return (math_ops.xlogy(self.concentration1 - 1., x) +
+            (self.concentration0 - 1.) * math_ops.log1p(-x))  # pylint: disable=invalid-unary-operand-type
+
+  def _log_normalization(self):
+    return (math_ops.lgamma(self.concentration1)
+            + math_ops.lgamma(self.concentration0)
+            - math_ops.lgamma(self.total_concentration))
+
+  def _entropy(self):
+    return (
+        self._log_normalization()
+        - (self.concentration1 - 1.) * math_ops.digamma(self.concentration1)
+        - (self.concentration0 - 1.) * math_ops.digamma(self.concentration0)
+        + ((self.total_concentration - 2.) *
+           math_ops.digamma(self.total_concentration)))
+
+  def _mean(self):
+    return self._concentration1 / self._total_concentration
+
+  def _variance(self):
+    return self._mean() * (1. - self._mean()) / (1. + self.total_concentration)
+
+  @distribution_util.AppendDocstring(
+      """Note: The mode is undefined when `concentration1 <= 1` or
+      `concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
+      is used for undefined modes. If `self.allow_nan_stats` is `False` an
+      exception is raised when one or more modes are undefined.""")
+  def _mode(self):
+    mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
+    if self.allow_nan_stats:
+      nan = array_ops.fill(
+          self.batch_shape_tensor(),
+          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
+          name="nan")
+      is_defined = math_ops.logical_and(self.concentration1 > 1.,
+                                        self.concentration0 > 1.)
+      return array_ops.where_v2(is_defined, mode, nan)
+    return control_flow_ops.with_dependencies([
+        check_ops.assert_less(
+            array_ops.ones([], dtype=self.dtype),
+            self.concentration1,
+            message="Mode undefined for concentration1 <= 1."),
+        check_ops.assert_less(
+            array_ops.ones([], dtype=self.dtype),
+            self.concentration0,
+            message="Mode undefined for concentration0 <= 1.")
+    ], mode)
+
+  def _maybe_assert_valid_concentration(self, concentration, validate_args):
+    """Checks the validity of a concentration parameter."""
+    if not validate_args:
+      return concentration
+    return control_flow_ops.with_dependencies([
+        check_ops.assert_positive(
+            concentration,
+            message="Concentration parameter must be positive."),
+    ], concentration)
+
+  def _maybe_assert_valid_sample(self, x):
+    """Checks the validity of a sample."""
+    if not self.validate_args:
+      return x
+    return control_flow_ops.with_dependencies([
+        check_ops.assert_positive(x, message="sample must be positive"),
+        check_ops.assert_less(
+            x,
+            array_ops.ones([], self.dtype),
+            message="sample must be less than `1`."),
+    ], x)
+
+
+class BetaWithSoftplusConcentration(Beta):
+  """Beta with softplus transform of `concentration1` and `concentration0`."""
+
+  @deprecation.deprecated(
+      "2019-01-01",
+      "Use `tfd.Beta(tf.nn.softplus(concentration1), "
+      "tf.nn.softplus(concentration2))` instead.",
+      warn_once=True)
+  def __init__(self,
+               concentration1,
+               concentration0,
+               validate_args=False,
+               allow_nan_stats=True,
+               name="BetaWithSoftplusConcentration"):
+    parameters = dict(locals())
+    with ops.name_scope(name, values=[concentration1,
+                                      concentration0]) as name:
+      super(BetaWithSoftplusConcentration, self).__init__(
+          concentration1=nn.softplus(concentration1,
+                                     name="softplus_concentration1"),
+          concentration0=nn.softplus(concentration0,
+                                     name="softplus_concentration0"),
+          validate_args=validate_args,
+          allow_nan_stats=allow_nan_stats,
+          name=name)
+    self._parameters = parameters
+
+
+@kullback_leibler.RegisterKL(Beta, Beta)
+def _kl_beta_beta(d1, d2, name=None):
+  """Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
+
+  Args:
+    d1: instance of a Beta distribution object.
+    d2: instance of a Beta distribution object.
+    name: (optional) Name to use for created operations.
+      default is "kl_beta_beta".
+
+  Returns:
+    Batchwise KL(d1 || d2)
+  """
+  def delta(fn, is_property=True):
+    fn1 = getattr(d1, fn)
+    fn2 = getattr(d2, fn)
+    return (fn2 - fn1) if is_property else (fn2() - fn1())
+  with ops.name_scope(name, "kl_beta_beta", values=[
+      d1.concentration1,
+      d1.concentration0,
+      d1.total_concentration,
+      d2.concentration1,
+      d2.concentration0,
+      d2.total_concentration,
+  ]):
+    return (delta("_log_normalization", is_property=False)
+            - math_ops.digamma(d1.concentration1) * delta("concentration1")
+            - math_ops.digamma(d1.concentration0) * delta("concentration0")
+            + (math_ops.digamma(d1.total_concentration)
+               * delta("total_concentration")))
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdf3dde499db2678923bd9b0e85ed7299de295f8
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector.py
@@ -0,0 +1,21 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Bijector base."""
+
+# go/tf-wildcard-import
+# pylint: disable=wildcard-import,unused-import
+from tensorflow.python.ops.distributions.bijector_impl import Bijector
+
+# pylint: enable=wildcard-import,unused-import
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_impl.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..99d7a5ab106578b3ea653fbbeec23c10e40ad292
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_impl.py
@@ -0,0 +1,1113 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Bijector base."""
+
+import abc
+import collections
+import contextlib
+import re
+
+import numpy as np
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops.distributions import util as distribution_util
+from tensorflow.python.util import object_identity
+
+
+__all__ = [
+    "Bijector",
+]
+
+
+class _Mapping(collections.namedtuple(
+    "_Mapping", ["x", "y", "ildj_map", "kwargs"])):
+  """Helper class to make it easier to manage caching in `Bijector`."""
+
+  def __new__(cls, x=None, y=None, ildj_map=None, kwargs=None):
+    """Custom __new__ so namedtuple items have defaults.
+
+    Args:
+      x: `Tensor`. Forward.
+      y: `Tensor`. Inverse.
+      ildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor`
+        representing the inverse log det jacobian.
+      kwargs: Python dictionary. Extra args supplied to
+        forward/inverse/etc functions.
+
+    Returns:
+      mapping: New instance of _Mapping.
+    """
+    return super(_Mapping, cls).__new__(cls, x, y, ildj_map, kwargs)
+
+  @property
+  def x_key(self):
+    """Returns key used for caching Y=g(X)."""
+    return ((object_identity.Reference(self.x),) +
+            self._deep_tuple(tuple(sorted(self.kwargs.items()))))
+
+  @property
+  def y_key(self):
+    """Returns key used for caching X=g^{-1}(Y)."""
+    return ((object_identity.Reference(self.y),) +
+            self._deep_tuple(tuple(sorted(self.kwargs.items()))))
+
+  def merge(self, x=None, y=None, ildj_map=None, kwargs=None, mapping=None):
+    """Returns new _Mapping with args merged with self.
+
+    Args:
+      x: `Tensor`. Forward.
+      y: `Tensor`. Inverse.
+      ildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor`
+        representing the inverse log det jacobian.
+      kwargs: Python dictionary. Extra args supplied to
+        forward/inverse/etc functions.
+      mapping: Instance of _Mapping to merge. Can only be specified if no other
+        arg is specified.
+
+    Returns:
+      mapping: New instance of `_Mapping` which has inputs merged with self.
+
+    Raises:
+      ValueError: if mapping and any other arg is not `None`.
+ """ + if mapping is None: + mapping = _Mapping(x=x, y=y, ildj_map=ildj_map, kwargs=kwargs) + elif any(arg is not None for arg in [x, y, ildj_map, kwargs]): + raise ValueError("Cannot simultaneously specify mapping and individual " + "arguments.") + + return _Mapping( + x=self._merge(self.x, mapping.x), + y=self._merge(self.y, mapping.y), + ildj_map=self._merge_dicts(self.ildj_map, mapping.ildj_map), + kwargs=self._merge(self.kwargs, mapping.kwargs)) + + def _merge_dicts(self, old=None, new=None): + """Helper to merge two dictionaries.""" + old = {} if old is None else old + new = {} if new is None else new + for k, v in new.items(): + val = old.get(k, None) + if val is not None and val is not v: + raise ValueError("Found different value for existing key " + "(key:{} old_value:{} new_value:{}".format( + k, old[k], v)) + old[k] = v + return old + + def _merge(self, old, new): + """Helper to merge which handles merging one value.""" + if old is None: + return new + elif new is not None and old is not new: + raise ValueError("Incompatible values: %s != %s" % (old, new)) + return old + + def _deep_tuple(self, x): + """Converts lists of lists to tuples of tuples.""" + return (tuple(map(self._deep_tuple, x)) + if isinstance(x, (list, tuple)) else x) + + +class Bijector(metaclass=abc.ABCMeta): + r"""Interface for transformations of a `Distribution` sample. + + Bijectors can be used to represent any differentiable and injective + (one to one) function defined on an open subset of `R^n`. Some non-injective + transformations are also supported (see "Non Injective Transforms" below). + + #### Mathematical Details + + A `Bijector` implements a [smooth covering map]( + https://en.wikipedia.org/wiki/Local_diffeomorphism), i.e., a local + diffeomorphism such that every point in the target has a neighborhood evenly + covered by a map ([see also]( + https://en.wikipedia.org/wiki/Covering_space#Covering_of_a_manifold)). + A `Bijector` is used by `TransformedDistribution` but can be generally used + for transforming a `Distribution` generated `Tensor`. A `Bijector` is + characterized by three operations: + + 1. Forward + + Useful for turning one random outcome into another random outcome from a + different distribution. + + 2. Inverse + + Useful for "reversing" a transformation to compute one probability in + terms of another. + + 3. `log_det_jacobian(x)` + + "The log of the absolute value of the determinant of the matrix of all + first-order partial derivatives of the inverse function." + + Useful for inverting a transformation to compute one probability in terms + of another. Geometrically, the Jacobian determinant is the volume of the + transformation and is used to scale the probability. + + We take the absolute value of the determinant before log to avoid NaN + values. Geometrically, a negative determinant corresponds to an + orientation-reversing transformation. It is ok for us to discard the sign + of the determinant because we only integrate everywhere-nonnegative + functions (probability densities) and the correct orientation is always the + one that produces a nonnegative integrand. + + By convention, transformations of random variables are named in terms of the + forward transformation. The forward transformation creates samples, the + inverse is useful for computing probabilities. + + #### Example Uses + + - Basic properties: + + ```python + x = ... # A tensor. + # Evaluate forward transformation. 
+ fwd_x = my_bijector.forward(x) + x == my_bijector.inverse(fwd_x) + x != my_bijector.forward(fwd_x) # Not equal because x != g(g(x)). + ``` + + - Computing a log-likelihood: + + ```python + def transformed_log_prob(bijector, log_prob, x): + return (bijector.inverse_log_det_jacobian(x, event_ndims=0) + + log_prob(bijector.inverse(x))) + ``` + + - Transforming a random outcome: + + ```python + def transformed_sample(bijector, x): + return bijector.forward(x) + ``` + + #### Example Bijectors + + - "Exponential" + + ```none + Y = g(X) = exp(X) + X ~ Normal(0, 1) # Univariate. + ``` + + Implies: + + ```none + g^{-1}(Y) = log(Y) + |Jacobian(g^{-1})(y)| = 1 / y + Y ~ LogNormal(0, 1), i.e., + prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y)) + = (1 / y) Normal(log(y); 0, 1) + ``` + + Here is an example of how one might implement the `Exp` bijector: + + ```python + class Exp(Bijector): + + def __init__(self, validate_args=False, name="exp"): + super(Exp, self).__init__( + validate_args=validate_args, + forward_min_event_ndims=0, + name=name) + + def _forward(self, x): + return math_ops.exp(x) + + def _inverse(self, y): + return math_ops.log(y) + + def _inverse_log_det_jacobian(self, y): + return -self._forward_log_det_jacobian(self._inverse(y)) + + def _forward_log_det_jacobian(self, x): + # Notice that we needn't do any reducing, even when`event_ndims > 0`. + # The base Bijector class will handle reducing for us; it knows how + # to do so because we called `super` `__init__` with + # `forward_min_event_ndims = 0`. + return x + ``` + + - "Affine" + + ```none + Y = g(X) = sqrtSigma * X + mu + X ~ MultivariateNormal(0, I_d) + ``` + + Implies: + + ```none + g^{-1}(Y) = inv(sqrtSigma) * (Y - mu) + |Jacobian(g^{-1})(y)| = det(inv(sqrtSigma)) + Y ~ MultivariateNormal(mu, sqrtSigma) , i.e., + prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y)) + = det(sqrtSigma)^(-d) * + MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d) + ``` + + #### Min_event_ndims and Naming + + Bijectors are named for the dimensionality of data they act on (i.e. without + broadcasting). We can think of bijectors having an intrinsic `min_event_ndims` + , which is the minimum number of dimensions for the bijector act on. For + instance, a Cholesky decomposition requires a matrix, and hence + `min_event_ndims=2`. + + Some examples: + + `AffineScalar: min_event_ndims=0` + `Affine: min_event_ndims=1` + `Cholesky: min_event_ndims=2` + `Exp: min_event_ndims=0` + `Sigmoid: min_event_ndims=0` + `SoftmaxCentered: min_event_ndims=1` + + Note the difference between `Affine` and `AffineScalar`. `AffineScalar` + operates on scalar events, whereas `Affine` operates on vector-valued events. + + More generally, there is a `forward_min_event_ndims` and an + `inverse_min_event_ndims`. In most cases, these will be the same. + However, for some shape changing bijectors, these will be different + (e.g. a bijector which pads an extra dimension at the end, might have + `forward_min_event_ndims=0` and `inverse_min_event_ndims=1`. + + + #### Jacobian Determinant + + The Jacobian determinant is a reduction over `event_ndims - min_event_ndims` + (`forward_min_event_ndims` for `forward_log_det_jacobian` and + `inverse_min_event_ndims` for `inverse_log_det_jacobian`). + To see this, consider the `Exp` `Bijector` applied to a `Tensor` which has + sample, batch, and event (S, B, E) shape semantics. Suppose the `Tensor`'s + partitioned-shape is `(S=[4], B=[2], E=[3, 3])`. 
+  partitioned-shape is `(S=[4], B=[2], E=[3, 3])`. The shape of the `Tensor`
+  returned by `forward` and `inverse` is unchanged, i.e., `[4, 2, 3, 3]`.
+  However the shape returned by `inverse_log_det_jacobian` is `[4, 2]` because
+  the Jacobian determinant is a reduction over the event dimensions.
+
+  Another example is the `Affine` `Bijector`. Because `min_event_ndims = 1`,
+  the Jacobian determinant reduction is over `event_ndims - 1`.
+
+  It is sometimes useful to implement the inverse Jacobian determinant as the
+  negative forward Jacobian determinant. For example,
+
+  ```python
+  def _inverse_log_det_jacobian(self, y):
+    return -self._forward_log_det_jac(self._inverse(y))  # Note negation.
+  ```
+
+  The correctness of this approach can be seen from the following claim.
+
+  - Claim:
+
+      Assume `Y = g(X)` is a bijection whose derivative exists and is nonzero
+      for its domain, i.e., `dY/dX = d/dX g(X) != 0`. Then:
+
+      ```none
+      (log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X)
+      ```
+
+  - Proof:
+
+      From the bijective, nonzero differentiability of `g`, the
+      [inverse function theorem](
+      https://en.wikipedia.org/wiki/Inverse_function_theorem)
+      implies `g^{-1}` is differentiable in the image of `g`.
+      Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields
+      `I = g'(g^{-1}(y))*g^{-1}'(y)`.
+      The same theorem also implies `g^{-1}'` is non-singular therefore:
+      `inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`.
+      The claim follows from [properties of determinant](
+      https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups).
+
+  Generally it's preferable to directly implement the inverse Jacobian
+  determinant. This should have superior numerical stability and will often
+  share subgraphs with the `_inverse` implementation.
+
+  #### Is_constant_jacobian
+
+  Certain bijectors will have constant jacobian matrices. For instance, the
+  `Affine` bijector encodes multiplication by a matrix plus a shift, with
+  jacobian matrix, the same aforementioned matrix.
+
+  `is_constant_jacobian` encodes the fact that the jacobian matrix is constant.
+  The semantics of this argument are the following:
+
+  * Repeated calls to "log_det_jacobian" functions with the same
+    `event_ndims` (but not necessarily same input), will return the first
+    computed jacobian (because the matrix is constant, and hence is input
+    independent).
+  * `log_det_jacobian` implementations are merely broadcastable to the true
+    `log_det_jacobian` (because, again, the jacobian matrix is input
+    independent). Specifically, `log_det_jacobian` is implemented as the
+    log jacobian determinant for a single input.
+
+    ```python
+    class Identity(Bijector):
+
+      def __init__(self, validate_args=False, name="identity"):
+        super(Identity, self).__init__(
+            is_constant_jacobian=True,
+            validate_args=validate_args,
+            forward_min_event_ndims=0,
+            name=name)
+
+      def _forward(self, x):
+        return x
+
+      def _inverse(self, y):
+        return y
+
+      def _inverse_log_det_jacobian(self, y):
+        return -self._forward_log_det_jacobian(self._inverse(y))
+
+      def _forward_log_det_jacobian(self, x):
+        # The full log jacobian determinant would be array_ops.zero_like(x).
+        # However, we circumvent materializing that, since the jacobian
+        # calculation is input independent, and we specify it for one input.
+        return constant_op.constant(0., x.dtype.base_dtype)
+
+    ```
+
+  #### Subclass Requirements
+
+  - Subclasses typically implement:
+
+    - `_forward`,
+    - `_inverse`,
+    - `_inverse_log_det_jacobian`,
+    - `_forward_log_det_jacobian` (optional).
+
+    The `_forward_log_det_jacobian` is called when the bijector is inverted via
+    the `Invert` bijector. If undefined, a slightly less efficient calculation,
+    `-1 * _inverse_log_det_jacobian`, is used.
+
+    If the bijector changes the shape of the input, you must also implement:
+
+      - _forward_event_shape_tensor,
+      - _forward_event_shape (optional),
+      - _inverse_event_shape_tensor,
+      - _inverse_event_shape (optional).
+
+    By default the event-shape is assumed unchanged from input.
+
+  - If the `Bijector`'s use is limited to `TransformedDistribution` (or friends
+    like `QuantizedDistribution`) then depending on your use, you may not need
+    to implement all of `_forward` and `_inverse` functions.
+
+    Examples:
+
+      1. Sampling (e.g., `sample`) only requires `_forward`.
+      2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require
+         `_inverse` (and related).
+      3. Only calling probability functions on the output of `sample` means
+         `_inverse` can be implemented as a cache lookup.
+
+    See "Example Uses" [above] which shows how these functions are used to
+    transform a distribution. (Note: `_forward` could theoretically be
+    implemented as a cache lookup but this would require controlling the
+    underlying sample generation mechanism.)
+
+  #### Non Injective Transforms
+
+  **WARNING** Handling of non-injective transforms is subject to change.
+
+  Non injective maps `g` are supported, provided their domain `D` can be
+  partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that,
+  ignoring sets of measure zero, the restriction of `g` to each subset is a
+  differentiable bijection onto `g(D)`. In particular, this implies that for
+  `y in g(D)`, the set inverse, i.e. `g^{-1}(y) = {x in D : g(x) = y}`, always
+  contains exactly `k` distinct points.
+
+  The property, `_is_injective` is set to `False` to indicate that the bijector
+  is not injective, yet satisfies the above condition.
+
+  The usual bijector API is modified in the case `_is_injective is False` (see
+  method docstrings for specifics). Here we show by example the `AbsoluteValue`
+  bijector. In this case, the domain `D = (-inf, inf)`, can be partitioned
+  into `D1 = (-inf, 0)`, `D2 = {0}`, and `D3 = (0, inf)`. Let `gi` be the
+  restriction of `g` to `Di`, then both `g1` and `g3` are bijections onto
+  `(0, inf)`, with `g1^{-1}(y) = -y`, and `g3^{-1}(y) = y`. We will use
+  `g1` and `g3` to define bijector methods over `D1` and `D3`. `D2 = {0}` is
+  an oddball in that `g2` is one to one, and the derivative is not well defined.
+  Fortunately, when considering transformations of probability densities
+  (e.g. in `TransformedDistribution`), sets of measure zero have no effect in
+  theory, and only a small effect in 32 or 64 bit precision. For that reason,
+  we define `inverse(0)` and `inverse_log_det_jacobian(0)` both as `[0, 0]`,
+  which is convenient and results in a left-semicontinuous pdf.
+
+
+  ```python
+  abs = tfp.distributions.bijectors.AbsoluteValue()
+
+  abs.forward(-1.)
+  ==> 1.
+
+  abs.forward(1.)
+  ==> 1.
+
+  abs.inverse(1.)
+  ==> (-1., 1.)
+
+  # The |dX/dY| is constant, == 1. So Log|dX/dY| == 0.
+  abs.inverse_log_det_jacobian(1., event_ndims=0)
+  ==> (0., 0.)
+
+  # Special case handling of 0.
+  abs.inverse(0.)
+  ==> (0., 0.)
+
+  abs.inverse_log_det_jacobian(0., event_ndims=0)
+  ==> (0., 0.)
+  ```
+
+  """
+
+  @abc.abstractmethod
+  def __init__(self,
+               graph_parents=None,
+               is_constant_jacobian=False,
+               validate_args=False,
+               dtype=None,
+               forward_min_event_ndims=None,
+               inverse_min_event_ndims=None,
+               name=None):
+    """Constructs Bijector.
+
+    A `Bijector` transforms random variables into new random variables.
+
+    Examples:
+
+    ```python
+    # Create the Y = g(X) = X transform.
+    identity = Identity()
+
+    # Create the Y = g(X) = exp(X) transform.
+    exp = Exp()
+    ```
+
+    See `Bijector` subclass docstring for more details and specific examples.
+
+    Args:
+      graph_parents: Python list of graph prerequisites of this `Bijector`.
+      is_constant_jacobian: Python `bool` indicating that the Jacobian matrix is
+        not a function of the input.
+      validate_args: Python `bool`, default `False`. Whether to validate input
+        with asserts. If `validate_args` is `False`, and the inputs are invalid,
+        correct behavior is not guaranteed.
+      dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
+        enforced.
+      forward_min_event_ndims: Python `integer` indicating the minimum number of
+        dimensions `forward` operates on.
+      inverse_min_event_ndims: Python `integer` indicating the minimum number of
+        dimensions `inverse` operates on. Will be set to
+        `forward_min_event_ndims` by default, if no value is provided.
+      name: The name to give Ops created by the initializer.
+
+    Raises:
+      ValueError: If neither `forward_min_event_ndims` nor
+        `inverse_min_event_ndims` is specified, or if either of them is
+        negative.
+      ValueError: If a member of `graph_parents` is not a `Tensor`.
+    """
+    self._graph_parents = graph_parents or []
+
+    if forward_min_event_ndims is None and inverse_min_event_ndims is None:
+      raise ValueError("Must specify at least one of `forward_min_event_ndims` "
+                       "and `inverse_min_event_ndims`.")
+    elif inverse_min_event_ndims is None:
+      inverse_min_event_ndims = forward_min_event_ndims
+    elif forward_min_event_ndims is None:
+      forward_min_event_ndims = inverse_min_event_ndims
+
+    if not isinstance(forward_min_event_ndims, int):
+      raise TypeError("Expected forward_min_event_ndims to be of "
+                      "type int, got {}".format(
+                          type(forward_min_event_ndims).__name__))
+
+    if not isinstance(inverse_min_event_ndims, int):
+      raise TypeError("Expected inverse_min_event_ndims to be of "
+                      "type int, got {}".format(
+                          type(inverse_min_event_ndims).__name__))
+
+    if forward_min_event_ndims < 0:
+      raise ValueError("forward_min_event_ndims must be a non-negative "
+                       "integer.")
+    if inverse_min_event_ndims < 0:
+      raise ValueError("inverse_min_event_ndims must be a non-negative "
+                       "integer.")
+
+    self._forward_min_event_ndims = forward_min_event_ndims
+    self._inverse_min_event_ndims = inverse_min_event_ndims
+    self._is_constant_jacobian = is_constant_jacobian
+    self._constant_ildj_map = {}
+    self._validate_args = validate_args
+    self._dtype = dtype
+    # These dicts can only be accessed using _Mapping.x_key or _Mapping.y_key
+    self._from_y = {}
+    self._from_x = {}
+    if name:
+      self._name = name
+    else:
+      # We want the default convention to be snake_case rather than CamelCase
+      # since `Chain` uses bijector.name as the kwargs dictionary key.
+      def camel_to_snake(name):
+        s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
+        return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
+      self._name = camel_to_snake(type(self).__name__.lstrip("_"))
+
+    for i, t in enumerate(self._graph_parents):
+      if t is None or not tensor_util.is_tf_type(t):
+        raise ValueError("Graph parent item %d is not a Tensor; %s."
% (i, t)) + + @property + def graph_parents(self): + """Returns this `Bijector`'s graph_parents as a Python list.""" + return self._graph_parents + + @property + def forward_min_event_ndims(self): + """Returns the minimal number of dimensions bijector.forward operates on.""" + return self._forward_min_event_ndims + + @property + def inverse_min_event_ndims(self): + """Returns the minimal number of dimensions bijector.inverse operates on.""" + return self._inverse_min_event_ndims + + @property + def is_constant_jacobian(self): + """Returns true iff the Jacobian matrix is not a function of x. + + Note: Jacobian matrix is either constant for both forward and inverse or + neither. + + Returns: + is_constant_jacobian: Python `bool`. + """ + return self._is_constant_jacobian + + @property + def _is_injective(self): + """Returns true iff the forward map `g` is injective (one-to-one function). + + **WARNING** This hidden property and its behavior are subject to change. + + Note: Non-injective maps `g` are supported, provided their domain `D` can + be partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that, + ignoring sets of measure zero, the restriction of `g` to each subset is a + differentiable bijection onto `g(D)`. + + Returns: + is_injective: Python `bool`. + """ + return True + + @property + def validate_args(self): + """Returns True if Tensor arguments will be validated.""" + return self._validate_args + + @property + def dtype(self): + """dtype of `Tensor`s transformable by this distribution.""" + return self._dtype + + @property + def name(self): + """Returns the string name of this `Bijector`.""" + return self._name + + def _forward_event_shape_tensor(self, input_shape): + """Subclass implementation for `forward_event_shape_tensor` function.""" + # By default, we assume event_shape is unchanged. + return input_shape + + def forward_event_shape_tensor(self, + input_shape, + name="forward_event_shape_tensor"): + """Shape of a single sample from a single batch as an `int32` 1D `Tensor`. + + Args: + input_shape: `Tensor`, `int32` vector indicating event-portion shape + passed into `forward` function. + name: name to give to the op + + Returns: + forward_event_shape_tensor: `Tensor`, `int32` vector indicating + event-portion shape after applying `forward`. + """ + with self._name_scope(name, [input_shape]): + input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32, + name="input_shape") + return self._forward_event_shape_tensor(input_shape) + + def _forward_event_shape(self, input_shape): + """Subclass implementation for `forward_event_shape` public function.""" + # By default, we assume event_shape is unchanged. + return input_shape + + def forward_event_shape(self, input_shape): + """Shape of a single sample from a single batch as a `TensorShape`. + + Same meaning as `forward_event_shape_tensor`. May be only partially defined. + + Args: + input_shape: `TensorShape` indicating event-portion shape passed into + `forward` function. + + Returns: + forward_event_shape_tensor: `TensorShape` indicating event-portion shape + after applying `forward`. Possibly unknown. + """ + return self._forward_event_shape(tensor_shape.TensorShape(input_shape)) + + def _inverse_event_shape_tensor(self, output_shape): + """Subclass implementation for `inverse_event_shape_tensor` function.""" + # By default, we assume event_shape is unchanged. 
+    return output_shape
+
+  def inverse_event_shape_tensor(self,
+                                 output_shape,
+                                 name="inverse_event_shape_tensor"):
+    """Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
+
+    Args:
+      output_shape: `Tensor`, `int32` vector indicating event-portion shape
+        passed into `inverse` function.
+      name: name to give to the op
+
+    Returns:
+      inverse_event_shape_tensor: `Tensor`, `int32` vector indicating
+        event-portion shape after applying `inverse`.
+    """
+    with self._name_scope(name, [output_shape]):
+      output_shape = ops.convert_to_tensor(output_shape, dtype=dtypes.int32,
+                                           name="output_shape")
+      return self._inverse_event_shape_tensor(output_shape)
+
+  def _inverse_event_shape(self, output_shape):
+    """Subclass implementation for `inverse_event_shape` public function."""
+    # By default, we assume event_shape is unchanged.
+    return tensor_shape.TensorShape(output_shape)
+
+  def inverse_event_shape(self, output_shape):
+    """Shape of a single sample from a single batch as a `TensorShape`.
+
+    Same meaning as `inverse_event_shape_tensor`. May be only partially defined.
+
+    Args:
+      output_shape: `TensorShape` indicating event-portion shape passed into
+        `inverse` function.
+
+    Returns:
+      inverse_event_shape_tensor: `TensorShape` indicating event-portion shape
+        after applying `inverse`. Possibly unknown.
+    """
+    return self._inverse_event_shape(output_shape)
+
+  def _forward(self, x):
+    """Subclass implementation for `forward` public function."""
+    raise NotImplementedError("forward not implemented.")
+
+  def _call_forward(self, x, name, **kwargs):
+    with self._name_scope(name, [x]):
+      x = ops.convert_to_tensor(x, name="x")
+      self._maybe_assert_dtype(x)
+      if not self._is_injective:  # No caching for non-injective
+        return self._forward(x, **kwargs)
+      mapping = self._lookup(x=x, kwargs=kwargs)
+      if mapping.y is not None:
+        return mapping.y
+      mapping = mapping.merge(y=self._forward(x, **kwargs))
+      self._cache(mapping)
+      return mapping.y
+
+  def forward(self, x, name="forward"):
+    """Returns the forward `Bijector` evaluation, i.e., Y = g(X).
+
+    Args:
+      x: `Tensor`. The input to the "forward" evaluation.
+      name: The name to give this op.
+
+    Returns:
+      `Tensor`.
+
+    Raises:
+      TypeError: if `self.dtype` is specified and `x.dtype` is not
+        `self.dtype`.
+      NotImplementedError: if `_forward` is not implemented.
+    """
+    return self._call_forward(x, name)
+
+  def _inverse(self, y):
+    """Subclass implementation for `inverse` public function."""
+    raise NotImplementedError("inverse not implemented.")
+
+  def _call_inverse(self, y, name, **kwargs):
+    with self._name_scope(name, [y]):
+      y = ops.convert_to_tensor(y, name="y")
+      self._maybe_assert_dtype(y)
+      if not self._is_injective:  # No caching for non-injective
+        return self._inverse(y, **kwargs)
+      mapping = self._lookup(y=y, kwargs=kwargs)
+      if mapping.x is not None:
+        return mapping.x
+      mapping = mapping.merge(x=self._inverse(y, **kwargs))
+      self._cache(mapping)
+      return mapping.x
+
+  def inverse(self, y, name="inverse"):
+    """Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
+
+    Args:
+      y: `Tensor`. The input to the "inverse" evaluation.
+      name: The name to give this op.
+
+    Returns:
+      `Tensor`, if this bijector is injective.
+        If not injective, returns the k-tuple containing the unique
+        `k` points `(x1, ..., xk)` such that `g(xi) = y`.
+
+    Raises:
+      TypeError: if `self.dtype` is specified and `y.dtype` is not
+        `self.dtype`.
+      NotImplementedError: if `_inverse` is not implemented.
+ """ + return self._call_inverse(y, name) + + def _inverse_log_det_jacobian(self, y): + """Subclass implementation of `inverse_log_det_jacobian` public function. + + In particular, this method differs from the public function, in that it + does not take `event_ndims`. Thus, this implements the minimal Jacobian + determinant calculation (i.e. over `inverse_min_event_ndims`). + + Args: + y: `Tensor`. The input to the "inverse_log_det_jacobian" evaluation. + Returns: + inverse_log_det_jacobian: `Tensor`, if this bijector is injective. + If not injective, returns the k-tuple containing jacobians for the + unique `k` points `(x1, ..., xk)` such that `g(xi) = y`. + """ + raise NotImplementedError("inverse_log_det_jacobian not implemented.") + + def _call_inverse_log_det_jacobian(self, y, event_ndims, name, **kwargs): + with self._name_scope(name, [y]): + if event_ndims in self._constant_ildj_map: + return self._constant_ildj_map[event_ndims] + y = ops.convert_to_tensor(y, name="y") + self._maybe_assert_dtype(y) + with ops.control_dependencies(self._check_valid_event_ndims( + min_event_ndims=self.inverse_min_event_ndims, + event_ndims=event_ndims)): + if not self._is_injective: # No caching for non-injective + try: + ildjs = self._inverse_log_det_jacobian(y, **kwargs) + return tuple(self._reduce_jacobian_det_over_event( + y, ildj, self.inverse_min_event_ndims, event_ndims) + for ildj in ildjs) + except NotImplementedError as original_exception: + try: + x = self._inverse(y, **kwargs) + fldjs = self._forward_log_det_jacobian(x, **kwargs) + return tuple(self._reduce_jacobian_det_over_event( + x, -fldj, self.forward_min_event_ndims, event_ndims) + for fldj in fldjs) + except NotImplementedError: + raise original_exception + + mapping = self._lookup(y=y, kwargs=kwargs) + if mapping.ildj_map is not None and event_ndims in mapping.ildj_map: + return mapping.ildj_map[event_ndims] + try: + x = None # Not needed; leave cache as is. + ildj = self._inverse_log_det_jacobian(y, **kwargs) + ildj = self._reduce_jacobian_det_over_event( + y, ildj, self.inverse_min_event_ndims, event_ndims) + except NotImplementedError as original_exception: + try: + x = (mapping.x if mapping.x is not None + else self._inverse(y, **kwargs)) + ildj = -self._forward_log_det_jacobian(x, **kwargs) + ildj = self._reduce_jacobian_det_over_event( + x, ildj, self.forward_min_event_ndims, event_ndims) + except NotImplementedError: + raise original_exception + + mapping = mapping.merge(x=x, ildj_map={event_ndims: ildj}) + self._cache(mapping) + if self.is_constant_jacobian: + self._constant_ildj_map[event_ndims] = ildj + return ildj + + def inverse_log_det_jacobian( + self, y, event_ndims, name="inverse_log_det_jacobian"): + """Returns the (log o det o Jacobian o inverse)(y). + + Mathematically, returns: `log(det(dX/dY))(Y)`. (Recall that: `X=g^{-1}(Y)`.) + + Note that `forward_log_det_jacobian` is the negative of this function, + evaluated at `g^{-1}(y)`. + + Args: + y: `Tensor`. The input to the "inverse" Jacobian determinant evaluation. + event_ndims: Number of dimensions in the probabilistic events being + transformed. Must be greater than or equal to + `self.inverse_min_event_ndims`. The result is summed over the final + dimensions to produce a scalar Jacobian determinant for each event, + i.e. it has shape `y.shape.ndims - event_ndims` dimensions. + name: The name to give this op. + + Returns: + `Tensor`, if this bijector is injective. 
+        If not injective, returns the tuple of local log det
+        Jacobians, `log(det(Dg_i^{-1}(y)))`, where `g_i` is the restriction
+        of `g` to the `ith` partition `Di`.
+
+    Raises:
+      TypeError: if `self.dtype` is specified and `y.dtype` is not
+        `self.dtype`.
+      NotImplementedError: if `_inverse_log_det_jacobian` is not implemented.
+    """
+    return self._call_inverse_log_det_jacobian(y, event_ndims, name)
+
+  def _forward_log_det_jacobian(self, x):
+    """Subclass implementation of `forward_log_det_jacobian` public function.
+
+    In particular, this method differs from the public function, in that it
+    does not take `event_ndims`. Thus, this implements the minimal Jacobian
+    determinant calculation (i.e. over `forward_min_event_ndims`).
+
+    Args:
+      x: `Tensor`. The input to the "forward_log_det_jacobian" evaluation.
+
+    Returns:
+      forward_log_det_jacobian: `Tensor`, if this bijector is injective.
+        If not injective, returns the k-tuple containing jacobians for the
+        unique `k` points `(x1, ..., xk)` such that `g(xi) = y`.
+    """
+
+    raise NotImplementedError(
+        "forward_log_det_jacobian not implemented.")
+
+  def _call_forward_log_det_jacobian(self, x, event_ndims, name, **kwargs):
+    if not self._is_injective:
+      raise NotImplementedError(
+          "forward_log_det_jacobian cannot be implemented for non-injective "
+          "transforms.")
+    with self._name_scope(name, [x]):
+      with ops.control_dependencies(self._check_valid_event_ndims(
+          min_event_ndims=self.forward_min_event_ndims,
+          event_ndims=event_ndims)):
+        if event_ndims in self._constant_ildj_map:
+          # Need "-1. *" to avoid invalid-unary-operand-type linter warning.
+          return -1. * self._constant_ildj_map[event_ndims]
+        x = ops.convert_to_tensor(x, name="x")
+        self._maybe_assert_dtype(x)
+        if not self._is_injective:  # No caching for non-injective
+          try:
+            fldjs = self._forward_log_det_jacobian(x, **kwargs)  # No caching.
+            return tuple(self._reduce_jacobian_det_over_event(
+                x, fldj, self.forward_min_event_ndims, event_ndims)
+                         for fldj in fldjs)
+          except NotImplementedError as original_exception:
+            try:
+              y = self._forward(x, **kwargs)
+              ildjs = self._inverse_log_det_jacobian(y, **kwargs)
+              return tuple(self._reduce_jacobian_det_over_event(
+                  y, -ildj, self.inverse_min_event_ndims, event_ndims)
+                           for ildj in ildjs)
+            except NotImplementedError:
+              raise original_exception
+        mapping = self._lookup(x=x, kwargs=kwargs)
+        if mapping.ildj_map is not None and event_ndims in mapping.ildj_map:
+          return -mapping.ildj_map[event_ndims]
+        try:
+          y = None  # Not needed; leave cache as is.
+          ildj = -self._forward_log_det_jacobian(x, **kwargs)
+          ildj = self._reduce_jacobian_det_over_event(
+              x, ildj, self.forward_min_event_ndims, event_ndims)
+        except NotImplementedError as original_exception:
+          try:
+            y = (mapping.y if mapping.y is not None
+                 else self._forward(x, **kwargs))
+            ildj = self._inverse_log_det_jacobian(y, **kwargs)
+            ildj = self._reduce_jacobian_det_over_event(
+                y, ildj, self.inverse_min_event_ndims, event_ndims)
+          except NotImplementedError:
+            raise original_exception
+        mapping = mapping.merge(y=y, ildj_map={event_ndims: ildj})
+        self._cache(mapping)
+        if self.is_constant_jacobian:
+          self._constant_ildj_map[event_ndims] = ildj
+        return -ildj
+
+  def forward_log_det_jacobian(
+      self, x, event_ndims, name="forward_log_det_jacobian"):
+    """Returns the forward log det Jacobian, i.e., `log(det(dY/dX))(X)`.
+
+    Args:
+      x: `Tensor`. The input to the "forward" Jacobian determinant evaluation.
+      event_ndims: Number of dimensions in the probabilistic events being
+        transformed.
Must be greater than or equal to
+        `self.forward_min_event_ndims`. The result is summed over the final
+        dimensions to produce a scalar Jacobian determinant for each event,
+        i.e., it has `x.shape.ndims - event_ndims` dimensions.
+      name: The name to give this op.
+
+    Returns:
+      `Tensor`, if this bijector is injective.
+        If not injective this is not implemented.
+
+    Raises:
+      TypeError: if `self.dtype` is specified and `x.dtype` is not
+        `self.dtype`.
+      NotImplementedError: if neither `_forward_log_det_jacobian`
+        nor {`_inverse`, `_inverse_log_det_jacobian`} is implemented, or
+        this is a non-injective bijector.
+    """
+    return self._call_forward_log_det_jacobian(x, event_ndims, name)
+
+  @contextlib.contextmanager
+  def _name_scope(self, name=None, values=None):
+    """Helper function to standardize op scope."""
+    with ops.name_scope(self.name):
+      with ops.name_scope(
+          name, values=(values or []) + self.graph_parents) as scope:
+        yield scope
+
+  def _maybe_assert_dtype(self, x):
+    """Helper to check dtype when self.dtype is known."""
+    if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:
+      raise TypeError("Input had dtype %s but expected %s." %
+                      (x.dtype, self.dtype))
+
+  def _cache(self, mapping):
+    """Helper which stores mapping info in forward/inverse dicts."""
+    # Merging from lookup is an added check that we're not overwriting anything
+    # which is not None.
+    mapping = mapping.merge(mapping=self._lookup(
+        mapping.x, mapping.y, mapping.kwargs))
+    if mapping.x is None and mapping.y is None:
+      raise ValueError("Caching expects at least one of (x,y) to be known, "
+                       "i.e., not None.")
+    self._from_x[mapping.x_key] = mapping
+    self._from_y[mapping.y_key] = mapping
+
+  def _lookup(self, x=None, y=None, kwargs=None):
+    """Helper which retrieves mapping info from forward/inverse dicts."""
+    mapping = _Mapping(x=x, y=y, kwargs=kwargs)
+    # Since _cache requires both x,y to be set, we only need to do one cache
+    # lookup since the mapping is always in both or neither.
+    if mapping.x is not None:
+      return self._from_x.get(mapping.x_key, mapping)
+    if mapping.y is not None:
+      return self._from_y.get(mapping.y_key, mapping)
+    return mapping
+
+  def _reduce_jacobian_det_over_event(
+      self, y, ildj, min_event_ndims, event_ndims):
+    """Reduce jacobian over event_ndims - min_event_ndims."""
+    # In this case, we need to tile the Jacobian over the event and reduce.
+    y_rank = array_ops.rank(y)
+    y_shape = array_ops.shape(y)[
+        y_rank - event_ndims : y_rank - min_event_ndims]
+
+    ones = array_ops.ones(y_shape, ildj.dtype)
+    reduced_ildj = math_ops.reduce_sum(
+        ones * ildj,
+        axis=self._get_event_reduce_dims(min_event_ndims, event_ndims))
+    # The multiplication by ones can change the inferred static shape so we try
+    # to recover as much as possible.
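+    # (Concretely: when `event_ndims` and the ranks of `y` and `ildj` are all
+    # statically known, we recompute the broadcast static shape of the reduced
+    # result and re-attach it with `set_shape` below.)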
+    event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
+    if (event_ndims_ is not None and
+        y.shape.ndims is not None and
+        ildj.shape.ndims is not None):
+      y_shape = y.shape[y.shape.ndims - event_ndims_ :
+                        y.shape.ndims - min_event_ndims]
+      broadcast_shape = array_ops.broadcast_static_shape(ildj.shape, y_shape)
+      reduced_ildj.set_shape(
+          broadcast_shape[: broadcast_shape.ndims - (
+              event_ndims_ - min_event_ndims)])
+
+    return reduced_ildj
+
+  def _get_event_reduce_dims(self, min_event_ndims, event_ndims):
+    """Compute the reduction dimensions given event_ndims."""
+    event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
+
+    if event_ndims_ is not None:
+      return [-index for index in range(1, event_ndims_ - min_event_ndims + 1)]
+    else:
+      reduce_ndims = event_ndims - min_event_ndims
+      return math_ops.range(-reduce_ndims, 0)
+
+  def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
+    """Check whether event_ndims is at least min_event_ndims."""
+    event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
+    event_ndims_ = tensor_util.constant_value(event_ndims)
+    assertions = []
+
+    if not event_ndims.dtype.is_integer:
+      raise ValueError("Expected integer dtype, got dtype {}".format(
+          event_ndims.dtype))
+
+    if event_ndims_ is not None:
+      if event_ndims.shape.ndims != 0:
+        raise ValueError("Expected scalar event_ndims, got shape {}".format(
+            event_ndims.shape))
+      if min_event_ndims > event_ndims_:
+        raise ValueError("event_ndims ({}) must be at least "
+                         "min_event_ndims ({})".format(
+                             event_ndims_, min_event_ndims))
+    elif self.validate_args:
+      assertions += [
+          check_ops.assert_greater_equal(event_ndims, min_event_ndims)]
+
+    if event_ndims.shape.is_fully_defined():
+      if event_ndims.shape.ndims != 0:
+        raise ValueError("Expected scalar shape, got ndims {}".format(
+            event_ndims.shape.ndims))
+
+    elif self.validate_args:
+      assertions += [
+          check_ops.assert_rank(event_ndims, 0, message="Expected scalar.")]
+    return assertions
+
+  def _maybe_get_static_event_ndims(self, event_ndims):
+    """Helper which tries to return a static integer value for `event_ndims`."""
+    event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
+
+    if isinstance(event_ndims_, (np.generic, np.ndarray)):
+      if event_ndims_.dtype not in (np.int32, np.int64):
+        raise ValueError("Expected integer dtype, got dtype {}".format(
+            event_ndims_.dtype))
+
+      if isinstance(event_ndims_, np.ndarray) and len(event_ndims_.shape):
+        raise ValueError("Expected a scalar integer, got {}".format(
+            event_ndims_))
+      event_ndims_ = int(event_ndims_)
+
+    return event_ndims_
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_test_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_test_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..48433c568e60205ab4bb4625986baa14ee98af1c
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_test_util.py
@@ -0,0 +1,221 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Bijector unit-test utilities.""" + +import numpy as np + +from tensorflow.python.framework import ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.distributions import uniform as uniform_lib + + +def assert_finite(array): + if not np.isfinite(array).all(): + raise AssertionError("array was not all finite. %s" % array[:15]) + + +def assert_strictly_increasing(array): + np.testing.assert_array_less(0., np.diff(array)) + + +def assert_strictly_decreasing(array): + np.testing.assert_array_less(np.diff(array), 0.) + + +def assert_strictly_monotonic(array): + if array[0] < array[-1]: + assert_strictly_increasing(array) + else: + assert_strictly_decreasing(array) + + +def assert_scalar_congruency(bijector, + lower_x, + upper_x, + n=int(10e3), + rtol=0.01, + sess=None): + """Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent. + + We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the + `bijector` in order to check that: + + 1. the forward is strictly monotonic. + 2. the forward/inverse methods are inverses of each other. + 3. the jacobian is the correct change of measure. + + This can only be used for a Bijector mapping open subsets of the real line + to themselves. This is due to the fact that this test compares the `prob` + before/after transformation with the Lebesgue measure on the line. + + Args: + bijector: Instance of Bijector + lower_x: Python scalar. + upper_x: Python scalar. Must have `lower_x < upper_x`, and both must be in + the domain of the `bijector`. The `bijector` should probably not produce + huge variation in values in the interval `(lower_x, upper_x)`, or else + the variance based check of the Jacobian will require small `rtol` or + huge `n`. + n: Number of samples to draw for the checks. + rtol: Positive number. Used for the Jacobian check. + sess: `tf.compat.v1.Session`. Defaults to the default session. + + Raises: + AssertionError: If tests fail. + """ + # Checks and defaults. + if sess is None: + sess = ops.get_default_session() + + # Should be monotonic over this interval + ten_x_pts = np.linspace(lower_x, upper_x, num=10).astype(np.float32) + if bijector.dtype is not None: + ten_x_pts = ten_x_pts.astype(bijector.dtype.as_numpy_dtype) + forward_on_10_pts = bijector.forward(ten_x_pts) + + # Set the lower/upper limits in the range of the bijector. + lower_y, upper_y = sess.run( + [bijector.forward(lower_x), bijector.forward(upper_x)]) + if upper_y < lower_y: # If bijector.forward is a decreasing function. + lower_y, upper_y = upper_y, lower_y + + # Uniform samples from the domain, range. + uniform_x_samps = uniform_lib.Uniform( + low=lower_x, high=upper_x).sample(n, seed=0) + uniform_y_samps = uniform_lib.Uniform( + low=lower_y, high=upper_y).sample(n, seed=1) + + # These compositions should be the identity. 
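+  # (That is, inverse(forward(x)) should reproduce x on the domain and
+  # forward(inverse(y)) should reproduce y on the range, up to the numerical
+  # tolerances asserted below.)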
+ inverse_forward_x = bijector.inverse(bijector.forward(uniform_x_samps)) + forward_inverse_y = bijector.forward(bijector.inverse(uniform_y_samps)) + + # For a < b, and transformation y = y(x), + # (b - a) = \int_a^b dx = \int_{y(a)}^{y(b)} |dx/dy| dy + # "change_measure_dy_dx" below is a Monte Carlo approximation to the right + # hand side, which should then be close to the left, which is (b - a). + # We assume event_ndims=0 because we assume scalar -> scalar. The log_det + # methods will handle whether they expect event_ndims > 0. + dy_dx = math_ops.exp(bijector.inverse_log_det_jacobian( + uniform_y_samps, event_ndims=0)) + # E[|dx/dy|] under Uniform[lower_y, upper_y] + # = \int_{y(a)}^{y(b)} |dx/dy| dP(u), where dP(u) is the uniform measure + expectation_of_dy_dx_under_uniform = math_ops.reduce_mean(dy_dx) + # dy = dP(u) * (upper_y - lower_y) + change_measure_dy_dx = ( + (upper_y - lower_y) * expectation_of_dy_dx_under_uniform) + + # We'll also check that dy_dx = 1 / dx_dy. + dx_dy = math_ops.exp( + bijector.forward_log_det_jacobian( + bijector.inverse(uniform_y_samps), event_ndims=0)) + + [ + forward_on_10_pts_v, + dy_dx_v, + dx_dy_v, + change_measure_dy_dx_v, + uniform_x_samps_v, + uniform_y_samps_v, + inverse_forward_x_v, + forward_inverse_y_v, + ] = sess.run([ + forward_on_10_pts, + dy_dx, + dx_dy, + change_measure_dy_dx, + uniform_x_samps, + uniform_y_samps, + inverse_forward_x, + forward_inverse_y, + ]) + + assert_strictly_monotonic(forward_on_10_pts_v) + # Composition of forward/inverse should be the identity. + np.testing.assert_allclose( + inverse_forward_x_v, uniform_x_samps_v, atol=1e-5, rtol=1e-3) + np.testing.assert_allclose( + forward_inverse_y_v, uniform_y_samps_v, atol=1e-5, rtol=1e-3) + # Change of measure should be correct. + np.testing.assert_allclose( + upper_x - lower_x, change_measure_dy_dx_v, atol=0, rtol=rtol) + # Inverse Jacobian should be equivalent to the reciprocal of the forward + # Jacobian. + np.testing.assert_allclose( + dy_dx_v, np.divide(1., dx_dy_v), atol=1e-5, rtol=1e-3) + + +def assert_bijective_and_finite( + bijector, x, y, event_ndims, atol=0, rtol=1e-5, sess=None): + """Assert that forward/inverse (along with jacobians) are inverses and finite. + + It is recommended to use x and y values that are very very close to the edge + of the Bijector's domain. + + Args: + bijector: A Bijector instance. + x: np.array of values in the domain of bijector.forward. + y: np.array of values in the domain of bijector.inverse. + event_ndims: Integer describing the number of event dimensions this bijector + operates on. + atol: Absolute tolerance. + rtol: Relative tolerance. + sess: TensorFlow session. Defaults to the default session. + + Raises: + AssertionError: If tests fail. + """ + sess = sess or ops.get_default_session() + + # These are the incoming points, but people often create a crazy range of + # values for which these end up being bad, especially in 16bit. 
+ assert_finite(x) + assert_finite(y) + + f_x = bijector.forward(x) + g_y = bijector.inverse(y) + + [ + x_from_x, + y_from_y, + ildj_f_x, + fldj_x, + ildj_y, + fldj_g_y, + f_x_v, + g_y_v, + ] = sess.run([ + bijector.inverse(f_x), + bijector.forward(g_y), + bijector.inverse_log_det_jacobian(f_x, event_ndims=event_ndims), + bijector.forward_log_det_jacobian(x, event_ndims=event_ndims), + bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims), + bijector.forward_log_det_jacobian(g_y, event_ndims=event_ndims), + f_x, + g_y, + ]) + + assert_finite(x_from_x) + assert_finite(y_from_y) + assert_finite(ildj_f_x) + assert_finite(fldj_x) + assert_finite(ildj_y) + assert_finite(fldj_g_y) + assert_finite(f_x_v) + assert_finite(g_y_v) + + np.testing.assert_allclose(x_from_x, x, atol=atol, rtol=rtol) + np.testing.assert_allclose(y_from_y, y, atol=atol, rtol=rtol) + np.testing.assert_allclose(-ildj_f_x, fldj_x, atol=atol, rtol=rtol) + np.testing.assert_allclose(-ildj_y, fldj_g_y, atol=atol, rtol=rtol) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/categorical.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..2054271c71ce81200674a005841d0fb5bc7dd789 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/categorical.py @@ -0,0 +1,345 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Categorical distribution class.""" + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import kullback_leibler +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +def _broadcast_cat_event_and_params(event, params, base_dtype): + """Broadcasts the event or distribution parameters.""" + if event.dtype.is_integer: + pass + elif event.dtype.is_floating: + # When `validate_args=True` we've already ensured int/float casting + # is closed. 
+    event = math_ops.cast(event, dtype=dtypes.int32)
+  else:
+    raise TypeError("`value` should have integer `dtype` or "
+                    "`self.dtype` ({})".format(base_dtype))
+  shape_known_statically = (
+      params.shape.ndims is not None and
+      params.shape[:-1].is_fully_defined() and
+      event.shape.is_fully_defined())
+  if not shape_known_statically or params.shape[:-1] != event.shape:
+    params *= array_ops.ones_like(event[..., array_ops.newaxis],
+                                  dtype=params.dtype)
+    params_shape = array_ops.shape(params)[:-1]
+    event *= array_ops.ones(params_shape, dtype=event.dtype)
+    if params.shape.ndims is not None:
+      event.set_shape(tensor_shape.TensorShape(params.shape[:-1]))
+
+  return event, params
+
+
+@tf_export(v1=["distributions.Categorical"])
+class Categorical(distribution.Distribution):
+  """Categorical distribution.
+
+  The Categorical distribution is parameterized by either probabilities or
+  log-probabilities of a set of `K` classes. It is defined over the integers
+  `{0, 1, ..., K-1}`.
+
+  The Categorical distribution is closely related to the `OneHotCategorical` and
+  `Multinomial` distributions. The Categorical distribution can be intuited as
+  generating samples according to `argmax{ OneHotCategorical(probs) }`, which is
+  itself identical to `argmax{ Multinomial(probs, total_count=1) }`.
+
+  #### Mathematical Details
+
+  The probability mass function (pmf) is,
+
+  ```none
+  pmf(k; pi) = prod_j pi_j**[k == j]
+  ```
+
+  #### Pitfalls
+
+  The number of classes, `K`, must not exceed:
+  - the largest integer representable by `self.dtype`, i.e.,
+    `2**(mantissa_bits+1)` (IEEE 754),
+  - the maximum `Tensor` index, i.e., `2**31-1`.
+
+  In other words,
+
+  ```python
+  K <= min(2**31-1, {
+    tf.float16: 2**11,
+    tf.float32: 2**24,
+    tf.float64: 2**53 }[param.dtype])
+  ```
+
+  Note: This condition is validated only when `self.validate_args = True`.
+
+  #### Examples
+
+  Creates a 3-class distribution with the 2nd class being most likely.
+
+  ```python
+  dist = Categorical(probs=[0.1, 0.5, 0.4])
+  n = 1e4
+  empirical_prob = tf.cast(
+      tf.histogram_fixed_width(
+          dist.sample(int(n)),
+          [0., 2],
+          nbins=3),
+      dtype=tf.float32) / n
+  # ==> array([ 0.1005,  0.5037,  0.3958], dtype=float32)
+  ```
+
+  Creates a 3-class distribution with the 2nd class being most likely.
+  Parameterized by [logits](https://en.wikipedia.org/wiki/Logit) rather than
+  probabilities.
+
+  ```python
+  dist = Categorical(logits=np.log([0.1, 0.5, 0.4]))
+  n = 1e4
+  empirical_prob = tf.cast(
+      tf.histogram_fixed_width(
+          dist.sample(int(n)),
+          [0., 2],
+          nbins=3),
+      dtype=tf.float32) / n
+  # ==> array([0.1045, 0.5047, 0.3908], dtype=float32)
+  ```
+
+  Creates a 3-class distribution with the 3rd class being most likely.
+  The distribution functions can be evaluated on counts.
+
+  ```python
+  # counts is a scalar.
+  p = [0.1, 0.4, 0.5]
+  dist = Categorical(probs=p)
+  dist.prob(0)  # Shape []
+
+  # p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts.
+  counts = [1, 0]
+  dist.prob(counts)  # Shape [2]
+
+  # p will be broadcast to shape [5, 7, 3] to match counts.
+  counts = [[...]]  # Shape [5, 7, 3]
+  dist.prob(counts)  # Shape [5, 7, 3]
+  ```
+
+  """
+
+  @deprecation.deprecated(
+      "2019-01-01",
+      "The TensorFlow Distributions library has moved to "
+      "TensorFlow Probability "
+      "(https://github.com/tensorflow/probability). 
You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__( + self, + logits=None, + probs=None, + dtype=dtypes.int32, + validate_args=False, + allow_nan_stats=True, + name="Categorical"): + """Initialize Categorical distributions using class log-probabilities. + + Args: + logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities + of a set of Categorical distributions. The first `N - 1` dimensions + index into a batch of independent distributions and the last dimension + represents a vector of logits for each class. Only one of `logits` or + `probs` should be passed in. + probs: An N-D `Tensor`, `N >= 1`, representing the probabilities + of a set of Categorical distributions. The first `N - 1` dimensions + index into a batch of independent distributions and the last dimension + represents a vector of probabilities for each class. Only one of + `logits` or `probs` should be passed in. + dtype: The type of the event samples (default: int32). + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + """ + parameters = dict(locals()) + with ops.name_scope(name, values=[logits, probs]) as name: + self._logits, self._probs = distribution_util.get_logits_and_probs( + logits=logits, + probs=probs, + validate_args=validate_args, + multidimensional=True, + name=name) + + if validate_args: + self._logits = distribution_util.embed_check_categorical_event_shape( + self._logits) + + logits_shape_static = self._logits.get_shape().with_rank_at_least(1) + if logits_shape_static.ndims is not None: + self._batch_rank = ops.convert_to_tensor( + logits_shape_static.ndims - 1, + dtype=dtypes.int32, + name="batch_rank") + else: + with ops.name_scope(name="batch_rank"): + self._batch_rank = array_ops.rank(self._logits) - 1 + + logits_shape = array_ops.shape(self._logits, name="logits_shape") + if tensor_shape.dimension_value(logits_shape_static[-1]) is not None: + self._event_size = ops.convert_to_tensor( + logits_shape_static.dims[-1].value, + dtype=dtypes.int32, + name="event_size") + else: + with ops.name_scope(name="event_size"): + self._event_size = logits_shape[self._batch_rank] + + if logits_shape_static[:-1].is_fully_defined(): + self._batch_shape_val = constant_op.constant( + logits_shape_static[:-1].as_list(), + dtype=dtypes.int32, + name="batch_shape") + else: + with ops.name_scope(name="batch_shape"): + self._batch_shape_val = logits_shape[:-1] + super(Categorical, self).__init__( + dtype=dtype, + reparameterization_type=distribution.NOT_REPARAMETERIZED, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + parameters=parameters, + graph_parents=[self._logits, + self._probs], + name=name) + + @property + def event_size(self): + """Scalar `int32` tensor: the number of classes.""" + return self._event_size + + @property + def logits(self): + """Vector of coordinatewise logits.""" + return self._logits + + @property + def probs(self): + """Vector of coordinatewise probabilities.""" + return self._probs + 
+  def _batch_shape_tensor(self):
+    return array_ops.identity(self._batch_shape_val)
+
+  def _batch_shape(self):
+    return self.logits.get_shape()[:-1]
+
+  def _event_shape_tensor(self):
+    return constant_op.constant([], dtype=dtypes.int32)
+
+  def _event_shape(self):
+    return tensor_shape.TensorShape([])
+
+  def _sample_n(self, n, seed=None):
+    if self.logits.get_shape().ndims == 2:
+      logits_2d = self.logits
+    else:
+      logits_2d = array_ops.reshape(self.logits, [-1, self.event_size])
+    sample_dtype = dtypes.int64 if self.dtype.size > 4 else dtypes.int32
+    draws = random_ops.multinomial(
+        logits_2d, n, seed=seed, output_dtype=sample_dtype)
+    draws = array_ops.reshape(
+        array_ops.transpose(draws),
+        array_ops.concat([[n], self.batch_shape_tensor()], 0))
+    return math_ops.cast(draws, self.dtype)
+
+  def _cdf(self, k):
+    k = ops.convert_to_tensor(k, name="k")
+    if self.validate_args:
+      k = distribution_util.embed_check_integer_casting_closed(
+          k, target_dtype=dtypes.int32)
+
+    k, probs = _broadcast_cat_event_and_params(
+        k, self.probs, base_dtype=self.dtype.base_dtype)
+
+    # batch-flatten everything in order to use `sequence_mask()`.
+    batch_flattened_probs = array_ops.reshape(probs,
+                                              (-1, self._event_size))
+    batch_flattened_k = array_ops.reshape(k, [-1])
+
+    to_sum_over = array_ops.where(
+        array_ops.sequence_mask(batch_flattened_k, self._event_size),
+        batch_flattened_probs,
+        array_ops.zeros_like(batch_flattened_probs))
+    batch_flattened_cdf = math_ops.reduce_sum(to_sum_over, axis=-1)
+    # Reshape back to the shape of the argument.
+    return array_ops.reshape(batch_flattened_cdf, array_ops.shape(k))
+
+  def _log_prob(self, k):
+    k = ops.convert_to_tensor(k, name="k")
+    if self.validate_args:
+      k = distribution_util.embed_check_integer_casting_closed(
+          k, target_dtype=dtypes.int32)
+    k, logits = _broadcast_cat_event_and_params(
+        k, self.logits, base_dtype=self.dtype.base_dtype)
+
+    # pylint: disable=invalid-unary-operand-type
+    return -nn_ops.sparse_softmax_cross_entropy_with_logits(
+        labels=k,
+        logits=logits)
+
+  def _entropy(self):
+    return -math_ops.reduce_sum(
+        nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
+
+  def _mode(self):
+    ret = math_ops.argmax(self.logits, axis=self._batch_rank)
+    ret = math_ops.cast(ret, self.dtype)
+    ret.set_shape(self.batch_shape)
+    return ret
+
+
+@kullback_leibler.RegisterKL(Categorical, Categorical)
+def _kl_categorical_categorical(a, b, name=None):
+  """Calculate the batched KL divergence KL(a || b) with a and b Categorical.
+
+  Args:
+    a: instance of a Categorical distribution object.
+    b: instance of a Categorical distribution object.
+    name: (optional) Name to use for created operations.
+      default is "kl_categorical_categorical".
+
+  Returns:
+    Batchwise KL(a || b)
+  """
+  with ops.name_scope(name, "kl_categorical_categorical",
+                      values=[a.logits, b.logits]):
+    # KL(a || b) = sum_i probs_a[i] * (log probs_a[i] - log probs_b[i])
+    delta_log_probs1 = (nn_ops.log_softmax(a.logits) -
+                        nn_ops.log_softmax(b.logits))
+    return math_ops.reduce_sum(nn_ops.softmax(a.logits) * delta_log_probs1,
+                               axis=-1)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet.py
new file mode 100644
index 0000000000000000000000000000000000000000..cac99f8e0c071fd539fca7d22bd118c83a9ad5d2
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet.py
@@ -0,0 +1,410 @@
+# Copyright 2016 The TensorFlow Authors.
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Dirichlet distribution class.""" + +import numpy as np + +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import kullback_leibler +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "Dirichlet", +] + + +_dirichlet_sample_note = """Note: `value` must be a non-negative tensor with +dtype `self.dtype` and be in the `(self.event_shape() - 1)`-simplex, i.e., +`tf.reduce_sum(value, -1) = 1`. It must have a shape compatible with +`self.batch_shape() + self.event_shape()`.""" + + +@tf_export(v1=["distributions.Dirichlet"]) +class Dirichlet(distribution.Distribution): + """Dirichlet distribution. + + The Dirichlet distribution is defined over the + [`(k-1)`-simplex](https://en.wikipedia.org/wiki/Simplex) using a positive, + length-`k` vector `concentration` (`k > 1`). The Dirichlet is identically the + Beta distribution when `k = 2`. + + #### Mathematical Details + + The Dirichlet is a distribution over the open `(k-1)`-simplex, i.e., + + ```none + S^{k-1} = { (x_0, ..., x_{k-1}) in R^k : sum_j x_j = 1 and all_j x_j > 0 }. + ``` + + The probability density function (pdf) is, + + ```none + pdf(x; alpha) = prod_j x_j**(alpha_j - 1) / Z + Z = prod_j Gamma(alpha_j) / Gamma(sum_j alpha_j) + ``` + + where: + + * `x in S^{k-1}`, i.e., the `(k-1)`-simplex, + * `concentration = alpha = [alpha_0, ..., alpha_{k-1}]`, `alpha_j > 0`, + * `Z` is the normalization constant aka the [multivariate beta function]( + https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function), + and, + * `Gamma` is the [gamma function]( + https://en.wikipedia.org/wiki/Gamma_function). + + The `concentration` represents mean total counts of class occurrence, i.e., + + ```none + concentration = alpha = mean * total_concentration + ``` + + where `mean` in `S^{k-1}` and `total_concentration` is a positive real number + representing a mean total count. + + Distribution parameters are automatically broadcast in all functions; see + examples for details. + + Warning: Some components of the samples can be zero due to finite precision. + This happens more often when some of the concentrations are very small. + Make sure to round the samples to `np.finfo(dtype).tiny` before computing the + density. + + Samples of this distribution are reparameterized (pathwise differentiable). 
+ The derivatives are computed using the approach described in + (Figurnov et al., 2018). + + #### Examples + + ```python + import tensorflow_probability as tfp + tfd = tfp.distributions + + # Create a single trivariate Dirichlet, with the 3rd class being three times + # more frequent than the first. I.e., batch_shape=[], event_shape=[3]. + alpha = [1., 2, 3] + dist = tfd.Dirichlet(alpha) + + dist.sample([4, 5]) # shape: [4, 5, 3] + + # x has one sample, one batch, three classes: + x = [.2, .3, .5] # shape: [3] + dist.prob(x) # shape: [] + + # x has two samples from one batch: + x = [[.1, .4, .5], + [.2, .3, .5]] + dist.prob(x) # shape: [2] + + # alpha will be broadcast to shape [5, 7, 3] to match x. + x = [[...]] # shape: [5, 7, 3] + dist.prob(x) # shape: [5, 7] + ``` + + ```python + # Create batch_shape=[2], event_shape=[3]: + alpha = [[1., 2, 3], + [4, 5, 6]] # shape: [2, 3] + dist = tfd.Dirichlet(alpha) + + dist.sample([4, 5]) # shape: [4, 5, 2, 3] + + x = [.2, .3, .5] + # x will be broadcast as [[.2, .3, .5], + # [.2, .3, .5]], + # thus matching batch_shape [2, 3]. + dist.prob(x) # shape: [2] + ``` + + Compute the gradients of samples w.r.t. the parameters: + + ```python + alpha = tf.constant([1.0, 2.0, 3.0]) + dist = tfd.Dirichlet(alpha) + samples = dist.sample(5) # Shape [5, 3] + loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function + # Unbiased stochastic gradients of the loss function + grads = tf.gradients(loss, alpha) + ``` + + References: + Implicit Reparameterization Gradients: + [Figurnov et al., 2018] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) + ([pdf] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf)) + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + concentration, + validate_args=False, + allow_nan_stats=True, + name="Dirichlet"): + """Initialize a batch of Dirichlet distributions. + + Args: + concentration: Positive floating-point `Tensor` indicating mean number + of class occurrences; aka "alpha". Implies `self.dtype`, and + `self.batch_shape`, `self.event_shape`, i.e., if + `concentration.shape = [N1, N2, ..., Nm, k]` then + `batch_shape = [N1, N2, ..., Nm]` and + `event_shape = [k]`. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. 
+ """ + parameters = dict(locals()) + with ops.name_scope(name, values=[concentration]) as name: + self._concentration = self._maybe_assert_valid_concentration( + ops.convert_to_tensor(concentration, name="concentration"), + validate_args) + self._total_concentration = math_ops.reduce_sum(self._concentration, -1) + super(Dirichlet, self).__init__( + dtype=self._concentration.dtype, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + reparameterization_type=distribution.FULLY_REPARAMETERIZED, + parameters=parameters, + graph_parents=[self._concentration, + self._total_concentration], + name=name) + + @property + def concentration(self): + """Concentration parameter; expected counts for that coordinate.""" + return self._concentration + + @property + def total_concentration(self): + """Sum of last dim of concentration parameter.""" + return self._total_concentration + + def _batch_shape_tensor(self): + return array_ops.shape(self.total_concentration) + + def _batch_shape(self): + return self.total_concentration.get_shape() + + def _event_shape_tensor(self): + return array_ops.shape(self.concentration)[-1:] + + def _event_shape(self): + return self.concentration.get_shape().with_rank_at_least(1)[-1:] + + def _sample_n(self, n, seed=None): + gamma_sample = random_ops.random_gamma( + shape=[n], + alpha=self.concentration, + dtype=self.dtype, + seed=seed) + return gamma_sample / math_ops.reduce_sum(gamma_sample, -1, keepdims=True) + + @distribution_util.AppendDocstring(_dirichlet_sample_note) + def _log_prob(self, x): + return self._log_unnormalized_prob(x) - self._log_normalization() + + @distribution_util.AppendDocstring(_dirichlet_sample_note) + def _prob(self, x): + return math_ops.exp(self._log_prob(x)) + + def _log_unnormalized_prob(self, x): + x = self._maybe_assert_valid_sample(x) + return math_ops.reduce_sum(math_ops.xlogy(self.concentration - 1., x), -1) + + def _log_normalization(self): + return special_math_ops.lbeta(self.concentration) + + def _entropy(self): + k = math_ops.cast(self.event_shape_tensor()[0], self.dtype) + return ( + self._log_normalization() + + ((self.total_concentration - k) + * math_ops.digamma(self.total_concentration)) + - math_ops.reduce_sum( + (self.concentration - 1.) * math_ops.digamma(self.concentration), + axis=-1)) + + def _mean(self): + return self.concentration / self.total_concentration[..., array_ops.newaxis] + + def _covariance(self): + x = self._variance_scale_term() * self._mean() + # pylint: disable=invalid-unary-operand-type + return array_ops.matrix_set_diag( + -math_ops.matmul( + x[..., array_ops.newaxis], + x[..., array_ops.newaxis, :]), # outer prod + self._variance()) + + def _variance(self): + scale = self._variance_scale_term() + x = scale * self._mean() + return x * (scale - x) + + def _variance_scale_term(self): + """Helper to `_covariance` and `_variance` which computes a shared scale.""" + return math_ops.rsqrt(1. + self.total_concentration[..., array_ops.newaxis]) + + @distribution_util.AppendDocstring( + """Note: The mode is undefined when any `concentration <= 1`. If + `self.allow_nan_stats` is `True`, `NaN` is used for undefined modes. If + `self.allow_nan_stats` is `False` an exception is raised when one or more + modes are undefined.""") + def _mode(self): + k = math_ops.cast(self.event_shape_tensor()[0], self.dtype) + mode = (self.concentration - 1.) 
/ ( + self.total_concentration[..., array_ops.newaxis] - k) + if self.allow_nan_stats: + nan = array_ops.fill( + array_ops.shape(mode), + np.array(np.nan, dtype=self.dtype.as_numpy_dtype()), + name="nan") + return array_ops.where_v2( + math_ops.reduce_all(self.concentration > 1., axis=-1), mode, nan) + return control_flow_ops.with_dependencies([ + check_ops.assert_less( + array_ops.ones([], self.dtype), + self.concentration, + message="Mode undefined when any concentration <= 1"), + ], mode) + + def _maybe_assert_valid_concentration(self, concentration, validate_args): + """Checks the validity of the concentration parameter.""" + if not validate_args: + return concentration + return control_flow_ops.with_dependencies([ + check_ops.assert_positive( + concentration, + message="Concentration parameter must be positive."), + check_ops.assert_rank_at_least( + concentration, 1, + message="Concentration parameter must have >=1 dimensions."), + check_ops.assert_less( + 1, array_ops.shape(concentration)[-1], + message="Concentration parameter must have event_size >= 2."), + ], concentration) + + def _maybe_assert_valid_sample(self, x): + """Checks the validity of a sample.""" + if not self.validate_args: + return x + return control_flow_ops.with_dependencies([ + check_ops.assert_positive(x, message="samples must be positive"), + check_ops.assert_near( + array_ops.ones([], dtype=self.dtype), + math_ops.reduce_sum(x, -1), + message="sample last-dimension must sum to `1`"), + ], x) + + +@kullback_leibler.RegisterKL(Dirichlet, Dirichlet) +def _kl_dirichlet_dirichlet(d1, d2, name=None): + """Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet. + + Args: + d1: instance of a Dirichlet distribution object. + d2: instance of a Dirichlet distribution object. + name: (optional) Name to use for created operations. + default is "kl_dirichlet_dirichlet". + + Returns: + Batchwise KL(d1 || d2) + """ + with ops.name_scope(name, "kl_dirichlet_dirichlet", values=[ + d1.concentration, d2.concentration]): + # The KL between Dirichlet distributions can be derived as follows. We have + # + # Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)] + # + # where B(a) is the multivariate Beta function: + # + # B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n]) + # + # The KL is + # + # KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))} + # + # so we'll need to know the log density of the Dirichlet. This is + # + # log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a) + # + # The only term that matters for the expectations is the log(x[i]). To + # compute the expectation of this term over the Dirichlet density, we can + # use the following facts about the Dirichlet in exponential family form: + # 1. log(x[i]) is a sufficient statistic + # 2. expected sufficient statistics (of any exp family distribution) are + # equal to derivatives of the log normalizer with respect to + # corresponding natural parameters: E{T[i](x)} = dA/d(eta[i]) + # + # To proceed, we can rewrite the Dirichlet density in exponential family + # form as follows: + # + # Dir(x; a) = exp{eta(a) . T(x) - A(a)} + # + # where '.' 
is the dot product of vectors eta and T, and A is a scalar: + # + # eta[i](a) = a[i] - 1 + # T[i](x) = log(x[i]) + # A(a) = log B(a) + # + # Now, we can use fact (2) above to write + # + # E_Dir(x; a)[log(x[i])] + # = dA(a) / da[i] + # = d/da[i] log B(a) + # = d/da[i] (sum_j lgamma(a[j])) - lgamma(sum_j a[j]) + # = digamma(a[i])) - digamma(sum_j a[j]) + # + # Putting it all together, we have + # + # KL[Dir(x; a) || Dir(x; b)] + # = E_Dir(x; a){log(Dir(x; a) / Dir(x; b)} + # = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])} - (lbeta(a) - lbeta(b)) + # = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b) + # = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))] + # - lbeta(a) + lbeta(b)) + + digamma_sum_d1 = math_ops.digamma( + math_ops.reduce_sum(d1.concentration, axis=-1, keepdims=True)) + digamma_diff = math_ops.digamma(d1.concentration) - digamma_sum_d1 + concentration_diff = d1.concentration - d2.concentration + + return (math_ops.reduce_sum(concentration_diff * digamma_diff, axis=-1) - + special_math_ops.lbeta(d1.concentration) + + special_math_ops.lbeta(d2.concentration)) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet_multinomial.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet_multinomial.py new file mode 100644 index 0000000000000000000000000000000000000000..947801cf1bf66dc0b24adbfa706fc3b7d3db0e17 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet_multinomial.py @@ -0,0 +1,353 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The DirichletMultinomial distribution class.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "DirichletMultinomial", +] + + +_dirichlet_multinomial_sample_note = """For each batch of counts, +`value = [n_0, ..., n_{K-1}]`, `P[value]` is the probability that after +sampling `self.total_count` draws from this Dirichlet-Multinomial distribution, +the number of draws falling in class `j` is `n_j`. Since this definition is +[exchangeable](https://en.wikipedia.org/wiki/Exchangeable_random_variables); +different sequences have the same counts so the probability includes a +combinatorial coefficient. 
+
+Note: `value` must be a non-negative tensor with dtype `self.dtype`, have no
+fractional components, and such that
+`tf.reduce_sum(value, -1) = self.total_count`. Its shape must be broadcastable
+with `self.concentration` and `self.total_count`."""
+
+
+@tf_export(v1=["distributions.DirichletMultinomial"])
+class DirichletMultinomial(distribution.Distribution):
+  """Dirichlet-Multinomial compound distribution.
+
+  The Dirichlet-Multinomial distribution is parameterized by a (batch of)
+  length-`K` `concentration` vector (`K > 1`) and a `total_count` number of
+  trials, i.e., the number of trials per draw from the DirichletMultinomial. It
+  is defined over a (batch of) length-`K` vector `counts` such that
+  `tf.reduce_sum(counts, -1) = total_count`. The Dirichlet-Multinomial is
+  identically the Beta-Binomial distribution when `K = 2`.
+
+  #### Mathematical Details
+
+  The Dirichlet-Multinomial is a distribution over `K`-class counts, i.e., a
+  length-`K` vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`.
+
+  The probability mass function (pmf) is,
+
+  ```none
+  pmf(n; alpha, N) = Beta(alpha + n) / (prod_j n_j!) / Z
+  Z = Beta(alpha) / N!
+  ```
+
+  where:
+
+  * `concentration = alpha = [alpha_0, ..., alpha_{K-1}]`, `alpha_j > 0`,
+  * `total_count = N`, `N` a positive integer,
+  * `N!` is `N` factorial, and,
+  * `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the
+    [multivariate beta function](
+    https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
+    and,
+  * `Gamma` is the [gamma function](
+    https://en.wikipedia.org/wiki/Gamma_function).
+
+  Dirichlet-Multinomial is a [compound distribution](
+  https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e., its
+  samples are generated as follows.
+
+  1. Choose class probabilities:
+     `probs = [p_0,...,p_{K-1}] ~ Dir(concentration)`
+  2. Draw integers:
+     `counts = [n_0,...,n_{K-1}] ~ Multinomial(total_count, probs)`
+
+  The last `concentration` dimension parametrizes a single Dirichlet-Multinomial
+  distribution. When calling distribution functions (e.g., `dist.prob(counts)`),
+  `concentration`, `total_count` and `counts` are broadcast to the same shape.
+  The last dimension of `counts` corresponds to a single Dirichlet-Multinomial
+  distribution.
+
+  Distribution parameters are automatically broadcast in all functions; see
+  examples for details.
+
+  #### Pitfalls
+
+  The number of classes, `K`, must not exceed:
+  - the largest integer representable by `self.dtype`, i.e.,
+    `2**(mantissa_bits+1)` (IEEE 754),
+  - the maximum `Tensor` index, i.e., `2**31-1`.
+
+  In other words,
+
+  ```python
+  K <= min(2**31-1, {
+      tf.float16: 2**11,
+      tf.float32: 2**24,
+      tf.float64: 2**53 }[param.dtype])
+  ```
+
+  Note: This condition is validated only when `self.validate_args = True`.
+
+  #### Examples
+
+  ```python
+  alpha = [1., 2., 3.]
+  n = 2.
+  dist = DirichletMultinomial(n, alpha)
+  ```
+
+  Creates a 3-class distribution, with the 3rd class the most likely to be
+  drawn.
+  The distribution functions can be evaluated on counts.
+
+  ```python
+  # counts same shape as alpha.
+  counts = [0., 0., 2.]
+  dist.prob(counts)  # Shape []
+
+  # alpha will be broadcast to [[1., 2., 3.], [1., 2., 3.]] to match counts.
+  counts = [[1., 1., 0.], [1., 0., 1.]]
+  dist.prob(counts)  # Shape [2]
+
+  # alpha will be broadcast to shape [5, 7, 3] to match counts.
+  counts = [[...]]  # Shape [5, 7, 3]
+  dist.prob(counts)  # Shape [5, 7]
+  ```
+
+  Creates a 2-batch of 3-class distributions.
+ + ```python + alpha = [[1., 2., 3.], [4., 5., 6.]] # Shape [2, 3] + n = [3., 3.] + dist = DirichletMultinomial(n, alpha) + + # counts will be broadcast to [[2., 1., 0.], [2., 1., 0.]] to match alpha. + counts = [2., 1., 0.] + dist.prob(counts) # Shape [2] + ``` + + """ + + # TODO(b/27419586) Change docstring for dtype of concentration once int + # allowed. + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + total_count, + concentration, + validate_args=False, + allow_nan_stats=True, + name="DirichletMultinomial"): + """Initialize a batch of DirichletMultinomial distributions. + + Args: + total_count: Non-negative floating point tensor, whose dtype is the same + as `concentration`. The shape is broadcastable to `[N1,..., Nm]` with + `m >= 0`. Defines this as a batch of `N1 x ... x Nm` different + Dirichlet multinomial distributions. Its components should be equal to + integer values. + concentration: Positive floating point tensor, whose dtype is the + same as `n` with shape broadcastable to `[N1,..., Nm, K]` `m >= 0`. + Defines this as a batch of `N1 x ... x Nm` different `K` class Dirichlet + multinomial distributions. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + """ + parameters = dict(locals()) + with ops.name_scope(name, values=[total_count, concentration]) as name: + # Broadcasting works because: + # * The broadcasting convention is to prepend dimensions of size [1], and + # we use the last dimension for the distribution, whereas + # the batch dimensions are the leading dimensions, which forces the + # distribution dimension to be defined explicitly (i.e. it cannot be + # created automatically by prepending). This forces enough explicitness. + # * All calls involving `counts` eventually require a broadcast between + # `counts` and concentration. 
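+      # For example (illustrative shapes only, not checked here):
+      # `concentration` of shape [2, 3] with `counts` of shape [3] broadcasts
+      # `counts` against the batch to [2, 3]; the size-3 event dimension must
+      # already be explicit on both.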
+ self._total_count = ops.convert_to_tensor(total_count, name="total_count") + if validate_args: + self._total_count = ( + distribution_util.embed_check_nonnegative_integer_form( + self._total_count)) + self._concentration = self._maybe_assert_valid_concentration( + ops.convert_to_tensor(concentration, + name="concentration"), + validate_args) + self._total_concentration = math_ops.reduce_sum(self._concentration, -1) + super(DirichletMultinomial, self).__init__( + dtype=self._concentration.dtype, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + reparameterization_type=distribution.NOT_REPARAMETERIZED, + parameters=parameters, + graph_parents=[self._total_count, + self._concentration], + name=name) + + @property + def total_count(self): + """Number of trials used to construct a sample.""" + return self._total_count + + @property + def concentration(self): + """Concentration parameter; expected prior counts for that coordinate.""" + return self._concentration + + @property + def total_concentration(self): + """Sum of last dim of concentration parameter.""" + return self._total_concentration + + def _batch_shape_tensor(self): + return array_ops.shape(self.total_concentration) + + def _batch_shape(self): + return self.total_concentration.get_shape() + + def _event_shape_tensor(self): + return array_ops.shape(self.concentration)[-1:] + + def _event_shape(self): + # Event shape depends only on total_concentration, not "n". + return self.concentration.get_shape().with_rank_at_least(1)[-1:] + + def _sample_n(self, n, seed=None): + n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32) + k = self.event_shape_tensor()[0] + unnormalized_logits = array_ops.reshape( + math_ops.log(random_ops.random_gamma( + shape=[n], + alpha=self.concentration, + dtype=self.dtype, + seed=seed)), + shape=[-1, k]) + draws = random_ops.multinomial( + logits=unnormalized_logits, + num_samples=n_draws, + seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial")) + x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2) + final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0) + x = array_ops.reshape(x, final_shape) + return math_ops.cast(x, self.dtype) + + @distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note) + def _log_prob(self, counts): + counts = self._maybe_assert_valid_sample(counts) + ordered_prob = ( + special_math_ops.lbeta(self.concentration + counts) + - special_math_ops.lbeta(self.concentration)) + return ordered_prob + distribution_util.log_combinations( + self.total_count, counts) + + @distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note) + def _prob(self, counts): + return math_ops.exp(self._log_prob(counts)) + + def _mean(self): + return self.total_count * (self.concentration / + self.total_concentration[..., array_ops.newaxis]) + + @distribution_util.AppendDocstring( + """The covariance for each batch member is defined as the following: + + ```none + Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) * + (n + alpha_0) / (1 + alpha_0) + ``` + + where `concentration = alpha` and + `total_concentration = alpha_0 = sum_j alpha_j`. 
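+
+      Equivalently, writing `p_j = alpha_j / alpha_0`:
+
+      ```none
+      Var(X_j) = n * p_j * (1 - p_j) * (n + alpha_0) / (1 + alpha_0)
+      ```
+
+      i.e., the Multinomial variance inflated by an overdispersion factor
+      that tends to `1` as `alpha_0 -> infinity` (the Multinomial limit).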
+ + The covariance between elements in a batch is defined as: + + ```none + Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 * + (n + alpha_0) / (1 + alpha_0) + ``` + """) + def _covariance(self): + x = self._variance_scale_term() * self._mean() + # pylint: disable=invalid-unary-operand-type + return array_ops.matrix_set_diag( + -math_ops.matmul( + x[..., array_ops.newaxis], + x[..., array_ops.newaxis, :]), # outer prod + self._variance()) + + def _variance(self): + scale = self._variance_scale_term() + x = scale * self._mean() + return x * (self.total_count * scale - x) + + def _variance_scale_term(self): + """Helper to `_covariance` and `_variance` which computes a shared scale.""" + # We must take care to expand back the last dim whenever we use the + # total_concentration. + c0 = self.total_concentration[..., array_ops.newaxis] + return math_ops.sqrt((1. + c0 / self.total_count) / (1. + c0)) + + def _maybe_assert_valid_concentration(self, concentration, validate_args): + """Checks the validity of the concentration parameter.""" + if not validate_args: + return concentration + concentration = distribution_util.embed_check_categorical_event_shape( + concentration) + return control_flow_ops.with_dependencies([ + check_ops.assert_positive( + concentration, + message="Concentration parameter must be positive."), + ], concentration) + + def _maybe_assert_valid_sample(self, counts): + """Check counts for proper shape, values, then return tensor version.""" + if not self.validate_args: + return counts + counts = distribution_util.embed_check_nonnegative_integer_form(counts) + return control_flow_ops.with_dependencies([ + check_ops.assert_equal( + self.total_count, math_ops.reduce_sum(counts, -1), + message="counts last-dimension must sum to `self.total_count`"), + ], counts) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distribution.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distribution.py new file mode 100644 index 0000000000000000000000000000000000000000..09d9e2a507d7ded2170735b72088c59ae15ab8fa --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distribution.py @@ -0,0 +1,1316 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Base classes for probability distributions.""" + +import abc +import contextlib +import types + +import numpy as np + +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.distributions import kullback_leibler +from tensorflow.python.ops.distributions import util +from tensorflow.python.util import deprecation +from tensorflow.python.util import tf_inspect +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "ReparameterizationType", + "FULLY_REPARAMETERIZED", + "NOT_REPARAMETERIZED", + "Distribution", +] + +_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [ + "batch_shape", + "batch_shape_tensor", + "cdf", + "covariance", + "cross_entropy", + "entropy", + "event_shape", + "event_shape_tensor", + "kl_divergence", + "log_cdf", + "log_prob", + "log_survival_function", + "mean", + "mode", + "prob", + "sample", + "stddev", + "survival_function", + "variance", +] + + +class _BaseDistribution(metaclass=abc.ABCMeta): + """Abstract base class needed for resolving subclass hierarchy.""" + pass + + +def _copy_fn(fn): + """Create a deep copy of fn. + + Args: + fn: a callable + + Returns: + A `FunctionType`: a deep copy of fn. + + Raises: + TypeError: if `fn` is not a callable. + """ + if not callable(fn): + raise TypeError("fn is not callable: %s" % fn) + # The blessed way to copy a function. copy.deepcopy fails to create a + # non-reference copy. Since: + # types.FunctionType == type(lambda: None), + # and the docstring for the function type states: + # + # function(code, globals[, name[, argdefs[, closure]]]) + # + # Create a function object from a code object and a dictionary. + # ... + # + # Here we can use this to create a new function with the old function's + # code, globals, closure, etc. + return types.FunctionType( + code=fn.__code__, globals=fn.__globals__, + name=fn.__name__, argdefs=fn.__defaults__, + closure=fn.__closure__) + + +def _update_docstring(old_str, append_str): + """Update old_str by inserting append_str just before the "Args:" section.""" + old_str = old_str or "" + old_str_lines = old_str.split("\n") + + # Step 0: Prepend spaces to all lines of append_str. This is + # necessary for correct markdown generation. + append_str = "\n".join(" %s" % line for line in append_str.split("\n")) + + # Step 1: Find mention of "Args": + has_args_ix = [ + ix for ix, line in enumerate(old_str_lines) + if line.strip().lower() == "args:"] + if has_args_ix: + final_args_ix = has_args_ix[-1] + return ("\n".join(old_str_lines[:final_args_ix]) + + "\n\n" + append_str + "\n\n" + + "\n".join(old_str_lines[final_args_ix:])) + else: + return old_str + "\n\n" + append_str + + +def _convert_to_tensor(value, name=None, preferred_dtype=None): + """Converts to tensor avoiding an eager bug that loses float precision.""" + # TODO(b/116672045): Remove this function. 
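+  # Illustrative failure mode this guards against (an assumption about the
+  # referenced bug, not verified here): in eager mode, converting the Python
+  # float 1.5 with an integer `preferred_dtype` could truncate it to 1;
+  # converting once without `preferred_dtype` and returning the result when
+  # it is already floating avoids that precision loss.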
+ if (context.executing_eagerly() and preferred_dtype is not None and + (preferred_dtype.is_integer or preferred_dtype.is_bool)): + v = ops.convert_to_tensor(value, name=name) + if v.dtype.is_floating: + return v + return ops.convert_to_tensor( + value, name=name, preferred_dtype=preferred_dtype) + + +class _DistributionMeta(abc.ABCMeta): + + def __new__(mcs, classname, baseclasses, attrs): + """Control the creation of subclasses of the Distribution class. + + The main purpose of this method is to properly propagate docstrings + from private Distribution methods, like `_log_prob`, into their + public wrappers as inherited by the Distribution base class + (e.g. `log_prob`). + + Args: + classname: The name of the subclass being created. + baseclasses: A tuple of parent classes. + attrs: A dict mapping new attributes to their values. + + Returns: + The class object. + + Raises: + TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or + the new class is derived via multiple inheritance and the first + parent class is not a subclass of `BaseDistribution`. + AttributeError: If `Distribution` does not implement e.g. `log_prob`. + ValueError: If a `Distribution` public method lacks a docstring. + """ + if not baseclasses: # Nothing to be done for Distribution + raise TypeError("Expected non-empty baseclass. Does Distribution " + "not subclass _BaseDistribution?") + which_base = [ + base for base in baseclasses + if base == _BaseDistribution or issubclass(base, Distribution)] + base = which_base[0] + if base == _BaseDistribution: # Nothing to be done for Distribution + return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs) + if not issubclass(base, Distribution): + raise TypeError("First parent class declared for %s must be " + "Distribution, but saw '%s'" % (classname, base.__name__)) + for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS: + special_attr = "_%s" % attr + class_attr_value = attrs.get(attr, None) + if attr in attrs: + # The method is being overridden, do not update its docstring + continue + base_attr_value = getattr(base, attr, None) + if not base_attr_value: + raise AttributeError( + "Internal error: expected base class '%s' to implement method '%s'" + % (base.__name__, attr)) + class_special_attr_value = attrs.get(special_attr, None) + if class_special_attr_value is None: + # No _special method available, no need to update the docstring. + continue + class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value) + if not class_special_attr_docstring: + # No docstring to append. + continue + class_attr_value = _copy_fn(base_attr_value) + class_attr_docstring = tf_inspect.getdoc(base_attr_value) + if class_attr_docstring is None: + raise ValueError( + "Expected base class fn to contain a docstring: %s.%s" + % (base.__name__, attr)) + class_attr_value.__doc__ = _update_docstring( + class_attr_value.__doc__, + ("Additional documentation from `%s`:\n\n%s" + % (classname, class_special_attr_docstring))) + attrs[attr] = class_attr_value + + return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs) + + +@tf_export(v1=["distributions.ReparameterizationType"]) +class ReparameterizationType: + """Instances of this class represent how sampling is reparameterized. + + Two static instances exist in the distributions library, signifying + one of two possible properties for samples from a distribution: + + `FULLY_REPARAMETERIZED`: Samples from the distribution are fully + reparameterized, and straight-through gradients are supported. 
+
+  `NOT_REPARAMETERIZED`: Samples from the distribution are not fully
+  reparameterized, and straight-through gradients are either partially
+  unsupported or not supported at all. In this case, for purposes of
+  e.g. RL or variational inference, it is generally safest to wrap the
+  sample results in a `stop_gradient` call and use policy
+  gradients / surrogate loss instead.
+  """
+
+  @deprecation.deprecated(
+      "2019-01-01",
+      "The TensorFlow Distributions library has moved to "
+      "TensorFlow Probability "
+      "(https://github.com/tensorflow/probability). You "
+      "should update all references to use `tfp.distributions` "
+      "instead of `tf.distributions`.",
+      warn_once=True)
+  def __init__(self, rep_type):
+    self._rep_type = rep_type
+
+  def __repr__(self):
+    return "<Reparameterization Type: %s>" % self._rep_type
+
+  def __eq__(self, other):
+    """Determine if this `ReparameterizationType` is equal to another.
+
+    Since ReparameterizationType instances are constant static global
+    instances, equality checks if two instances' id() values are equal.
+
+    Args:
+      other: Object to compare against.
+
+    Returns:
+      `self is other`.
+    """
+    return self is other
+
+
+# Fully reparameterized distribution: samples from a fully
+# reparameterized distribution support straight-through gradients with
+# respect to all parameters.
+FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
+tf_export(v1=["distributions.FULLY_REPARAMETERIZED"]).export_constant(
+    __name__, "FULLY_REPARAMETERIZED")
+
+
+# Not reparameterized distribution: samples from a non-
+# reparameterized distribution do not support straight-through gradients for
+# at least some of the parameters.
+NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
+tf_export(v1=["distributions.NOT_REPARAMETERIZED"]).export_constant(
+    __name__, "NOT_REPARAMETERIZED")
+
+
+@tf_export(v1=["distributions.Distribution"])
+class Distribution(_BaseDistribution, metaclass=_DistributionMeta):
+  """A generic probability distribution base class.
+
+  `Distribution` is a base class for constructing and organizing properties
+  (e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
+
+  #### Subclassing
+
+  Subclasses are expected to implement a leading-underscore version of the
+  same-named function. The argument signature should be identical except for
+  the omission of `name="..."`. For example, to enable `log_prob(value,
+  name="log_prob")` a subclass should implement `_log_prob(value)`.
+
+  Subclasses can append to public-level docstrings by providing
+  docstrings for their method specializations. For example:
+
+  ```python
+  @util.AppendDocstring("Some other details.")
+  def _log_prob(self, value):
+    ...
+  ```
+
+  would add the string "Some other details." to the `log_prob` function
+  docstring. This is implemented as a simple decorator to avoid the Python
+  linter complaining about missing Args/Returns/Raises sections in the
+  partial docstrings.
+
+  #### Broadcasting, batching, and shapes
+
+  All distributions support batches of independent distributions of that type.
+  The batch shape is determined by broadcasting together the parameters.
+
+  The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
+  `log_prob` reflects this broadcasting, as does the return value of `sample`
+  and `sample_n`.
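+
+  For example (a sketch): `Normal(loc=tf.zeros([2, 3]), scale=1.)` has
+  `batch_shape = [2, 3]`, since the scalar `scale` is broadcast against
+  `loc` when the batch shape is computed.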
+ + `sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is + the shape of the `Tensor` returned from `sample_n`, `n` is the number of + samples, `batch_shape` defines how many independent distributions there are, + and `event_shape` defines the shape of samples from each of those independent + distributions. Samples are independent along the `batch_shape` dimensions, but + not necessarily so along the `event_shape` dimensions (depending on the + particulars of the underlying distribution). + + Using the `Uniform` distribution as an example: + + ```python + minval = 3.0 + maxval = [[4.0, 6.0], + [10.0, 12.0]] + + # Broadcasting: + # This instance represents 4 Uniform distributions. Each has a lower bound at + # 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape. + u = Uniform(minval, maxval) + + # `event_shape` is `TensorShape([])`. + event_shape = u.event_shape + # `event_shape_t` is a `Tensor` which will evaluate to []. + event_shape_t = u.event_shape_tensor() + + # Sampling returns a sample per distribution. `samples` has shape + # [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5, + # batch_shape=[2, 2], and event_shape=[]. + samples = u.sample_n(5) + + # The broadcasting holds across methods. Here we use `cdf` as an example. The + # same holds for `log_cdf` and the likelihood functions. + + # `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the + # shape of the `Uniform` instance. + cum_prob_broadcast = u.cdf(4.0) + + # `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting + # occurred. + cum_prob_per_dist = u.cdf([[4.0, 5.0], + [6.0, 7.0]]) + + # INVALID as the `value` argument is not broadcastable to the distribution's + # shape. + cum_prob_invalid = u.cdf([4.0, 5.0, 6.0]) + ``` + + #### Shapes + + There are three important concepts associated with TensorFlow Distributions + shapes: + - Event shape describes the shape of a single draw from the distribution; + it may be dependent across dimensions. For scalar distributions, the event + shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is + `[5]`. + - Batch shape describes independent, not identically distributed draws, aka a + "collection" or "bunch" of distributions. + - Sample shape describes independent, identically distributed draws of batches + from the distribution family. + + The event shape and the batch shape are properties of a Distribution object, + whereas the sample shape is associated with a specific call to `sample` or + `log_prob`. + + For detailed usage examples of TensorFlow Distributions shapes, see + [this tutorial]( + https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb) + + #### Parameter values leading to undefined statistics or distributions. + + Some distributions do not have well-defined statistics for all initialization + parameter values. For example, the beta distribution is parameterized by + positive real numbers `concentration1` and `concentration0`, and does not have + well-defined mode if `concentration1 < 1` or `concentration0 < 1`. + + The user is given the option of raising an exception or returning `NaN`. + + ```python + a = tf.exp(tf.matmul(logits, weights_a)) + b = tf.exp(tf.matmul(logits, weights_b)) + + # Will raise exception if ANY batch member has a < 1 or b < 1. 
+ dist = distributions.beta(a, b, allow_nan_stats=False) + mode = dist.mode().eval() + + # Will return NaN for batch members with either a < 1 or b < 1. + dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior + mode = dist.mode().eval() + ``` + + In all cases, an exception is raised if *invalid* parameters are passed, e.g. + + ```python + # Will raise an exception if any Op is run. + negative_a = -1.0 * a # beta distribution by definition has a > 0. + dist = distributions.beta(negative_a, b, allow_nan_stats=True) + dist.mean().eval() + ``` + + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + dtype, + reparameterization_type, + validate_args, + allow_nan_stats, + parameters=None, + graph_parents=None, + name=None): + """Constructs the `Distribution`. + + **This is a private method for subclass use.** + + Args: + dtype: The type of the event samples. `None` implies no type-enforcement. + reparameterization_type: Instance of `ReparameterizationType`. + If `distributions.FULLY_REPARAMETERIZED`, this + `Distribution` can be reparameterized in terms of some standard + distribution with a function whose Jacobian is constant for the support + of the standard distribution. If `distributions.NOT_REPARAMETERIZED`, + then no such reparameterization is available. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + parameters: Python `dict` of parameters used to instantiate this + `Distribution`. + graph_parents: Python `list` of graph prerequisites of this + `Distribution`. + name: Python `str` name prefixed to Ops created by this class. Default: + subclass name. + + Raises: + ValueError: if any member of graph_parents is `None` or not a `Tensor`. + """ + graph_parents = [] if graph_parents is None else graph_parents + for i, t in enumerate(graph_parents): + if t is None or not tensor_util.is_tf_type(t): + raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t)) + if not name or name[-1] != "/": # `name` is not a name scope + non_unique_name = name or type(self).__name__ + with ops.name_scope(non_unique_name) as name: + pass + self._dtype = dtype + self._reparameterization_type = reparameterization_type + self._allow_nan_stats = allow_nan_stats + self._validate_args = validate_args + self._parameters = parameters or {} + self._graph_parents = graph_parents + self._name = name + + @property + def _parameters(self): + return self._parameter_dict + + @_parameters.setter + def _parameters(self, value): + """Intercept assignments to self._parameters to avoid reference cycles. + + Parameters are often created using locals(), so we need to clean out any + references to `self` before assigning it to an attribute. + + Args: + value: A dictionary of parameters to assign to the `_parameters` property. 
+ """ + if "self" in value: + del value["self"] + self._parameter_dict = value + + @classmethod + def param_shapes(cls, sample_shape, name="DistributionParamShapes"): + """Shapes of parameters given the desired shape of a call to `sample()`. + + This is a class method that describes what key/value arguments are required + to instantiate the given `Distribution` so that a particular shape is + returned for that instance's call to `sample()`. + + Subclasses should override class method `_param_shapes`. + + Args: + sample_shape: `Tensor` or python list/tuple. Desired shape of a call to + `sample()`. + name: name to prepend ops with. + + Returns: + `dict` of parameter name to `Tensor` shapes. + """ + with ops.name_scope(name, values=[sample_shape]): + return cls._param_shapes(sample_shape) + + @classmethod + def param_static_shapes(cls, sample_shape): + """param_shapes with static (i.e. `TensorShape`) shapes. + + This is a class method that describes what key/value arguments are required + to instantiate the given `Distribution` so that a particular shape is + returned for that instance's call to `sample()`. Assumes that the sample's + shape is known statically. + + Subclasses should override class method `_param_shapes` to return + constant-valued tensors when constant values are fed. + + Args: + sample_shape: `TensorShape` or python list/tuple. Desired shape of a call + to `sample()`. + + Returns: + `dict` of parameter name to `TensorShape`. + + Raises: + ValueError: if `sample_shape` is a `TensorShape` and is not fully defined. + """ + if isinstance(sample_shape, tensor_shape.TensorShape): + if not sample_shape.is_fully_defined(): + raise ValueError("TensorShape sample_shape must be fully defined") + sample_shape = sample_shape.as_list() + + params = cls.param_shapes(sample_shape) + + static_params = {} + for name, shape in params.items(): + static_shape = tensor_util.constant_value(shape) + if static_shape is None: + raise ValueError( + "sample_shape must be a fully-defined TensorShape or list/tuple") + static_params[name] = tensor_shape.TensorShape(static_shape) + + return static_params + + @staticmethod + def _param_shapes(sample_shape): + raise NotImplementedError("_param_shapes not implemented") + + @property + def name(self): + """Name prepended to all ops created by this `Distribution`.""" + return self._name + + @property + def dtype(self): + """The `DType` of `Tensor`s handled by this `Distribution`.""" + return self._dtype + + @property + def parameters(self): + """Dictionary of parameters used to instantiate this `Distribution`.""" + # Remove "self", "__class__", or other special variables. These can appear + # if the subclass used: + # `parameters = dict(locals())`. + return {k: v for k, v in self._parameters.items() + if not k.startswith("__") and k != "self"} + + @property + def reparameterization_type(self): + """Describes how samples from the distribution are reparameterized. + + Currently this is one of the static instances + `distributions.FULLY_REPARAMETERIZED` + or `distributions.NOT_REPARAMETERIZED`. + + Returns: + An instance of `ReparameterizationType`. + """ + return self._reparameterization_type + + @property + def allow_nan_stats(self): + """Python `bool` describing behavior when a stat is undefined. + + Stats return +/- infinity when it makes sense. E.g., the variance of a + Cauchy distribution is infinity. 
However, sometimes the statistic is + undefined, e.g., if a distribution's pdf does not achieve a maximum within + the support of the distribution, the mode is undefined. If the mean is + undefined, then by definition the variance is undefined. E.g. the mean for + Student's T for df = 1 is undefined (no clear way to say it is either + or - + infinity), so the variance = E[(X - mean)**2] is also undefined. + + Returns: + allow_nan_stats: Python `bool`. + """ + return self._allow_nan_stats + + @property + def validate_args(self): + """Python `bool` indicating possibly expensive checks are enabled.""" + return self._validate_args + + def copy(self, **override_parameters_kwargs): + """Creates a deep copy of the distribution. + + Note: the copy distribution may continue to depend on the original + initialization arguments. + + Args: + **override_parameters_kwargs: String/value dictionary of initialization + arguments to override with new values. + + Returns: + distribution: A new instance of `type(self)` initialized from the union + of self.parameters and override_parameters_kwargs, i.e., + `dict(self.parameters, **override_parameters_kwargs)`. + """ + parameters = dict(self.parameters, **override_parameters_kwargs) + return type(self)(**parameters) + + def _batch_shape_tensor(self): + raise NotImplementedError( + "batch_shape_tensor is not implemented: {}".format(type(self).__name__)) + + def batch_shape_tensor(self, name="batch_shape_tensor"): + """Shape of a single sample from a single event index as a 1-D `Tensor`. + + The batch dimensions are indexes into independent, non-identical + parameterizations of this distribution. + + Args: + name: name to give to the op + + Returns: + batch_shape: `Tensor`. + """ + with self._name_scope(name): + if self.batch_shape.is_fully_defined(): + return ops.convert_to_tensor(self.batch_shape.as_list(), + dtype=dtypes.int32, + name="batch_shape") + return self._batch_shape_tensor() + + def _batch_shape(self): + return tensor_shape.TensorShape(None) + + @property + def batch_shape(self): + """Shape of a single sample from a single event index as a `TensorShape`. + + May be partially defined or unknown. + + The batch dimensions are indexes into independent, non-identical + parameterizations of this distribution. + + Returns: + batch_shape: `TensorShape`, possibly unknown. + """ + return tensor_shape.as_shape(self._batch_shape()) + + def _event_shape_tensor(self): + raise NotImplementedError( + "event_shape_tensor is not implemented: {}".format(type(self).__name__)) + + def event_shape_tensor(self, name="event_shape_tensor"): + """Shape of a single sample from a single batch as a 1-D int32 `Tensor`. + + Args: + name: name to give to the op + + Returns: + event_shape: `Tensor`. + """ + with self._name_scope(name): + if self.event_shape.is_fully_defined(): + return ops.convert_to_tensor(self.event_shape.as_list(), + dtype=dtypes.int32, + name="event_shape") + return self._event_shape_tensor() + + def _event_shape(self): + return tensor_shape.TensorShape(None) + + @property + def event_shape(self): + """Shape of a single sample from a single batch as a `TensorShape`. + + May be partially defined or unknown. + + Returns: + event_shape: `TensorShape`, possibly unknown. + """ + return tensor_shape.as_shape(self._event_shape()) + + def is_scalar_event(self, name="is_scalar_event"): + """Indicates that `event_shape == []`. + + Args: + name: Python `str` prepended to names of ops created by this function. + + Returns: + is_scalar_event: `bool` scalar `Tensor`. 
+ """ + with self._name_scope(name): + return ops.convert_to_tensor( + self._is_scalar_helper(self.event_shape, self.event_shape_tensor), + name="is_scalar_event") + + def is_scalar_batch(self, name="is_scalar_batch"): + """Indicates that `batch_shape == []`. + + Args: + name: Python `str` prepended to names of ops created by this function. + + Returns: + is_scalar_batch: `bool` scalar `Tensor`. + """ + with self._name_scope(name): + return ops.convert_to_tensor( + self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor), + name="is_scalar_batch") + + def _sample_n(self, n, seed=None): + raise NotImplementedError("sample_n is not implemented: {}".format( + type(self).__name__)) + + def _call_sample_n(self, sample_shape, seed, name, **kwargs): + with self._name_scope(name, values=[sample_shape]): + sample_shape = ops.convert_to_tensor( + sample_shape, dtype=dtypes.int32, name="sample_shape") + sample_shape, n = self._expand_sample_shape_to_vector( + sample_shape, "sample_shape") + samples = self._sample_n(n, seed, **kwargs) + batch_event_shape = array_ops.shape(samples)[1:] + final_shape = array_ops.concat([sample_shape, batch_event_shape], 0) + samples = array_ops.reshape(samples, final_shape) + samples = self._set_sample_static_shape(samples, sample_shape) + return samples + + def sample(self, sample_shape=(), seed=None, name="sample"): + """Generate samples of the specified shape. + + Note that a call to `sample()` without arguments will generate a single + sample. + + Args: + sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples. + seed: Python integer seed for RNG + name: name to give to the op. + + Returns: + samples: a `Tensor` with prepended dimensions `sample_shape`. + """ + return self._call_sample_n(sample_shape, seed, name) + + def _log_prob(self, value): + raise NotImplementedError("log_prob is not implemented: {}".format( + type(self).__name__)) + + def _call_log_prob(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._log_prob(value, **kwargs) + except NotImplementedError as original_exception: + try: + return math_ops.log(self._prob(value, **kwargs)) + except NotImplementedError: + raise original_exception + + def log_prob(self, value, name="log_prob"): + """Log probability density/mass function. + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with + values of type `self.dtype`. + """ + return self._call_log_prob(value, name) + + def _prob(self, value): + raise NotImplementedError("prob is not implemented: {}".format( + type(self).__name__)) + + def _call_prob(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._prob(value, **kwargs) + except NotImplementedError as original_exception: + try: + return math_ops.exp(self._log_prob(value, **kwargs)) + except NotImplementedError: + raise original_exception + + def prob(self, value, name="prob"): + """Probability density/mass function. + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with + values of type `self.dtype`. 
+ """ + return self._call_prob(value, name) + + def _log_cdf(self, value): + raise NotImplementedError("log_cdf is not implemented: {}".format( + type(self).__name__)) + + def _call_log_cdf(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._log_cdf(value, **kwargs) + except NotImplementedError as original_exception: + try: + return math_ops.log(self._cdf(value, **kwargs)) + except NotImplementedError: + raise original_exception + + def log_cdf(self, value, name="log_cdf"): + """Log cumulative distribution function. + + Given random variable `X`, the cumulative distribution function `cdf` is: + + ```none + log_cdf(x) := Log[ P[X <= x] ] + ``` + + Often, a numerical approximation can be used for `log_cdf(x)` that yields + a more accurate answer than simply taking the logarithm of the `cdf` when + `x << -1`. + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with + values of type `self.dtype`. + """ + return self._call_log_cdf(value, name) + + def _cdf(self, value): + raise NotImplementedError("cdf is not implemented: {}".format( + type(self).__name__)) + + def _call_cdf(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._cdf(value, **kwargs) + except NotImplementedError as original_exception: + try: + return math_ops.exp(self._log_cdf(value, **kwargs)) + except NotImplementedError: + raise original_exception + + def cdf(self, value, name="cdf"): + """Cumulative distribution function. + + Given random variable `X`, the cumulative distribution function `cdf` is: + + ```none + cdf(x) := P[X <= x] + ``` + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with + values of type `self.dtype`. + """ + return self._call_cdf(value, name) + + def _log_survival_function(self, value): + raise NotImplementedError( + "log_survival_function is not implemented: {}".format( + type(self).__name__)) + + def _call_log_survival_function(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._log_survival_function(value, **kwargs) + except NotImplementedError as original_exception: + try: + return math_ops.log1p(-self.cdf(value, **kwargs)) + except NotImplementedError: + raise original_exception + + def log_survival_function(self, value, name="log_survival_function"): + """Log survival function. + + Given random variable `X`, the survival function is defined: + + ```none + log_survival_function(x) = Log[ P[X > x] ] + = Log[ 1 - P[X <= x] ] + = Log[ 1 - cdf(x) ] + ``` + + Typically, different numerical approximations can be used for the log + survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`. + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type + `self.dtype`. 
+ """ + return self._call_log_survival_function(value, name) + + def _survival_function(self, value): + raise NotImplementedError("survival_function is not implemented: {}".format( + type(self).__name__)) + + def _call_survival_function(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._survival_function(value, **kwargs) + except NotImplementedError as original_exception: + try: + return 1. - self.cdf(value, **kwargs) + except NotImplementedError: + raise original_exception + + def survival_function(self, value, name="survival_function"): + """Survival function. + + Given random variable `X`, the survival function is defined: + + ```none + survival_function(x) = P[X > x] + = 1 - P[X <= x] + = 1 - cdf(x). + ``` + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type + `self.dtype`. + """ + return self._call_survival_function(value, name) + + def _entropy(self): + raise NotImplementedError("entropy is not implemented: {}".format( + type(self).__name__)) + + def entropy(self, name="entropy"): + """Shannon entropy in nats.""" + with self._name_scope(name): + return self._entropy() + + def _mean(self): + raise NotImplementedError("mean is not implemented: {}".format( + type(self).__name__)) + + def mean(self, name="mean"): + """Mean.""" + with self._name_scope(name): + return self._mean() + + def _quantile(self, value): + raise NotImplementedError("quantile is not implemented: {}".format( + type(self).__name__)) + + def _call_quantile(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + return self._quantile(value, **kwargs) + + def quantile(self, value, name="quantile"): + """Quantile function. Aka "inverse cdf" or "percent point function". + + Given random variable `X` and `p in [0, 1]`, the `quantile` is: + + ```none + quantile(p) := x such that P[X <= x] == p + ``` + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with + values of type `self.dtype`. + """ + return self._call_quantile(value, name) + + def _variance(self): + raise NotImplementedError("variance is not implemented: {}".format( + type(self).__name__)) + + def variance(self, name="variance"): + """Variance. + + Variance is defined as, + + ```none + Var = E[(X - E[X])**2] + ``` + + where `X` is the random variable associated with this distribution, `E` + denotes expectation, and `Var.shape = batch_shape + event_shape`. + + Args: + name: Python `str` prepended to names of ops created by this function. + + Returns: + variance: Floating-point `Tensor` with shape identical to + `batch_shape + event_shape`, i.e., the same shape as `self.mean()`. + """ + with self._name_scope(name): + try: + return self._variance() + except NotImplementedError as original_exception: + try: + return math_ops.square(self._stddev()) + except NotImplementedError: + raise original_exception + + def _stddev(self): + raise NotImplementedError("stddev is not implemented: {}".format( + type(self).__name__)) + + def stddev(self, name="stddev"): + """Standard deviation. 
+
+    Standard deviation is defined as,
+
+    ```none
+    stddev = E[(X - E[X])**2]**0.5
+    ```
+
+    where `X` is the random variable associated with this distribution, `E`
+    denotes expectation, and `stddev.shape = batch_shape + event_shape`.
+
+    Args:
+      name: Python `str` prepended to names of ops created by this function.
+
+    Returns:
+      stddev: Floating-point `Tensor` with shape identical to
+        `batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
+    """
+
+    with self._name_scope(name):
+      try:
+        return self._stddev()
+      except NotImplementedError as original_exception:
+        try:
+          return math_ops.sqrt(self._variance())
+        except NotImplementedError:
+          raise original_exception
+
+  def _covariance(self):
+    raise NotImplementedError("covariance is not implemented: {}".format(
+        type(self).__name__))
+
+  def covariance(self, name="covariance"):
+    """Covariance.
+
+    Covariance is (possibly) defined only for non-scalar-event distributions.
+
+    For example, for a length-`k`, vector-valued distribution, it is calculated
+    as,
+
+    ```none
+    Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
+    ```
+
+    where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
+    denotes expectation.
+
+    Alternatively, for non-vector, multivariate distributions (e.g.,
+    matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
+    under some vectorization of the events, i.e.,
+
+    ```none
+    Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
+    ```
+
+    where `Cov` is a (batch of) `k' x k'` matrices,
+    `0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
+    mapping indices of this distribution's event dimensions to indices of a
+    length-`k'` vector.
+
+    Args:
+      name: Python `str` prepended to names of ops created by this function.
+
+    Returns:
+      covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
+        where the first `n` dimensions are batch coordinates and
+        `k' = reduce_prod(self.event_shape)`.
+    """
+    with self._name_scope(name):
+      return self._covariance()
+
+  def _mode(self):
+    raise NotImplementedError("mode is not implemented: {}".format(
+        type(self).__name__))
+
+  def mode(self, name="mode"):
+    """Mode."""
+    with self._name_scope(name):
+      return self._mode()
+
+  def _cross_entropy(self, other):
+    return kullback_leibler.cross_entropy(
+        self, other, allow_nan_stats=self.allow_nan_stats)
+
+  def cross_entropy(self, other, name="cross_entropy"):
+    """Computes the (Shannon) cross entropy.
+
+    Denote this distribution (`self`) by `P` and the `other` distribution by
+    `Q`. Assuming `P, Q` are absolutely continuous with respect to
+    one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
+    cross entropy is defined as:
+
+    ```none
+    H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
+    ```
+
+    where `F` denotes the support of the random variable `X ~ P`.
+
+    Args:
+      other: `tfp.distributions.Distribution` instance.
+      name: Python `str` prepended to names of ops created by this function.
+
+    Returns:
+      cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
+        representing `n` different calculations of (Shannon) cross entropy.
+    """
+    with self._name_scope(name):
+      return self._cross_entropy(other)
+
+  def _kl_divergence(self, other):
+    return kullback_leibler.kl_divergence(
+        self, other, allow_nan_stats=self.allow_nan_stats)
+
+  def kl_divergence(self, other, name="kl_divergence"):
+    """Computes the Kullback-Leibler divergence.
+
+    Denote this distribution (`self`) by `p` and the `other` distribution by
+    `q`. Assuming `p, q` are absolutely continuous with respect to reference
+    measure `r`, the KL divergence is defined as:
+
+    ```none
+    KL[p, q] = E_p[log(p(X)/q(X))]
+             = -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
+             = H[p, q] - H[p]
+    ```
+
+    where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
+    denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.
+
+    Args:
+      other: `tfp.distributions.Distribution` instance.
+      name: Python `str` prepended to names of ops created by this function.
+
+    Returns:
+      kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
+        representing `n` different calculations of the Kullback-Leibler
+        divergence.
+    """
+    with self._name_scope(name):
+      return self._kl_divergence(other)
+
+  def __str__(self):
+    return ("tfp.distributions.{type_name}("
+            "\"{self_name}\""
+            "{maybe_batch_shape}"
+            "{maybe_event_shape}"
+            ", dtype={dtype})".format(
+                type_name=type(self).__name__,
+                self_name=self.name,
+                maybe_batch_shape=(", batch_shape={}".format(self.batch_shape)
+                                   if self.batch_shape.ndims is not None
+                                   else ""),
+                maybe_event_shape=(", event_shape={}".format(self.event_shape)
+                                   if self.event_shape.ndims is not None
+                                   else ""),
+                dtype=self.dtype.name))
+
+  def __repr__(self):
+    return ("<tfp.distributions.{type_name} "
+            "'{self_name}' "
+            "batch_shape={batch_shape} "
+            "event_shape={event_shape} "
+            "dtype={dtype}>".format(
+                type_name=type(self).__name__,
+                self_name=self.name,
+                batch_shape=self.batch_shape,
+                event_shape=self.event_shape,
+                dtype=self.dtype.name))
+
+  @contextlib.contextmanager
+  def _name_scope(self, name=None, values=None):
+    """Helper function to standardize op scope."""
+    with ops.name_scope(self.name):
+      with ops.name_scope(name, values=(
+          ([] if values is None else values) + self._graph_parents)) as scope:
+        yield scope
+
+  def _expand_sample_shape_to_vector(self, x, name):
+    """Helper to `sample` which ensures input is 1D."""
+    x_static_val = tensor_util.constant_value(x)
+    if x_static_val is None:
+      prod = math_ops.reduce_prod(x)
+    else:
+      prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
+
+    ndims = x.get_shape().ndims  # != sample_ndims
+    if ndims is None:
+      # Maybe expand_dims.
+      ndims = array_ops.rank(x)
+      expanded_shape = util.pick_vector(
+          math_ops.equal(ndims, 0),
+          np.array([1], dtype=np.int32), array_ops.shape(x))
+      x = array_ops.reshape(x, expanded_shape)
+    elif ndims == 0:
+      # Definitely expand_dims.
+      if x_static_val is not None:
+        x = ops.convert_to_tensor(
+            np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
+            name=name)
+      else:
+        x = array_ops.reshape(x, [1])
+    elif ndims != 1:
+      raise ValueError("Input is neither scalar nor vector.")
+
+    return x, prod
+
+  def _set_sample_static_shape(self, x, sample_shape):
+    """Helper to `sample`; sets static shape info."""
+    # Set shape hints.
+    sample_shape = tensor_shape.TensorShape(
+        tensor_util.constant_value(sample_shape))
+
+    ndims = x.get_shape().ndims
+    sample_ndims = sample_shape.ndims
+    batch_ndims = self.batch_shape.ndims
+    event_ndims = self.event_shape.ndims
+
+    # Infer rank(x).
+    if (ndims is None and
+        sample_ndims is not None and
+        batch_ndims is not None and
+        event_ndims is not None):
+      ndims = sample_ndims + batch_ndims + event_ndims
+      x.set_shape([None] * ndims)
+
+    # Infer sample shape.
+    if ndims is not None and sample_ndims is not None:
+      shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
+      x.set_shape(x.get_shape().merge_with(shape))
+
+    # Infer event shape.
+ if ndims is not None and event_ndims is not None: + shape = tensor_shape.TensorShape( + [None]*(ndims - event_ndims)).concatenate(self.event_shape) + x.set_shape(x.get_shape().merge_with(shape)) + + # Infer batch shape. + if batch_ndims is not None: + if ndims is not None: + if sample_ndims is None and event_ndims is not None: + sample_ndims = ndims - batch_ndims - event_ndims + elif event_ndims is None and sample_ndims is not None: + event_ndims = ndims - batch_ndims - sample_ndims + if sample_ndims is not None and event_ndims is not None: + shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate( + self.batch_shape).concatenate([None]*event_ndims) + x.set_shape(x.get_shape().merge_with(shape)) + + return x + + def _is_scalar_helper(self, static_shape, dynamic_shape_fn): + """Implementation for `is_scalar_batch` and `is_scalar_event`.""" + if static_shape.ndims is not None: + return static_shape.ndims == 0 + shape = dynamic_shape_fn() + if (shape.get_shape().ndims is not None and + shape.get_shape().dims[0].value is not None): + # If the static_shape is correctly written then we should never execute + # this branch. We keep it just in case there's some unimagined corner + # case. + return shape.get_shape().as_list() == [0] + return math_ops.equal(array_ops.shape(shape)[0], 0) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distributions.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..cdc015295f5872f14d1373ca1db10892789924e0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distributions.py @@ -0,0 +1,36 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Core module for TensorFlow distribution objects and helpers."""
+from tensorflow.python.util import deprecation
+
+
+# pylint: disable=wildcard-import,unused-import,g-import-not-at-top
+with deprecation.silence():
+  from tensorflow.python.ops.distributions.bernoulli import Bernoulli
+  from tensorflow.python.ops.distributions.beta import Beta
+  from tensorflow.python.ops.distributions.categorical import Categorical
+  from tensorflow.python.ops.distributions.dirichlet import Dirichlet
+  from tensorflow.python.ops.distributions.dirichlet_multinomial import DirichletMultinomial
+  from tensorflow.python.ops.distributions.distribution import *
+  from tensorflow.python.ops.distributions.exponential import Exponential
+  from tensorflow.python.ops.distributions.gamma import Gamma
+  from tensorflow.python.ops.distributions.kullback_leibler import *
+  from tensorflow.python.ops.distributions.laplace import Laplace
+  from tensorflow.python.ops.distributions.multinomial import Multinomial
+  from tensorflow.python.ops.distributions.normal import Normal
+  from tensorflow.python.ops.distributions.student_t import StudentT
+  from tensorflow.python.ops.distributions.uniform import Uniform
+# pylint: enable=wildcard-import,unused-import
+del deprecation
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/exponential.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/exponential.py
new file mode 100644
index 0000000000000000000000000000000000000000..729ae866dbc7aea8c654a41ea00db4043afe70b7
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/exponential.py
@@ -0,0 +1,162 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""The Exponential distribution class."""
+
+import numpy as np
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops.distributions import gamma
+from tensorflow.python.util import deprecation
+from tensorflow.python.util.tf_export import tf_export
+
+
+__all__ = [
+    "Exponential",
+    "ExponentialWithSoftplusRate",
+]
+
+
+@tf_export(v1=["distributions.Exponential"])
+class Exponential(gamma.Gamma):
+  """Exponential distribution.
+
+  The Exponential distribution is parameterized by an event `rate` parameter.
+
+  #### Mathematical Details
+
+  The probability density function (pdf) is,
+
+  ```none
+  pdf(x; lambda, x > 0) = exp(-lambda x) / Z
+  Z = 1 / lambda
+  ```
+
+  where `rate = lambda` and `Z` is the normalizing constant.
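+
+  Substituting `Z = 1 / lambda` recovers the familiar form
+  `pdf(x; lambda) = lambda exp(-lambda x)`.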
+ + The Exponential distribution is a special case of the Gamma distribution, + i.e., + + ```python + Exponential(rate) = Gamma(concentration=1., rate) + ``` + + The Exponential distribution uses a `rate` parameter, or "inverse scale", + which can be intuited as, + + ```none + X ~ Exponential(rate=1) + Y = X / rate + ``` + + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + rate, + validate_args=False, + allow_nan_stats=True, + name="Exponential"): + """Construct Exponential distribution with parameter `rate`. + + Args: + rate: Floating point tensor, equivalent to `1 / mean`. Must contain only + positive values. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + """ + parameters = dict(locals()) + # Even though all statistics are defined for valid inputs, this is not + # true in the parent class "Gamma." Therefore, passing + # allow_nan_stats=True + # through to the parent class results in unnecessary asserts. + with ops.name_scope(name, values=[rate]) as name: + self._rate = ops.convert_to_tensor(rate, name="rate") + super(Exponential, self).__init__( + concentration=array_ops.ones([], dtype=self._rate.dtype), + rate=self._rate, + allow_nan_stats=allow_nan_stats, + validate_args=validate_args, + name=name) + self._parameters = parameters + self._graph_parents += [self._rate] + + @staticmethod + def _param_shapes(sample_shape): + return {"rate": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)} + + @property + def rate(self): + return self._rate + + def _log_survival_function(self, value): + return self._log_prob(value) - math_ops.log(self._rate) + + def _sample_n(self, n, seed=None): + shape = array_ops.concat([[n], array_ops.shape(self._rate)], 0) + # Uniform variates must be sampled from the open-interval `(0, 1)` rather + # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny` + # because it is the smallest, positive, "normal" number. A "normal" number + # is such that the mantissa has an implicit leading 1. Normal, positive + # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In + # this case, a subnormal number (i.e., np.nextafter) can cause us to sample + # 0.
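+    # For float32, `tiny` is roughly 1.18e-38, so the `-log(sampled)` below
+    # stays finite; drawing exactly 0 would give `log(0) = -inf`.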
+ sampled = random_ops.random_uniform( + shape, + minval=np.finfo(self.dtype.as_numpy_dtype).tiny, + maxval=1., + seed=seed, + dtype=self.dtype) + return -math_ops.log(sampled) / self._rate + + +class ExponentialWithSoftplusRate(Exponential): + """Exponential with softplus transform on `rate`.""" + + @deprecation.deprecated( + "2019-01-01", + "Use `tfd.Exponential(tf.nn.softplus(rate))` instead.", + warn_once=True) + def __init__(self, + rate, + validate_args=False, + allow_nan_stats=True, + name="ExponentialWithSoftplusRate"): + parameters = dict(locals()) + with ops.name_scope(name, values=[rate]) as name: + super(ExponentialWithSoftplusRate, self).__init__( + rate=nn.softplus(rate, name="softplus_rate"), + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + name=name) + self._parameters = parameters diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/gamma.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/gamma.py new file mode 100644 index 0000000000000000000000000000000000000000..c84caebf92f90fe1f47d63f282658cd95cdd9345 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/gamma.py @@ -0,0 +1,338 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Gamma distribution class.""" + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import kullback_leibler +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "Gamma", + "GammaWithSoftplusConcentrationRate", +] + + +@tf_export(v1=["distributions.Gamma"]) +class Gamma(distribution.Distribution): + """Gamma distribution. + + The Gamma distribution is defined over positive real numbers using + parameters `concentration` (aka "alpha") and `rate` (aka "beta"). + + #### Mathematical Details + + The probability density function (pdf) is, + + ```none + pdf(x; alpha, beta, x > 0) = x**(alpha - 1) exp(-x beta) / Z + Z = Gamma(alpha) beta**(-alpha) + ``` + + where: + + * `concentration = alpha`, `alpha > 0`, + * `rate = beta`, `beta > 0`, + * `Z` is the normalizing constant, and, + * `Gamma` is the [gamma function]( + https://en.wikipedia.org/wiki/Gamma_function).
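A hedged cross-check of this density against `scipy` (an assumption for illustration; scipy parameterizes the Gamma with `a = concentration` and `scale = 1 / rate`):

```python
import numpy as np
from scipy.special import gammaln
from scipy.stats import gamma as sp_gamma

alpha, beta, x = 3.0, 2.0, 1.7
# Docstring form: x**(alpha - 1) exp(-x beta) / Z, Z = Gamma(alpha) beta**(-alpha).
log_pdf = (alpha - 1.0) * np.log(x) - beta * x - (gammaln(alpha) - alpha * np.log(beta))
print(np.exp(log_pdf))                             # density from the formula above
print(sp_gamma(a=alpha, scale=1.0 / beta).pdf(x))  # same value from scipy
```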
+ + The cumulative distribution function (cdf) is, + + ```none + cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta x) / Gamma(alpha) + ``` + + where `GammaInc` is the [lower incomplete Gamma function]( + https://en.wikipedia.org/wiki/Incomplete_gamma_function). + + The parameters can be intuited via their relationship to mean and stddev, + + ```none + concentration = alpha = (mean / stddev)**2 + rate = beta = mean / stddev**2 = concentration / mean + ``` + + Distribution parameters are automatically broadcast in all functions; see + examples for details. + + Warning: The samples of this distribution are always non-negative. However, + the samples that are smaller than `np.finfo(dtype).tiny` are rounded + to this value, so that value appears more often than it should. + This should only be noticeable when the `concentration` is very small, or the + `rate` is very large. See note in `tf.random.gamma` docstring. + + Samples of this distribution are reparameterized (pathwise differentiable). + The derivatives are computed using the approach described in + (Figurnov et al., 2018). + + #### Examples + + ```python + import tensorflow_probability as tfp + tfd = tfp.distributions + + dist = tfd.Gamma(concentration=3.0, rate=2.0) + dist2 = tfd.Gamma(concentration=[3.0, 4.0], rate=[2.0, 3.0]) + ``` + + Compute the gradients of samples w.r.t. the parameters: + + ```python + concentration = tf.constant(3.0) + rate = tf.constant(2.0) + dist = tfd.Gamma(concentration, rate) + samples = dist.sample(5) # Shape [5] + loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function + # Unbiased stochastic gradients of the loss function + grads = tf.gradients(loss, [concentration, rate]) + ``` + + References: + Implicit Reparameterization Gradients: + [Figurnov et al., 2018] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) + ([pdf](http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf)) + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + concentration, + rate, + validate_args=False, + allow_nan_stats=True, + name="Gamma"): + """Construct Gamma with `concentration` and `rate` parameters. + + The parameters `concentration` and `rate` must be shaped in a way that + supports broadcasting (e.g. `concentration + rate` is a valid operation). + + Args: + concentration: Floating point tensor, the concentration params of the + distribution(s). Must contain only positive values. + rate: Floating point tensor, the inverse scale params of the + distribution(s). Must contain only positive values. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + + Raises: + TypeError: if `concentration` and `rate` are different dtypes.
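The broadcasting requirement stated above determines the batch shape; a small numpy sketch of the shape arithmetic (illustrative only):

```python
import numpy as np

concentration = np.ones([2, 1])  # e.g. two concentrations, broadcast over rates
rate = np.ones([3])              # three rates
# `concentration + rate` must be a valid broadcast; the result is the batch shape.
print(np.broadcast_shapes(concentration.shape, rate.shape))  # (2, 3)
```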
+ """ + parameters = dict(locals()) + with ops.name_scope(name, values=[concentration, rate]) as name: + with ops.control_dependencies([ + check_ops.assert_positive(concentration), + check_ops.assert_positive(rate), + ] if validate_args else []): + self._concentration = array_ops.identity( + concentration, name="concentration") + self._rate = array_ops.identity(rate, name="rate") + check_ops.assert_same_float_dtype( + [self._concentration, self._rate]) + super(Gamma, self).__init__( + dtype=self._concentration.dtype, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + reparameterization_type=distribution.FULLY_REPARAMETERIZED, + parameters=parameters, + graph_parents=[self._concentration, + self._rate], + name=name) + + @staticmethod + def _param_shapes(sample_shape): + return dict( + zip(("concentration", "rate"), ([ops.convert_to_tensor( + sample_shape, dtype=dtypes.int32)] * 2))) + + @property + def concentration(self): + """Concentration parameter.""" + return self._concentration + + @property + def rate(self): + """Rate parameter.""" + return self._rate + + def _batch_shape_tensor(self): + return array_ops.broadcast_dynamic_shape( + array_ops.shape(self.concentration), + array_ops.shape(self.rate)) + + def _batch_shape(self): + return array_ops.broadcast_static_shape( + self.concentration.get_shape(), + self.rate.get_shape()) + + def _event_shape_tensor(self): + return constant_op.constant([], dtype=dtypes.int32) + + def _event_shape(self): + return tensor_shape.TensorShape([]) + + @distribution_util.AppendDocstring( + """Note: See `tf.random.gamma` docstring for sampling details and + caveats.""") + def _sample_n(self, n, seed=None): + return random_ops.random_gamma( + shape=[n], + alpha=self.concentration, + beta=self.rate, + dtype=self.dtype, + seed=seed) + + def _log_prob(self, x): + return self._log_unnormalized_prob(x) - self._log_normalization() + + def _cdf(self, x): + x = self._maybe_assert_valid_sample(x) + # Note that igamma returns the regularized incomplete gamma function, + # which is what we want for the CDF. + return math_ops.igamma(self.concentration, self.rate * x) + + def _log_unnormalized_prob(self, x): + x = self._maybe_assert_valid_sample(x) + return math_ops.xlogy(self.concentration - 1., x) - self.rate * x + + def _log_normalization(self): + return (math_ops.lgamma(self.concentration) + - self.concentration * math_ops.log(self.rate)) + + def _entropy(self): + return (self.concentration + - math_ops.log(self.rate) + + math_ops.lgamma(self.concentration) + + ((1. - self.concentration) * + math_ops.digamma(self.concentration))) + + def _mean(self): + return self.concentration / self.rate + + def _variance(self): + return self.concentration / math_ops.square(self.rate) + + def _stddev(self): + return math_ops.sqrt(self.concentration) / self.rate + + @distribution_util.AppendDocstring( + """The mode of a gamma distribution is `(shape - 1) / rate` when + `shape > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`, + an exception will be raised rather than returning `NaN`.""") + def _mode(self): + mode = (self.concentration - 1.) 
/ self.rate + if self.allow_nan_stats: + nan = array_ops.fill( + self.batch_shape_tensor(), + np.array(np.nan, dtype=self.dtype.as_numpy_dtype()), + name="nan") + return array_ops.where_v2(self.concentration > 1., mode, nan) + else: + return control_flow_ops.with_dependencies([ + check_ops.assert_less( + array_ops.ones([], self.dtype), + self.concentration, + message="mode not defined when any concentration <= 1"), + ], mode) + + def _maybe_assert_valid_sample(self, x): + check_ops.assert_same_float_dtype(tensors=[x], dtype=self.dtype) + if not self.validate_args: + return x + return control_flow_ops.with_dependencies([ + check_ops.assert_positive(x), + ], x) + + +class GammaWithSoftplusConcentrationRate(Gamma): + """`Gamma` with softplus of `concentration` and `rate`.""" + + @deprecation.deprecated( + "2019-01-01", + "Use `tfd.Gamma(tf.nn.softplus(concentration), " + "tf.nn.softplus(rate))` instead.", + warn_once=True) + def __init__(self, + concentration, + rate, + validate_args=False, + allow_nan_stats=True, + name="GammaWithSoftplusConcentrationRate"): + parameters = dict(locals()) + with ops.name_scope(name, values=[concentration, rate]) as name: + super(GammaWithSoftplusConcentrationRate, self).__init__( + concentration=nn.softplus(concentration, + name="softplus_concentration"), + rate=nn.softplus(rate, name="softplus_rate"), + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + name=name) + self._parameters = parameters + + +@kullback_leibler.RegisterKL(Gamma, Gamma) +def _kl_gamma_gamma(g0, g1, name=None): + """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma. + + Args: + g0: instance of a Gamma distribution object. + g1: instance of a Gamma distribution object. + name: (optional) Name to use for created operations. + Default is "kl_gamma_gamma". + + Returns: + kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1). + """ + with ops.name_scope(name, "kl_gamma_gamma", values=[ + g0.concentration, g0.rate, g1.concentration, g1.rate]): + # Result from: + # http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps + # For derivation see: + # http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions pylint: disable=line-too-long + return (((g0.concentration - g1.concentration) + * math_ops.digamma(g0.concentration)) + + math_ops.lgamma(g1.concentration) + - math_ops.lgamma(g0.concentration) + + g1.concentration * math_ops.log(g0.rate) + - g1.concentration * math_ops.log(g1.rate) + + g0.concentration * (g1.rate / g0.rate - 1.)) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/identity_bijector.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/identity_bijector.py new file mode 100644 index 0000000000000000000000000000000000000000..0c6ae896567a3b4a6a11d16fd1a521b765e229d6 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/identity_bijector.py @@ -0,0 +1,68 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Identity bijector.""" + +from tensorflow.python.framework import constant_op +from tensorflow.python.ops.distributions import bijector +from tensorflow.python.util import deprecation + + +__all__ = [ + "Identity", +] + + +class Identity(bijector.Bijector): + """Compute Y = g(X) = X. + + Example Use: + + ```python + # Create the Y=g(X)=X transform which is intended for Tensors with 1 batch + # ndim and 1 event ndim (i.e., vector of vectors). + identity = Identity() + x = [[1., 2], + [3, 4]] + x == identity.forward(x) == identity.inverse(x) + ``` + + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, validate_args=False, name="identity"): + super(Identity, self).__init__( + forward_min_event_ndims=0, + is_constant_jacobian=True, + validate_args=validate_args, + name=name) + + def _forward(self, x): + return x + + def _inverse(self, y): + return y + + def _inverse_log_det_jacobian(self, y): + return constant_op.constant(0., dtype=y.dtype) + + def _forward_log_det_jacobian(self, x): + return constant_op.constant(0., dtype=x.dtype) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/kullback_leibler.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/kullback_leibler.py new file mode 100644 index 0000000000000000000000000000000000000000..c8dfb2157e9ff252cce3294c70b6059380d28a60 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/kullback_leibler.py @@ -0,0 +1,210 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Registration and usage mechanisms for KL-divergences.""" + +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import math_ops +from tensorflow.python.util import deprecation +from tensorflow.python.util import tf_inspect +from tensorflow.python.util.tf_export import tf_export + + +_DIVERGENCES = {} + + +__all__ = [ + "RegisterKL", + "kl_divergence", +] + + +def _registered_kl(type_a, type_b): + """Get the KL function registered for classes a and b.""" + hierarchy_a = tf_inspect.getmro(type_a) + hierarchy_b = tf_inspect.getmro(type_b) + dist_to_children = None + kl_fn = None + for mro_to_a, parent_a in enumerate(hierarchy_a): + for mro_to_b, parent_b in enumerate(hierarchy_b): + candidate_dist = mro_to_a + mro_to_b + candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None) + if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children): + dist_to_children = candidate_dist + kl_fn = candidate_kl_fn + return kl_fn + + +@deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) +@tf_export(v1=["distributions.kl_divergence"]) +def kl_divergence(distribution_a, distribution_b, + allow_nan_stats=True, name=None): + """Get the KL-divergence KL(distribution_a || distribution_b). + + If there is no KL method registered specifically for `type(distribution_a)` + and `type(distribution_b)`, then the class hierarchies of these types are + searched. + + If one KL method is registered between any pairs of classes in these two + parent hierarchies, it is used. + + If more than one such registered method exists, the method whose registered + classes have the shortest sum MRO paths to the input types is used. + + If more than one such shortest path exists, the first method + identified in the search is used (favoring a shorter MRO distance to + `type(distribution_a)`). + + Args: + distribution_a: The first distribution. + distribution_b: The second distribution. + allow_nan_stats: Python `bool`, default `True`. When `True`, + statistics (e.g., mean, mode, variance) use the value "`NaN`" to + indicate the result is undefined. When `False`, an exception is raised + if one or more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + + Returns: + A Tensor with the batchwise KL-divergence between `distribution_a` + and `distribution_b`. + + Raises: + NotImplementedError: If no KL method is defined for distribution types + of `distribution_a` and `distribution_b`. 
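A standalone sketch of the lookup rule just described (plain Python, not the actual registry code): among all registered pairs of parent classes, the pair with the smallest summed MRO distance wins.

```python
import inspect

_REGISTRY = {}  # (class_a, class_b) -> kl_fn

def lookup(type_a, type_b):
  """Return the registered fn whose class pair is MRO-closest to the inputs."""
  best_fn, best_dist = None, None
  for i, parent_a in enumerate(inspect.getmro(type_a)):
    for j, parent_b in enumerate(inspect.getmro(type_b)):
      fn = _REGISTRY.get((parent_a, parent_b))
      if fn is not None and (best_dist is None or i + j < best_dist):
        best_fn, best_dist = fn, i + j
  return best_fn
```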
+ """ + kl_fn = _registered_kl(type(distribution_a), type(distribution_b)) + if kl_fn is None: + raise NotImplementedError( + "No KL(distribution_a || distribution_b) registered for distribution_a " + "type %s and distribution_b type %s" + % (type(distribution_a).__name__, type(distribution_b).__name__)) + + with ops.name_scope("KullbackLeibler"): + kl_t = kl_fn(distribution_a, distribution_b, name=name) + if allow_nan_stats: + return kl_t + + # Check KL for NaNs + kl_t = array_ops.identity(kl_t, name="kl") + + with ops.control_dependencies([ + control_flow_assert.Assert( + math_ops.logical_not(math_ops.reduce_any(math_ops.is_nan(kl_t))), [ + "KL calculation between %s and %s returned NaN values " + "(and was called with allow_nan_stats=False). Values:" % + (distribution_a.name, distribution_b.name), kl_t + ]) + ]): + return array_ops.identity(kl_t, name="checked_kl") + + +@deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) +def cross_entropy(ref, other, + allow_nan_stats=True, name=None): + """Computes the (Shannon) cross entropy. + + Denote two distributions by `P` (`ref`) and `Q` (`other`). Assuming `P, Q` + are absolutely continuous with respect to one another and permit densities + `p(x) dr(x)` and `q(x) dr(x)`, (Shanon) cross entropy is defined as: + + ```none + H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x) + ``` + + where `F` denotes the support of the random variable `X ~ P`. + + Args: + ref: `tfd.Distribution` instance. + other: `tfd.Distribution` instance. + allow_nan_stats: Python `bool`, default `True`. When `True`, + statistics (e.g., mean, mode, variance) use the value "`NaN`" to + indicate the result is undefined. When `False`, an exception is raised + if one or more of the statistic's batch members are undefined. + name: Python `str` prepended to names of ops created by this function. + + Returns: + cross_entropy: `ref.dtype` `Tensor` with shape `[B1, ..., Bn]` + representing `n` different calculations of (Shanon) cross entropy. + """ + with ops.name_scope(name, "cross_entropy"): + return ref.entropy() + kl_divergence( + ref, other, allow_nan_stats=allow_nan_stats) + + +@tf_export(v1=["distributions.RegisterKL"]) +class RegisterKL: + """Decorator to register a KL divergence implementation function. + + Usage: + + @distributions.RegisterKL(distributions.Normal, distributions.Normal) + def _kl_normal_mvn(norm_a, norm_b): + # Return KL(norm_a || norm_b) + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, dist_cls_a, dist_cls_b): + """Initialize the KL registrar. + + Args: + dist_cls_a: the class of the first argument of the KL divergence. + dist_cls_b: the class of the second argument of the KL divergence. + """ + self._key = (dist_cls_a, dist_cls_b) + + def __call__(self, kl_fn): + """Perform the KL registration. + + Args: + kl_fn: The function to use for the KL divergence. + + Returns: + kl_fn + + Raises: + TypeError: if kl_fn is not a callable. + ValueError: if a KL divergence function has already been registered for + the given argument classes. 
+ """ + if not callable(kl_fn): + raise TypeError("kl_fn must be callable, received: %s" % kl_fn) + if self._key in _DIVERGENCES: + raise ValueError("KL(%s || %s) has already been registered to: %s" + % (self._key[0].__name__, self._key[1].__name__, + _DIVERGENCES[self._key])) + _DIVERGENCES[self._key] = kl_fn + return kl_fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/laplace.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/laplace.py new file mode 100644 index 0000000000000000000000000000000000000000..9cebbac5daab17b18450a05c5bfa977b4f019bbb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/laplace.py @@ -0,0 +1,238 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Laplace distribution class.""" + +import math + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import special_math +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "Laplace", + "LaplaceWithSoftplusScale", +] + + +@tf_export(v1=["distributions.Laplace"]) +class Laplace(distribution.Distribution): + """The Laplace distribution with location `loc` and `scale` parameters. + + #### Mathematical details + + The probability density function (pdf) of this distribution is, + + ```none + pdf(x; mu, sigma) = exp(-|x - mu| / sigma) / Z + Z = 2 sigma + ``` + + where `loc = mu`, `scale = sigma`, and `Z` is the normalization constant. + + Note that the Laplace distribution can be thought of two exponential + distributions spliced together "back-to-back." + + The Lpalce distribution is a member of the [location-scale family]( + https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be + constructed as, + + ```none + X ~ Laplace(loc=0, scale=1) + Y = loc + scale * X + ``` + + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + loc, + scale, + validate_args=False, + allow_nan_stats=True, + name="Laplace"): + """Construct Laplace distribution with parameters `loc` and `scale`. 
+ + The parameters `loc` and `scale` must be shaped in a way that supports + broadcasting (e.g., `loc / scale` is a valid operation). + + Args: + loc: Floating point tensor which characterizes the location (center) + of the distribution. + scale: Positive floating point tensor which characterizes the spread of + the distribution. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, + statistics (e.g., mean, mode, variance) use the value "`NaN`" to + indicate the result is undefined. When `False`, an exception is raised + if one or more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + + Raises: + TypeError: if `loc` and `scale` are of different dtype. + """ + parameters = dict(locals()) + with ops.name_scope(name, values=[loc, scale]) as name: + with ops.control_dependencies([check_ops.assert_positive(scale)] if + validate_args else []): + self._loc = array_ops.identity(loc, name="loc") + self._scale = array_ops.identity(scale, name="scale") + check_ops.assert_same_float_dtype([self._loc, self._scale]) + super(Laplace, self).__init__( + dtype=self._loc.dtype, + reparameterization_type=distribution.FULLY_REPARAMETERIZED, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + parameters=parameters, + graph_parents=[self._loc, self._scale], + name=name) + + @staticmethod + def _param_shapes(sample_shape): + return dict( + zip(("loc", "scale"), ([ops.convert_to_tensor( + sample_shape, dtype=dtypes.int32)] * 2))) + + @property + def loc(self): + """Distribution parameter for the location.""" + return self._loc + + @property + def scale(self): + """Distribution parameter for scale.""" + return self._scale + + def _batch_shape_tensor(self): + return array_ops.broadcast_dynamic_shape( + array_ops.shape(self.loc), array_ops.shape(self.scale)) + + def _batch_shape(self): + return array_ops.broadcast_static_shape( + self.loc.get_shape(), self.scale.get_shape()) + + def _event_shape_tensor(self): + return constant_op.constant([], dtype=dtypes.int32) + + def _event_shape(self): + return tensor_shape.TensorShape([]) + + def _sample_n(self, n, seed=None): + shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) + # Uniform variates must be sampled from the open-interval `(-1, 1)` rather + # than `[-1, 1)`. In the case of `(0, 1)` we'd use + # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest, + # positive, "normal" number. However, the concept of subnormality exists + # only at zero; here we need the smallest usable number larger than -1, + # i.e., `-1 + eps/2`. 
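+    # `np.nextafter(-1., 0.)` below is exactly that value: the closest float
+    # above -1, so `|uniform_samples| < 1` holds strictly and the
+    # `log1p(-|u|)` term in the return value stays finite.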
+ uniform_samples = random_ops.random_uniform( + shape=shape, + minval=np.nextafter(self.dtype.as_numpy_dtype(-1.), + self.dtype.as_numpy_dtype(0.)), + maxval=1., + dtype=self.dtype, + seed=seed) + return (self.loc - self.scale * math_ops.sign(uniform_samples) * + math_ops.log1p(-math_ops.abs(uniform_samples))) + + def _log_prob(self, x): + return self._log_unnormalized_prob(x) - self._log_normalization() + + def _prob(self, x): + return math_ops.exp(self._log_prob(x)) + + def _log_cdf(self, x): + return special_math.log_cdf_laplace(self._z(x)) + + def _log_survival_function(self, x): + return special_math.log_cdf_laplace(-self._z(x)) + + def _cdf(self, x): + z = self._z(x) + return (0.5 + 0.5 * math_ops.sign(z) * + (1. - math_ops.exp(-math_ops.abs(z)))) + + def _log_unnormalized_prob(self, x): + return -math_ops.abs(self._z(x)) + + def _log_normalization(self): + return math.log(2.) + math_ops.log(self.scale) + + def _entropy(self): + # Use broadcasting rules to calculate the full broadcast scale. + scale = self.scale + array_ops.zeros_like(self.loc) + return math.log(2.) + 1. + math_ops.log(scale) + + def _mean(self): + return self.loc + array_ops.zeros_like(self.scale) + + def _stddev(self): + return math.sqrt(2.) * self.scale + array_ops.zeros_like(self.loc) + + def _median(self): + return self._mean() + + def _mode(self): + return self._mean() + + def _z(self, x): + return (x - self.loc) / self.scale + + +class LaplaceWithSoftplusScale(Laplace): + """Laplace with softplus applied to `scale`.""" + + @deprecation.deprecated( + "2019-01-01", + "Use `tfd.Laplace(loc, tf.nn.softplus(scale))` " + "instead.", + warn_once=True) + def __init__(self, + loc, + scale, + validate_args=False, + allow_nan_stats=True, + name="LaplaceWithSoftplusScale"): + parameters = dict(locals()) + with ops.name_scope(name, values=[loc, scale]) as name: + super(LaplaceWithSoftplusScale, self).__init__( + loc=loc, + scale=nn.softplus(scale, name="softplus_scale"), + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + name=name) + self._parameters = parameters diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/multinomial.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/multinomial.py new file mode 100644 index 0000000000000000000000000000000000000000..4b889bcb288f3b23684631bff8278e1604b45643 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/multinomial.py @@ -0,0 +1,314 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# ============================================================================== +"""The Multinomial distribution class.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import map_fn +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "Multinomial", +] + + +_multinomial_sample_note = """For each batch of counts, `value = [n_0, ... +,n_{k-1}]`, `P[value]` is the probability that after sampling `self.total_count` +draws from this Multinomial distribution, the number of draws falling in class +`j` is `n_j`. Since this definition is [exchangeable]( +https://en.wikipedia.org/wiki/Exchangeable_random_variables), different +sequences have the same counts so the probability includes a combinatorial +coefficient. + +Note: `value` must be a non-negative tensor with dtype `self.dtype`, have no +fractional components, and such that +`tf.reduce_sum(value, -1) = self.total_count`. Its shape must be broadcastable +with `self.probs` and `self.total_count`.""" + + +@tf_export(v1=["distributions.Multinomial"]) +class Multinomial(distribution.Distribution): + """Multinomial distribution. + + This Multinomial distribution is parameterized by `probs`, a (batch of) + length-`K` `prob` (probability) vectors (`K > 1`) such that + `tf.reduce_sum(probs, -1) = 1`, and a `total_count` number of trials, i.e., + the number of trials per draw from the Multinomial. It is defined over a + (batch of) length-`K` vector `counts` such that + `tf.reduce_sum(counts, -1) = total_count`. The Multinomial is identically the + Binomial distribution when `K = 2`. + + #### Mathematical Details + + The Multinomial is a distribution over `K`-class counts, i.e., a length-`K` + vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`. + + The probability mass function (pmf) is, + + ```none + pmf(n; pi, N) = prod_j (pi_j)**n_j / Z + Z = (prod_j n_j!) / N! + ``` + + where: + * `probs = pi = [pi_0, ..., pi_{K-1}]`, `pi_j > 0`, `sum_j pi_j = 1`, + * `total_count = N`, `N` a positive integer, + * `Z` is the normalization constant, and, + * `N!` denotes `N` factorial. + + Distribution parameters are automatically broadcast in all functions; see + examples for details. + + #### Pitfalls + + The number of classes, `K`, must not exceed: + - the largest integer representable by `self.dtype`, i.e., + `2**(mantissa_bits+1)` (IEEE 754), + - the maximum `Tensor` index, i.e., `2**31-1`. + + In other words, + + ```python + K <= min(2**31-1, { + tf.float16: 2**11, + tf.float32: 2**24, + tf.float64: 2**53 }[param.dtype]) + ``` + + Note: This condition is validated only when `self.validate_args = True`. + + #### Examples + + Create a 3-class distribution, with the 3rd class most likely to be drawn, + using logits. + + ```python + logits = [-50., -43, 0] + dist = Multinomial(total_count=4., logits=logits) + ``` + + Create a 3-class distribution, with the 3rd class most likely to be drawn.
+ + ```python + p = [.2, .3, .5] + dist = Multinomial(total_count=4., probs=p) + ``` + + The distribution functions can be evaluated on counts. + + ```python + # counts same shape as p. + counts = [1., 0, 3] + dist.prob(counts) # Shape [] + + # p will be broadcast to [[.2, .3, .5], [.2, .3, .5]] to match counts. + counts = [[1., 2, 1], [2, 2, 0]] + dist.prob(counts) # Shape [2] + + # p will be broadcast to shape [5, 7, 3] to match counts. + counts = [[...]] # Shape [5, 7, 3] + dist.prob(counts) # Shape [5, 7] + ``` + + Create a 2-batch of 3-class distributions. + + ```python + p = [[.1, .2, .7], [.3, .3, .4]] # Shape [2, 3] + dist = Multinomial(total_count=[4., 5], probs=p) + + counts = [[2., 1, 1], [3, 1, 1]] + dist.prob(counts) # Shape [2] + + dist.sample(5) # Shape [5, 2, 3] + ``` + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + total_count, + logits=None, + probs=None, + validate_args=False, + allow_nan_stats=True, + name="Multinomial"): + """Initialize a batch of Multinomial distributions. + + Args: + total_count: Non-negative floating point tensor with shape broadcastable + to `[N1,..., Nm]` with `m >= 0`. Defines this as a batch of + `N1 x ... x Nm` different Multinomial distributions. Its components + should be equal to integer values. + logits: Floating point tensor representing unnormalized log-probabilities + of a positive event with shape broadcastable to + `[N1,..., Nm, K]` `m >= 0`, and the same dtype as `total_count`. Defines + this as a batch of `N1 x ... x Nm` different `K` class Multinomial + distributions. Only one of `logits` or `probs` should be passed in. + probs: Positive floating point tensor with shape broadcastable to + `[N1,..., Nm, K]` `m >= 0` and same dtype as `total_count`. Defines + this as a batch of `N1 x ... x Nm` different `K` class Multinomial + distributions. `probs`'s components in the last portion of its shape + should sum to `1`. Only one of `logits` or `probs` should be passed in. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. 
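A hedged numerical cross-check of the pmf described above (assuming `scipy` is available; it uses the same `(n, p)` parameterization):

```python
import numpy as np
from scipy.special import gammaln
from scipy.stats import multinomial as sp_multinomial

p = np.array([0.2, 0.3, 0.5])
counts = np.array([1.0, 0.0, 3.0])  # sums to total_count = 4
# Docstring form: prod_j pi_j**n_j / Z with 1/Z = N! / prod_j n_j!.
log_coeff = gammaln(4.0 + 1.0) - gammaln(counts + 1.0).sum()
log_pmf = log_coeff + (counts * np.log(p)).sum()
print(np.exp(log_pmf))                       # 0.1 from the formula above
print(sp_multinomial(n=4, p=p).pmf(counts))  # 0.1 from scipy
```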
+ """ + parameters = dict(locals()) + with ops.name_scope(name, values=[total_count, logits, probs]) as name: + self._total_count = ops.convert_to_tensor(total_count, name="total_count") + if validate_args: + self._total_count = ( + distribution_util.embed_check_nonnegative_integer_form( + self._total_count)) + self._logits, self._probs = distribution_util.get_logits_and_probs( + logits=logits, + probs=probs, + multidimensional=True, + validate_args=validate_args, + name=name) + self._mean_val = self._total_count[..., array_ops.newaxis] * self._probs + super(Multinomial, self).__init__( + dtype=self._probs.dtype, + reparameterization_type=distribution.NOT_REPARAMETERIZED, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + parameters=parameters, + graph_parents=[self._total_count, + self._logits, + self._probs], + name=name) + + @property + def total_count(self): + """Number of trials used to construct a sample.""" + return self._total_count + + @property + def logits(self): + """Vector of coordinatewise logits.""" + return self._logits + + @property + def probs(self): + """Probability of drawing a `1` in that coordinate.""" + return self._probs + + def _batch_shape_tensor(self): + return array_ops.shape(self._mean_val)[:-1] + + def _batch_shape(self): + return self._mean_val.get_shape().with_rank_at_least(1)[:-1] + + def _event_shape_tensor(self): + return array_ops.shape(self._mean_val)[-1:] + + def _event_shape(self): + return self._mean_val.get_shape().with_rank_at_least(1)[-1:] + + def _sample_n(self, n, seed=None): + n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32) + k = self.event_shape_tensor()[0] + + # broadcast the total_count and logits to same shape + n_draws = array_ops.ones_like( + self.logits[..., 0], dtype=n_draws.dtype) * n_draws + logits = array_ops.ones_like( + n_draws[..., array_ops.newaxis], dtype=self.logits.dtype) * self.logits + + # flatten the total_count and logits + flat_logits = array_ops.reshape(logits, [-1, k]) # [B1B2...Bm, k] + flat_ndraws = n * array_ops.reshape(n_draws, [-1]) # [B1B2...Bm] + + # computes each total_count and logits situation by map_fn + def _sample_single(args): + logits, n_draw = args[0], args[1] # [K], [] + x = random_ops.multinomial(logits[array_ops.newaxis, ...], n_draw, + seed) # [1, n*n_draw] + x = array_ops.reshape(x, shape=[n, -1]) # [n, n_draw] + x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k), axis=-2) # [n, k] + return x + + x = map_fn.map_fn( + _sample_single, [flat_logits, flat_ndraws], + dtype=self.dtype) # [B1B2...Bm, n, k] + + # reshape the results to proper shape + x = array_ops.transpose(x, perm=[1, 0, 2]) + final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0) + x = array_ops.reshape(x, final_shape) # [n, B1, B2,..., Bm, k] + return x + + @distribution_util.AppendDocstring(_multinomial_sample_note) + def _log_prob(self, counts): + return self._log_unnormalized_prob(counts) - self._log_normalization(counts) + + def _log_unnormalized_prob(self, counts): + counts = self._maybe_assert_valid_sample(counts) + return math_ops.reduce_sum(counts * nn_ops.log_softmax(self.logits), -1) + + def _log_normalization(self, counts): + counts = self._maybe_assert_valid_sample(counts) + return -distribution_util.log_combinations(self.total_count, counts) + + def _mean(self): + return array_ops.identity(self._mean_val) + + def _covariance(self): + p = self.probs * array_ops.ones_like( + self.total_count)[..., array_ops.newaxis] + # pylint: disable=invalid-unary-operand-type + return 
array_ops.matrix_set_diag( + -math_ops.matmul( + self._mean_val[..., array_ops.newaxis], + p[..., array_ops.newaxis, :]), # outer product + self._variance()) + + def _variance(self): + p = self.probs * array_ops.ones_like( + self.total_count)[..., array_ops.newaxis] + return self._mean_val - self._mean_val * p + + def _maybe_assert_valid_sample(self, counts): + """Check counts for proper shape, values, then return tensor version.""" + if not self.validate_args: + return counts + counts = distribution_util.embed_check_nonnegative_integer_form(counts) + return control_flow_ops.with_dependencies([ + check_ops.assert_equal( + self.total_count, math_ops.reduce_sum(counts, -1), + message="counts must sum to `self.total_count`"), + ], counts) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/normal.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/normal.py new file mode 100644 index 0000000000000000000000000000000000000000..4bd9f36873d455f9df6329c1693f2e9f4df5d4b0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/normal.py @@ -0,0 +1,291 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Normal (Gaussian) distribution class.""" + +import math + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import kullback_leibler +from tensorflow.python.ops.distributions import special_math +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "Normal", + "NormalWithSoftplusScale", +] + + +@tf_export(v1=["distributions.Normal"]) +class Normal(distribution.Distribution): + """The Normal distribution with location `loc` and `scale` parameters. + + #### Mathematical details + + The probability density function (pdf) is, + + ```none + pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z + Z = (2 pi sigma**2)**0.5 + ``` + + where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and, `Z` + is the normalization constant. + + The Normal distribution is a member of the [location-scale family]( + https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be + constructed as, + + ```none + X ~ Normal(loc=0, scale=1) + Y = loc + scale * X + ``` + + #### Examples + + Examples of initialization of one or a batch of distributions. 
+ + ```python + import tensorflow_probability as tfp + tfd = tfp.distributions + + # Define a single scalar Normal distribution. + dist = tfd.Normal(loc=0., scale=3.) + + # Evaluate the cdf at 1, returning a scalar. + dist.cdf(1.) + + # Define a batch of two scalar valued Normals. + # The first has mean 1 and standard deviation 11, the second 2 and 22. + dist = tfd.Normal(loc=[1, 2.], scale=[11, 22.]) + + # Evaluate the pdf of the first distribution on 0, and the second on 1.5, + # returning a length two tensor. + dist.prob([0, 1.5]) + + # Get 3 samples, returning a 3 x 2 tensor. + dist.sample([3]) + ``` + + Arguments are broadcast when possible. + + ```python + # Define a batch of two scalar valued Normals. + # Both have mean 1, but different standard deviations. + dist = tfd.Normal(loc=1., scale=[11, 22.]) + + # Evaluate the pdf of both distributions on the same point, 3.0, + # returning a length 2 tensor. + dist.prob(3.0) + ``` + + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + loc, + scale, + validate_args=False, + allow_nan_stats=True, + name="Normal"): + """Construct Normal distributions with mean and stddev `loc` and `scale`. + + The parameters `loc` and `scale` must be shaped in a way that supports + broadcasting (e.g. `loc + scale` is a valid operation). + + Args: + loc: Floating point tensor; the means of the distribution(s). + scale: Floating point tensor; the stddevs of the distribution(s). + Must contain only positive values. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, + statistics (e.g., mean, mode, variance) use the value "`NaN`" to + indicate the result is undefined. When `False`, an exception is raised + if one or more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + + Raises: + TypeError: if `loc` and `scale` have different `dtype`. 
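`_sample_n` below draws standard normals and applies the location-scale transform `Y = loc + scale * X`; a minimal numpy sketch of the same idea:

```python
import numpy as np

rng = np.random.default_rng(0)
loc, scale = 1.0, 3.0
z = rng.standard_normal(200_000)      # X ~ Normal(0, 1)
samples = loc + scale * z             # Y = loc + scale * X
print(samples.mean(), samples.std())  # ~1.0 and ~3.0
```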
+ """ + parameters = dict(locals()) + with ops.name_scope(name, values=[loc, scale]) as name: + with ops.control_dependencies([check_ops.assert_positive(scale)] if + validate_args else []): + self._loc = array_ops.identity(loc, name="loc") + self._scale = array_ops.identity(scale, name="scale") + check_ops.assert_same_float_dtype([self._loc, self._scale]) + super(Normal, self).__init__( + dtype=self._scale.dtype, + reparameterization_type=distribution.FULLY_REPARAMETERIZED, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + parameters=parameters, + graph_parents=[self._loc, self._scale], + name=name) + + @staticmethod + def _param_shapes(sample_shape): + return dict( + zip(("loc", "scale"), ([ops.convert_to_tensor( + sample_shape, dtype=dtypes.int32)] * 2))) + + @property + def loc(self): + """Distribution parameter for the mean.""" + return self._loc + + @property + def scale(self): + """Distribution parameter for standard deviation.""" + return self._scale + + def _batch_shape_tensor(self): + return array_ops.broadcast_dynamic_shape( + array_ops.shape(self.loc), + array_ops.shape(self.scale)) + + def _batch_shape(self): + return array_ops.broadcast_static_shape( + self.loc.get_shape(), + self.scale.get_shape()) + + def _event_shape_tensor(self): + return constant_op.constant([], dtype=dtypes.int32) + + def _event_shape(self): + return tensor_shape.TensorShape([]) + + def _sample_n(self, n, seed=None): + shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) + sampled = random_ops.random_normal( + shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed) + return sampled * self.scale + self.loc + + def _log_prob(self, x): + return self._log_unnormalized_prob(x) - self._log_normalization() + + def _log_cdf(self, x): + return special_math.log_ndtr(self._z(x)) + + def _cdf(self, x): + return special_math.ndtr(self._z(x)) + + def _log_survival_function(self, x): + return special_math.log_ndtr(-self._z(x)) + + def _survival_function(self, x): + return special_math.ndtr(-self._z(x)) + + def _log_unnormalized_prob(self, x): + return -0.5 * math_ops.square(self._z(x)) + + def _log_normalization(self): + return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale) + + def _entropy(self): + # Use broadcasting rules to calculate the full broadcast scale. + scale = self.scale * array_ops.ones_like(self.loc) + return 0.5 * math.log(2. 
* math.pi * math.e) + math_ops.log(scale) + + def _mean(self): + return self.loc * array_ops.ones_like(self.scale) + + def _quantile(self, p): + return self._inv_z(special_math.ndtri(p)) + + def _stddev(self): + return self.scale * array_ops.ones_like(self.loc) + + def _mode(self): + return self._mean() + + def _z(self, x): + """Standardize input `x` to a unit normal.""" + with ops.name_scope("standardize", values=[x]): + return (x - self.loc) / self.scale + + def _inv_z(self, z): + """Reconstruct input `x` from its normalized version.""" + with ops.name_scope("reconstruct", values=[z]): + return z * self.scale + self.loc + + +class NormalWithSoftplusScale(Normal): + """Normal with softplus applied to `scale`.""" + + @deprecation.deprecated( + "2019-01-01", + "Use `tfd.Normal(loc, tf.nn.softplus(scale))` " + "instead.", + warn_once=True) + def __init__(self, + loc, + scale, + validate_args=False, + allow_nan_stats=True, + name="NormalWithSoftplusScale"): + parameters = dict(locals()) + with ops.name_scope(name, values=[scale]) as name: + super(NormalWithSoftplusScale, self).__init__( + loc=loc, + scale=nn.softplus(scale, name="softplus_scale"), + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + name=name) + self._parameters = parameters + + +@kullback_leibler.RegisterKL(Normal, Normal) +def _kl_normal_normal(n_a, n_b, name=None): + """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal. + + Args: + n_a: instance of a Normal distribution object. + n_b: instance of a Normal distribution object. + name: (optional) Name to use for created operations. + Default is "kl_normal_normal". + + Returns: + Batchwise KL(n_a || n_b) + """ + with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]): + one = constant_op.constant(1, dtype=n_a.dtype) + two = constant_op.constant(2, dtype=n_a.dtype) + half = constant_op.constant(0.5, dtype=n_a.dtype) + s_a_squared = math_ops.square(n_a.scale) + s_b_squared = math_ops.square(n_b.scale) + ratio = s_a_squared / s_b_squared + return (math_ops.squared_difference(n_a.loc, n_b.loc) / (two * s_b_squared) + + half * (ratio - one - math_ops.log(ratio))) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/special_math.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/special_math.py new file mode 100644 index 0000000000000000000000000000000000000000..797270f3e143e407afdc4d23837574a137bbc0db --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/special_math.py @@ -0,0 +1,470 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# ============================================================================== + +# Functions "ndtr" and "ndtri" are derived from calculations made in: +# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html +# In the following email exchange, the author gives his consent to redistribute +# derived works under an Apache 2.0 license. +# +# From: Stephen Moshier +# Date: Sat, Jun 9, 2018 at 2:36 PM +# Subject: Re: Licensing cephes under Apache (BSD-like) license. +# To: rif +# +# +# +# Hello Rif, +# +# Yes, Google may distribute Cephes files under the Apache 2 license. +# +# If clarification is needed, I do not favor BSD over other free licenses. +# I would agree that Apache 2 seems to cover the concern you mentioned +# about sublicensees. +# +# Best wishes for good luck with your projects! +# Steve Moshier +# +# +# +# On Thu, 31 May 2018, rif wrote: +# +# > Hello Steve. +# > My name is Rif. I work on machine learning software at Google. +# > +# > Your cephes software continues to be incredibly useful and widely used. I +# > was wondering whether it would be permissible for us to use the Cephes code +# > under the Apache 2.0 license, which is extremely similar in permissions to +# > the BSD license (Wikipedia comparisons). This would be quite helpful to us +# > in terms of avoiding multiple licenses on software. +# > +# > I'm sorry to bother you with this (I can imagine you're sick of hearing +# > about this by now), but I want to be absolutely clear we're on the level and +# > not misusing your important software. In former conversation with Eugene +# > Brevdo (ebrevdo@google.com), you wrote "If your licensing is similar to BSD, +# > the formal way that has been handled is simply to add a statement to the +# > effect that you are incorporating the Cephes software by permission of the +# > author." I wanted to confirm that (a) we could use the Apache license, (b) +# > that we don't need to (and probably you don't want to) keep getting +# > contacted about individual uses, because your intent is generally to allow +# > this software to be reused under "BSD-like" license, and (c) you're OK +# > letting incorporators decide whether a license is sufficiently BSD-like? +# > +# > Best, +# > +# > rif +# > +# > +# > + +"""Special Math Ops.""" + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops + +__all__ = [ + "erfinv", + "ndtr", + "ndtri", + "log_ndtr", + "log_cdf_laplace", +] + + +# log_ndtr uses different functions over the ranges +# (-infty, lower](lower, upper](upper, infty) +# Lower bound values were chosen by examining where the support of ndtr +# appears to be zero, relative to scipy's (which is always 64bit). They were +# then made more conservative just to be safe. (Conservative means use the +# expansion more than we probably need to.) See `NdtrTest` in +# special_math_test.py. +LOGNDTR_FLOAT64_LOWER = np.array(-20, np.float64) +LOGNDTR_FLOAT32_LOWER = np.array(-10, np.float32) + +# Upper bound values were chosen by examining for which values of 'x' +# Log[cdf(x)] is 0, after which point we need to use the approximation +# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly +# conservative, meaning we use the approximation earlier than needed. 
+LOGNDTR_FLOAT64_UPPER = np.array(8, np.float64) +LOGNDTR_FLOAT32_UPPER = np.array(5, np.float32) + + +def ndtr(x, name="ndtr"): + """Normal distribution function. + + Returns the area under the Gaussian probability density function, integrated + from minus infinity to x: + + ``` + 1 / x + ndtr(x) = ---------- | exp(-0.5 t**2) dt + sqrt(2 pi) /-inf + + = 0.5 (1 + erf(x / sqrt(2))) + = 0.5 erfc(-x / sqrt(2)) + ``` + + Args: + x: `Tensor` of type `float32`, `float64`. + name: Python string. A name for the operation (default="ndtr"). + + Returns: + ndtr: `Tensor` with `dtype=x.dtype`. + + Raises: + TypeError: if `x` is not floating-type. + """ + + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + if x.dtype.as_numpy_dtype not in [np.float32, np.float64]: + raise TypeError( + "x.dtype=%s is not handled, see docstring for supported types." + % x.dtype) + return _ndtr(x) + + +def _ndtr(x): + """Implements ndtr core logic.""" + half_sqrt_2 = constant_op.constant( + 0.5 * np.sqrt(2.), dtype=x.dtype, name="half_sqrt_2") + w = x * half_sqrt_2 + z = math_ops.abs(w) + y = array_ops.where_v2( + math_ops.less(z, half_sqrt_2), 1. + math_ops.erf(w), + array_ops.where_v2( + math_ops.greater(w, 0.), 2. - math_ops.erfc(z), math_ops.erfc(z))) + return 0.5 * y + + +def ndtri(p, name="ndtri"): + """The inverse of the CDF of the Normal distribution function. + + Returns x such that the area under the pdf from minus infinity to x is equal + to p. + + A piece-wise rational approximation is done for the function. + This is a port of the implementation in netlib. + + Args: + p: `Tensor` of type `float32`, `float64`. + name: Python string. A name for the operation (default="ndtri"). + + Returns: + x: `Tensor` with `dtype=p.dtype`. + + Raises: + TypeError: if `p` is not floating-type. + """ + + with ops.name_scope(name, values=[p]): + p = ops.convert_to_tensor(p, name="p") + if p.dtype.as_numpy_dtype not in [np.float32, np.float64]: + raise TypeError( + "p.dtype=%s is not handled, see docstring for supported types." + % p.dtype) + return _ndtri(p) + + +def _ndtri(p): + """Implements ndtri core logic.""" + + # Constants used in piece-wise rational approximations.
Taken from the cephes + # library: + # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html + + p0 = [ + -1.23916583867381258016E0, 1.39312609387279679503E1, + -5.66762857469070293439E1, 9.80010754185999661536E1, + -5.99633501014107895267E1 + ] + q0 = [ + -1.18331621121330003142E0, 1.59056225126211695515E1, + -8.20372256168333339912E1, 2.00260212380060660359E2, + -2.25462687854119370527E2, 8.63602421390890590575E1, + 4.67627912898881538453E0, 1.95448858338141759834E0, 1.0 + ] + p1 = [ + -8.57456785154685413611E-4, -3.50424626827848203418E-2, + -1.40256079171354495875E-1, 2.18663306850790267539E0, + 1.46849561928858024014E1, 4.40805073893200834700E1, + 5.71628192246421288162E1, 3.15251094599893866154E1, + 4.05544892305962419923E0 + ] + q1 = [ + -9.33259480895457427372E-4, -3.80806407691578277194E-2, + -1.42182922854787788574E-1, 2.50464946208309415979E0, + 1.50425385692907503408E1, 4.13172038254672030440E1, + 4.53907635128879210584E1, 1.57799883256466749731E1, 1.0 + ] + p2 = [ + 6.23974539184983293730E-9, 2.65806974686737550832E-6, + 3.01581553508235416007E-4, 1.23716634817820021358E-2, + 2.01485389549179081538E-1, 1.33303460815807542389E0, + 3.93881025292474443415E0, 6.91522889068984211695E0, + 3.23774891776946035970E0 + ] + q2 = [ + 6.79019408009981274425E-9, 2.89247864745380683936E-6, + 3.28014464682127739104E-4, 1.34204006088543189037E-2, + 2.16236993594496635890E-1, 1.37702099489081330271E0, + 3.67983563856160859403E0, 6.02427039364742014255E0, 1.0 + ] + + def _create_polynomial(var, coeffs): + """Compute n_th order polynomial via Horner's method.""" + coeffs = np.array(coeffs, var.dtype.as_numpy_dtype) + if not coeffs.size: + return array_ops.zeros_like(var) + return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var + + maybe_complement_p = array_ops.where_v2(p > -np.expm1(-2.), 1. - p, p) + # Write in an arbitrary value in place of 0 for p since 0 will cause NaNs + # later on. The result from the computation when p == 0 is not used so any + # number that doesn't result in NaNs is fine. + sanitized_mcp = array_ops.where_v2( + maybe_complement_p <= 0., + array_ops.fill(array_ops.shape(p), np.array(0.5, p.dtype.as_numpy_dtype)), + maybe_complement_p) + + # Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2). + w = sanitized_mcp - 0.5 + ww = w ** 2 + x_for_big_p = w + w * ww * (_create_polynomial(ww, p0) + / _create_polynomial(ww, q0)) + x_for_big_p *= -np.sqrt(2. * np.pi) + + # Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z), + # where z = sqrt(-2. * log(p)), and P/Q are chosen between two different + # arrays based on whether p < exp(-32). + z = math_ops.sqrt(-2. * math_ops.log(sanitized_mcp)) + first_term = z - math_ops.log(z) / z + second_term_small_p = ( + _create_polynomial(1. / z, p2) / + _create_polynomial(1. / z, q2) / z) + second_term_otherwise = ( + _create_polynomial(1. / z, p1) / + _create_polynomial(1. / z, q1) / z) + x_for_small_p = first_term - second_term_small_p + x_otherwise = first_term - second_term_otherwise + + x = array_ops.where_v2( + sanitized_mcp > np.exp(-2.), x_for_big_p, + array_ops.where_v2(z >= 8.0, x_for_small_p, x_otherwise)) + + x = array_ops.where_v2(p > 1. 
- np.exp(-2.), x, -x) + infinity_scalar = constant_op.constant(np.inf, dtype=p.dtype) + infinity = array_ops.fill(array_ops.shape(p), infinity_scalar) + x_nan_replaced = array_ops.where_v2(p <= 0.0, -infinity, + array_ops.where_v2(p >= 1.0, infinity, x)) + return x_nan_replaced + + +def log_ndtr(x, series_order=3, name="log_ndtr"): + """Log Normal distribution function. + + For details of the Normal distribution function see `ndtr`. + + This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or + using an asymptotic series. Specifically: + - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on + `log(1-x) ~= -x, x << 1`. + - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique + and take a log. + - For `x <= lower_segment`, we use the series approximation of erf to compute + the log CDF directly. + + The `lower_segment` is set based on the precision of the input: + + ``` + lower_segment = { -20, x.dtype=float64 + { -10, x.dtype=float32 + upper_segment = { 8, x.dtype=float64 + { 5, x.dtype=float32 + ``` + + When `x < lower_segment`, the `ndtr` asymptotic series approximation is: + + ``` + ndtr(x) = scale * (1 + sum) + R_N + scale = exp(-0.5 x**2) / (-x sqrt(2 pi)) + sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N} + R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3}) + ``` + + where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a + [double-factorial](https://en.wikipedia.org/wiki/Double_factorial). + + + Args: + x: `Tensor` of type `float32`, `float64`. + series_order: Non-negative Python `integer`. Maximum depth to + evaluate the asymptotic expansion. This is the `N` above. + name: Python string. A name for the operation (default="log_ndtr"). + + Returns: + log_ndtr: `Tensor` with `dtype=x.dtype`. + + Raises: + TypeError: if `x.dtype` is not handled. + TypeError: if `series_order` is not a Python `integer`. + ValueError: if `series_order` is not in `[0, 30]`. + """ + if not isinstance(series_order, int): + raise TypeError("series_order must be a Python integer.") + if series_order < 0: + raise ValueError("series_order must be non-negative.") + if series_order > 30: + raise ValueError("series_order must be <= 30.") + + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + + if x.dtype.as_numpy_dtype == np.float64: + lower_segment = LOGNDTR_FLOAT64_LOWER + upper_segment = LOGNDTR_FLOAT64_UPPER + elif x.dtype.as_numpy_dtype == np.float32: + lower_segment = LOGNDTR_FLOAT32_LOWER + upper_segment = LOGNDTR_FLOAT32_UPPER + else: + raise TypeError("x.dtype=%s is not supported." % x.dtype) + + # The basic idea here was ported from: + # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html + # We copy the main idea, with a few changes + # * For x >> 1, and X ~ Normal(0, 1), + # Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x], + # which extends the range of validity of this function. + # * We use one fixed series_order for all of 'x', rather than adaptive. + # * Our docstring properly reflects that this is an asymptotic series, not a + # Taylor series. We also provided a correct bound on the remainder. + # * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when + # x=0. This happens even though the branch is unchosen because when x=0 + # the gradient of a select involves the calculation 1*dy+0*(-inf)=nan + # regardless of whether dy is finite. Note that the minimum is a NOP if + # the branch is chosen.
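+ # Concretely, for float32 inputs: log_ndtr(6.) takes the first branch, + # returning -ndtr(-6.) (since 6 > upper_segment = 5); log_ndtr(0.) takes the + # middle branch, returning log(ndtr(0.)) = log(0.5); and log_ndtr(-15.) takes + # the series branch (since -15 < lower_segment = -10).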
+ return array_ops.where_v2( + math_ops.greater(x, upper_segment), + -_ndtr(-x), # log(1-x) ~= -x, x << 1 # pylint: disable=invalid-unary-operand-type + array_ops.where_v2( + math_ops.greater(x, lower_segment), + math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))), + _log_ndtr_lower(math_ops.minimum(x, lower_segment), series_order))) + + +def _log_ndtr_lower(x, series_order): + """Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`.""" + x_2 = math_ops.square(x) + # Log of the term multiplying (1 + sum) + log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * np.log(2. * np.pi) + return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order)) + + +def _log_ndtr_asymptotic_series(x, series_order): + """Calculates the asymptotic series used in log_ndtr.""" + dtype = x.dtype.as_numpy_dtype + if series_order <= 0: + return np.array(1, dtype) + x_2 = math_ops.square(x) + even_sum = array_ops.zeros_like(x) + odd_sum = array_ops.zeros_like(x) + x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1. + for n in range(1, series_order + 1): + y = np.array(_double_factorial(2 * n - 1), dtype) / x_2n + if n % 2: + odd_sum += y + else: + even_sum += y + x_2n *= x_2 + return 1. + even_sum - odd_sum + + +def erfinv(x, name="erfinv"): + """The inverse function for erf, the error function. + + Args: + x: `Tensor` of type `float32`, `float64`. + name: Python string. A name for the operation (default="erfinv"). + + Returns: + x: `Tensor` with `dtype=x.dtype`. + + Raises: + TypeError: if `x` is not floating-type. + """ + + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + if x.dtype.as_numpy_dtype not in [np.float32, np.float64]: + raise TypeError( + "x.dtype=%s is not handled, see docstring for supported types." + % x.dtype) + return ndtri((x + 1.0) / 2.0) / np.sqrt(2) + + +def _double_factorial(n): + """The double factorial function for small Python integer `n`.""" + return np.prod(np.arange(n, 1, -2)) + + +def log_cdf_laplace(x, name="log_cdf_laplace"): + """Log Laplace distribution function. + + This function calculates `Log[L(x)]`, where `L(x)` is the cumulative + distribution function of the Laplace distribution, i.e. + + ```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt``` + + For numerical accuracy, `L(x)` is computed in different ways depending on `x`, + + ``` + x <= 0: + Log[L(x)] = Log[0.5] + x, which is exact + + 0 < x: + Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact + ``` + + Args: + x: `Tensor` of type `float32`, `float64`. + name: Python string. A name for the operation (default="log_cdf_laplace"). + + Returns: + `Tensor` with `dtype=x.dtype`. + + Raises: + TypeError: if `x.dtype` is not handled. + """ + + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + + # For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x. + lower_solution = -np.log(2.) + x + + # safe_exp_neg_x = exp{-x} for x > 0, but is + # bounded above by 1, which avoids + # log[1 - 1] = -inf for x = log(1/2), AND + # exp{-x} --> inf, for x << -1 + safe_exp_neg_x = math_ops.exp(-math_ops.abs(x)) + + # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used + # internally by log1p, rather than being done explicitly here.
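+ # As with log_ndtr above, keeping the unselected branch finite also keeps + # the gradient of the where_v2 below free of NaN/Inf contamination.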
+ upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x) + + return array_ops.where_v2(x < 0., lower_solution, upper_solution) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/student_t.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/student_t.py new file mode 100644 index 0000000000000000000000000000000000000000..63e4f73c2b0000825f58cb4ff0c5264a59b8946d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/student_t.py @@ -0,0 +1,391 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Student's t distribution class.""" + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "StudentT", + "StudentTWithAbsDfSoftplusScale", +] + + +@tf_export(v1=["distributions.StudentT"]) +class StudentT(distribution.Distribution): + """Student's t-distribution. + + This distribution has parameters: degree of freedom `df`, location `loc`, + and `scale`. + + #### Mathematical details + + The probability density function (pdf) is, + + ```none + pdf(x; df, mu, sigma) = (1 + y**2 / df)**(-0.5 (df + 1)) / Z + where, + y = (x - mu) / sigma + Z = abs(sigma) sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1)) + ``` + + where: + * `loc = mu`, + * `scale = sigma`, + * `Z` is the normalization constant, and + * `Gamma` is the [gamma function]( + https://en.wikipedia.org/wiki/Gamma_function). + + The StudentT distribution is a member of the [location-scale family]( + https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be + constructed as, + + ```none + X ~ StudentT(df, loc=0, scale=1) + Y = loc + scale * X + ``` + + Notice that `scale` has semantics more similar to standard deviation than + variance. However, it is not actually the std. deviation; the Student's + t-distribution std. dev. is `scale sqrt(df / (df - 2))` when `df > 2`. + + Samples of this distribution are reparameterized (pathwise differentiable). + The derivatives are computed using the approach described in + (Figurnov et al., 2018). + + #### Examples + + Examples of initialization of one or a batch of distributions.
+ + ```python + import tensorflow_probability as tfp + tfd = tfp.distributions + + # Define a single scalar Student t distribution. + single_dist = tfd.StudentT(df=3., loc=0., scale=1.) + + # Evaluate the pdf at 1, returning a scalar Tensor. + single_dist.prob(1.) + + # Define a batch of two scalar valued Student t's. + # The first has degrees of freedom 2, mean 1, and scale 11. + # The second 3, 2 and 22. + multi_dist = tfd.StudentT(df=[2., 3.], loc=[1, 2.], scale=[11, 22.]) + + # Evaluate the pdf of the first distribution on 0, and the second on 1.5, + # returning a length two tensor. + multi_dist.prob([0, 1.5]) + + # Get 3 samples, returning a 3 x 2 tensor. + multi_dist.sample(3) + ``` + + Arguments are broadcast when possible. + + ```python + # Define a batch of two Student's t distributions. + # Both have df 2 and mean 1, but different scales. + dist = tfd.StudentT(df=2., loc=1., scale=[11, 22.]) + + # Evaluate the pdf of both distributions on the same point, 3.0, + # returning a length 2 tensor. + dist.prob(3.0) + ``` + + Compute the gradients of samples w.r.t. the parameters: + + ```python + df = tf.constant(2.0) + loc = tf.constant(2.0) + scale = tf.constant(11.0) + dist = tfd.StudentT(df=df, loc=loc, scale=scale) + samples = dist.sample(5) # Shape [5] + loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function + # Unbiased stochastic gradients of the loss function + grads = tf.gradients(loss, [df, loc, scale]) + ``` + + References: + Implicit Reparameterization Gradients: + [Figurnov et al., 2018] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) + ([pdf](http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf)) + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + df, + loc, + scale, + validate_args=False, + allow_nan_stats=True, + name="StudentT"): + """Construct Student's t distributions. + + The distributions have degree of freedom `df`, mean `loc`, and scale + `scale`. + + The parameters `df`, `loc`, and `scale` must be shaped in a way that + supports broadcasting (e.g. `df + loc + scale` is a valid operation). + + Args: + df: Floating-point `Tensor`. The degrees of freedom of the + distribution(s). `df` must contain only positive values. + loc: Floating-point `Tensor`. The mean(s) of the distribution(s). + scale: Floating-point `Tensor`. The scaling factor(s) for the + distribution(s). Note that `scale` is not technically the standard + deviation of this distribution but has semantics more similar to + standard deviation than variance. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, + statistics (e.g., mean, mode, variance) use the value "`NaN`" to + indicate the result is undefined. When `False`, an exception is raised + if one or more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + + Raises: + TypeError: if loc and scale are different dtypes.
+ """ + parameters = dict(locals()) + with ops.name_scope(name, values=[df, loc, scale]) as name: + with ops.control_dependencies([check_ops.assert_positive(df)] + if validate_args else []): + self._df = array_ops.identity(df, name="df") + self._loc = array_ops.identity(loc, name="loc") + self._scale = array_ops.identity(scale, name="scale") + check_ops.assert_same_float_dtype( + (self._df, self._loc, self._scale)) + super(StudentT, self).__init__( + dtype=self._scale.dtype, + reparameterization_type=distribution.FULLY_REPARAMETERIZED, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + parameters=parameters, + graph_parents=[self._df, self._loc, self._scale], + name=name) + + @staticmethod + def _param_shapes(sample_shape): + return dict( + zip(("df", "loc", "scale"), ( + [ops.convert_to_tensor( + sample_shape, dtype=dtypes.int32)] * 3))) + + @property + def df(self): + """Degrees of freedom in these Student's t distribution(s).""" + return self._df + + @property + def loc(self): + """Locations of these Student's t distribution(s).""" + return self._loc + + @property + def scale(self): + """Scaling factors of these Student's t distribution(s).""" + return self._scale + + def _batch_shape_tensor(self): + return array_ops.broadcast_dynamic_shape( + array_ops.shape(self.df), + array_ops.broadcast_dynamic_shape( + array_ops.shape(self.loc), array_ops.shape(self.scale))) + + def _batch_shape(self): + return array_ops.broadcast_static_shape( + array_ops.broadcast_static_shape(self.df.get_shape(), + self.loc.get_shape()), + self.scale.get_shape()) + + def _event_shape_tensor(self): + return constant_op.constant([], dtype=math_ops.int32) + + def _event_shape(self): + return tensor_shape.TensorShape([]) + + def _sample_n(self, n, seed=None): + # The sampling method comes from the fact that if: + # X ~ Normal(0, 1) + # Z ~ Chi2(df) + # Y = X / sqrt(Z / df) + # then: + # Y ~ StudentT(df). + shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) + normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed) + df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) + gamma_sample = random_ops.random_gamma( + [n], + 0.5 * df, + beta=0.5, + dtype=self.dtype, + seed=distribution_util.gen_new_seed(seed, salt="student_t")) + samples = normal_sample * math_ops.rsqrt(gamma_sample / df) + return samples * self.scale + self.loc # Abs(scale) not wanted. + + def _log_prob(self, x): + return self._log_unnormalized_prob(x) - self._log_normalization() + + def _log_unnormalized_prob(self, x): + y = (x - self.loc) / self.scale # Abs(scale) superfluous. + return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df) + + def _log_normalization(self): + return (math_ops.log(math_ops.abs(self.scale)) + + 0.5 * math_ops.log(self.df) + + 0.5 * np.log(np.pi) + + math_ops.lgamma(0.5 * self.df) - + math_ops.lgamma(0.5 * (self.df + 1.))) + + def _cdf(self, x): + # Take Abs(scale) to make subsequent where work correctly. + y = (x - self.loc) / math_ops.abs(self.scale) + x_t = self.df / (y**2. + self.df) + neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t) + return array_ops.where_v2(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf) + + def _entropy(self): + v = array_ops.ones(self.batch_shape_tensor(), + dtype=self.dtype)[..., array_ops.newaxis] + u = v * self.df[..., array_ops.newaxis] + beta_arg = array_ops.concat([u, v], -1) / 2. 
+ return (math_ops.log(math_ops.abs(self.scale)) + + 0.5 * math_ops.log(self.df) + + special_math_ops.lbeta(beta_arg) + + 0.5 * (self.df + 1.) * + (math_ops.digamma(0.5 * (self.df + 1.)) - + math_ops.digamma(0.5 * self.df))) + + @distribution_util.AppendDocstring( + """The mean of Student's T equals `loc` if `df > 1`, otherwise it is + `NaN`. If `self.allow_nan_stats=False`, then an exception will be raised + rather than returning `NaN`.""") + def _mean(self): + mean = self.loc * array_ops.ones(self.batch_shape_tensor(), + dtype=self.dtype) + if self.allow_nan_stats: + nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype()) + return array_ops.where_v2( + math_ops.greater( + self.df, + array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)), + mean, array_ops.fill(self.batch_shape_tensor(), nan, name="nan")) + else: + return control_flow_ops.with_dependencies( + [ + check_ops.assert_less( + array_ops.ones([], dtype=self.dtype), + self.df, + message="mean not defined for components of df <= 1"), + ], + mean) + + @distribution_util.AppendDocstring(""" + The variance for Student's T equals + + ``` + scale**2 * df / (df - 2), when df > 2 + infinity, when 1 < df <= 2 + NaN, when df <= 1 + ``` + """) + def _variance(self): + # We need to put the tf.where inside the outer tf.where to ensure we never + # hit a NaN in the gradient. + denom = array_ops.where_v2( + math_ops.greater(self.df, 2.), self.df - 2., + array_ops.ones_like(self.df)) + # Abs(scale) superfluous. + var = (array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) * + math_ops.square(self.scale) * self.df / denom) + # When 1 < df <= 2, variance is infinite. + inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype()) + result_where_defined = array_ops.where_v2( + self.df > array_ops.fill(self.batch_shape_tensor(), 2.), var, + array_ops.fill(self.batch_shape_tensor(), inf, name="inf")) + + if self.allow_nan_stats: + nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype()) + return array_ops.where_v2( + math_ops.greater( + self.df, + array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)), + result_where_defined, + array_ops.fill(self.batch_shape_tensor(), nan, name="nan")) + else: + return control_flow_ops.with_dependencies( + [ + check_ops.assert_less( + array_ops.ones([], dtype=self.dtype), + self.df, + message="variance not defined for components of df <= 1"), + ], + result_where_defined) + + def _mode(self): + return array_ops.identity(self.loc) + + +class StudentTWithAbsDfSoftplusScale(StudentT): + """StudentT with `df = floor(abs(df))` and `scale = softplus(scale)`.""" + + @deprecation.deprecated( + "2019-01-01", + "Use `tfd.StudentT(tf.floor(tf.abs(df)), loc, " + "tf.nn.softplus(scale)) instead.", + warn_once=True) + def __init__(self, + df, + loc, + scale, + validate_args=False, + allow_nan_stats=True, + name="StudentTWithAbsDfSoftplusScale"): + parameters = dict(locals()) + with ops.name_scope(name, values=[df, scale]) as name: + super(StudentTWithAbsDfSoftplusScale, self).__init__( + df=math_ops.floor(math_ops.abs(df)), + loc=loc, + scale=nn.softplus(scale, name="softplus_scale"), + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + name=name) + self._parameters = parameters diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/uniform.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/uniform.py new file mode 100644 index 0000000000000000000000000000000000000000..9b6a8874ed1f94019b96640c0d046820ee0fdb40 --- /dev/null +++
b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/uniform.py @@ -0,0 +1,204 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Uniform distribution class.""" + +import math + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["distributions.Uniform"]) +class Uniform(distribution.Distribution): + """Uniform distribution with `low` and `high` parameters. + + #### Mathematical Details + + The probability density function (pdf) is, + + ```none + pdf(x; a, b) = I[a <= x < b] / Z + Z = b - a + ``` + + where + + - `low = a`, + - `high = b`, + - `Z` is the normalizing constant, and + - `I[predicate]` is the [indicator function]( + https://en.wikipedia.org/wiki/Indicator_function) for `predicate`. + + The parameters `low` and `high` must be shaped in a way that supports + broadcasting (e.g., `high - low` is a valid operation). + + #### Examples + + ```python + # Without broadcasting: + u1 = Uniform(low=3.0, high=4.0) # a single uniform distribution [3, 4] + u2 = Uniform(low=[1.0, 2.0], + high=[3.0, 4.0]) # 2 distributions [1, 3], [2, 4] + u3 = Uniform(low=[[1.0, 2.0], + [3.0, 4.0]], + high=[[1.5, 2.5], + [3.5, 4.5]]) # 4 distributions + ``` + + ```python + # With broadcasting: + u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) # 3 distributions + ``` + + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + low=0., + high=1., + validate_args=False, + allow_nan_stats=True, + name="Uniform"): + """Initialize a batch of Uniform distributions. + + Args: + low: Floating point tensor, lower boundary of the output interval. Must + have `low < high`. + high: Floating point tensor, upper boundary of the output interval. Must + have `low < high`. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. 
When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + + Raises: + InvalidArgumentError: if `low >= high` and `validate_args=True`. + """ + parameters = dict(locals()) + with ops.name_scope(name, values=[low, high]) as name: + with ops.control_dependencies([ + check_ops.assert_less( + low, high, message="uniform not defined when low >= high.") + ] if validate_args else []): + self._low = array_ops.identity(low, name="low") + self._high = array_ops.identity(high, name="high") + check_ops.assert_same_float_dtype([self._low, self._high]) + super(Uniform, self).__init__( + dtype=self._low.dtype, + reparameterization_type=distribution.FULLY_REPARAMETERIZED, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + parameters=parameters, + graph_parents=[self._low, + self._high], + name=name) + + @staticmethod + def _param_shapes(sample_shape): + return dict( + zip(("low", "high"), + ([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2))) + + @property + def low(self): + """Lower boundary of the output interval.""" + return self._low + + @property + def high(self): + """Upper boundary of the output interval.""" + return self._high + + def range(self, name="range"): + """`high - low`.""" + with self._name_scope(name): + return self.high - self.low + + def _batch_shape_tensor(self): + return array_ops.broadcast_dynamic_shape( + array_ops.shape(self.low), + array_ops.shape(self.high)) + + def _batch_shape(self): + return array_ops.broadcast_static_shape( + self.low.get_shape(), + self.high.get_shape()) + + def _event_shape_tensor(self): + return constant_op.constant([], dtype=dtypes.int32) + + def _event_shape(self): + return tensor_shape.TensorShape([]) + + def _sample_n(self, n, seed=None): + shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) + samples = random_ops.random_uniform(shape=shape, + dtype=self.dtype, + seed=seed) + return self.low + self.range() * samples + + def _prob(self, x): + broadcasted_x = x * array_ops.ones( + self.batch_shape_tensor(), dtype=x.dtype) + return array_ops.where_v2( + math_ops.is_nan(broadcasted_x), broadcasted_x, + array_ops.where_v2( + math_ops.logical_or(broadcasted_x < self.low, + broadcasted_x >= self.high), + array_ops.zeros_like(broadcasted_x), + array_ops.ones_like(broadcasted_x) / self.range())) + + def _cdf(self, x): + broadcast_shape = array_ops.broadcast_dynamic_shape( + array_ops.shape(x), self.batch_shape_tensor()) + zeros = array_ops.zeros(broadcast_shape, dtype=self.dtype) + ones = array_ops.ones(broadcast_shape, dtype=self.dtype) + broadcasted_x = x * ones + result_if_not_big = array_ops.where_v2( + x < self.low, zeros, (broadcasted_x - self.low) / self.range()) + return array_ops.where_v2(x >= self.high, ones, result_if_not_big) + + def _entropy(self): + return math_ops.log(self.range()) + + def _mean(self): + return (self.low + self.high) / 2. + + def _variance(self): + return math_ops.square(self.range()) / 12. + + def _stddev(self): + return self.range() / math.sqrt(12.)
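+ + +# A minimal usage sketch (illustrative only; under TF1 graph semantics, run +# the resulting tensors with session.run to materialize the values): +# u = Uniform(low=0., high=[2., 4.]) # batch of two distributions +# u.prob(1.) # => [0.5, 0.25], i.e. 1 / (high - low) +# u.mean() # => [1., 2.] +# u.stddev() # => (high - low) / sqrt(12) ~= [0.577, 1.155]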
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/util.py new file mode 100644 index 0000000000000000000000000000000000000000..62f91b7003a8a092c38e19f7b7cfbe0139337fa0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/distributions/util.py @@ -0,0 +1,1448 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for probability distributions.""" + +import functools +import hashlib + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import cond as tf_cond +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.util import tf_inspect + + +def assert_integer_form(x, + data=None, + summarize=None, + message=None, + int_dtype=None, + name="assert_integer_form"): + """Assert that x has integer components (or floats equal to integers). + + Args: + x: Floating-point `Tensor` + data: The tensors to print out if the condition is `False`. Defaults to + error message and first few entries of `x`. + summarize: Print this many entries of each tensor. + message: A string to prefix to the default message. + int_dtype: A `tf.dtype` used to cast the float to. The default (`None`) + implies the smallest possible signed int will be used for casting. + name: A name for this operation (optional). + + Returns: + Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
+ """ + with ops.name_scope(name, values=[x, data]): + x = ops.convert_to_tensor(x, name="x") + if x.dtype.is_integer: + return control_flow_ops.no_op() + message = message or "{} has non-integer components".format(x) + if int_dtype is None: + try: + int_dtype = { + dtypes.float16: dtypes.int16, + dtypes.float32: dtypes.int32, + dtypes.float64: dtypes.int64, + }[x.dtype.base_dtype] + except KeyError: + raise TypeError("Unrecognized type {}".format(x.dtype.name)) + return check_ops.assert_equal( + x, + math_ops.cast(math_ops.cast(x, int_dtype), x.dtype), + data=data, + summarize=summarize, + message=message, + name=name) + + +def assert_symmetric(matrix): + matrix_t = array_ops.matrix_transpose(matrix) + return control_flow_ops.with_dependencies( + [check_ops.assert_equal(matrix, matrix_t)], matrix) + + +def embed_check_nonnegative_integer_form( + x, name="embed_check_nonnegative_integer_form"): + """Assert x is a non-negative tensor, and optionally of integers.""" + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + assertions = [ + check_ops.assert_non_negative( + x, message="'{}' must be non-negative.".format(x)), + ] + if not x.dtype.is_integer: + assertions += [ + assert_integer_form( + x, + message="'{}' cannot contain fractional components.".format(x)), + ] + return control_flow_ops.with_dependencies(assertions, x) + + +def same_dynamic_shape(a, b): + """Returns whether a and b have the same dynamic shape. + + Args: + a: `Tensor` + b: `Tensor` + + Returns: + `bool` `Tensor` representing if both tensors have the same shape. + """ + a = ops.convert_to_tensor(a, name="a") + b = ops.convert_to_tensor(b, name="b") + + # Here we can't just do math_ops.equal(a.shape, b.shape), since + # static shape inference may break the equality comparison between + # shape(a) and shape(b) in math_ops.equal. + def all_shapes_equal(): + return math_ops.reduce_all( + math_ops.equal( + array_ops.concat( + [array_ops.shape(a), array_ops.shape(b)], 0), + array_ops.concat( + [array_ops.shape(b), array_ops.shape(a)], 0))) + + # One of the shapes isn't fully defined, so we need to use the dynamic + # shape. + return tf_cond.cond( + math_ops.equal(array_ops.rank(a), array_ops.rank(b)), + all_shapes_equal, lambda: constant_op.constant(False)) + + +def maybe_get_static_value(x, dtype=None): + """Helper which tries to return a static value. + + Given `x`, extract it's value statically, optionally casting to a specific + dtype. If this is not possible, None is returned. + + Args: + x: `Tensor` for which to extract a value statically. + dtype: Optional dtype to cast to. + + Returns: + Statically inferred value if possible, otherwise None. + """ + if x is None: + return x + try: + # This returns an np.ndarray. + x_ = tensor_util.constant_value(x) + except TypeError: + x_ = x + if x_ is None or dtype is None: + return x_ + return np.array(x_, dtype) + + +def get_logits_and_probs(logits=None, + probs=None, + multidimensional=False, + validate_args=False, + name="get_logits_and_probs", + dtype=None): + """Converts logit to probabilities (or vice-versa), and returns both. + + Args: + logits: Floating-point `Tensor` representing log-odds. + probs: Floating-point `Tensor` representing probabilities. + multidimensional: Python `bool`, default `False`. If `True`, represents + whether the last dimension of `logits` or `probs`, a `[N1, N2, ... k]` + dimensional tensor, representing the logit or probability of `shape[-1]` + classes. + validate_args: Python `bool`, default `False`. 
When `True`, either assert `0 + <= probs <= 1` (if not `multidimensional`) or that the last dimension of + `probs` sums to one. + name: A name for this operation (optional). + dtype: `tf.DType` to prefer when converting args to `Tensor`s. + + Returns: + logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or + `1`, then the corresponding entry in the returned logit will be `-Inf` and + `Inf` respectively. + + Raises: + ValueError: if neither `probs` nor `logits` were passed in, or both were. + """ + with ops.name_scope(name, values=[probs, logits]): + if (probs is None) == (logits is None): + raise ValueError("Must pass probs or logits, but not both.") + + if probs is None: + logits = ops.convert_to_tensor(logits, name="logits", dtype=dtype) + if not logits.dtype.is_floating: + raise TypeError("logits must have floating type.") + # We can return early since we constructed probs and therefore know + # they're valid. + if multidimensional: + if validate_args: + logits = embed_check_categorical_event_shape(logits) + return logits, nn.softmax(logits, name="probs") + return logits, math_ops.sigmoid(logits, name="probs") + + probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype) + if not probs.dtype.is_floating: + raise TypeError("probs must have floating type.") + + if validate_args: + with ops.name_scope("validate_probs"): + one = constant_op.constant(1., probs.dtype) + dependencies = [check_ops.assert_non_negative(probs)] + if multidimensional: + probs = embed_check_categorical_event_shape(probs) + dependencies += [ + check_ops.assert_near( + math_ops.reduce_sum(probs, -1), + one, + message="probs does not sum to 1.") + ] + else: + dependencies += [ + check_ops.assert_less_equal( + probs, one, message="probs has components greater than 1.") + ] + probs = control_flow_ops.with_dependencies(dependencies, probs) + + with ops.name_scope("logits"): + if multidimensional: + # Here we don't compute the multidimensional case, in a manner + # consistent with the unidimensional case. We do so + # following the TF convention. Typically, you might expect to see + # logits = log(probs) - log(probs[pivot]). A side-effect of + # being consistent with the TF approach is that the unidimensional case + # implicitly handles the second dimension but the multidimensional case + # explicitly keeps the pivot dimension. + return math_ops.log(probs), probs + return math_ops.log(probs) - math_ops.log1p(-1.
* probs), probs + + +def _is_known_unsigned_by_dtype(dt): + """Helper returning True if dtype is known to be unsigned.""" + return { + dtypes.bool: True, + dtypes.uint8: True, + dtypes.uint16: True, + }.get(dt.base_dtype, False) + + +def _is_known_signed_by_dtype(dt): + """Helper returning True if dtype is known to be signed.""" + return { + dtypes.float16: True, + dtypes.float32: True, + dtypes.float64: True, + dtypes.int8: True, + dtypes.int16: True, + dtypes.int32: True, + dtypes.int64: True, + }.get(dt.base_dtype, False) + + +def _is_known_dtype(dt): + """Helper returning True if dtype is known.""" + return _is_known_unsigned_by_dtype(dt) or _is_known_signed_by_dtype(dt) + + +def _largest_integer_by_dtype(dt): + """Helper returning the largest integer exactly representable by dtype.""" + if not _is_known_dtype(dt): + raise TypeError("Unrecognized dtype: {}".format(dt.name)) + if dt.is_floating: + return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1)) + if dt.is_integer: + return np.iinfo(dt.as_numpy_dtype).max + if dt.base_dtype == dtypes.bool: + return int(1) + # We actually can't land here but keep the case for completeness. + raise TypeError("Unrecognized dtype: {}".format(dt.name)) + + +def _smallest_integer_by_dtype(dt): + """Helper returning the smallest integer exactly representable by dtype.""" + if not _is_known_dtype(dt): + raise TypeError("Unrecognized dtype: {}".format(dt.name)) + if _is_known_unsigned_by_dtype(dt): + return 0 + return -1 * _largest_integer_by_dtype(dt) + + +def _is_integer_like_by_dtype(dt): + """Helper returning True if dtype.is_integer or is `bool`.""" + if not _is_known_dtype(dt): + raise TypeError("Unrecognized dtype: {}".format(dt.name)) + return dt.is_integer or dt.base_dtype == dtypes.bool + + +def embed_check_categorical_event_shape( + categorical_param, name="embed_check_categorical_event_shape"): + """Embeds checks that categorical distributions don't have too many classes. + + A categorical-type distribution is one which, e.g., returns the class label + rather than a one-hot encoding. E.g., `Categorical(probs)`. + + Since distributions output samples in the same dtype as the parameters, we + must ensure that casting doesn't lose precision. That is, the + `parameter.dtype` implies a maximum number of classes. However, since shape is + `int32` and categorical variables are presumed to be indexes into a `Tensor`, + we must also ensure that the number of classes is no larger than the largest + possible `int32` index, i.e., `2**31-1`. + + In other words the number of classes, `K`, must satisfy the following + condition: + + ```python + K <= min( + int(2**31 - 1), # Largest int32 index. + { + dtypes.float16: int(2**11), # Largest int as a float16. + dtypes.float32: int(2**24), + dtypes.float64: int(2**53), + }.get(categorical_param.dtype.base_dtype, 0)) + ``` + + Args: + categorical_param: Floating-point `Tensor` representing parameters of + distribution over categories. The rightmost shape is presumed to be the + number of categories. + name: A name for this operation (optional). + + Returns: + categorical_param: Input `Tensor` with appropriate assertions embedded. + + Raises: + TypeError: if `categorical_param` has an unknown `dtype`. + ValueError: if we can statically identify `categorical_param` as being too + large (for being closed under int32/float casting).
+ """ + with ops.name_scope(name, values=[categorical_param]): + x = ops.convert_to_tensor(categorical_param, name="categorical_param") + # The size must not exceed both of: + # - The largest possible int32 (since categorical values are presumed to be + # indexes into a Tensor). + # - The largest possible integer exactly representable under the given + # floating-point dtype (since we need to cast to/from). + # + # The chosen floating-point thresholds are 2**(1 + mantissa_bits). + # For more details, see: + # https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation + x_dtype = x.dtype.base_dtype + max_event_size = ( + _largest_integer_by_dtype(x_dtype) if x_dtype.is_floating else 0) + if max_event_size == 0: + raise TypeError("Unable to validate size of unrecognized dtype " + "({}).".format(x_dtype.name)) + try: + x_shape_static = x.get_shape().with_rank_at_least(1) + except ValueError: + raise ValueError("A categorical-distribution parameter must have " + "at least 1 dimension.") + if tensor_shape.dimension_value(x_shape_static[-1]) is not None: + event_size = x_shape_static.dims[-1].value + if event_size < 2: + raise ValueError("A categorical-distribution parameter must have at " + "least 2 events.") + if event_size > max_event_size: + raise ValueError("Number of classes exceeds `dtype` precision, i.e., " + "{} implies shape ({}) cannot exceed {}.".format( + x_dtype.name, event_size, max_event_size)) + return x + else: + event_size = array_ops.shape(x, name="x_shape")[-1] + return control_flow_ops.with_dependencies([ + check_ops.assert_rank_at_least( + x, + 1, + message=("A categorical-distribution parameter must have " + "at least 1 dimension.")), + check_ops.assert_greater_equal( + array_ops.shape(x)[-1], + 2, + message=("A categorical-distribution parameter must have at " + "least 2 events.")), + check_ops.assert_less_equal( + event_size, + max_event_size, + message="Number of classes exceeds `dtype` precision, " + "i.e., {} dtype cannot exceed {} shape.".format( + x_dtype.name, max_event_size)), + ], x) + + +def embed_check_integer_casting_closed(x, + target_dtype, + assert_nonnegative=True, + name="embed_check_casting_closed"): + """Ensures integers remain unaffected despite casting to/from int/float types. + + Example integer-types: `uint8`, `int32`, `bool`. + Example floating-types: `float32`, `float64`. + + The largest possible integer representable by an IEEE754 floating-point is + `2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is + `2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have + integer-form values can be cast to some other type without loss of precision. + + The smallest representable integer is the negative of the largest + representable integer, except for types: `uint8`, `uint16`, `bool`. For these + types, the smallest representable integer is `0`. + + Args: + x: `Tensor` representing integer-form values. + target_dtype: TF `dtype` under which `x` should have identical values. + assert_nonnegative: `bool` indicating `x` should contain nonnegative values. + name: A name for this operation (optional). + + Returns: + x: Input `Tensor` with appropriate assertions embedded. + + Raises: + TypeError: if `x` is neither integer- nor floating-type. + TypeError: if `target_dtype` is neither integer- nor floating-type. + TypeError: if neither `x` nor `target_dtype` are integer-type. 
+ """ + + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + if (not _is_integer_like_by_dtype(x.dtype) and not x.dtype.is_floating): + raise TypeError("{}.dtype must be floating- or " + "integer-type.".format(x.dtype.name)) + if (not _is_integer_like_by_dtype(target_dtype) and + not target_dtype.is_floating): + raise TypeError("target_dtype ({}) must be floating- or " + "integer-type.".format(target_dtype.name)) + if (not _is_integer_like_by_dtype(x.dtype) and + not _is_integer_like_by_dtype(target_dtype)): + raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) " + "must be integer-type.".format(x, x.dtype.name, + target_dtype.name)) + + assertions = [] + if assert_nonnegative: + assertions += [ + check_ops.assert_non_negative( + x, message="Elements must be non-negative."), + ] + + if x.dtype.is_floating: + # Being here means _is_integer_like_by_dtype(target_dtype) = True. + # Since this check implies the magnitude check below, we need only it. + assertions += [ + assert_integer_form( + x, + int_dtype=target_dtype, + message="Elements must be {}-equivalent.".format( + target_dtype.name)), + ] + else: + if (_largest_integer_by_dtype(x.dtype) > + _largest_integer_by_dtype(target_dtype)): + # Cast may lose integer precision. + assertions += [ + check_ops.assert_less_equal( + x, + _largest_integer_by_dtype(target_dtype), + message=("Elements cannot exceed {}.".format( + _largest_integer_by_dtype(target_dtype)))), + ] + if (not assert_nonnegative and (_smallest_integer_by_dtype( + x.dtype) < _smallest_integer_by_dtype(target_dtype))): + assertions += [ + check_ops.assert_greater_equal( + x, + _smallest_integer_by_dtype(target_dtype), + message=("Elements cannot be smaller than {}.".format( + _smallest_integer_by_dtype(target_dtype)))), + ] + + if not assertions: + return x + return control_flow_ops.with_dependencies(assertions, x) + + +def log_combinations(n, counts, name="log_combinations"): + """Multinomial coefficient. + + Given `n` and `counts`, where `counts` has last dimension `k`, we compute + the multinomial coefficient as: + + ```n! / sum_i n_i!``` + + where `i` runs over all `k` classes. + + Args: + n: Floating-point `Tensor` broadcastable with `counts`. This represents `n` + outcomes. + counts: Floating-point `Tensor` broadcastable with `n`. This represents + counts in `k` classes, where `k` is the last dimension of the tensor. + name: A name for this operation (optional). + + Returns: + `Tensor` representing the multinomial coefficient between `n` and `counts`. + """ + # First a bit about the number of ways counts could have come in: + # E.g. if counts = [1, 2], then this is 3 choose 2. + # In general, this is (sum counts)! / sum(counts!) + # The sum should be along the last dimension of counts. This is the + # "distribution" dimension. Here n a priori represents the sum of counts. + with ops.name_scope(name, values=[n, counts]): + n = ops.convert_to_tensor(n, name="n") + counts = ops.convert_to_tensor(counts, name="counts") + total_permutations = math_ops.lgamma(n + 1) + counts_factorial = math_ops.lgamma(counts + 1) + redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1]) + return total_permutations - redundant_permutations + + +def matrix_diag_transform(matrix, transform=None, name=None): + """Transform diagonal of [batch-]matrix, leave rest of matrix unchanged. + + Create a trainable covariance defined by a Cholesky factor: + + ```python + # Transform network layer into 2 x 2 array. 
+ matrix_values = tf.contrib.layers.fully_connected(activations, 4) + matrix = tf.reshape(matrix_values, (batch_size, 2, 2)) + + # Make the diagonal positive. If the upper triangle was zero, this would be a + # valid Cholesky factor. + chol = matrix_diag_transform(matrix, transform=tf.nn.softplus) + + # LinearOperatorLowerTriangular ignores the upper triangle. + operator = LinearOperatorLowerTriangular(chol) + ``` + + Example of heteroskedastic 2-D linear regression. + + ```python + tfd = tfp.distributions + + # Get a trainable Cholesky factor. + matrix_values = tf.contrib.layers.fully_connected(activations, 4) + matrix = tf.reshape(matrix_values, (batch_size, 2, 2)) + chol = matrix_diag_transform(matrix, transform=tf.nn.softplus) + + # Get a trainable mean. + mu = tf.contrib.layers.fully_connected(activations, 2) + + # This is a fully trainable multivariate normal! + dist = tfd.MultivariateNormalTriL(mu, chol) + + # Standard log loss. Minimizing this will "train" mu and chol, and then dist + # will be a distribution predicting labels as multivariate Gaussians. + loss = -1 * tf.reduce_mean(dist.log_prob(labels)) + ``` + + Args: + matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are + equal. + transform: Element-wise function mapping `Tensors` to `Tensors`. To be + applied to the diagonal of `matrix`. If `None`, `matrix` is returned + unchanged. Defaults to `None`. + name: A name to give created ops. Defaults to "matrix_diag_transform". + + Returns: + A `Tensor` with same shape and `dtype` as `matrix`. + """ + with ops.name_scope(name, "matrix_diag_transform", [matrix]): + matrix = ops.convert_to_tensor(matrix, name="matrix") + if transform is None: + return matrix + # Replace the diag with transformed diag. + diag = array_ops.matrix_diag_part(matrix) + transformed_diag = transform(diag) + transformed_mat = array_ops.matrix_set_diag(matrix, transformed_diag) + + return transformed_mat + + +def rotate_transpose(x, shift, name="rotate_transpose"): + """Circularly moves dims left or right. + + Effectively identical to: + + ```python + numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift)) + ``` + + When `shift` or the rank of `x` is not known statically, additional + graph-runtime checks are performed. These checks entail moving data + from GPU to CPU. + + Example: + + ```python + x = tf.random.normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4]. + rotate_transpose(x, -1).shape == [2, 3, 4, 1] + rotate_transpose(x, -2).shape == [3, 4, 1, 2] + rotate_transpose(x, 1).shape == [4, 1, 2, 3] + rotate_transpose(x, 2).shape == [3, 4, 1, 2] + rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1] + rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3] + ``` + + Args: + x: `Tensor`. + shift: `Tensor`. Number of dimensions to transpose left (shift<0) or + transpose right (shift>0). + name: Python `str`. The name to give this op. + + Returns: + rotated_x: Input `Tensor` with dimensions circularly rotated by shift. + + Raises: + TypeError: if shift is not integer type. + """ + with ops.name_scope(name, values=[x, shift]): + x = ops.convert_to_tensor(x, name="x") + shift = ops.convert_to_tensor(shift, name="shift") + # We do not assign back to preserve constant-ness.
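+ # check_ops.assert_integer is a static check: it inspects shift's dtype at + # graph-construction time and raises TypeError if it is not integral.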
+ check_ops.assert_integer(shift) + shift_value_static = tensor_util.constant_value(shift) + ndims = x.get_shape().ndims + if ndims is not None and shift_value_static is not None: + if ndims < 2: + return x + shift_value_static = np.sign(shift_value_static) * ( + abs(shift_value_static) % ndims) + if shift_value_static == 0: + return x + perm = np.roll(np.arange(ndims), shift_value_static) + return array_ops.transpose(x, perm=perm) + else: + # Consider if we always had a positive shift, and some specified + # direction. + # When shifting left we want the new array: + # last(x, n-shift) + first(x, shift) + # and if shifting right then we want: + # last(x, shift) + first(x, n-shift) + # Observe that last(a) == slice(a, n) and first(a) == slice(0, a). + # Also, we can encode direction and shift as one: direction * shift. + # Combining these facts, we have: + # a = cond(shift<0, -shift, n-shift) + # last(x, n-a) + first(x, a) == x[a:n] + x[0:a] + # Finally, we transform shift by modulo length so it can be specified + # independently from the array upon which it operates (like python). + ndims = array_ops.rank(x) + shift = array_ops.where_v2( + math_ops.less(shift, 0), + math_ops.mod(-shift, ndims), # pylint: disable=invalid-unary-operand-type + ndims - math_ops.mod(shift, ndims)) + first = math_ops.range(0, shift) + last = math_ops.range(shift, ndims) + perm = array_ops.concat([last, first], 0) + return array_ops.transpose(x, perm=perm) + + +def pick_vector(cond, true_vector, false_vector, name="pick_vector"): + """Picks possibly different length row `Tensor`s based on condition. + + Value `Tensor`s should have exactly one dimension. + + If `cond` is a python Boolean or `tf.constant` then either `true_vector` or + `false_vector` is immediately returned. I.e., no graph nodes are created and + no validation happens. + + Example: + + ```python + pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18)) # [10, 11] + pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18)) # [15, 16, 17] + ``` + + Args: + cond: `Tensor`. Must have `dtype=tf.bool` and be scalar. + true_vector: `Tensor` of one dimension. Returned when cond is `True`. + false_vector: `Tensor` of one dimension. Returned when cond is `False`. + name: Python `str`. The name to give this op. + + Returns: + true_or_false_vector: `Tensor`. + + Raises: + TypeError: if `cond.dtype != tf.bool` + TypeError: if `cond` is not a constant and + `true_vector.dtype != false_vector.dtype` + """ + with ops.name_scope(name, values=(cond, true_vector, false_vector)): + cond = ops.convert_to_tensor(cond, name="cond") + if cond.dtype != dtypes.bool: + raise TypeError("%s.dtype=%s which is not %s" % + (cond, cond.dtype, dtypes.bool)) + cond_value_static = tensor_util.constant_value(cond) + if cond_value_static is not None: + return true_vector if cond_value_static else false_vector + true_vector = ops.convert_to_tensor(true_vector, name="true_vector") + false_vector = ops.convert_to_tensor(false_vector, name="false_vector") + if true_vector.dtype != false_vector.dtype: + raise TypeError( + "%s.dtype=%s does not match %s.dtype=%s" % + (true_vector, true_vector.dtype, false_vector, false_vector.dtype)) + n = array_ops.shape(true_vector)[0] + return array_ops.slice( + array_ops.concat([true_vector, false_vector], 0), + [array_ops.where_v2(cond, 0, n)], [array_ops.where_v2(cond, n, -1)]) + + +def prefer_static_broadcast_shape(shape1, + shape2, + name="prefer_static_broadcast_shape"): + """Convenience function which statically broadcasts shape when possible.
+
+  Args:
+    shape1: `1-D` integer `Tensor` (already converted).
+    shape2: `1-D` integer `Tensor` (already converted).
+    name: A string name to prepend to created ops.
+
+  Returns:
+    The broadcast shape, either as a `TensorShape` (if the broadcast can be
+    done statically), or as a `Tensor`.
+  """
+  with ops.name_scope(name, values=[shape1, shape2]):
+
+    def make_shape_tensor(x):
+      return ops.convert_to_tensor(x, name="shape", dtype=dtypes.int32)
+
+    def get_tensor_shape(s):
+      if isinstance(s, tensor_shape.TensorShape):
+        return s
+      s_ = tensor_util.constant_value(make_shape_tensor(s))
+      if s_ is not None:
+        return tensor_shape.TensorShape(s_)
+      return None
+
+    def get_shape_tensor(s):
+      if not isinstance(s, tensor_shape.TensorShape):
+        return make_shape_tensor(s)
+      if s.is_fully_defined():
+        return make_shape_tensor(s.as_list())
+      raise ValueError("Cannot broadcast from partially "
+                       "defined `TensorShape`.")
+
+    shape1_ = get_tensor_shape(shape1)
+    shape2_ = get_tensor_shape(shape2)
+    if shape1_ is not None and shape2_ is not None:
+      return array_ops.broadcast_static_shape(shape1_, shape2_)
+
+    shape1_ = get_shape_tensor(shape1)
+    shape2_ = get_shape_tensor(shape2)
+    return array_ops.broadcast_dynamic_shape(shape1_, shape2_)
+
+
+def prefer_static_rank(x):
+  """Return static rank of tensor `x` if available, else `tf.rank(x)`.
+
+  Args:
+    x: `Tensor` (already converted).
+
+  Returns:
+    Numpy array (if static rank is obtainable), else `Tensor`.
+  """
+  return prefer_static_value(array_ops.rank(x))
+
+
+def prefer_static_shape(x):
+  """Return static shape of tensor `x` if available, else `tf.shape(x)`.
+
+  Args:
+    x: `Tensor` (already converted).
+
+  Returns:
+    Numpy array (if static shape is obtainable), else `Tensor`.
+  """
+  return prefer_static_value(array_ops.shape(x))
+
+
+def prefer_static_value(x):
+  """Return static value of tensor `x` if available, else `x`.
+
+  Args:
+    x: `Tensor` (already converted).
+
+  Returns:
+    Numpy array (if static value is obtainable), else `Tensor`.
+  """
+  static_x = tensor_util.constant_value(x)
+  if static_x is not None:
+    return static_x
+  return x
+
+
+def gen_new_seed(seed, salt):
+  """Generate a new seed, from the given seed and salt."""
+  if seed is None:
+    return None
+  string = (str(seed) + salt).encode("utf-8")
+  return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
+
+
+def fill_triangular(x, upper=False, name=None):
+  """Creates a (batch of) triangular matrix from a vector of inputs.
+
+  Created matrix can be lower- or upper-triangular. (It is more efficient to
+  create the matrix as upper or lower, rather than transpose.)
+
+  Triangular matrix elements are filled in a clockwise spiral. See the example
+  below.
+
+  If `x.get_shape()` is `[b1, b2, ..., bB, d]` then the output shape is
+  `[b1, b2, ..., bB, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
+  `n = int(np.sqrt(0.25 + 2. * d) - 0.5)`.
+
+  Example:
+
+  ```python
+  fill_triangular([1, 2, 3, 4, 5, 6])
+  # ==> [[4, 0, 0],
+  #      [6, 5, 0],
+  #      [3, 2, 1]]
+
+  fill_triangular([1, 2, 3, 4, 5, 6], upper=True)
+  # ==> [[1, 2, 3],
+  #      [0, 5, 6],
+  #      [0, 0, 4]]
+  ```
+
+  For comparison, a pure numpy version of this function can be found in
+  `util_test.py`, function `_fill_triangular`.
+
+  Args:
+    x: `Tensor` representing lower (or upper) triangular elements.
+    upper: Python `bool` representing whether output matrix should be upper
+      triangular (`True`) or lower triangular (`False`, default).
+    name: Python `str`. The name to give this op.
+
+  Returns:
+    tril: `Tensor` with lower (or upper) triangular elements filled from `x`.
+
+  Raises:
+    ValueError: if `x` cannot be mapped to a triangular matrix.
+  """
+
+  with ops.name_scope(name, "fill_triangular", values=[x]):
+    x = ops.convert_to_tensor(x, name="x")
+    if tensor_shape.dimension_value(
+        x.shape.with_rank_at_least(1)[-1]) is not None:
+      # Formula derived by solving for n: m = n(n+1)/2.
+      m = np.int32(x.shape.dims[-1].value)
+      n = np.sqrt(0.25 + 2. * m) - 0.5
+      if n != np.floor(n):
+        raise ValueError("Input right-most shape ({}) does not "
+                         "correspond to a triangular matrix.".format(m))
+      n = np.int32(n)
+      static_final_shape = x.shape[:-1].concatenate([n, n])
+    else:
+      m = array_ops.shape(x)[-1]
+      # For derivation, see above. Casting automatically lops off the 0.5, so
+      # we omit it. We don't validate that n is an integer because this has
+      # graph-execution cost; an error will be thrown from the reshape below.
+      n = math_ops.cast(
+          math_ops.sqrt(0.25 + math_ops.cast(2 * m, dtype=dtypes.float32)),
+          dtype=dtypes.int32)
+      static_final_shape = x.shape.with_rank_at_least(1)[:-1].concatenate(
+          [None, None])
+    # We now concatenate the "tail" of `x` to `x` (and reverse one of them).
+    #
+    # We do this based on the insight that the input `x` provides `ceil(n/2)`
+    # rows of an `n x n` matrix, some of which will get zeroed out being on the
+    # wrong side of the diagonal. The first row will not get zeroed out at all,
+    # and we need `floor(n/2)` more rows, so the first is what we omit from
+    # `x_tail`. If we then stack those `ceil(n/2)` rows with the `floor(n/2)`
+    # rows provided by a reversed tail, it is exactly the other set of elements
+    # of the reversed tail which will be zeroed out for being on the wrong side
+    # of the diagonal further up/down the matrix. And, in doing so, we've
+    # filled the triangular matrix in a clockwise spiral pattern. Neat!
+    #
+    # Try it out in numpy:
+    #  n = 3
+    #  x = np.arange(n * (n + 1) / 2)
+    #  m = x.shape[0]
+    #  n = np.int32(np.sqrt(.25 + 2 * m) - .5)
+    #  x_tail = x[(m - (n**2 - m)):]
+    #  np.concatenate([x_tail, x[::-1]], 0).reshape(n, n)  # lower
+    #  # ==> array([[3, 4, 5],
+    #  #            [5, 4, 3],
+    #  #            [2, 1, 0]])
+    #  np.concatenate([x, x_tail[::-1]], 0).reshape(n, n)  # upper
+    #  # ==> array([[0, 1, 2],
+    #  #            [3, 4, 5],
+    #  #            [5, 4, 3]])
+    #
+    # Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't
+    # correctly handle `m == n == 1`. Hence, we do nonnegative indexing.
+    # Furthermore observe that:
+    #   m - (n**2 - m)
+    #   = n**2 / 2 + n / 2 - (n**2 - (n**2 / 2 + n / 2))
+    #   = 2 (n**2 / 2 + n / 2) - n**2
+    #   = n**2 + n - n**2
+    #   = n
+    ndims = prefer_static_rank(x)
+    if upper:
+      x_list = [x, array_ops.reverse(x[..., n:], axis=[ndims - 1])]
+    else:
+      x_list = [x[..., n:], array_ops.reverse(x, axis=[ndims - 1])]
+    new_shape = (
+        static_final_shape.as_list() if static_final_shape.is_fully_defined()
+        else array_ops.concat([array_ops.shape(x)[:-1], [n, n]], axis=0))
+    x = array_ops.reshape(array_ops.concat(x_list, axis=-1), new_shape)
+    x = array_ops.matrix_band_part(
+        x, num_lower=(0 if upper else -1), num_upper=(-1 if upper else 0))
+    x.set_shape(static_final_shape)
+    return x
+
+
+def fill_triangular_inverse(x, upper=False, name=None):
+  """Creates a vector from a (batch of) triangular matrix.
+
+  The vector is created from the lower-triangular or upper-triangular portion
+  depending on the value of the parameter `upper`.
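+
+  This function inverts `fill_triangular`, so a round trip recovers the input
+  (a minimal sketch):
+
+  ```python
+  v = [1, 2, 3, 4, 5, 6]
+  fill_triangular_inverse(fill_triangular(v))  # ==> [1, 2, 3, 4, 5, 6]
+  ```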
+ + If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is + `[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`. + + Example: + + ```python + fill_triangular_inverse( + [[4, 0, 0], + [6, 5, 0], + [3, 2, 1]]) + + # ==> [1, 2, 3, 4, 5, 6] + + fill_triangular_inverse( + [[1, 2, 3], + [0, 5, 6], + [0, 0, 4]], upper=True) + + # ==> [1, 2, 3, 4, 5, 6] + ``` + + Args: + x: `Tensor` representing lower (or upper) triangular elements. + upper: Python `bool` representing whether output matrix should be upper + triangular (`True`) or lower triangular (`False`, default). + name: Python `str`. The name to give this op. + + Returns: + flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower + (or upper) triangular elements from `x`. + """ + + with ops.name_scope(name, "fill_triangular_inverse", values=[x]): + x = ops.convert_to_tensor(x, name="x") + if tensor_shape.dimension_value( + x.shape.with_rank_at_least(2)[-1]) is not None: + n = np.int32(x.shape.dims[-1].value) + m = np.int32((n * (n + 1)) // 2) + static_final_shape = x.shape[:-2].concatenate([m]) + else: + n = array_ops.shape(x)[-1] + m = (n * (n + 1)) // 2 + static_final_shape = x.shape.with_rank_at_least(2)[:-2].concatenate( + [None]) + ndims = prefer_static_rank(x) + if upper: + initial_elements = x[..., 0, :] + triangular_portion = x[..., 1:, :] + else: + initial_elements = array_ops.reverse(x[..., -1, :], axis=[ndims - 2]) + triangular_portion = x[..., :-1, :] + rotated_triangular_portion = array_ops.reverse( + array_ops.reverse(triangular_portion, axis=[ndims - 1]), + axis=[ndims - 2]) + consolidated_matrix = triangular_portion + rotated_triangular_portion + end_sequence = array_ops.reshape( + consolidated_matrix, + array_ops.concat([array_ops.shape(x)[:-2], [n * (n - 1)]], axis=0)) + y = array_ops.concat([initial_elements, end_sequence[..., :m - n]], axis=-1) + y.set_shape(static_final_shape) + return y + + +def tridiag(below=None, diag=None, above=None, name=None): + """Creates a matrix with values set above, below, and on the diagonal. + + Example: + + ```python + tridiag(below=[1., 2., 3.], + diag=[4., 5., 6., 7.], + above=[8., 9., 10.]) + # ==> array([[ 4., 8., 0., 0.], + # [ 1., 5., 9., 0.], + # [ 0., 2., 6., 10.], + # [ 0., 0., 3., 7.]], dtype=float32) + ``` + + Warning: This Op is intended for convenience, not efficiency. + + Args: + below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below + diagonal part. `None` is logically equivalent to `below = 0`. + diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal + part. `None` is logically equivalent to `diag = 0`. + above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above + diagonal part. `None` is logically equivalent to `above = 0`. + name: Python `str`. The name to give this op. + + Returns: + tridiag: `Tensor` with values set above, below and on the diagonal. + + Raises: + ValueError: if all inputs are `None`. 
+ """ + + def _pad(x): + """Prepends and appends a zero to every vector in a batch of vectors.""" + shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0) + z = array_ops.zeros(shape, dtype=x.dtype) + return array_ops.concat([z, x, z], axis=-1) + + def _add(*x): + """Adds list of Tensors, ignoring `None`.""" + s = None + for y in x: + if y is None: + continue + elif s is None: + s = y + else: + s += y + if s is None: + raise ValueError("Must specify at least one of `below`, `diag`, `above`.") + return s + + with ops.name_scope(name, "tridiag", [below, diag, above]): + if below is not None: + below = ops.convert_to_tensor(below, name="below") + below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:] + if diag is not None: + diag = ops.convert_to_tensor(diag, name="diag") + diag = array_ops.matrix_diag(diag) + if above is not None: + above = ops.convert_to_tensor(above, name="above") + above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1] + # TODO(jvdillon): Consider using scatter_nd instead of creating three full + # matrices. + return _add(below, diag, above) + + +def reduce_weighted_logsumexp(logx, + w=None, + axis=None, + keep_dims=False, + return_sign=False, + name=None): + """Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`. + + If all weights `w` are known to be positive, it is more efficient to directly + use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.math.log(w))` is + more + efficient than `du.reduce_weighted_logsumexp(logx, w)`. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each + entry in `axis`. If `keep_dims` is true, the reduced dimensions + are retained with length 1. + + If `axis` has no entries, all dimensions are reduced, and a + tensor with a single element is returned. + + This function is more numerically stable than log(sum(w * exp(input))). It + avoids overflows caused by taking the exp of large inputs and underflows + caused by taking the log of small inputs. + + For example: + + ```python + x = tf.constant([[0., 0, 0], + [0, 0, 0]]) + + w = tf.constant([[-1., 1, 1], + [1, 1, 1]]) + + du.reduce_weighted_logsumexp(x, w) + # ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4) + + du.reduce_weighted_logsumexp(x, w, axis=0) + # ==> [log(-1+1), log(1+1), log(1+1)] + + du.reduce_weighted_logsumexp(x, w, axis=1) + # ==> [log(-1+1+1), log(1+1+1)] + + du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True) + # ==> [[log(-1+1+1)], [log(1+1+1)]] + + du.reduce_weighted_logsumexp(x, w, axis=[0, 1]) + # ==> log(-1+5) + ``` + + Args: + logx: The tensor to reduce. Should have numeric type. + w: The weight tensor. Should have numeric type identical to `logx`. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keep_dims: If true, retains reduced dimensions with length 1. + return_sign: If `True`, returns the sign of the result. + name: A name for the operation (optional). + + Returns: + lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor. + sign: (Optional) The sign of `sum(weight * exp(x))`. 
+ """ + with ops.name_scope(name, "reduce_weighted_logsumexp", [logx, w]): + logx = ops.convert_to_tensor(logx, name="logx") + if w is None: + lswe = math_ops.reduce_logsumexp(logx, axis=axis, keepdims=keep_dims) + if return_sign: + sgn = array_ops.ones_like(lswe) + return lswe, sgn + return lswe + w = ops.convert_to_tensor(w, dtype=logx.dtype, name="w") + log_absw_x = logx + math_ops.log(math_ops.abs(w)) + max_log_absw_x = math_ops.reduce_max(log_absw_x, axis=axis, keepdims=True) + # If the largest element is `-inf` or `inf` then we don't bother subtracting + # off the max. We do this because otherwise we'd get `inf - inf = NaN`. That + # this is ok follows from the fact that we're actually free to subtract any + # value we like, so long as we add it back after taking the `log(sum(...))`. + max_log_absw_x = array_ops.where_v2( + math_ops.is_inf(max_log_absw_x), array_ops.zeros_like(max_log_absw_x), + max_log_absw_x) + wx_over_max_absw_x = ( + math_ops.sign(w) * math_ops.exp(log_absw_x - max_log_absw_x)) + sum_wx_over_max_absw_x = math_ops.reduce_sum( + wx_over_max_absw_x, axis=axis, keepdims=keep_dims) + if not keep_dims: + max_log_absw_x = array_ops.squeeze(max_log_absw_x, axis) + sgn = math_ops.sign(sum_wx_over_max_absw_x) + lswe = max_log_absw_x + math_ops.log(sgn * sum_wx_over_max_absw_x) + if return_sign: + return lswe, sgn + return lswe + + +# TODO(jvdillon): Merge this test back into: +# tensorflow/python/ops/softplus_op_test.py +# once TF core is accepting new ops. +def softplus_inverse(x, name=None): + """Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)). + + Mathematically this op is equivalent to: + + ```none + softplus_inverse = log(exp(x) - 1.) + ``` + + Args: + x: `Tensor`. Non-negative (not enforced), floating-point. + name: A name for the operation (optional). + + Returns: + `Tensor`. Has the same type/shape as input `x`. + """ + with ops.name_scope(name, "softplus_inverse", values=[x]): + x = ops.convert_to_tensor(x, name="x") + # We begin by deriving a more numerically stable softplus_inverse: + # x = softplus(y) = Log[1 + exp{y}], (which means x > 0). + # ==> exp{x} = 1 + exp{y} (1) + # ==> y = Log[exp{x} - 1] (2) + # = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}] + # = Log[(1 - exp{-x}) / 1] + Log[exp{x}] + # = Log[1 - exp{-x}] + x (3) + # (2) is the "obvious" inverse, but (3) is more stable than (2) for large x. + # For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will + # be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0. + # + # In addition to the numerically stable derivation above, we clamp + # small/large values to be congruent with the logic in: + # tensorflow/core/kernels/softplus_op.h + # + # Finally, we set the input to one whenever the input is too large or too + # small. This ensures that no unchosen codepath is +/- inf. This is + # necessary to ensure the gradient doesn't get NaNs. Recall that the + # gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false` + # thus an `inf` in an unselected path results in `0*inf=nan`. We are careful + # to overwrite `x` with ones only when we will never actually use this + # value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`. + threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2. 
+    is_too_small = math_ops.less(x, np.exp(threshold))
+    is_too_large = math_ops.greater(x, -threshold)
+    too_small_value = math_ops.log(x)
+    too_large_value = x
+    # This `where` will ultimately be a NOP because we won't select this
+    # codepath wherever we used the surrogate `ones_like`.
+    x = array_ops.where_v2(
+        math_ops.logical_or(is_too_small, is_too_large), array_ops.ones_like(x),
+        x)
+    y = x + math_ops.log(-math_ops.expm1(-x))  # == log(expm1(x))
+    return array_ops.where_v2(
+        is_too_small, too_small_value,
+        array_ops.where_v2(is_too_large, too_large_value, y))
+
+
+# TODO(b/35290280): Add unit-tests.
+def dimension_size(x, axis):
+  """Returns the size of a specific dimension."""
+  # Since tf.gather isn't "constant-in, constant-out", we must first check the
+  # static shape or fall back to dynamic shape.
+  s = tensor_shape.dimension_value(
+      x.shape.with_rank_at_least(np.abs(axis))[axis])
+  if s is not None:
+    return s
+  return array_ops.shape(x)[axis]
+
+
+def process_quadrature_grid_and_probs(quadrature_grid_and_probs,
+                                      dtype,
+                                      validate_args,
+                                      name=None):
+  """Validates quadrature grid, probs or computes them as necessary.
+
+  Args:
+    quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
+      representing the sample points and the corresponding (possibly
+      normalized) weights. When `None`, defaults to:
+      `np.polynomial.hermite.hermgauss(deg=8)`.
+    dtype: The expected `dtype` of `grid` and `probs`.
+    validate_args: Python `bool`, default `False`. When `True` distribution
+      parameters are checked for validity despite possibly degrading runtime
+      performance. When `False` invalid inputs may silently render incorrect
+      outputs.
+    name: Python `str` name prefixed to Ops created by this class.
+
+  Returns:
+    quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
+      representing the sample points and the corresponding (possibly
+      normalized) weights.
+ + Raises: + ValueError: if `quadrature_grid_and_probs is not None` and + `len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])` + """ + with ops.name_scope(name, "process_quadrature_grid_and_probs", + [quadrature_grid_and_probs]): + if quadrature_grid_and_probs is None: + grid, probs = np.polynomial.hermite.hermgauss(deg=8) + grid = grid.astype(dtype.as_numpy_dtype) + probs = probs.astype(dtype.as_numpy_dtype) + probs /= np.linalg.norm(probs, ord=1, keepdims=True) + grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype) + probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype) + return grid, probs + + grid, probs = tuple(quadrature_grid_and_probs) + grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype) + probs = ops.convert_to_tensor(probs, name="unnormalized_probs", dtype=dtype) + probs /= linalg_ops.norm(probs, ord=1, axis=-1, keepdims=True, name="probs") + + def _static_event_size(x): + """Returns the static size of a specific dimension or `None`.""" + return tensor_shape.dimension_value(x.shape.with_rank_at_least(1)[-1]) + + m, n = _static_event_size(probs), _static_event_size(grid) + if m is not None and n is not None: + if m != n: + raise ValueError("`quadrature_grid_and_probs` must be a `tuple` of " + "same-length zero-th-dimension `Tensor`s " + "(saw lengths {}, {})".format(m, n)) + elif validate_args: + assertions = [ + check_ops.assert_equal( + dimension_size(probs, axis=-1), + dimension_size(grid, axis=-1), + message=("`quadrature_grid_and_probs` must be a `tuple` of " + "same-length zero-th-dimension `Tensor`s")), + ] + with ops.control_dependencies(assertions): + grid = array_ops.identity(grid) + probs = array_ops.identity(probs) + return grid, probs + + +def pad(x, axis, front=False, back=False, value=0, count=1, name=None): + """Pads `value` to the front and/or back of a `Tensor` dim, `count` times. + + Args: + x: `Tensor` input. + axis: Scalar `int`-like `Tensor` representing the single dimension to pad. + (Negative indexing is supported.) + front: Python `bool`; if `True` the beginning of the `axis` dimension is + padded with `value`, `count` times. If `False` no front padding is made. + back: Python `bool`; if `True` the end of the `axis` dimension is padded + with `value`, `count` times. If `False` no end padding is made. + value: Scalar `int`-like `Tensor` representing the actual value added to the + front and/or back of the `axis` dimension of `x`. + count: Scalar `int`-like `Tensor` representing number of elements added to + the front and/or back of the `axis` dimension of `x`. E.g., if `front = + back = True` then `2 * count` elements are added. + name: Python `str` name prefixed to Ops created by this function. + + Returns: + pad: The padded version of input `x`. + + Raises: + ValueError: if both `front` and `back` are `False`. + TypeError: if `count` is not `int`-like. 
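+
+  For example, a minimal sketch:
+
+  ```python
+  pad(tf.constant([1, 2, 3]), axis=0, back=True, value=9, count=2)
+  # ==> [1, 2, 3, 9, 9]
+  ```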
+ """ + with ops.name_scope(name, "pad", [x, value, count]): + x = ops.convert_to_tensor(x, name="x") + value = ops.convert_to_tensor(value, dtype=x.dtype, name="value") + count = ops.convert_to_tensor(count, name="count") + if not count.dtype.is_integer: + raise TypeError("`count.dtype` (`{}`) must be `int`-like.".format( + count.dtype.name)) + if not front and not back: + raise ValueError("At least one of `front`, `back` must be `True`.") + ndims = ( + x.shape.ndims if x.shape.ndims is not None else array_ops.rank( + x, name="ndims")) + axis = ops.convert_to_tensor(axis, name="axis") + axis_ = tensor_util.constant_value(axis) + if axis_ is not None: + axis = axis_ + if axis < 0: + axis = ndims + axis + count_ = tensor_util.constant_value(count) + if axis_ >= 0 or x.shape.ndims is not None: + head = x.shape[:axis] + middle = tensor_shape.TensorShape(None if count_ is None else ( + tensor_shape.dimension_at_index(x.shape, axis) + count_ * + (front + back))) + tail = x.shape[axis + 1:] + final_shape = head.concatenate(middle.concatenate(tail)) + else: + final_shape = None + else: + axis = array_ops.where_v2(axis < 0, ndims + axis, axis) + final_shape = None + x = array_ops.pad( + x, + paddings=array_ops.one_hot( + indices=array_ops_stack.stack( + [axis if front else -1, axis if back else -1]), + depth=ndims, + axis=0, + on_value=count, + dtype=dtypes.int32), + constant_values=value) + if final_shape is not None: + x.set_shape(final_shape) + return x + + +def parent_frame_arguments(): + """Returns parent frame arguments. + + When called inside a function, returns a dictionary with the caller's function + arguments. These are positional arguments and keyword arguments (**kwargs), + while variable arguments (*varargs) are excluded. + + When called at global scope, this will return an empty dictionary, since there + are no arguments. + + WARNING: If caller function argument names are overloaded before invoking + this method, then values will reflect the overloaded value. For this reason, + we recommend calling `parent_frame_arguments` at the beginning of the + function. + """ + # All arguments and the names used for *varargs, and **kwargs + arg_names, variable_arg_name, keyword_arg_name, local_vars = ( + tf_inspect._inspect.getargvalues( # pylint: disable=protected-access + # Get the first frame of the caller of this method. + tf_inspect._inspect.stack()[1][0])) # pylint: disable=protected-access + + # Remove the *varargs, and flatten the **kwargs. Both are + # nested lists. + local_vars.pop(variable_arg_name, {}) + keyword_args = local_vars.pop(keyword_arg_name, {}) + + final_args = {} + # Copy over arguments and their values. In general, local_vars + # may contain more than just the arguments, since this method + # can be called anywhere in a function. + for arg_name in arg_names: + final_args[arg_name] = local_vars.pop(arg_name) + final_args.update(keyword_args) + + return final_args + + +class AppendDocstring: + """Helper class to promote private subclass docstring to public counterpart. + + Example: + + ```python + class TransformedDistribution(Distribution): + @distribution_util.AppendDocstring( + additional_note="A special note!", + kwargs_dict={"foo": "An extra arg."}) + def _prob(self, y, foo=None): + pass + ``` + + In this case, the `AppendDocstring` decorator appends the `additional_note` to + the docstring of `prob` (not `_prob`) and adds a new `kwargs` + section with each dictionary item as a bullet-point. + + For a more detailed example, see `TransformedDistribution`. 
+ """ + + def __init__(self, additional_note="", kwargs_dict=None): + """Initializes the AppendDocstring object. + + Args: + additional_note: Python string added as additional docstring to public + version of function. + kwargs_dict: Python string/string dictionary representing specific kwargs + expanded from the **kwargs input. + + Raises: + ValueError: if kwargs_dict.key contains whitespace. + ValueError: if kwargs_dict.value contains newlines. + """ + self._additional_note = additional_note + if kwargs_dict: + bullets = [] + for key in sorted(kwargs_dict.keys()): + value = kwargs_dict[key] + if any(x.isspace() for x in key): + raise ValueError("Parameter name \"%s\" contains whitespace." % key) + value = value.lstrip() + if "\n" in value: + raise ValueError( + "Parameter description for \"%s\" contains newlines." % key) + bullets.append("* `%s`: %s" % (key, value)) + self._additional_note += ("\n\n##### `kwargs`:\n\n" + "\n".join(bullets)) + + def __call__(self, fn): + + @functools.wraps(fn) + def _fn(*args, **kwargs): + return fn(*args, **kwargs) + + if _fn.__doc__ is None: + _fn.__doc__ = self._additional_note + else: + _fn.__doc__ += "\n%s" % self._additional_note + return _fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..999265c3ec4f900ead05e2f628399ff0cba1728c Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_array_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_array_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27ee5f6def64cee7c63b463ea7bde79dbcc432fb Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_array_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f389d7a7ac79d0e938a531d95c83c371c1642c8 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..445776158ef32eec38143c3d5a3ff25e5ee6e97b Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor_dynamic.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor_dynamic.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..dc92ea05f9b6812c0ac094961e92161f08568ffb Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor_dynamic.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0f9f52a29d710d04630f16e90275197219dd44 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_ops.py @@ -0,0 +1,24 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Import all modules in the `structured` package that define exported symbols. + +We don't import these modules from structured/__init__.py, since we want to +avoid circular dependencies. +""" + + +# pylint: disable=unused-import +from tensorflow.python.ops.structured import structured_array_ops +from tensorflow.python.ops.structured import structured_tensor