diff --git a/.gitattributes b/.gitattributes index 037d41ec71c32d0acabd9a854656b6878fff7bfc..99a40b7742b2e15d2d5c2001f64269df482b887d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -856,3 +856,6 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_tfprof.so videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_determinism.so filter=lfs diff=lfs merge=lfs -text videochat2/bin/python filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_stat_summarizer.so filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.so filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.so filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_util_port.so filter=lfs diff=lfs merge=lfs -text diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4512fec1c7955e059333ab6dcbd78d9806cead2a Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/boosted_trees_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/boosted_trees_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8892c0556ce471de92091dffe04eafc1e26c8c2 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/boosted_trees_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/candidate_sampling_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/candidate_sampling_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..081ff7d05d9edfc1300d0d9fc30ef9693084aee8 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/candidate_sampling_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/clip_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/clip_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6d6d3a10bb502e40b3a373a2094c9e33d5453c1 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/clip_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/clustering_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/clustering_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2ffba3046efdfb83e74ad00662a109c8820b493 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/clustering_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/composite_tensor_ops.cpython-310.pyc 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/composite_tensor_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7273069a876e537ec7814d414c2cba75ffdfe1a3 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/composite_tensor_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/control_flow_state.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/control_flow_state.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc4d20ad3a7c2773aa672d7f1a24f92c70e13591 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/control_flow_state.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/control_flow_v2_toggles.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/control_flow_v2_toggles.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e329fc0d80f90de624afc43d7f07177f6bfdcb4 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/control_flow_v2_toggles.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/filesystem_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/filesystem_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b7746ba57a7306d1024ee59e8454b54c363aff5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/filesystem_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_collective_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_collective_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eee59063e11b1defb8dc036a8589ebfc2e33c3c7 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_collective_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_control_flow_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_control_flow_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8561b73fdd831e530229195e0168f765756672cb Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_control_flow_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_count_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_count_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01da8633d2a216f2d0fc65bc64c10a8d644e3db7 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_count_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_decode_proto_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_decode_proto_ops.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..c160be16bde422abe07ae294f24cbd984c58d050 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_decode_proto_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_io_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_io_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b31e43eb78e3737da7cb30b94a40f3ed886efc1a Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_io_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nccl_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nccl_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00fed41cbdb2b2ba7bc3c37f03025322430b9381 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nccl_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_ragged_array_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_ragged_array_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cafee3b9dce2b3064ec0dd760e17b72a8e20374 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_ragged_array_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_random_index_shuffle_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_random_index_shuffle_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df596253973f28b1dea6be5baeb1548c2d2a9956 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_random_index_shuffle_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_resource_variable_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_resource_variable_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2dd739037d86226e6f6d763c936e50725cc3a16 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_resource_variable_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_special_math_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_special_math_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..151b7a8e0e3e6a45db057e878b5c52345ba1624b Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_special_math_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_spectral_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_spectral_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48d7e140a06b81c036046120ec6468b0673c07d8 Binary files /dev/null and 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_spectral_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_state_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_state_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..438198b5c02ebe8446c7a34efb9ac4a6fa46530b Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_state_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_stateless_random_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_stateless_random_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfb6972bc09fad40238a17dfddd00056834d4916 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_stateless_random_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_stateless_random_ops_v2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_stateless_random_ops_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6ae7dcd7bb612bdce32be217482dbd01a9d30f3 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_stateless_random_ops_v2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_uniform_quant_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_uniform_quant_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36ce1f45ba375cdb1a9f2ecd347e821ec77167f0 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_uniform_quant_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gradient_checker.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gradient_checker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24ce0918e55098d7b7a1ae056a60c13c725baed3 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gradient_checker.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gradients_impl.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gradients_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed56f583b510d4aa7f4aee3c409a65b9d06d8280 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gradients_impl.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/image_grad.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/image_grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d43ecf313975b750873b363d4dbe8b2a90cb6efd Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/image_grad.cpython-310.pyc differ diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/linalg_grad.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/linalg_grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebf168dede71768ab014f1630c740cc2fd823aae Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/linalg_grad.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc898d3730be05168afb7fd8c0b851a7f4f34ae8 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_impl_distribute.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_impl_distribute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15325e8ed11d3765d0f683dccf69300260014207 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_impl_distribute.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/partitioned_variables.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/partitioned_variables.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..089b2421790056b9bf66b2cf37cb36e0945d6166 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/partitioned_variables.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/random_crop_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/random_crop_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c97db67200bc44cb93f0ae0ed58c8bbb6d54692 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/random_crop_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/resource_variables_toggle.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/resource_variables_toggle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c761a0d07db77840f0f183e3b6d52e4d8fe620d9 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/resource_variables_toggle.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/script_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/script_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cddc03e1f5c359520f0de55ae0a50567350beb2 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/script_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/session_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/session_ops.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..14d5a8a7a7e7d40cd987b1db649ace0dd2247b34 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/session_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/sets.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/sets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df669e1f5751516b483fcc77a9675f2590529542 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/sets.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/standard_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/standard_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e90fbd4dacaef956765f694333601dcdaa68ba4 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/standard_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/state_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/state_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db112e33248a987d79984489e3ef4f74d322d48e Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/state_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/stateful_random_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/stateful_random_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51cc1e1594dbbd78af02d909b34d55202930dae3 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/stateful_random_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/stateless_random_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/stateless_random_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26aaec7bc9c2591d7222a227cd2fd8b86aced598 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/stateless_random_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/summary_ops_v2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/summary_ops_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb0ea14b7ffbcb7495389f22eedd3f9a8f15a640 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/summary_ops_v2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..457e54641c6953a605ba89b462d6e896b9792c1e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Ragged Tensors. + +This package defines ops for manipulating ragged tensors (`tf.RaggedTensor`), +which are tensors with non-uniform shapes. In particular, each `RaggedTensor` +has one or more *ragged dimensions*, which are dimensions whose slices may have +different lengths. For example, the inner (column) dimension of +`rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices +(`rt[0, :]`, ..., `rt[4, :]`) have different lengths. For a more detailed +description of ragged tensors, see the `tf.RaggedTensor` class documentation +and the [Ragged Tensor Guide](/guide/ragged_tensor). + +API docstring: tensorflow.ragged +""" +from tensorflow.python.ops.ragged import ragged_tensor diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_autograph.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_autograph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c386ccdc6f5fe3b7411c3818c62bed8fae490391 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_autograph.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_squeeze_op.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_squeeze_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bb92d1083f302afaa2fe97970469c98a0f618e6 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_squeeze_op.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/dynamic_ragged_shape.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/dynamic_ragged_shape.py new file mode 100644 index 0000000000000000000000000000000000000000..30b0534accb95d353baa4b2ad54edcf7d030ba07 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/dynamic_ragged_shape.py @@ -0,0 +1,3292 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Shapes & broadcasting for RaggedTensors. 
+ +TODO(martinz): make this suitable for output for tf.shape +TODO(martinz): replace ragged_tensor_shape with this. +""" + +import abc +from typing import Any, Iterable, Optional, Sequence, Tuple, Union + +import numpy as np +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import extension_type +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged.row_partition import RowPartition +from tensorflow.python.ops.ragged.row_partition import RowPartitionSpec +from tensorflow.python.types import core +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +class _DynamicRaggedShapeBatchEncoder(extension_type.ExtensionTypeBatchEncoder): + """A batch encoder for DynamicRaggedShape below.""" + + def batch(self, spec: "DynamicRaggedShape.Spec", + batch_size) -> "DynamicRaggedShape.Spec": + if spec.num_row_partitions: + new_head = _batch_rp_spec_head(spec._row_partitions[0], batch_size) # pylint:disable=protected-access + new_tail = [_batch_rp_spec(rp, batch_size) for rp in spec._row_partitions] # pylint:disable=protected-access + new_rp = [new_head] + new_tail + new_static_inner_shape = _batch_static_inner_shape( + spec._static_inner_shape, batch_size) # pylint:disable=protected-access + + return DynamicRaggedShape.Spec( + row_partitions=new_rp, + static_inner_shape=new_static_inner_shape, + dtype=spec.dtype) + elif batch_size is None: + if spec.inner_rank == 0: + return DynamicRaggedShape.Spec._from_tensor_shape( # pylint:disable=protected-access + [None], + 0, + dtype=spec.dtype) + else: + # Might be None + new_head = RowPartitionSpec( + uniform_row_length=spec._dimension(0), # pylint:disable=protected-access + dtype=spec.dtype) + new_static_inner_shape = _batch_static_inner_shape( + spec._static_inner_shape, batch_size) # pylint:disable=protected-access + return DynamicRaggedShape.Spec( + row_partitions=[new_head], + static_inner_shape=new_static_inner_shape, + dtype=spec.dtype) + else: + + return DynamicRaggedShape.Spec( + row_partitions=[], + static_inner_shape=_batch_tensor_shape( + spec._static_inner_shape, # pylint:disable=protected-access + batch_size), + dtype=spec.dtype) + + def unbatch(self, + spec: "DynamicRaggedShape.Spec") -> "DynamicRaggedShape.Spec": + if spec.num_row_partitions: + result = [] + head = spec._row_partitions[0] # pylint:disable=protected-access + scale = None if head.uniform_row_length is None else head.nrows + + for rp in spec._row_partitions[1:]: # pylint:disable=protected-access + if scale is None: + result.append( + RowPartitionSpec( + nrows=None, + nvals=None, + uniform_row_length=rp.uniform_row_length, + dtype=spec.dtype)) + else: + nrows = None if rp.nrows is None else rp.nrows // scale + if rp.uniform_row_length is None: + scale = None + result.append( + RowPartitionSpec( + nrows=nrows, + nvals=None, + uniform_row_length=None, + dtype=spec.dtype)) + else: + result.append( + RowPartitionSpec( + nrows=nrows, + 
nvals=rp.nvals // scale, + uniform_row_length=rp.uniform_row_length, + dtype=spec.dtype)) + return DynamicRaggedShape.Spec( + row_partitions=result, + static_inner_shape=_unbatch_static_inner_shape( + spec._static_inner_shape, scale), # pylint:disable=protected-access + dtype=spec.dtype) + else: # spec.num_row_partitions == 0 + return DynamicRaggedShape.Spec( + row_partitions=[], + static_inner_shape=spec._static_inner_shape[1:], # pylint:disable=protected-access + dtype=spec.dtype) + + def decode(self, spec: "DynamicRaggedShape.Spec", + encoding) -> "DynamicRaggedShape": + return DynamicRaggedShape.from_tensor(encoding, dtype=spec.dtype) + + def encode( + self, + spec: "DynamicRaggedShape.Spec", + value, + minimum_rank=0) -> Union[ragged_tensor.RaggedTensor, tensor_lib.Tensor]: + return ones(value, dtype=dtypes.bool) + + def encoding_specs( + self, spec: "DynamicRaggedShape.Spec" + ) -> Union[ragged_tensor.RaggedTensorSpec, tensor_lib.TensorSpec]: + if spec.rank != 0: + ragged_rank = spec.num_row_partitions + else: + # special case: need to unbatch twice to get ragged tensor. + ragged_rank = -1 + return ragged_tensor.RaggedTensorSpec( + shape=spec._to_tensor_shape(), # pylint:disable=protected-access + dtype=dtypes.bool, + ragged_rank=ragged_rank, + row_splits_dtype=spec.dtype) + + +# TODO(martinz): allow inner_shape to be a fully defined TensorShape. +# A "fully defined TensorShape" means one where the rank and all dimensions are +# known. +# Allowing inner_shape might mean allowing inner_shape to be initialized by +# a fully defined TensorShape, or it might mean that you can actually store +# TensorShape in the inner_shape field. This could conceivably construct +# a DynamicRaggedShape that was dtype agnostic. +# +# TODO(martinz): unify the impl of the determination of index type across +# RowPartition and DynamicRaggedShape. +@tf_export("experimental.DynamicRaggedShape") +class DynamicRaggedShape(extension_type.BatchableExtensionType): + """The shape of a ragged or dense tensor. + + Ragged shapes are encoded using two fields: + + * `inner_shape`: An integer vector giving the shape of a dense tensor. + * `row_partitions`: A list of `RowPartition` objects, describing how + that flat shape should be partitioned to add ragged axes. + + If a DynamicRaggedShape is the shape of a RaggedTensor rt, then: + 1. row_partitions = rt._nested_row_partitions + (and thus len(row_partitions) > 0) + 2. inner_shape is the shape of rt.flat_values + + If a DynamicRaggedShape is the shape of a dense tensor t, then: + 1. row_partitions = [] + 2. inner_shape is the shape of t. + + Examples: + + The following table gives a few examples (where `RP(lengths)` is short + for `RowPartition.from_lengths(lengths)`): + + Row Partitions | Inner Shape | Example Tensor + --------------------------- | ------------ | ---------------------------- + [] | [2, 3] | `[[1, 2, 3], [4, 5, 6]]` + [RP([2, 0, 3])] | [5] | `[[1, 2], [], [3, 4, 5]]` + [RP([2, 1])] | [3, 2] | `[[[1, 2], [3, 4]], [[5, 6]]]` + [RP([2, 1]), RP([2, 1, 2])] | [5] | `[[[1, 2], [3]], [[4, 5]]]` + """ + _row_partitions: Tuple[RowPartition, ...] + _inner_shape: tensor_lib.Tensor + _static_inner_shape: tensor_shape.TensorShape + __batch_encoder__ = _DynamicRaggedShapeBatchEncoder() + __name__ = "tf.DynamicRaggedShape" + + def __init__(self, + row_partitions: Sequence[RowPartition], + inner_shape: core.TensorLike, + dtype: Optional[dtypes.DType] = None, + validate: bool = False, + static_inner_shape: ... = None): + """Core constructor for a DynamicRaggedShape. 
+ + Create a DynamicRaggedShape. This can be used to construct a + DynamicRaggedShape representing a ragged or dense shape. If row_partitions + is an empty list, then this is equivalent to a dense shape. + + If row_partitions is specified, then the num_row_partitions will be equal + to len(row_partitions). There are several checks made. + Specifically: + 1. Consecutive row_partitions must have consistent nvals and nrows. + 2. The last row_partitions must have nvals equal to the first element of + inner_shape. + + The inner_shape is converted to a tensor. + All row_partitions and the inner_shape are converted to the same dtype + (int64 or int32). + + Args: + row_partitions: the row_partitions of the shape. + inner_shape: if len(row_partitions) > 0, the shape of the flat_values. + Otherwise, the shape of the tensor. + dtype: tf.int64, tf.int32, or None representing the preferred dtype. + validate: if true, dynamic validation is applied to the shape. + static_inner_shape: if len(row_partitions) > 0, the static shape of the + flat_values. Otherwise, the static shape of the tensor. Should be + convertible to a TensorShape. + """ + if not isinstance(row_partitions, Iterable): + raise TypeError( + "row_partitions should be a list of row partitions. Instead, got " + + str(row_partitions)) + for x in row_partitions: + if not isinstance(x, RowPartition): + raise TypeError("row_partitions contains " + str(x) + + " which is not a RowPartition") + dtype = _find_dtype_iterable(row_partitions, dtype) + dtype = _find_dtype(inner_shape, dtype) + if (isinstance(inner_shape, np.ndarray) and + inner_shape.dtype == np.int32 and dtype is None): + dtype = dtypes.int32 + dtype = _find_dtype(dtypes.int64, dtype) + + row_partitions = tuple([rp.with_dtype(dtype) for rp in row_partitions]) + self._row_partitions = row_partitions + self._inner_shape = ops.convert_to_tensor( + inner_shape, dtype_hint=dtype, name="inner_dim_sizes") + if self._inner_shape.dtype != dtype: + self._inner_shape = math_ops.cast(self._inner_shape, dtype) + + checks = [] + # Validate shapes. 
+ if self._row_partitions: + for axis, rp in enumerate(self._row_partitions): + if axis > 0: + previous_row_partition = self._row_partitions[axis - 1] + msg = ("RowPartitions in DynamicRaggedShape do not align " + f"between {axis - 1} and {axis}") + static_nrows = rp.static_nrows + static_nvals = previous_row_partition.static_nvals + if (static_nrows is not None) and (static_nvals is not None): + if static_nrows != static_nvals: + raise ValueError(msg) + else: + continue + if validate: + checks.append( + check_ops.assert_equal( + previous_row_partition.nvals(), rp.nrows(), message=msg)) + + self._inner_shape.shape.assert_has_rank(1) + + self._static_inner_shape = tensor_util.constant_value_as_shape( + self._inner_shape) + if static_inner_shape is not None: + self._static_inner_shape = self._static_inner_shape.merge_with( + static_inner_shape) + + if row_partitions: + last_row_partition = row_partitions[-1] + static_nvals = last_row_partition.static_nvals + static_inner_shape_nvals = tensor_shape.dimension_value( + self._static_inner_shape[0]) + if static_nvals is not None and static_inner_shape_nvals is not None: + if static_nvals != static_inner_shape_nvals: + raise ValueError("Last row partition does not match inner_shape.") + elif validate: + checks.append( + check_ops.assert_equal( + last_row_partition.nvals(), + self._inner_shape[0], + message="Last row partition does not match inner_shape.")) + if checks: + self._inner_shape = control_flow_ops.with_dependencies( + checks, self._inner_shape, name="inner_shape_validated") + self._row_partitions = [ + rp._with_dependencies(checks) for rp in self._row_partitions # pylint: disable=protected-access + ] + + @classmethod + def from_lengths(cls, + lengths: Sequence[Union[Sequence[int], int]], + num_row_partitions=None, + dtype=dtypes.int64): + """Creates a shape with the given lengths and num_row_partitions. + + Each element of lengths is either a nonnegative int or a tuple of nonnegative ints. + + If num_row_partitions is None, then the minimal num_row_partitions is used. + + For example, [2, (3, 2)] is the shape of [[0, 0, 0], [0, 0]], and + [2, 2] is the shape of [[0, 0], [0, 0]]. + + This chooses the minimal num_row_partitions required (including zero). + + The following table gives a few examples (where `RP(lengths)` is short + for `RowPartition.from_lengths(lengths)`): + + from_lengths | row_partitions | inner_shape + ---------------------- | --------------------------| ------------- + [] | [] | [] + [2, (3, 2)] | [RP([3, 2])] | [5] + [2, 2] | [] | [2, 2] + [2, (3, 2), 7] | [RP([3, 2])] | [5, 7] + [2, (2, 2), 3] | [RP([2, 2])] | [4, 3] + [2, 2, 3] | [] | [2, 2, 3] + [2, (2, 1), (2, 0, 3)] | [RP([2, 1]), RP([2, 0, 3])] | [5] + + If we want the row partitions to end with uniform row partitions, then + we can set num_row_partitions.
+ + For example, + below URP(3, 12) is RowPartition.from_uniform_row_length(3, 12) + + from_lengths | num_row_partitions | row_partitions | inner_shape + ---------------| -------------------|--------------------------|------------ + [2, (3, 2), 2] | 2 | [RP([3, 2]), URP(2, 10)] | [10] + [2, 2] | 1 | [URP(2, 4)] | [4] + [2, 2, 3] | 0 | [] | [2, 2, 3] + [2, 2, 3] | 1 | [URP(2, 4)] | [4, 3] + [2, 2, 3] | 2 | [URP(2, 4), URP(3, 12)] | [12] + + + + Representing the shapes from init(): + + from_lengths | Tensor Example + ------------------------ | ------------------------------ + `[2, 3]` | `[[1, 2, 3], [4, 5, 6]]` + `[3, (2, 0, 3)]` | `[[1, 2], [], [3, 4, 5]]` + `[2, (2, 1), 2]` | `[[[1, 2], [3, 4]], [[5, 6]]]` + `[2, (2, 1), (2, 1, 2)]` | `[[[1, 2], [3]], [[4, 5]]]` + + Args: + lengths: the lengths of sublists along each axis. + num_row_partitions: the num_row_partitions of the result or None + indicating the minimum number of row_partitions. + dtype: the dtype of the shape (tf.int32 or tf.int64). + + Returns: + a new DynamicRaggedShape + """ + if not isinstance(lengths, list): + raise ValueError("lengths should be a list") + for x in lengths: + if not _is_int_or_tuple_of_ints(x): + raise ValueError( + "element of lengths should be int or tuple of ints: instead %r" % + (x,)) + + if num_row_partitions is None: + # Calculate the minimal num_row_partitions. + is_list = [not isinstance(x, int) for x in lengths] + if any(is_list): + # Last index when not a list. + num_row_partitions = len(is_list) - is_list[-1::-1].index(True) - 1 + else: + num_row_partitions = 0 + + if not isinstance(num_row_partitions, int): + raise ValueError("num_row_partitions should be an int or None") + + if not lengths: + if num_row_partitions > 0: + raise ValueError("num_row_partitions==0 for a scalar shape") + return DynamicRaggedShape([], [], dtype=dtype) + + if not num_row_partitions < len(lengths): + raise ValueError("num_row_partitions should be less than `len(lengths)` " + "if shape is not scalar.") + + if num_row_partitions > 0: + (row_partitions, nvals) = _to_row_partitions_and_nvals_from_lengths( + lengths[:num_row_partitions + 1]) + inner_shape = [nvals] + lengths[num_row_partitions + 1:] + return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype) + else: + return DynamicRaggedShape([], lengths, dtype=dtype) + + @classmethod + def from_row_partitions(cls, row_partitions, dtype=None): + """Create a shape from row_partitions. + + Args: + row_partitions: a nonempty list of RowPartition objects. + dtype: the dtype to use, or None to use the row_partitions dtype. + + Returns: + a DynamicRaggedShape with inner_rank==1. 
+ """ + if not row_partitions: + raise ValueError("row_partitions cannot be empty") + inner_shape = [row_partitions[-1].nvals()] + return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype) + + @classmethod + def _from_inner_shape(cls, inner_shape, dtype=None): + """Create a shape from inner_shape, where num_row_partitions == 0.""" + return DynamicRaggedShape([], inner_shape, dtype=dtype) + + # pylint: disable=protected-access + @classmethod + def from_tensor(cls, t, dtype=None): + """Constructs a ragged shape for a potentially ragged tensor.""" + if ragged_tensor.is_ragged(t): + return DynamicRaggedShape( + t._nested_row_partitions, _flat_values_shape(t), dtype=dtype) + else: + return DynamicRaggedShape._from_inner_shape( + array_ops.shape(t), dtype=dtype) + + @property + def row_partitions(self): + """The row_partitions of the shape.""" + return self._row_partitions + + @property + def num_row_partitions(self): + """The number of row_partitions of the shape.""" + return len(self._row_partitions) + + @property + def dtype(self): + """The dtype of the shape -- one of tf.int32 or tf.int64.""" + return self._inner_shape.dtype + + def _static_inner_shape_as_list(self, truncate_first): + """Returns the lengths of the inner shape (if rank known), or [...].""" + if self._static_inner_shape.rank is None: + return [...] + result = self._static_inner_shape.as_list() + if truncate_first: + return result[1:] + return result + + def static_lengths(self, ragged_lengths=True): + """Returns a list of statically known axis lengths. + + This represents what values are known. For each row partition, it presents + either the uniform row length (if statically known), + the list of row lengths, or none if it is not statically known. + For the inner shape, if the rank is known, then each dimension is reported + if known, and None otherwise. If the rank of the inner shape is not known, + then the returned list ends with an ellipsis. + + Args: + ragged_lengths: If false, returns None for all ragged dimensions. + + Returns: + A Sequence[Union[Sequence[int],int, None]] of lengths, with a possible + Ellipsis at the end. + """ + if self.num_row_partitions == 0: + return self._static_inner_shape_as_list(False) + first_dim = self.row_partitions[0].static_nrows + if isinstance(first_dim, tensor_shape.Dimension): + first_dim = first_dim.value + rp_dims = [first_dim] + for rp in self.row_partitions: + if rp.is_uniform(): + rp_dims.append(rp.static_uniform_row_length) + elif ragged_lengths: + const_vals = tensor_util.constant_value(rp.row_lengths()) + if const_vals is None: + rp_dims.append(None) + else: + rp_dims.append(tuple(const_vals.tolist())) + else: + rp_dims.append(None) + + return rp_dims + self._static_inner_shape_as_list(True) + + def __repr__(self): + lengths = _list_with_ellipsis_to_str(self.static_lengths()) + return ("" % + (lengths, self.num_row_partitions)) + + def _to_tensor_shape(self) -> tensor_shape.TensorShape: + """Returns a TensorShape representation of the shape.""" + lengths = self.static_lengths(ragged_lengths=False) + if not lengths: + return tensor_shape.TensorShape(()) + if lengths[-1] == Ellipsis: + return tensor_shape.TensorShape(None) + return tensor_shape.TensorShape(lengths) + + def _slice_shape(self, start, stop): + """Returns a shape self[start:stop]. + + If start == 0, then this truncates dimensions after stop. + If start != 0, then this will return a shape with num_row_partitions == 0. + + See __getitem__. + + Args: + start: the first dimension. 
0 <= start <= rank + stop: the last dimension (exclusive). 0 <= stop <= rank + """ + if stop <= start: + return DynamicRaggedShape._from_inner_shape([]) + elif start == 0: + if stop <= self.num_row_partitions: + if stop == 1: + return DynamicRaggedShape._from_inner_shape( + [self.row_partitions[0].nrows()]) + new_row_partitions = self.row_partitions[:stop - 1] + new_inner_shape = [new_row_partitions[-1].nvals()] + return DynamicRaggedShape(new_row_partitions, new_inner_shape) + else: + if self.rank is None: + new_inner_rank = stop - self.num_row_partitions + new_inner_shape = self.inner_shape[:new_inner_rank] + return DynamicRaggedShape( + row_partitions=self.row_partitions, + inner_shape=new_inner_shape, + static_inner_shape=None, + validate=False) + + elif self.rank <= stop: + return self + new_inner_rank = stop - self.num_row_partitions + new_inner_shape = self.inner_shape[:new_inner_rank] + return DynamicRaggedShape( + row_partitions=self.row_partitions, + inner_shape=new_inner_shape, + static_inner_shape=tensor_shape.TensorShape([None] * + new_inner_rank), + validate=False) + else: + if self.rank is None or stop < self.rank: + partial = self._slice_shape(0, stop) + else: + partial = self + + for x in partial.row_partitions: + if not x.is_uniform(): + raise ValueError("All relevant dimensions must be uniform") + if partial.rank is None: + # TODO(martinz): Implement _with_num_row_partitions(0) if rank is + # unknown, and remove. + raise NotImplementedError( + "__getitem__[start:stop] where start > 0 not implemented") + + return DynamicRaggedShape._from_inner_shape( + partial._with_num_row_partitions(0).inner_shape[start:]) + + def _dimension(self, index): + """Return a dimension, if the dimension is not ragged (see __getitem__).""" + rank = self.rank + if not isinstance(index, int): + raise TypeError("index should be an int") + if (self.num_row_partitions == 0 or index > self.num_row_partitions + 1): + # If num_row_partitions > 0 and index <= num_row_partitions + 1, then + # we are safe. + if rank is None: + raise ValueError( + "Rank must be known to use __getitem__ on a large index.") + if index >= rank: + raise IndexError("Index is too big: " + str(index) + ">=" + str(rank)) + if index < 0: + raise IndexError("Index must be non-negative: " + str(index)) + elif not self.is_uniform(index): + raise ValueError("Index " + str(index) + " is not uniform") + elif index == 0 and self.num_row_partitions > 0: + static_nrows = self.row_partitions[0].static_nrows + if static_nrows is not None: + return constant_op.constant(static_nrows, dtype=self.dtype) + return self.row_partitions[0].nrows() + elif self.num_row_partitions == 0: + static_result = tensor_shape.dimension_value( + self._static_inner_shape[index]) + if static_result is not None: + return constant_op.constant(static_result, dtype=self.dtype) + return self.inner_shape[index] + elif index > self.num_row_partitions: + static_result = tensor_shape.dimension_value( + self._static_inner_shape[index - self.num_row_partitions]) + if static_result is not None: + return constant_op.constant(static_result, dtype=self.dtype) + + return self.inner_shape[index - self.num_row_partitions] + else: + return self.row_partitions[index - 1].uniform_row_length() + + def __getitem__(self, index): + """Returns a dimension or a slice of the shape. + + Ragged shapes can have ragged dimensions that depend upon other dimensions. + Therefore, if you ask for a dimension that is ragged, this function raises + a ValueError.
For similar reasons, if a slice is selected that includes + a ragged dimension without including the zero dimension, then this fails. + + Any slice that does not start at zero will return a shape + with num_row_partitions == 0. + + Args: + index: the index; can be an int or a slice. + + Raises: + IndexError: if the index is not in range. + ValueError: if the rank is unknown, or a ragged rank is requested + incorrectly. + """ + rank = self.rank + if isinstance(index, slice): + + if (index.step is not None) and (index.step != 1): + raise IndexError("Cannot stride through a shape") + start = index.start + stop = index.stop + if start is None: + start = 0 + start = _fix_start_index(start, rank, self.num_row_partitions) + stop = _fix_stop_index(stop, rank) + return self._slice_shape(start, stop) + elif isinstance(index, int): + if index < 0: + if rank is None: + raise ValueError( + "Rank must be known to use __getitem__ with a negative index.") + return self._dimension(rank + index) + return self._dimension(index) + else: + raise TypeError("Argument is not an int or a slice") + + def _num_elements(self): + """Number of elements in a shape. + + Returns: + The number of elements in the shape. + + """ + return math_ops.reduce_prod(self.inner_shape) + + def _num_slices_in_dimension(self, axis): + """The total size of a dimension (like nvals). + + Effectively, this is self[:axis+1]._num_elements() + + Example: + shape = DynamicRaggedShape._from_inner_shape([2, 3, 4]) + shape._num_slices_in_dimension(0) = 2 + shape._num_slices_in_dimension(1) = 6 + shape._num_slices_in_dimension(2) = 24 + shape._num_slices_in_dimension(-1) = 24 + shape._num_slices_in_dimension(-2) = 6 + shape._num_slices_in_dimension(-3) = 2 + + Args: + axis: the last axis to include in the number of elements. If negative, + then axis = axis + rank. + + Returns: + The number of slices in the given dimension.
+ """ + if not isinstance(axis, int): + raise TypeError("axis must be an integer") + if axis < 0: + rank = self.rank + if rank is None: + raise ValueError( + "You can't use negative values if the rank is undefined") + axis = axis + rank + if axis == 0: + return self._dimension(0) + if axis <= self.num_row_partitions: + return self.row_partitions[axis - 1].nvals() + # If self.num_row_partitions = 1, and + # self.inner_shape=[3,5,6], and axis=2, then you want: + # 15 = 3 * 5 = math_ops.reduce_prod(self.inner_shape[:2]) + # 2 = axis - (self.num_row_partitions - 1) + # If num_row_partitions=0, and + # self.inner_shape=[3,5,6] and axis=2, then you want: + # 90 = 3 * 5 * 6 = math_ops.reduce_prod(self.inner_shape[:3]) + # 3 = axis - (self.num_row_partitions - 1) + remainder = axis - (self.num_row_partitions - 1) + return _reduce_prod_patch(self.inner_shape[:remainder]) + + def is_uniform(self, axis): + """Returns true if the indicated dimension is uniform.""" + if not isinstance(axis, int): + raise TypeError("axis must be an integer") + rank = self.rank + if axis < 0: + raise IndexError("Negative axis values are not supported") + elif rank is not None and axis >= rank: + raise IndexError("Expected axis=%s < rank=%s" % (axis, rank)) + else: + return ((axis == 0 or axis > len(self._row_partitions)) # pylint:disable=superfluous-parens + or self._row_partitions[axis - 1].is_uniform()) + + @property + def rank(self): + """The number of dimensions in this shape, or None if unknown.""" + inner_rank = self.inner_rank + if inner_rank is None: + return None + else: + return self.num_row_partitions + inner_rank + + @property + def inner_shape(self): + """The inner dimension sizes for this shape. + + Returns: + A 1-D integer `Tensor`. + """ + return self._inner_shape + + @property + def inner_rank(self): + """The rank of inner_shape.""" + return tensor_shape.dimension_value(self._static_inner_shape.rank) + + def _alt_inner_shape(self, new_inner_rank): + """Get an alternative inner shape with higher or lower rank. + + For the rank of the inner shape to be be higher, the last few ragged + dimensions must have uniform_row_length. + + Args: + new_inner_rank: the new rank of the inner_shape + + Returns: + A new inner_shape of rank new_inner_rank. 
+ """ + if new_inner_rank == 0: + raise ValueError("new_inner_rank cannot be zero") + elif self.inner_rank == 0: + raise ValueError("old inner_rank cannot be zero") + elif new_inner_rank == self.inner_rank: + return self.inner_shape + elif new_inner_rank < self.inner_rank: + if self._static_inner_shape.is_fully_defined(): + return _alt_inner_shape_from_tensor_shape(self._static_inner_shape, + self.dtype, new_inner_rank) + first_dimension = self._num_slices_in_dimension(-new_inner_rank) + if new_inner_rank == 1: + return array_ops.expand_dims(first_dimension, 0) + remaining_dimensions = self.inner_shape[1 - new_inner_rank:] + return array_ops.concat( + [array_ops.expand_dims(first_dimension, 0), remaining_dimensions], + axis=0) + else: + assert new_inner_rank > self.inner_rank + new_dimensions = new_inner_rank - self.inner_rank + if any( + [not x.is_uniform() for x in self.row_partitions[-new_dimensions:]]): + raise ValueError("Cannot get an inner shape over a ragged dimension") + first_dimension = self._num_slices_in_dimension(-new_inner_rank) + new_dimensions = new_inner_rank - self.inner_rank + new_dims = [first_dimension] + [ + x.uniform_row_length() for x in self.row_partitions[-new_dimensions:] + ] + return array_ops.concat( + [array_ops_stack.stack(new_dims), self.inner_shape[1:]], axis=0) + + def _inner_shape_dim(self, dimension): + """Returns an int or a tensor representing _inner_shape[dimension].""" + result = tensor_shape.dimension_value(self._static_inner_shape[dimension]) + return self._inner_shape[dimension] if result is None else result + + def _with_inner_rank(self, inner_rank): + """Returns the same shape but a different inner_rank. + + All dimensions that are to be represented in the inner_shape must be dense. + See inner_rank. + + Args: + inner_rank: the new inner_rank of the shape. + + Returns: + the same shape but a different inner_rank + + Raises: + ValueError if the new dense rank is invalid, or the old rank is unknown. + """ + rank = self.rank + if rank is None: + raise ValueError("Rank must be known to adjust inner_rank") + elif rank < 2: + if inner_rank == rank: + return self + raise ValueError("Cannot change inner_rank if rank < 2") + else: + # When self.rank is not None: + # self.rank = self.inner_rank + self.num_row_partitions + new_num_row_partitions = rank - inner_rank + return self._with_num_row_partitions(new_num_row_partitions) + + def _with_num_row_partitions(self, num_row_partitions): + """Creates an identical shape with the given num_row_partitions. + + Note that the shape must be statically refactorable to this rank. + In particular: + * rank must be known. + * num_row_partitions must be a nonnegative int. + * num_row_partitions must be less than the rank of the shape + * num_row_partitions must be greater or equal to the index of any ragged + dimension. + + Note that if the num_row_partitions is the same, self is returned. + + Args: + num_row_partitions: the target num_row_partitions (must be a nonnegative + int). + + Returns: + a shape with a (possibly) different num_row_partitions. + + Raises: + ValueError: if the rank is unknown, the argument is not a nonnegative int, + or there is a dimension that is nonuniform. 
+ """ + rank = self.rank + if rank is None: + raise ValueError("Rank must be known to adjust num_row_partitions") + if not isinstance(num_row_partitions, int): + raise ValueError("num_row_partitions must be an int") + if num_row_partitions < 0: + raise ValueError("num_row_partitions must be nonnegative") + if num_row_partitions == self.num_row_partitions: + return self + if num_row_partitions >= rank: + raise ValueError("num_row_partitions must be less than rank") + if num_row_partitions > self.num_row_partitions: + num_row_partitions_diff = num_row_partitions - self.num_row_partitions + new_inner_rank = self.rank - num_row_partitions + nvals = self._inner_shape_dim(0) + more_rp = [] + for i in range(num_row_partitions_diff): + nrows = nvals + row_length = self._inner_shape_dim(i + 1) + nvals = nrows * row_length + rp = RowPartition.from_uniform_row_length( + row_length, nrows=nrows, dtype=self.dtype) + more_rp.append(rp) + alt_inner = self._alt_inner_shape(new_inner_rank) + return DynamicRaggedShape(list(self.row_partitions) + more_rp, alt_inner) + else: + assert num_row_partitions < self.num_row_partitions + return DynamicRaggedShape( + self.row_partitions[:num_row_partitions], + self._alt_inner_shape(self.rank - num_row_partitions)) + + def _merge_dims(self, outer_axis: int, + inner_axis: int) -> "DynamicRaggedShape": + """Merges outer_axis...inner_axis into a single dimension. + + Returns a copy of this shape with the specified range of dimensions + flattened into a single dimension, with elements in row-major order. + + #### Examples: + + >>> tf.experimental.DynamicRaggedShape.from_lengths([2, (2,1), + ... (1,2,3)])._merge_dims(0, 1) + + >>> tf.experimental.DynamicRaggedShape.from_lengths([2, (2,1), + ... (1,2,3)])._merge_dims(1, 2) + + >>> tf.experimental.DynamicRaggedShape.from_lengths([2, (2,1), + ... (1,2,3)])._merge_dims(0, 2) + + + To mimic the behavior of `np.flatten` (which flattens all dimensions), use + `rt.merge_dims(0, -1). To mimic the behavior of `tf.layers.Flatten` (which + flattens all dimensions except the outermost batch dimension), use + `rt.merge_dims(1, -1)`. + + Args: + outer_axis: `int`: The first dimension in the range of dimensions to + merge. May be negative if `self.shape.rank` is statically known. + inner_axis: `int`: The last dimension in the range of dimensions to merge. + May be negative if `self.shape.rank` is statically known. + + Returns: + A copy of this shape, with the specified dimensions merged into a + single dimension. The returned shape will be + `self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N` + is the total number of slices in the merged dimensions. + """ + outer_axis = array_ops.get_positive_axis( + outer_axis, self.rank, axis_name="outer_axis", ndims_name="rank(self)") + inner_axis = array_ops.get_positive_axis( + inner_axis, self.rank, axis_name="inner_axis", ndims_name="rank(self)") + if not outer_axis <= inner_axis: + raise ValueError(f"Expected outer_axis ({outer_axis}) to be less than or " + f"equal to inner_axis ({inner_axis}).") + if outer_axis == inner_axis: + return self + if self.num_row_partitions == 0: + # A dense tensor. + (new_inner_shape, + new_static_inner_shape) = _merge_inner_shape(self._inner_shape, + self._static_inner_shape, + outer_axis, inner_axis) + return DynamicRaggedShape([], + new_inner_shape, + dtype=self.dtype, + static_inner_shape=new_static_inner_shape) + if inner_axis <= self.num_row_partitions: + # Here, we are merging the row_partitions, + # but the inner_shape is unchanged. 
+      if outer_axis == 0:
+        # There is no need to merge axes before the first, just truncate them.
+        return DynamicRaggedShape(
+            self._row_partitions[inner_axis:],
+            self.inner_shape,
+            dtype=self.dtype,
+            static_inner_shape=self._static_inner_shape)
+      prefix_rp = self._row_partitions[:outer_axis - 1]
+      suffix_rp = self._row_partitions[inner_axis:]
+      internal_rp = self._row_partitions[outer_axis - 1:inner_axis]
+      new_rp = prefix_rp + (_merge_row_partitions(internal_rp),) + suffix_rp
+
+      return DynamicRaggedShape(
+          new_rp,
+          self.inner_shape,
+          dtype=self.dtype,
+          static_inner_shape=self._static_inner_shape)
+    elif outer_axis > self.num_row_partitions:
+      # In this scenario, only the inner_shape is changed.
+      # Example:
+      # for [2, (1, 2), 5, 3] with num_row_partitions=1, outer_axis=2 and
+      # inner_axis=3, the result is [2, (1, 2), 15], num_row_partitions=1.
+      (new_inner_shape, new_static_inner_shape) = _merge_inner_shape(
+          self._inner_shape, self._static_inner_shape,
+          outer_axis - self.num_row_partitions,
+          inner_axis - self.num_row_partitions)
+      return DynamicRaggedShape(
+          self._row_partitions,
+          new_inner_shape,
+          dtype=self.dtype,
+          static_inner_shape=new_static_inner_shape)
+    else:
+      # Here, both inner_shape and row_partitions are changed.
+      rank = self.rank
+      if rank is None:
+        raise ValueError("Cannot merge_dims of the inner shape if the " +
+                         "dimension of inner_shape is unknown")
+      if outer_axis == 0:
+        new_inner_shape = self._alt_inner_shape(rank - inner_axis)
+        return DynamicRaggedShape._from_inner_shape(new_inner_shape)
+      else:
+        prefix = self._row_partitions[:outer_axis - 1]
+        suffix = _merge_row_partitions(self._row_partitions[outer_axis - 1:])
+        new_inner_shape = self._alt_inner_shape(rank - inner_axis)
+        num_merged_inner = inner_axis - self.num_row_partitions
+        prod = _reduce_prod_patch(self._inner_shape[1:num_merged_inner + 1])
+        tail_suffix = RowPartition.from_row_splits(suffix.row_splits() * prod)
+        return DynamicRaggedShape(prefix + (tail_suffix,), new_inner_shape)
+
+  def with_dtype(self, dtype):
+    """Change the dtype of the shape."""
+    if dtype == self.dtype:
+      return self
+    else:
+      return DynamicRaggedShape(
+          self.row_partitions, self.inner_shape, dtype=dtype)
+
+  def _merge_with(self, other: "DynamicRaggedShape") -> "DynamicRaggedShape":
+    """Merge two shapes that are equal modulo num_row_partitions.
+
+    The resulting num_row_partitions is the maximum of the two
+    num_row_partitions.
+
+    Args:
+      other: a DynamicRaggedShape representing the same shape with a possibly
+        different number of row partitions.
+
+    Returns:
+      A DynamicRaggedShape with the same shape and the maximum of the
+      num_row_partitions of the two shapes.
+ """ + max_num_row_partitions = max(self.num_row_partitions, + other.num_row_partitions) + a = self._with_num_row_partitions(max_num_row_partitions) + b = other._with_num_row_partitions(max_num_row_partitions) + new_row_partitions = [ + rp_a._merge_precomputed_encodings(rp_b) + for (rp_a, rp_b) in zip(a._row_partitions, b._row_partitions) + ] + new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64 + + new_static_inner_shape = a._static_inner_shape.merge_with( + b._static_inner_shape) + new_inner_shape = a._inner_shape + return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype, + True, new_static_inner_shape) + + def _merge_with_spec( + self, other: "DynamicRaggedShape.Spec") -> "DynamicRaggedShape": + """Merge a spec with a DynamicRaggedShape.""" + # TODO(martinz): add tests for dynamic inconsistencies. + max_num_row_partitions = max(self.num_row_partitions, + other.num_row_partitions) + a = self._with_num_row_partitions(max_num_row_partitions) + b = other._with_num_row_partitions(max_num_row_partitions) + new_row_partitions = [ + rp_a._merge_with_spec(rp_b) + for (rp_a, rp_b) in zip(a._row_partitions, b._row_partitions) + ] + new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64 + + new_static_inner_shape = a._static_inner_shape.merge_with( + b._static_inner_shape) + new_inner_shape = a._inner_shape + return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype, + True, new_static_inner_shape) + + def _as_row_partitions(self): + """Returns row partitions representing this shape. + + In order to represent a shape as row partitions, the rank of the shape + must be known, and the shape must have rank at least one. + + Returns: + A list of RowPartition objects. + Raises: + ValueError, if the shape cannot be represented by RowPartitions. 
+ """ + rank = self.rank + if rank is None: + raise ValueError("rank must be known for _as_row_partitions") + elif rank < 1: + raise ValueError("rank must be >= 1 for _as_row_partitions") + fully_ragged = self._with_num_row_partitions(rank - 1) + return fully_ragged.row_partitions + + def _validate_flat_values_dynamically(self, flat_values): + """Test if flat_values have the right nvals dynamically.""" + if self.row_partitions: + assert_op = check_ops.assert_equal( + self.row_partitions[-1].nvals(), + array_ops.shape(flat_values, out_type=self.dtype)[0], + message="Last row partition does not match flat_values.") + return control_flow_ops.with_dependencies([assert_op], flat_values) + return flat_values + + def _validate_flat_values(self, flat_values): + """Test if flat_values have the right nvals.""" + if not isinstance(flat_values, tensor_lib.Tensor): + return flat_values + if self.row_partitions: + last_row_partition = self.row_partitions[-1] + flat_values_shape = flat_values.shape + if flat_values_shape is None: + return self._validate_flat_values_dynamically(flat_values) + first_dim_flat_values = flat_values_shape[0] + if isinstance(first_dim_flat_values, tensor_shape.Dimension): + first_dim_flat_values = first_dim_flat_values.value + if first_dim_flat_values is None: + return self._validate_flat_values_dynamically(flat_values) + static_nvals = last_row_partition.static_nvals + if static_nvals is None: + return self._validate_flat_values_dynamically(flat_values) + if first_dim_flat_values != static_nvals: + raise ValueError("Last row partition does not match flat_values.") + return flat_values + + def _add_row_partitions(self, flat_values, validate=False): + """Add row partitions to flat_values, if necessary. + + If the shape is truly ragged, then this adds the row_partitions. + + The shape is dense, then this just returns flat_values. + + Args: + flat_values: the flat_values of a ragged tensor with this shape, or a + dense tensor with this shape. + validate: validate the flat_values have the right first dimension. + + Returns: + flat_values reshaped to have row_partitions. + """ + if self.row_partitions: + if validate: + flat_values = self._validate_flat_values(flat_values) + return ragged_tensor.RaggedTensor._from_nested_row_partitions( + flat_values, self.row_partitions, validate=False) + else: + return flat_values + + class Spec: + """A Spec for DynamicRaggedShape: similar to a static shape.""" + + def __init__(self, row_partitions: Tuple[RowPartitionSpec, ...], + static_inner_shape: tensor_shape.TensorShape, + dtype: dtypes.DType): + """Create a Spec given row partitions, a static inner shape, and a dtype. + + Args: + row_partitions: A sequence of `RowPartitionSpec`s describing how the + ragged shape is partitioned. + static_inner_shape: The static shape of the flat_values. + dtype: The DType used to encode the shape (tf.int64 or tf.int32). + """ + # Independent validation and coercion of each argument. + if not isinstance(row_partitions, Iterable): + raise TypeError("row_partitions should be an Iterable") + + row_partitions = tuple(row_partitions) + + static_inner_shape = tensor_shape.as_shape(static_inner_shape) + + dtype = dtypes.as_dtype(dtype) + + if not all(isinstance(rp, RowPartitionSpec) for rp in row_partitions): + raise TypeError( + "row_partitions should be an Iterable of RowPartitionSpecs") + + if dtype != dtypes.int32 and dtype != dtypes.int64: + raise ValueError("dtype must be tf.int32 or tf.int64") + + # All fields are now typechecked and internally consistent. 
+ for spec in row_partitions: + if spec.dtype != dtype: + raise ValueError( + f"dtype of {spec!r} is {spec.dtype!r}: expected {dtype!r}") + + row_partitions = tuple(row_partitions) + + inner_rank = static_inner_shape.rank + + if inner_rank == 0: + if row_partitions: + raise ValueError( + "If row_partitions are provided, must have inner_rank > 0") + else: + num_slices_in_dimension = [] # type: Sequence[tensor_shape.Dimension] + + # We first attempt to calculate num_slices_in_dimension through a + # forward pass, using nrows[k] = nrows[k-1] * uniform_row_length + # and other tricks. + for i in range(len(row_partitions)): + rp = row_partitions[i] + result = tensor_shape.Dimension(rp.nrows) + if i > 0: + previous_rp = row_partitions[i - 1] + result = result.merge_with(previous_rp.nvals) + result = result.merge_with(num_slices_in_dimension[-1] * + previous_rp.uniform_row_length) + num_slices_in_dimension.append(result) + # In the last step of the forward pass, + # we combine nvals and the first dimension in static_inner_shape. + if row_partitions: + last_rp = row_partitions[-1] + result = (num_slices_in_dimension[-1] * + last_rp.uniform_row_length).merge_with(last_rp.nvals) + if inner_rank is not None: + result = result.merge_with( + tensor_shape.dimension_at_index(static_inner_shape, 0)) + static_inner_shape = result + static_inner_shape[1:] + num_slices_in_dimension.append(result) + + # Now, we start a backward pass. + for i in range(len(num_slices_in_dimension) - 1, 0, -1): + num_slices_in_dimension[i - 1] = num_slices_in_dimension[ + i - 1].merge_with( + _safe_floor_div(num_slices_in_dimension[i], + row_partitions[i - 1].uniform_row_length)) + + # Finally, we construct the partitions. + row_partitions = [ + RowPartitionSpec( # pylint: disable=g-complex-comprehension + nrows=num_slices_in_dimension[i].value, + uniform_row_length=rp.uniform_row_length, + nvals=num_slices_in_dimension[i + 1].value, + dtype=rp.dtype) for i, rp in enumerate(row_partitions) + ] + + self._static_inner_shape = static_inner_shape + self._inner_shape = tensor_lib.TensorSpec([inner_rank], dtype=dtype) + self._row_partitions = row_partitions + + def __repr__(self): + return ( + f"DynamicRaggedShape.Spec(row_partitions={self._row_partitions!r}, " + + f"static_inner_shape={self._static_inner_shape!r}, " + + f"dtype={self.dtype!r})") + + @classmethod + def from_value(cls, value: Any) -> "DynamicRaggedShape.Spec": + """Create a Spec from a DynamicRaggedShape.""" + # super().from_value(...) creates an object, but there is no validation. + # No methods can be trusted on the object, just the properties. + initial = super(DynamicRaggedShape.Spec, cls).from_value(value) + + # However, since value is a DynamicRaggedShape, we + # can guarantee that initial._inner_shape.shape.rank == 1 + + # Moreover, if inner_shape.shape[0] is not None, then + # static_inner_shape.rank is not None. + + return DynamicRaggedShape.Spec( + row_partitions=initial._row_partitions, + static_inner_shape=initial._static_inner_shape, + dtype=initial._inner_shape.dtype) + + # TODO(martinz): it is unclear what the default uniformity of RowPartitions + # should be, so I am moving this to experimental until we figure it out. + # Also, while I have specified this is meant to represent a shape of a + # proper Tensor instead of a RaggedTensor, this is also subject to + # interpretation. 
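+    # A sketch of the conversion performed below (values illustrative):
+    # tf.TensorShape([4, 3, 5]) with num_row_partitions=1 maps to a spec
+    # holding RowPartitionSpec(nrows=4, nvals=12, uniform_row_length=3)
+    # and static_inner_shape [12, 5].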
+    @classmethod
+    def _from_tensor_shape(cls, shape: Any, num_row_partitions: int,
+                           dtype: dtypes.DType) -> "DynamicRaggedShape.Spec":
+      """Creates a `DynamicRaggedShape.Spec` corresponding to a `tf.TensorShape`.
+
+      It is assumed that this is a `tf.TensorShape` coming from a
+      `tf.TensorSpec`, not from `RaggedTensor.shape`.
+
+      In addition to the shape, we need to know the number of row partitions,
+      and the dtype used in the shape (tf.int32 or tf.int64).
+
+      Within the dimensions that are partitioned, all dimensions are assumed
+      to be uniform.
+
+      Args:
+        shape: a TensorShape.
+        num_row_partitions: the ragged rank of the RaggedShape.
+        dtype: the dtype of the shape (not the tensor); tf.int64 or tf.int32.
+
+      Returns:
+        a DynamicRaggedShape.Spec representing a TensorShape.
+      """
+      if dtype != dtypes.int32 and dtype != dtypes.int64:
+        raise ValueError("dtype must be tf.int32 or tf.int64")
+
+      shape = tensor_shape.as_shape(shape)
+      if shape.rank is None:
+        row_partitions = [
+            RowPartitionSpec(dtype=dtype) for _ in range(num_row_partitions)
+        ]
+        return DynamicRaggedShape.Spec(
+            row_partitions=row_partitions,
+            static_inner_shape=tensor_shape.TensorShape(None),
+            dtype=dtype)
+
+      if shape.rank <= 1:
+        # Create a scalar or vector shape.
+        if num_row_partitions:
+          raise ValueError("num_row_partitions should be zero " +
+                           "if shape is a scalar or vector.")
+        return DynamicRaggedShape.Spec(
+            row_partitions=[], static_inner_shape=shape, dtype=dtype)
+
+      if shape.rank <= num_row_partitions:
+        raise ValueError("num_row_partitions must be less than rank")
+
+      num_elements_so_far = tensor_shape.dimension_value(shape[0])
+      rp_specs = []
+      for i in range(num_row_partitions):
+        current_dim = tensor_shape.dimension_value(shape[i + 1])
+        if current_dim is None or num_elements_so_far is None:
+          nvals = None
+        else:
+          nvals = num_elements_so_far * current_dim
+        rp_specs.append(
+            RowPartitionSpec(
+                nrows=num_elements_so_far,
+                nvals=nvals,
+                uniform_row_length=current_dim,
+                dtype=dtype))
+        num_elements_so_far = nvals
+
+      static_inner_shape = tensor_shape.TensorShape(
+          [num_elements_so_far]) + shape[num_row_partitions + 1:]
+      return DynamicRaggedShape.Spec(
+          row_partitions=rp_specs,
+          static_inner_shape=static_inner_shape,
+          dtype=dtype)
+
+    @classmethod
+    def _from_spec(
+        cls,
+        spec: Union["DynamicRaggedShape.Spec", ragged_tensor.RaggedTensorSpec,
+                    tensor_lib.TensorSpec],
+        dtype: dtypes.DType = dtypes.int64) -> "DynamicRaggedShape.Spec":
+      """Create a TypeSpec for the shape of an object with a given TypeSpec.
+
+      I.e., if `x_spec = tf.type_spec_from_value(x)`, then
+      `DynamicRaggedShape.from_spec(x_spec)` returns a TypeSpec compatible with
+      `tf.type_spec_from_value(tf.shape(x))`.
+
+      >>> rt = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
+      >>> rt_spec = tf.type_spec_from_value(rt)
+      >>> rt_shape = DynamicRaggedShape.from_tensor(rt)
+
+      >>> shape_spec_1 = tf.type_spec_from_value(rt_shape)
+      >>> shape_spec_2 = DynamicRaggedShape.Spec._from_spec(rt_spec)
+      >>> assert shape_spec_1.is_compatible_with(shape_spec_2)
+
+      Args:
+        spec: a Spec of a Tensor or RaggedTensor.
+        dtype: the default dtype (if necessary).
+
+      Returns:
+        A Spec of the shape of a Tensor or RaggedTensor.
+      """
+      # TODO(martinz): Add StructuredTensor.Spec when it's easy.
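+      # Usage sketch (names illustrative, assuming TF public APIs):
+      #   rt_spec = tf.RaggedTensorSpec([None, None], tf.int32, ragged_rank=1)
+      #   shape_spec = DynamicRaggedShape.Spec._from_spec(rt_spec)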
+      if isinstance(spec, DynamicRaggedShape.Spec):
+        return spec
+      elif isinstance(spec, ragged_tensor.RaggedTensorSpec):
+        return cls._from_tensor_shape(spec.shape, spec.ragged_rank,
+                                      spec.row_splits_dtype)
+      elif isinstance(spec, tensor_lib.TensorSpec):
+        return cls._from_tensor_shape(
+            shape=spec.shape, num_row_partitions=0, dtype=dtype)
+      else:
+        raise TypeError(f"Unsupported spec type: {type(spec)}")
+
+    @property
+    def dtype(self) -> dtypes.DType:
+      return self._inner_shape.dtype
+
+    @property
+    def inner_rank(self) -> Optional[int]:
+      if self._static_inner_shape.rank is not None:
+        return self._static_inner_shape.rank
+      if self._inner_shape.shape.rank is None:
+        return None
+      return tensor_shape.dimension_value(self._inner_shape.shape[0])
+
+    @property
+    def num_row_partitions(self) -> int:
+      return len(self._row_partitions)
+
+    @property
+    def rank(self) -> Optional[int]:
+      inner_rank = self.inner_rank
+      return None if inner_rank is None else inner_rank + self.num_row_partitions
+
+    def _dimension(self, index: int) -> Optional[int]:
+      """Get the size of dimension index, if known statically."""
+      if index == 0:
+        if self._row_partitions:
+          return self._row_partitions[0].nrows
+        elif self.inner_rank is None:
+          return None
+        elif self.inner_rank == 0:
+          raise ValueError("Index out of range: 0.")
+        else:
+          return tensor_shape.dimension_value(self._static_inner_shape[0])
+      if index <= len(self._row_partitions):
+        return self._row_partitions[index - 1].uniform_row_length
+
+      relative_index = index - self.num_row_partitions
+
+      if self.inner_rank is None:
+        return None
+      elif self.inner_rank <= relative_index:
+        raise ValueError(f"Index out of range: {index}.")
+      else:
+        return tensor_shape.dimension_value(
+            self._static_inner_shape[relative_index])
+
+    def _num_slices_in_dimension(self, axis: int) -> Optional[int]:
+      """The total size of a dimension (like nvals).
+
+      This is a static version of
+      DynamicRaggedShape._num_slices_in_dimension().
+
+      Example:
+
+      ```
+      shape = DynamicRaggedShape.Spec(
+          row_partitions=[
+              RowPartitionSpec(nrows=3, nvals=14, dtype=tf.int32),
+              RowPartitionSpec(nrows=14, nvals=25, dtype=tf.int32),
+          ],
+          static_inner_shape=tf.TensorShape([25, 3, 4]),
+          dtype=tf.int32)
+      shape._num_slices_in_dimension(0) = 3
+      shape._num_slices_in_dimension(1) = 14
+      shape._num_slices_in_dimension(2) = 25
+      shape._num_slices_in_dimension(3) = 75
+      shape._num_slices_in_dimension(4) = 300
+      shape._num_slices_in_dimension(-2) = 75
+      ```
+
+      Args:
+        axis: the last dimension to include.
+
+      Returns:
+        the number of values in a dimension.
+      """
+      if not isinstance(axis, int):
+        raise TypeError("axis must be an integer")
+      axis = array_ops.get_positive_axis(axis, self.rank, ndims_name="rank")
+
+      if axis == 0:
+        return self._dimension(0)
+      if axis <= self.num_row_partitions:
+        # TODO(martinz): use nvals OR nrows, whichever is defined.
+        return self._row_partitions[axis - 1].nvals
+      remainder = axis - (self.num_row_partitions - 1)
+      head_inner_shape = self._static_inner_shape[:remainder]
+      return head_inner_shape.num_elements()
+
+    def with_dtype(self, dtype: dtypes.DType) -> "DynamicRaggedShape.Spec":
+      """Return the same spec, but with a different DType."""
+      new_rp_specs = [rp.with_dtype(dtype) for rp in self._row_partitions]
+      return DynamicRaggedShape.Spec(
+          row_partitions=new_rp_specs,
+          static_inner_shape=self._static_inner_shape,
+          dtype=dtype)
+
+    def _merge_with(
+        self, other: "DynamicRaggedShape.Spec") -> "DynamicRaggedShape.Spec":
+      """Merges all information between two specs.
+
+      Specs are expected to represent the same information modulo
+      num_row_partitions.
+
+      If the specs have different ranks, this fails.
+
+      Args:
+        other: another Spec of the same rank.
+
+      Returns:
+        a Spec with the union of information.
+      """
+      max_num_row_partitions = max(self.num_row_partitions,
+                                   other.num_row_partitions)
+      a = self._with_num_row_partitions(max_num_row_partitions)
+      b = other._with_num_row_partitions(max_num_row_partitions)
+
+      new_rp = [
+          rp_a._merge_with(rp_b)
+          for (rp_a, rp_b) in zip(a._row_partitions, b._row_partitions)
+      ]
+
+      new_static_inner_shape = a._static_inner_shape.merge_with(
+          b._static_inner_shape)
+
+      dtype = b.dtype if (a.dtype == dtypes.int32) else dtypes.int64
+
+      return DynamicRaggedShape.Spec(
+          new_rp, new_static_inner_shape, dtype=dtype)
+
+    def _with_num_row_partitions(
+        self, new_num_row_partitions: int) -> "DynamicRaggedShape.Spec":
+      """Change the number of row partitions in the spec."""
+      rank = self.rank
+      if rank is None:
+        raise ValueError(
+            "Changing num_row_partitions with unknown rank unsupported")
+      if new_num_row_partitions > max(rank - 1, 0):
+        raise ValueError("Number of row partitions too large")
+      if new_num_row_partitions < 0:
+        raise ValueError("Number of row partitions negative")
+      if self.num_row_partitions == new_num_row_partitions:
+        return self
+      elif self.num_row_partitions < new_num_row_partitions:
+        # TODO(martinz): Consider swapping.
+        rp_delta = new_num_row_partitions - self.num_row_partitions
+        tail_shape = DynamicRaggedShape.Spec._from_tensor_shape(
+            self._static_inner_shape, rp_delta, self.dtype)
+        return DynamicRaggedShape.Spec(
+            row_partitions=self._row_partitions + tail_shape._row_partitions,
+            static_inner_shape=tail_shape._static_inner_shape,
+            dtype=self.dtype)
+      else:
+        assert self.num_row_partitions > new_num_row_partitions
+        new_row_partitions = self._row_partitions[:new_num_row_partitions]
+        last_row_partition = new_row_partitions[-1]
+        old_row_partitions = self._row_partitions[new_num_row_partitions:]
+        new_static_inner_shape = (
+            tensor_shape.TensorShape(
+                [last_row_partition.nvals] +
+                [x.uniform_row_length for x in old_row_partitions]) +
+            self._static_inner_shape[1:])
+        return DynamicRaggedShape.Spec(new_row_partitions,
+                                       new_static_inner_shape, self.dtype)
+
+    def _set_rank_if_unknown(self, new_rank: int) -> "DynamicRaggedShape.Spec":
+      """Ensures this has a known rank at least new_rank."""
+      if new_rank is None:
+        raise TypeError("new_rank is None, but expected int")
+      if new_rank < 0:
+        raise ValueError("Rank must be non-negative")
+      current_rank = self.rank
+      if current_rank is not None and current_rank < new_rank:
+        raise ValueError(
+            "Rank is {current_rank}, expected at least {new_rank}.".format(
+                current_rank=current_rank, new_rank=new_rank))
+
+      if current_rank is not None:
+        return self
+
+      if self._row_partitions:
+        new_inner_rank = max(new_rank - self.num_row_partitions, 1)
+        first_dim = self._row_partitions[-1].nvals
+        static_inner_shape = tensor_shape.TensorShape(
+            [first_dim] + [None] * (new_inner_rank - 1))
+      else:
+        static_inner_shape = tensor_shape.TensorShape([None] * new_rank)
+
+      return DynamicRaggedShape.Spec(
+          row_partitions=self._row_partitions,
+          static_inner_shape=static_inner_shape,
+          dtype=self.dtype)
+
+    def _truncate(self, new_rank: int) -> "DynamicRaggedShape.Spec":
+      """Truncate a ragged shape spec.
+ + For example, if the original spec s was for a shape: + [3, [4, 1], 2, 7] + + Then truncate_dynamic_ragged_shape_spec(s, 3) is a spec for: + [3, [4, 1], 2] + + Args: + new_rank: the new rank + + Returns: + A truncated DynamicRaggedShape.Spec. + """ + if self.rank is None: + return self._set_rank_if_unknown(new_rank)._truncate(new_rank) + + if new_rank == 0: + return DynamicRaggedShape.Spec._from_tensor_shape([], 0, self.dtype) + + if new_rank == 1: + vector_size = self._dimension(0) + return DynamicRaggedShape.Spec._from_tensor_shape([vector_size], 0, + self.dtype) + + if new_rank < self.num_row_partitions + 1: + new_row_partitions = self._row_partitions[:new_rank - 1] + new_static_inner_shape = tensor_shape.TensorShape( + [new_row_partitions[-1].nvals]) + return DynamicRaggedShape.Spec( + row_partitions=new_row_partitions, + static_inner_shape=new_static_inner_shape, + dtype=self.dtype) + else: + remainder = new_rank - self.num_row_partitions + new_static_inner_shape = self._static_inner_shape[:remainder] + return DynamicRaggedShape.Spec( + row_partitions=self._row_partitions, + static_inner_shape=new_static_inner_shape, + dtype=self.dtype) + + def _to_tensor_shape(self): + """Get a tensor shape corresponding to this type.""" + alt = self + if alt._static_inner_shape.rank is None: + return tensor_shape.TensorShape(None) + if alt._static_inner_shape.rank == 0: + assert not alt._row_partitions + return alt._static_inner_shape + prefix = [alt._dimension(0)] + prefix.extend([rp.uniform_row_length for rp in alt._row_partitions]) + suffix = alt._static_inner_shape[1:] + return tensor_shape.TensorShape(prefix) + suffix + + +def broadcast_dynamic_shape(shape_x: DynamicRaggedShape, + shape_y: DynamicRaggedShape) -> DynamicRaggedShape: + """Returns the shape formed by broadcasting two shapes to be compatible. + + 1. If shape_x and shape_y both have row_partitions, then fail if their dtypes + don't match. + 2. If neither has row_partitions and they have different dtypes, + go with int64. + 3. If one has row_partitions, go with that dtype. + + Args: + shape_x: A `DynamicRaggedShape` + shape_y: A `DynamicRaggedShape` + + Returns: + A `DynamicRaggedShape`. + Raises: + ValueError: If `shape_x` and `shape_y` are not broadcast-compatible. + """ + if not isinstance(shape_x, DynamicRaggedShape): + raise TypeError("shape_x must be a DynamicRaggedShape") + if not isinstance(shape_y, DynamicRaggedShape): + raise TypeError("shape_y must be a DynamicRaggedShape") + + return broadcast_dynamic_shape_extended(shape_x, shape_y)[0] + + +def broadcast_to(rt_input, shape: DynamicRaggedShape): + """Broadcasts a potentially ragged tensor to a ragged shape. + + Tiles `rt_input` as necessary to match the given shape. + + Behavior is undefined if `rt_input` is not broadcast-compatible with `shape`. + + Args: + rt_input: The potentially ragged tensor to broadcast. + shape: A `DynamicRaggedShape` + + Returns: + A potentially ragged tensor whose values are taken from + `rt_input`, and whose shape matches `shape`. 
+ """ + if not isinstance(shape, DynamicRaggedShape): + raise TypeError("shape must be a DynamicRaggedShape") + rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input) + origin_shape = None + if ragged_tensor.is_ragged(rt_input): + if shape.num_row_partitions != 0: + if rt_input.row_splits.dtype != shape.dtype: + raise ValueError("Cannot coerce row_splits.dtype") + else: + shape = shape.with_dtype(rt_input.row_splits.dtype) + origin_shape = DynamicRaggedShape.from_tensor(rt_input) + else: + if shape.num_row_partitions != 0: + origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=shape.dtype) + else: + origin_shape = DynamicRaggedShape.from_tensor( + rt_input, dtype=dtypes.int64) + shape = shape.with_dtype(dtype=dtypes.int64) + + broadcaster = _get_broadcaster(origin_shape, shape) + return broadcaster.broadcast(rt_input) + + +def broadcast_dynamic_shape_extended( + a: DynamicRaggedShape, b: DynamicRaggedShape +): # -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster] + """Gets the smallest shape to which a and b can broadcast. + + In order to create the smallest shape, one must also do most of the + work to figure out how to transform from the shapes given. Thus, in addition + to returning the shape, it also creates transformations from the + original shapes to the result. + + This is the equivalent of: + + c = broadcast_dynamic_shape(a, b) + ac = get_broadcaster(a, c) + bc = get_broadcaster(b, c) + return (c, ac, bc) + + Args: + a: a DynamicRaggedShape + b: a DynamicRaggedShape + + Returns: + A triple of a shape and two broadcasters. + """ + if a.row_partitions and b.row_partitions: + if a.dtype != b.dtype: + raise ValueError("Dtypes don't match") + elif a.dtype != b.dtype: + if a.row_partitions: + b = b.with_dtype(a.dtype) + elif b.row_partitions: + a = a.with_dtype(b.dtype) + else: + a = a.with_dtype(dtypes.int64) + b = b.with_dtype(dtypes.int64) + + if (a.rank is None or b.rank is None): + raise ValueError("Unable to broadcast: unknown rank") + elif a.rank == 0: + return (b, _Broadcaster(a, b, []), _get_identity_broadcaster(b)) + elif b.rank == 0: + return (a, _get_identity_broadcaster(a), _Broadcaster(b, a, [])) + elif a.rank == 1 and b.rank == 1: + [a_layer, b_layer, + target] = _broadcast_dynamic_shape_one_layer(a.inner_shape, b.inner_shape) + target_shape = DynamicRaggedShape._from_inner_shape(target) # pylint: disable=protected-access + return (target_shape, _Broadcaster(a, target_shape, [a_layer]), + _Broadcaster(b, target_shape, [b_layer])) + + if a.rank > b.rank: + (c, bc, ac) = _broadcast_dynamic_shape_extended_helper(b, a) # pylint: disable=arguments-out-of-order + + return (c, ac, bc) + + return _broadcast_dynamic_shape_extended_helper(a, b) + + +def _row_partitions_identical(shape_a, shape_b): + """Returns True iff all row_partitions in shapes are identical.""" + return ((shape_a.num_row_partitions == shape_b.num_row_partitions) and all( + a is b for a, b in zip(shape_a.row_partitions, shape_b.row_partitions))) + + +# TODO(martinz): Preserve shapes better (see CL/414806185) +@dispatch.dispatch_for_binary_elementwise_apis(ragged_tensor.RaggedOrDense, + ragged_tensor.RaggedOrDense) +def ragged_binary_elementwise_op_impl(op, x, y): + """Binary elementwise api handler for RaggedTensors.""" + x_is_ragged = ragged_tensor.is_ragged(x) + y_is_ragged = ragged_tensor.is_ragged(y) + + # Convert args to tensors. 
+ x = ragged_tensor.convert_to_tensor_or_ragged_tensor( + x, preferred_dtype=(y.dtype if y_is_ragged else None)) + y = ragged_tensor.convert_to_tensor_or_ragged_tensor( + y, preferred_dtype=x.dtype) + + if x_is_ragged and y_is_ragged: + x, y = ragged_tensor.match_row_splits_dtypes(x, y) + + if ((x_is_ragged and y_is_ragged) or + (x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or + (y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)): + shape_x = DynamicRaggedShape.from_tensor(x) + shape_y = DynamicRaggedShape.from_tensor(y) + if shape_x.dtype != shape_y.dtype: + if not x_is_ragged: + shape_x = shape_x.with_dtype(shape_y.dtype) + elif not y_is_ragged: + shape_y = shape_y.with_dtype(shape_x.dtype) + + if _row_partitions_identical(shape_x, shape_y): + # At this point, both x and y must be ragged. + return shape_x._add_row_partitions( # pylint: disable=protected-access + op(x.flat_values, y.flat_values), + validate=False) + + (shape_z, bcast_xz, + bcast_yz) = broadcast_dynamic_shape_extended(shape_x, shape_y) + x_new_flat = bcast_xz.broadcast_flat_values(x, inner_dimensions=False) + y_new_flat = bcast_yz.broadcast_flat_values(y, inner_dimensions=False) + z_flat = op(x_new_flat, y_new_flat) + return shape_z._add_row_partitions(z_flat, validate=True) # pylint: disable=protected-access + + x_values = x.flat_values if ragged_tensor.is_ragged(x) else x + y_values = y.flat_values if ragged_tensor.is_ragged(y) else y + mapped_values = op(x_values, y_values) + if isinstance(mapped_values, bool): + return mapped_values # Special case for tensor_equals. + if ragged_tensor.is_ragged(x): + return x.with_flat_values(mapped_values) + else: + return y.with_flat_values(mapped_values) + + +@dispatch.dispatch_for_binary_elementwise_assert_apis( + ragged_tensor.RaggedOrDense, ragged_tensor.RaggedOrDense) +def ragged_binary_elementwise_assert_op_impl(op, x, y): + """Binary elementwise assert api handler for RaggedTensors. + + This handles binary assert operations for ragged tensors. Compared with + `ragged_binary_elementwise_op_impl`, this handler does not compute a ragged + tensor as output. Instead, it applies the assert operation `op` to input + tensors based on their ragged shapes and flat_values, and returns the result + of the assertion operation. + + Args: + op: a binary assert operation on Tensors. + x: something that can be coerced to a Tensor or RaggedTensor. + y: something that can be coerced to a Tensor or RaggedTensor. + + Returns: + the result of the assertion operation. + + """ + x_is_ragged = ragged_tensor.is_ragged(x) + y_is_ragged = ragged_tensor.is_ragged(y) + + # Convert args to tensors. + x = ragged_tensor.convert_to_tensor_or_ragged_tensor( + x, preferred_dtype=(y.dtype if y_is_ragged else None)) + y = ragged_tensor.convert_to_tensor_or_ragged_tensor( + y, preferred_dtype=x.dtype) + + if x_is_ragged and y_is_ragged: + x, y = ragged_tensor.match_row_splits_dtypes(x, y) + + if ((x_is_ragged and y_is_ragged) or + (x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or + (y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)): + shape_x = DynamicRaggedShape.from_tensor(x) + shape_y = DynamicRaggedShape.from_tensor(y) + if shape_x.dtype != shape_y.dtype: + if not x_is_ragged: + shape_x = shape_x.with_dtype(shape_y.dtype) + elif not y_is_ragged: + shape_y = shape_y.with_dtype(shape_x.dtype) + + if _row_partitions_identical(shape_x, shape_y): + # At this point, both x and y must be ragged. 
+ return op(x.flat_values, y.flat_values) + + (_, bcast_xz, bcast_yz) = broadcast_dynamic_shape_extended(shape_x, shape_y) + x_new_flat = bcast_xz.broadcast_flat_values(x, inner_dimensions=False) + y_new_flat = bcast_yz.broadcast_flat_values(y, inner_dimensions=False) + return op(x_new_flat, y_new_flat) + + x_values = x.flat_values if ragged_tensor.is_ragged(x) else x + y_values = y.flat_values if ragged_tensor.is_ragged(y) else y + return op(x_values, y_values) + + +def _find_dtype_helper(value, preferred): + """Helper for _find_dtype.""" + if preferred is not None: + return preferred + elif isinstance(value, RowPartition): + return value.dtype + elif isinstance(value, dtypes.DType): + return value + elif isinstance(value, int): + return None + elif isinstance(value, list): + return None + elif isinstance(value, tuple): + return None + elif isinstance(value, core.Tensor): + return value.dtype + return value.dtype + + +def _find_dtype(value, preferred): + """Returns the preferred dtype of value or preferred if preferred != None. + + This is used as an operator to pass over multiple objects in decreasing order + of priority until there is a preferred dtype for one. For example, if you were + adding three tensor-ish things (some tensors, some lists), and needed a + preferred dtype, you could use this as: + + def adding(a, b, c, dtype = None): + dtype = _find_dtype(a, dtype) + dtype = _find_dtype(b, dtype) + dtype = _find_dtype(c, dtype) + if dtype is None: + dtype = tf.float32 + ...Code continues here... + + Args: + value: a list, value, RowPartition, or tensor. + preferred: a given dtype. If not None, this will be returned. + + Returns: + an optional dtype. + """ + result = _find_dtype_helper(value, preferred) + if (result == dtypes.int64 or result == dtypes.int32 or result is None): + return result + raise ValueError("Illegal dtype: " + str(result)) + + +def _find_dtype_iterable( + iterable: Iterable[Any], + dtype: Optional[dtypes.DType]) -> Optional[dtypes.DType]: + """Find the preferred dtype of a list of objects. + + This will go over the iterable, and use the first object with a preferred + dtype. The dtype passed has highest priority if it is not None. + + Args: + iterable: an iterable with things that might have a dtype. + dtype: an overriding dtype, or None. + + Returns: + an optional dtype. + """ + if dtype is not None: + return dtype + for x in iterable: + dtype = _find_dtype(x, dtype) + return dtype + + +class _LayerBroadcaster(abc.ABC): + """A broadcaster of a single layer. + + Although this class does not literally contain a gather_index, the reference + implementation is defined through a gather_index. Thus, any subclasses should + first define the gather_index property. Other functions can be overridden + for optimization, but it should not change the behavior. + """ + + @property + @abc.abstractmethod + def gather_index(self): + """Returns a 1D tensor. + + The size of the 1D tensor is equal to the destination size. + + The ith element of the result is the index of the source of the ith element. 
+ """ + pass + + @property + def dtype(self): + """Returns the dtype of the broadcast.""" + return self.gather_index.dtype + + @abc.abstractmethod + def with_dtype(self, dtype): + """Returns an identical _LayerBroadcaster with a different dtype.""" + pass + + def __repr__(self): + return str(self.gather_index) + + @classmethod + def from_gather_index(cls, gather_index): + """Create a broadcaster from a gather_index.""" + return _GatherLayerBroadcaster(gather_index) + + @classmethod + def first_layer(cls, nrows_source, nrows_target): + """Create a broadcaster from a gather_index.""" + gather_index = _first_layer_gather_index(nrows_source, nrows_target) + return _LayerBroadcaster.from_gather_index(gather_index) + + @classmethod + def get_singleton_broadcaster(cls, target_size): + """Broadcast from 1 element to target_size elements.""" + return _LayerBroadcaster.from_gather_index( + array_ops.zeros(target_size, dtype=target_size.dtype)) + + @abc.abstractmethod + def with_dependencies(self, checks): + """Add dependencies to a _LayerBroadcaster. + + Args: + checks: a list of ops that need to be run before any tensors from the + Broadcaster are used. + + Returns: + a copy of this _LayerBroadcaster with dependencies added. + """ + pass + + @classmethod + def get_identity_broadcaster(cls, nvals, dtype=None): + """Create an identity broadcaster. + + TODO(martinz): an identity broadcaster can be far more efficient than a + generic broadcaster. Add an optimized implementation. + Args: + nvals: the number of values for the broadcaster. + dtype: the dtype of the broadcaster, or None to use the dtype of nvals. + + Returns: + an identity broadcaster from [0....nvals-1] to [0...nvals-1] + """ + return _GatherLayerBroadcaster(math_ops.range(nvals, dtype=dtype)) + + def broadcast_tensor(self, tensor): + """Broadcast from a dense tensor. + + It is assumed that the first axis of the dense tensor is indexed by the + source shape, and at the end, the first axis of the dense tensor is + indexed by the destination shape. + + Args: + tensor: a dense tensor. + + Returns: + A dense tensor. + """ + return array_ops.gather(tensor, self.gather_index) + + def dest_nrows(self): + """Return the number of rows in the resulting gather, or None if tiling.""" + return math_ops.cast( + array_ops.shape(self.gather_index)[0], dtype=self.dtype) + + def broadcast_row_partition(self, rp): + """Return a new shape where the rows are broadcasted. + + *--self--->* + | | + rp result + | | + V V + *--------->* + + This is equivalent to: + return RowPartition.from_row_lengths(self.broadcast(rp.row_lengths())) + + However, if the shape has uniform row length, then that property is + maintained. + + Args: + rp: a row partition. + + Returns: + a RowPartition representing a broadcast version of this row partition. + """ + if not rp.is_uniform(): + return RowPartition.from_row_lengths( + self.broadcast_tensor(rp.row_lengths())) + else: + return RowPartition.from_uniform_row_length( + rp.uniform_row_length(), + nvals=rp.uniform_row_length() * self.dest_nrows(), + nrows=self.dest_nrows()) + + def next_layer(self, original_rp, broadcast_rp): + r"""Create the next layer gather_index whether or not a broadcast happens. + + *---------self------->* + | | + original_rp broadcast_rp + | | + \|/ \|/ + *--next_broadcaster-->* + Args: + original_rp: the original row partition. + broadcast_rp: the target row partition. + + Returns: + the gather_index for next_broadcaster. 
+ + """ + gather_index = _next_layer_gather_index(self, original_rp, broadcast_rp) + return _LayerBroadcaster.from_gather_index(gather_index) + + +class _GatherLayerBroadcaster(_LayerBroadcaster): + """Implements _LayerBroadcaster with an explicit gather_index. + + For example, suppose that the source shape is: + [*],[*,*] + And the target shape is: + [*],[*,*],[*],[*,*] + Then, this can be represented with a map: + [0,1,2,0,1,2] + + """ + + def __init__(self, gather_index): + gather_index = ops.convert_to_tensor(gather_index) + if (gather_index.dtype != dtypes.int64 and + gather_index.dtype != dtypes.int32): + raise ValueError("gather_index must be int64 or int32") + self._gather_index = gather_index + + @property + def gather_index(self): + return self._gather_index + + def with_dtype(self, dtype): + return _GatherLayerBroadcaster(math_ops.cast(self._gather_index, dtype)) + + def with_dependencies(self, checks): + new_gather_index = control_flow_ops.with_dependencies( + checks, self._gather_index) + return _GatherLayerBroadcaster(new_gather_index) + + +class _Broadcaster: + """A _Broadcaster represents a transformation from one shape to another. + + It provides a transform for each axis of the source shape to the + corresponding axis of the destination shape. + + """ + + def __init__(self, + source_shape, + target_shape, + layer_broadcasters, + dtype=None): + """Create a broadcaster. + + Do not call directly. + The source_shape, target_shape, and layer_broadcasters are converted + to have the same dtype. + + Note: source_shape.rank and target_shape.rank must be known. + Args: + source_shape: the source DynamicRaggedShape + target_shape: the target DynamicRaggedShape + layer_broadcasters: List[_LayerBroadcaster] of length source_shape.rank. + dtype: the preferred dtype of the broadcaster. + + Raises: + TypeError: if the input types don't match. 
+ """ + if not isinstance(source_shape, DynamicRaggedShape): + raise TypeError("source_shape is not a DynamicRaggedShape") + if not isinstance(target_shape, DynamicRaggedShape): + raise TypeError("target_shape is not a DynamicRaggedShape") + if not isinstance(layer_broadcasters, list): + raise TypeError("layer_broadcasters not a list: " + + str(layer_broadcasters)) + for bc in layer_broadcasters: + if not isinstance(bc, _LayerBroadcaster): + raise TypeError("Not a LayerBroadcaster: " + str(bc)) + + dtype = _find_dtype(source_shape, dtype) + dtype = _find_dtype(target_shape, dtype) + dtype = _find_dtype_iterable(layer_broadcasters, dtype) + dtype = _find_dtype(dtypes.int64, dtype) + self._source_shape = source_shape.with_dtype(dtype) + self._target_shape = target_shape.with_dtype(dtype) + self._layer_broadcasters = [x.with_dtype(dtype) for x in layer_broadcasters] + + def __repr__(self): + return ("{src_shape:" + str(self._source_shape) + ", target_shape:" + + str(self._target_shape) + " layer_broadcasters: " + + str(self._layer_broadcasters) + "}") + + def with_dtype(self, dtype): + """Return a copy of this Broadcaster with a different dtype.""" + return _Broadcaster(self._source_shape, self._target_shape, + self._layer_broadcasters, dtype) + + @property + def source_shape(self): + return self._source_shape + + @property + def target_shape(self): + return self._target_shape + + @property + def dtype(self): + return self._source_shape.dtype + + def _target_inner_shape_int32(self): + new_inner_shape = self.target_shape.inner_shape + if new_inner_shape.dtype == dtypes.int64: + new_inner_shape = math_ops.cast(new_inner_shape, dtype=dtypes.int32) + return new_inner_shape + + # pylint:disable=protected-access + def broadcast_flat_values(self, rt, inner_dimensions=True): + """flat_values of a ragged tensor broadcast to target_shape. + + If inner_dimensions==True, then the result is a dense tensor with shape + target_shape.inner_shape, the flat values of the broadcasted shape. + + If you add target_shape.row_partitions, you will get the full broadcasted + shape. + + If inner_dimensions==False, the result is a dense tensor that satsifies + certain properties: + 1. broadcast_to(result, target_shape.inner_shape) will give the result + if inner_dimensions==True. + 2. Either (a) (result.rank < target_shape.inner_rank) + or (b) (result.shape[0] == target_shape.inner_shape[0]). + 3. result.rank = min(target_shape.inner_rank, rt.rank) + 4. For i < target_shape.inner_rank - 1, and i < rt.rank, + and if rt.shape[-i]!=1, then result.shape[-i]=target_shape[-i]. + Args: + rt: a ragged or dense tensor. + inner_dimensions: if true, broadcast the inner dimensions as well. + + Returns: + a dense tensor + """ + if ragged_tensor.is_ragged(rt): + rt = rt.flat_values + # If rt was a regular tensor, it is its own flat_values. + if self.target_shape.rank == 0: + return rt + inner_rank = self.target_shape.inner_rank + if inner_rank > self._source_shape.rank: + # The dense rank is larger than the whole shape. So, we make the shape + # dense. + if self.source_shape.num_row_partitions > 0: + rt = array_ops.reshape( + rt, self.source_shape._alt_inner_shape(self.source_shape.rank)) + # rt.rank == self._source_shape.rank < inner_rank + # Here, property 2a holds. 
+ if inner_dimensions: + return array_ops.broadcast_to(rt, self._target_inner_shape_int32()) + return rt + else: + if self._source_shape.inner_rank != inner_rank: + rt = array_ops.reshape(rt, + self._source_shape._alt_inner_shape(inner_rank)) # pylint:disable=protected-access + # After the reshape, rt is flat_values with inner_rank. + flat_broadcaster = self._layer_broadcasters[-inner_rank] + rt = flat_broadcaster.broadcast_tensor(rt) + # Here, property 2b holds. + if inner_dimensions: + rt = array_ops.broadcast_to(rt, self._target_inner_shape_int32()) + return rt + + def broadcast(self, rt): + """Broadcast a tensor of source_shape to target_shape.""" + flat_values = self.broadcast_flat_values(rt) + return self.target_shape._add_row_partitions(flat_values) # pylint:disable=protected-access + + +def _get_layer_broadcasters_from_rps(zero_broadcaster, source_rps, target_rps): + """Get LayerBroadcasters from RowPartitions. + + *--zero_broadcaster->* + | | + source_rps[0] target_rps[0] + | | + V V + *---result[1]------->* + | | + source_rps[1] target_rps[1] + | | + V V + *---result[2]------->* + . + . + . + *---result[k-1]----->* + | | + source_rps[k] target_rps[k] + | | + V V + *---result[k]------->* + + Note: result[0] = zero_broadcaster + + Args: + zero_broadcaster: a broadcaster between the source and target row + partitions' rows, and equal to result[0]. + source_rps: source row partitions. + target_rps: target row partitions (same length as source_rps). + + Returns: + result: a list of LayerBroadcasters. + """ + if not isinstance(zero_broadcaster, _LayerBroadcaster): + raise TypeError("Not a _LayerBroadcaster: " + str(zero_broadcaster)) + assert len(source_rps) == len(target_rps) + if not source_rps: + return [zero_broadcaster] + next_broadcaster = zero_broadcaster.next_layer(source_rps[0], target_rps[0]) + tail_broadcasters = _get_layer_broadcasters_from_rps(next_broadcaster, + source_rps[1:], + target_rps[1:]) + return [zero_broadcaster] + tail_broadcasters + + +def _get_broadcaster(source_shape, target_shape): + """Get a _Broadcaster from source_shape to target_shape.""" + if source_shape.dtype != target_shape.dtype: + raise ValueError("The source and target row_split dtypes should be equal") + + if (source_shape.rank is None or target_shape.rank is None): + raise ValueError("Rank of source and target must be statically known") + elif source_shape.rank > target_shape.rank: + raise ValueError("Cannot broadcast to a shape with smaller rank") + elif source_shape.rank == 0: + return _Broadcaster(source_shape, target_shape, []) + elif target_shape.rank == 1: + assert source_shape.rank == 1 + layer = _LayerBroadcaster.first_layer(source_shape.inner_shape[0], + target_shape.inner_shape[0]) + return _Broadcaster(source_shape, target_shape, [layer]) + + assert source_shape.rank <= target_shape.rank + assert target_shape.rank >= 2 + assert source_shape.rank >= 1 + + source_rps = source_shape._as_row_partitions() # pylint: disable=protected-access + + target_rps = target_shape._as_row_partitions() # pylint: disable=protected-access + + assert len(target_rps) >= 1 + assert len(source_rps) <= len(target_rps) + source_nrows = source_shape[0] + if len(source_rps) < len(target_rps): + # Note: this includes the case where len(source_rps)==0. + # Here we begin at -1, one dimension before source_rps[0]. + # neg_one_source_rp | neg_one_target_rp=target_rps[-(len(source_rps)+1)] + # source_rps[0] | target_rps[-len(source_rps)] + # source_rps[1] | target_rps[1-len(source_rps)] + # ... | ... 
+    #   source_rps[-1]  | target_rps[-1]
+    neg_one_source_rp = RowPartition.from_uniform_row_length(
+        uniform_row_length=source_nrows, nrows=1, nvals=source_nrows)
+    neg_one_target_rp = target_rps[-(len(source_rps) + 1)]
+    neg_one_broadcaster = _LayerBroadcaster.get_singleton_broadcaster(
+        neg_one_target_rp.nrows())
+    zeroth_broadcaster = neg_one_broadcaster.next_layer(neg_one_source_rp,
+                                                        neg_one_target_rp)
+    target_rps_tail = target_rps[-len(source_rps):] if len(
+        source_rps) >= 1 else []
+
+    layers = _get_layer_broadcasters_from_rps(zeroth_broadcaster, source_rps,
+                                              target_rps_tail)
+    return _Broadcaster(source_shape, target_shape, layers)
+  else:
+    assert len(target_rps) == len(source_rps)
+    zeroth_broadcaster = _LayerBroadcaster.first_layer(source_rps[0].nrows(),
+                                                       target_rps[0].nrows())
+    layers = _get_layer_broadcasters_from_rps(zeroth_broadcaster, source_rps,
+                                              target_rps)
+
+    return _Broadcaster(source_shape, target_shape, layers)
+
+
+def _get_identity_broadcaster(shape):
+  """Gets a Broadcaster for two identical shapes."""
+  if shape.rank is None:
+    raise ValueError("Shape must have a defined rank")
+  layers = [
+      _LayerBroadcaster.get_identity_broadcaster(
+          shape._num_slices_in_dimension(i)) for i in range(shape.rank)  # pylint: disable=protected-access
+  ]
+  return _Broadcaster(shape, shape, layers)
+
+
+def _broadcast_dynamic_shape_one_layer(a, b):
+  """Broadcast two vectors, given their shapes.
+
+  Args:
+    a: the shape of the first vector (a 1-D integer tensor).
+    b: the shape of the second vector (a 1-D integer tensor).
+
+  Returns:
+    (layer_a, layer_b, target_shape)
+    layer_a is a _LayerBroadcaster from a to the target_shape.
+    layer_b is a _LayerBroadcaster from b to the target_shape.
+    target_shape is the target_shape
+
+  Raises:
+    InvalidArgumentError if the shapes are not consistent.
+  """
+  a_0 = a[0]
+  b_0 = b[0]
+
+  def broadcast_from_a():
+    # Assumes a_0 == 1
+    a_layer = array_ops.zeros(b_0, dtype=b_0.dtype)
+    b_layer = math_ops.range(b_0)
+    target = b
+    return [a_layer, b_layer, target]
+
+  a_static = tensor_util.constant_value(a)
+  if a_static is not None and a_static[0] == 1:
+    [a_gi, b_gi, target] = broadcast_from_a()
+    a_layer = _LayerBroadcaster.from_gather_index(a_gi)
+    b_layer = _LayerBroadcaster.from_gather_index(b_gi)
+    return [a_layer, b_layer, target]
+
+  def broadcast_from_b():
+    # Assumes b_0 == 1
+    a_layer = math_ops.range(a_0)
+    b_layer = array_ops.zeros(a_0, dtype=a_0.dtype)
+    target = a
+    return [a_layer, b_layer, target]
+
+  b_static = tensor_util.constant_value(b)
+  if b_static is not None and b_static[0] == 1:
+    [a_gi, b_gi, target] = broadcast_from_b()
+    a_layer = _LayerBroadcaster.from_gather_index(a_gi)
+    b_layer = _LayerBroadcaster.from_gather_index(b_gi)
+    return [a_layer, b_layer, target]
+
+  def broadcast_noop():
+    # Assumes a_0 == b_0
+    a_layer = math_ops.range(a_0)
+    b_layer = math_ops.range(b_0)
+    target = b
+    return [a_layer, b_layer, target]
+
+  can_broadcast_from_a = math_ops.equal(a_0, 1)
+  can_broadcast_from_b = math_ops.equal(b_0, 1)
+
+  def broadcast_not_from_a():
+    return cond.cond(
+        can_broadcast_from_b, true_fn=broadcast_from_b, false_fn=broadcast_noop)
+
+  nrows_equal = math_ops.equal(a_0, b_0)
+  can_broadcast = math_ops.logical_or(
+      can_broadcast_from_a,
+      math_ops.logical_or(can_broadcast_from_b, nrows_equal))
+
+  check_can_broadcast = check_ops.assert_equal(
+      can_broadcast, True, message="Cannot broadcast")
+
+  results = cond.cond(
+      can_broadcast_from_a,
+      true_fn=broadcast_from_a,
+      false_fn=broadcast_not_from_a)
+
+  results = [
+      control_flow_ops.with_dependencies([check_can_broadcast], x)
+      for x in results
+  ]
+  [a_gi, b_gi, target] = results
+  a_layer = _LayerBroadcaster.from_gather_index(a_gi)
+  b_layer = _LayerBroadcaster.from_gather_index(b_gi)
+  return [a_layer, b_layer, target]
+
+
+def _broadcast_dynamic_shape_first_layer(a_0, b_0):
+  """Broadcast the first layer of two dynamic shapes given the dimensions.
+
+  Args:
+    a_0: the number of rows in a.
+    b_0: the number of rows in b.
+
+  Returns:
+    (layer_a, layer_b)
+    layer_a is a _LayerBroadcaster from a to the target.
+    layer_b is a _LayerBroadcaster from b to the target.
+  """
+
+  def broadcast_from_a():
+    # Assumes a_0 == 1
+    a_layer = array_ops.zeros(b_0, dtype=b_0.dtype)
+    b_layer = math_ops.range(b_0)
+    return [a_layer, b_layer]
+
+  static_a_0 = tensor_util.constant_value(a_0)
+  static_b_0 = tensor_util.constant_value(b_0)
+  if static_a_0 is not None:
+    if static_a_0 == static_b_0:
+      id_broadcaster = _LayerBroadcaster.get_identity_broadcaster(
+          static_a_0, dtype=a_0.dtype)
+      return [id_broadcaster, id_broadcaster]
+    elif static_a_0 == 1:
+      return [
+          _LayerBroadcaster.get_singleton_broadcaster(b_0),
+          _LayerBroadcaster.get_identity_broadcaster(b_0)
+      ]
+
+  if static_b_0 == 1:
+    return [
+        _LayerBroadcaster.get_identity_broadcaster(a_0),
+        _LayerBroadcaster.get_singleton_broadcaster(a_0)
+    ]
+
+  def broadcast_from_b():
+    # Assumes b_0 == 1
+    a_layer = math_ops.range(a_0)
+    b_layer = array_ops.zeros(a_0, dtype=a_0.dtype)
+    return [a_layer, b_layer]
+
+  def broadcast_noop():
+    # Assumes a_0 == b_0
+    a_layer = math_ops.range(a_0)
+    b_layer = math_ops.range(b_0)
+    return [a_layer, b_layer]
+
+  can_broadcast_from_a = math_ops.equal(a_0, constant_op.constant(1, a_0.dtype))
+  can_broadcast_from_b = math_ops.equal(b_0, constant_op.constant(1, b_0.dtype))
+
+  def broadcast_not_from_a():
+    return cond.cond(
+        can_broadcast_from_b, true_fn=broadcast_from_b, false_fn=broadcast_noop)
+
+  # Ideally, this would only block control flow on broadcast_noop, but
+  # the control flow doesn't seem to work.
+  can_broadcast = math_ops.logical_or(
+      math_ops.logical_or(can_broadcast_from_a, can_broadcast_from_b),
+      math_ops.equal(a_0, b_0))
+
+  result = cond.cond(
+      can_broadcast_from_a,
+      true_fn=broadcast_from_a,
+      false_fn=broadcast_not_from_a)
+
+  return [
+      _LayerBroadcaster.from_gather_index(
+          control_flow_ops.with_dependencies(
+              [check_ops.assert_equal(can_broadcast, True)], x)) for x in result
+  ]
+
+
+def _broadcast_half(
+    ac_0: _LayerBroadcaster,
+    a_1: RowPartition) -> Tuple[_LayerBroadcaster, RowPartition]:
+  """Does a NOOP broadcast of a_1.
+
+     *-ac_0-->*
+     |        |
+    a_1      c_1
+     |        |
+     V        V
+     *-ac_1-->*
+
+  Note that by definition this cannot fail: there is always a well-defined
+  NOOP broadcast. This is usually intended as half of broadcasting two shapes
+  together.
+ Args: + ac_0: previous LayerBroadcaster + a_1: previous RowPartition + + Returns: + [ac_1, c_1] where ac_1 is the next LayerBroadcaster, and c_1 is the + broadcast RowPartition + """ + c_1 = ac_0.broadcast_row_partition(a_1) + old_value_rowids = array_ops.gather(ac_0.gather_index, c_1.value_rowids()) + old_row_starts = array_ops.gather(a_1.row_splits(), old_value_rowids) + gather_index = old_row_starts + c_1.offsets_in_rows() + return [_LayerBroadcaster.from_gather_index(gather_index), c_1] + + +def _broadcast_dynamic_shape_next_layer_half_ragged( + ac_0: _LayerBroadcaster, bc_0: _LayerBroadcaster, a_1: RowPartition, + b_1: RowPartition +) -> Tuple[RowPartition, _LayerBroadcaster, _LayerBroadcaster]: + r"""Broadcast target and next layer broadcaster of two dynamic shapes. + + a_1 is uniform, and b_1 is ragged. + *--ac_0-->*<--bc_0--* + | | | + a_1 c_1 b_1 + | | | + V V V + *--ac_1-->*<--bc_1--* + + Args: + ac_0: _LayerBroadcaster from a to c in the previous layer. + bc_0: _LayerBroadcaster from b to c in the previous layer. + a_1: a uniform RowPartition for the next layer of a. + b_1: a ragged RowPartition for the next layer of b. + + Returns: + (c_1, ac_1, bc_1) + c_1: a RowPartition for the next layer of the dynamic shape. + ac_1: _LayerBroadcaster from a to c in the next layer. + bc_1: _LayerBroadcaster from b to c in the next layer. + """ + if not isinstance(ac_0, _LayerBroadcaster): + raise TypeError("ac_0 should be a _LayerBroadcaster") + if not isinstance(bc_0, _LayerBroadcaster): + raise TypeError("bc_0 should be a _LayerBroadcaster") + if not isinstance(a_1, RowPartition): + raise TypeError("a_1 should be a RowPartition") + if not isinstance(b_1, RowPartition): + raise TypeError("b_1 should be a RowPartition") + + assert a_1.is_uniform() + assert not b_1.is_uniform() + + static_a_1 = tensor_util.constant_value(a_1.uniform_row_length()) + if static_a_1 == 1: + [bc_1, c_1b] = _broadcast_half(bc_0, b_1) + ac_1_gather_index = array_ops.gather(ac_0.gather_index, c_1b.value_rowids()) + c_1 = RowPartition.from_row_splits(c_1b.row_splits()) + ac_1 = _LayerBroadcaster.from_gather_index(ac_1_gather_index) + bc_1 = _LayerBroadcaster.from_gather_index(bc_1.gather_index) + return [c_1, ac_1, bc_1] + + def broadcast_noop(): + # The sides must be "equal". + [ac_1, c_1a] = _broadcast_half(ac_0, a_1) + [bc_1, c_1b] = _broadcast_half(bc_0, b_1) + checks = [check_ops.assert_equal(c_1a.row_splits(), c_1b.row_splits())] + return [ + control_flow_ops.with_dependencies(checks, x) + for x in [a_1.row_splits(), ac_1.gather_index, bc_1.gather_index] + ] + + def broadcast_a(): + [bc_1, c_1b] = _broadcast_half(bc_0, b_1) + ac_1_gather_index = array_ops.gather(ac_0.gather_index, c_1b.value_rowids()) + return [ + c_1b.row_splits(), + ac_1_gather_index, + bc_1.gather_index, + ] + + can_broadcast_a = math_ops.equal(a_1.uniform_row_length(), 1) + + [c_1_row_splits, ac_1_gather_index, + bc_1_gather_index] = cond.cond( + can_broadcast_a, true_fn=broadcast_a, false_fn=broadcast_noop) + + c_1 = RowPartition.from_row_splits(c_1_row_splits) + ac_1 = _LayerBroadcaster.from_gather_index(ac_1_gather_index) + bc_1 = _LayerBroadcaster.from_gather_index(bc_1_gather_index) + return [c_1, ac_1, bc_1] + + +def _broadcast_dynamic_shape_next_layer_both_uniform( + ac_0: _LayerBroadcaster, bc_0: _LayerBroadcaster, a_1: RowPartition, + b_1: RowPartition +) -> Tuple[RowPartition, _LayerBroadcaster, _LayerBroadcaster]: + r"""Broadcast target and next layer broadcaster of two uniform dynamic shapes. 
+ + *--ac_0-->*<--bc_0--* + | | | + a_1 c_1 b_1 + | | | + V V V + *--ac_1-->*<--bc_1--* + + Args: + ac_0: _LayerBroadcaster from a to c in the previous layer. + bc_0: _LayerBroadcaster from b to c in the previous layer. + a_1: a RowPartition for the next layer of a. + b_1: a RowPartition for the next layer of b. + + Returns: + (c_1, ac_1, bc_1) + c_1: a RowPartition for the next layer of the dynamic shape. + ac_1: _LayerBroadcaster from a to c in the next layer. + bc_1: _LayerBroadcaster from b to c in the next layer. + """ + if not isinstance(ac_0, _LayerBroadcaster): + raise TypeError("ac_0 should be a _LayerBroadcaster") + if not isinstance(bc_0, _LayerBroadcaster): + raise TypeError("bc_0 should be a _LayerBroadcaster") + if not isinstance(a_1, RowPartition): + raise TypeError("a_1 should be a RowPartition") + if not isinstance(b_1, RowPartition): + raise TypeError("b_1 should be a RowPartition") + assert a_1.is_uniform() + assert b_1.is_uniform() + + static_a_1 = tensor_util.constant_value(a_1.uniform_row_length()) + static_b_1 = tensor_util.constant_value(b_1.uniform_row_length()) + + if static_a_1 is not None: + if static_a_1 == static_b_1: + # Here, this dimension is the same, but we may have to broadcast previous + # dimensions. + [ac_1, _] = _broadcast_half(ac_0, a_1) + [bc_1, _] = _broadcast_half(bc_0, b_1) + c_1 = RowPartition.from_uniform_row_length( + static_a_1, nrows=ac_0.dest_nrows()) + return [c_1, ac_1, bc_1] + elif static_a_1 == 1: + [bc_1, c_1b] = _broadcast_half(bc_0, b_1) + ac_1 = _LayerBroadcaster.from_gather_index( + array_ops.gather(ac_0.gather_index, c_1b.value_rowids())) + c_1 = RowPartition.from_uniform_row_length( + b_1.uniform_row_length(), nrows=bc_0.dest_nrows()) + return [c_1, ac_1, bc_1] + + if static_b_1 == 1: + [ac_1, c_1a] = _broadcast_half(ac_0, a_1) + bc_1 = _LayerBroadcaster.from_gather_index( + array_ops.gather(bc_0.gather_index, c_1a.value_rowids())) + c_1 = RowPartition.from_uniform_row_length( + a_1.uniform_row_length(), nrows=ac_0.dest_nrows()) + return [c_1, ac_1, bc_1] + + def broadcast_noop(): + # Assumes a_1.uniform_row_length() == b_1.uniform_row_length() + # Both sides broadcast to a single shape. 
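+    # E.g. (illustrative): with uniform row length 3 on both sides, the row
+    # length is kept as-is; _broadcast_half below only propagates the outer
+    # gather indices from ac_0 and bc_0.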
+ [ac_1, _] = _broadcast_half(ac_0, a_1) + [bc_1, _] = _broadcast_half(bc_0, b_1) + return [a_1.uniform_row_length(), ac_1.gather_index, bc_1.gather_index] + + def broadcast_a(): + [bc_1, c_1b] = _broadcast_half(bc_0, b_1) + ac_1_gather_index = array_ops.gather(ac_0.gather_index, c_1b.value_rowids()) + return [ + b_1.uniform_row_length(), + ac_1_gather_index, + bc_1.gather_index, + ] + + def broadcast_b(): + [ac_1, c_1a] = _broadcast_half(ac_0, a_1) + bc_1_gather_index = array_ops.gather(bc_0.gather_index, c_1a.value_rowids()) + return [a_1.uniform_row_length(), ac_1.gather_index, bc_1_gather_index] + + can_broadcast_b = math_ops.equal(b_1.uniform_row_length(), 1) + + def no_broadcast_a(): + return cond.cond( + can_broadcast_b, true_fn=broadcast_b, false_fn=broadcast_noop) + + can_broadcast_a = math_ops.equal(a_1.uniform_row_length(), 1) + + broadcast_asserts = [ + check_ops.assert_equal( + math_ops.logical_or( + math_ops.logical_or(can_broadcast_a, can_broadcast_b), + math_ops.equal(a_1.uniform_row_length(), + b_1.uniform_row_length())), True) + ] + + result = cond.cond( + can_broadcast_a, true_fn=broadcast_a, false_fn=no_broadcast_a) + + [c_1_uniform_row_length, ac_1_gather_index, bc_1_gather_index] = [ + control_flow_ops.with_dependencies(broadcast_asserts, x) for x in result + ] + + c_1 = RowPartition.from_uniform_row_length( + c_1_uniform_row_length, + nvals=c_1_uniform_row_length * ac_0.dest_nrows(), + nrows=ac_0.dest_nrows()) + ac_1 = _LayerBroadcaster.from_gather_index(ac_1_gather_index) + bc_1 = _LayerBroadcaster.from_gather_index(bc_1_gather_index) + return [c_1, ac_1, bc_1] + + +def _broadcast_dynamic_shape_next_layer( + ac_0: _LayerBroadcaster, bc_0: _LayerBroadcaster, a_1: RowPartition, + b_1: RowPartition +) -> Tuple[RowPartition, _LayerBroadcaster, _LayerBroadcaster]: + r"""Broadcast target and next layer broadcaster of two dynamic shapes. + + *--ac_0-->*<--bc_0--* + | | | + a_1 c_1 b_1 + | | | + V V V + *--ac_1-->*<--bc_1--* + + Args: + ac_0: _LayerBroadcaster from a to c in the previous layer. + bc_0: _LayerBroadcaster from b to c in the previous layer. + a_1: a RowPartition for the next layer of a. + b_1: a RowPartition for the next layer of b. + + Returns: + (c_1, ac_1, bc_1) + c_1: a RowPartition for the next layer of the dynamic shape. + ac_1: _LayerBroadcaster from a to c in the next layer. + bc_1: _LayerBroadcaster from b to c in the next layer. + """ + if not isinstance(ac_0, _LayerBroadcaster): + raise TypeError("ac_0 should be a _LayerBroadcaster") + if not isinstance(bc_0, _LayerBroadcaster): + raise TypeError("bc_0 should be a _LayerBroadcaster") + if not isinstance(a_1, RowPartition): + raise TypeError("a_1 should be a RowPartition") + if not isinstance(b_1, RowPartition): + raise TypeError("b_1 should be a RowPartition") + + if a_1.is_uniform(): + if b_1.is_uniform(): + return _broadcast_dynamic_shape_next_layer_both_uniform( + ac_0, bc_0, a_1, b_1) + else: + return _broadcast_dynamic_shape_next_layer_half_ragged( + ac_0, bc_0, a_1, b_1) + else: + if b_1.is_uniform(): + [c_1, bc_1, ac_1] = _broadcast_dynamic_shape_next_layer_half_ragged( # pylint: disable=arguments-out-of-order + bc_0, ac_0, b_1, a_1) + return (c_1, ac_1, bc_1) + else: + # If neither shape is uniform, we cannot broadcast the dimension. 
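+      # With two ragged partitions there is no length-1 "stretch" rule to
+      # apply, so broadcasting succeeds only when a_1 and b_1 describe
+      # identical row lengths; the assert_equal on row_splits below enforces
+      # this at runtime.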
+      [ac_1, c_1a] = _broadcast_half(ac_0, a_1)
+      [bc_1, c_1b] = _broadcast_half(bc_0, b_1)
+      check_valid = [
+          check_ops.assert_equal(c_1a.row_splits(), c_1b.row_splits())
+      ]
+      return (
+          c_1a._with_dependencies(check_valid),  # pylint: disable=protected-access
+          ac_1.with_dependencies(check_valid),
+          bc_1.with_dependencies(check_valid))
+
+
+def _broadcast_dynamic_shape_from_rps(
+    a_zero: _LayerBroadcaster, b_zero: _LayerBroadcaster,
+    a_rps: Sequence[RowPartition], b_rps: Sequence[RowPartition]
+) -> Tuple[Sequence[RowPartition], Sequence[_LayerBroadcaster],
+           Sequence[_LayerBroadcaster]]:
+  """Create BroadcastLayers from two shapes to a target shape.
+
+
+      *--a_zero->*<-b_zero-*
+      |          |         |
+  a_rps[0]    c_rps[0]  b_rps[0]
+      |          |         |
+      V          V         V
+      *--ac[1]-->*<-bc[1]--*
+      |          |         |
+  a_rps[1]    c_rps[1]  b_rps[1]
+      |          |         |
+      V          V         V
+      *--ac[2]-->*<-bc[2]--*
+
+  Note: ac[0]=a_zero, and bc[0]=b_zero.
+
+  Args:
+    a_zero: broadcaster from rows of a_rps[0] to target shape.
+    b_zero: broadcaster from rows of b_rps[0] to target shape.
+    a_rps: RowPartitions of first shape.
+    b_rps: RowPartitions of second shape, equal in length to a_rps.
+
+  Returns:
+    (c_rps, ac, bc) where:
+    c_rps: RowPartitions of target shape.
+    ac: layers broadcasting from the first shape.
+    bc: layers broadcasting from the second shape.
+  """
+  assert len(a_rps) == len(b_rps)
+  if a_rps:
+    (c_1, ac_1,
+     bc_1) = _broadcast_dynamic_shape_next_layer(a_zero, b_zero, a_rps[0],
+                                                 b_rps[0])
+    (c_suffix, a_layers,
+     b_layers) = _broadcast_dynamic_shape_from_rps(ac_1, bc_1, a_rps[1:],
+                                                   b_rps[1:])
+
+    return ([c_1] + c_suffix, [ac_1] + a_layers, [bc_1] + b_layers)
+  else:
+    return ([], [], [])
+
+
+def _get_broadcast_num_row_partitions(a: DynamicRaggedShape,
+                                      b: DynamicRaggedShape):
+  """Returns broadcast_dynamic_shape(a, b).num_row_partitions."""
+  # Assumes rank and num_row_partitions are not None.
+  if (a.num_row_partitions == 0 and b.num_row_partitions == 0):
+    return 0
+  expanded_num_row_partitions_a = a.num_row_partitions + max(0, b.rank - a.rank)
+  expanded_num_row_partitions_b = b.num_row_partitions + max(0, a.rank - b.rank)
+
+  if a.num_row_partitions == 0:
+    return expanded_num_row_partitions_b
+
+  if b.num_row_partitions == 0:
+    return expanded_num_row_partitions_a
+
+  return max(expanded_num_row_partitions_a, expanded_num_row_partitions_b)
+
+
+# pylint: disable=protected-access
+def _broadcast_dynamic_shape_extended_complete(
+    a: DynamicRaggedShape, b: DynamicRaggedShape, b_rps: Sequence[RowPartition],
+    c_suffix: Sequence[RowPartition], ac: Sequence[_LayerBroadcaster],
+    bc_suffix: Sequence[_LayerBroadcaster]
+) -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]:
+  """Helper for broadcast_dynamic_shape_extended."""
+  c_prefix = b_rps[:-len(c_suffix)]
+  bc_prefix_length = b.rank - len(bc_suffix)
+  bc_prefix = [
+      _LayerBroadcaster.get_identity_broadcaster(b._num_slices_in_dimension(i))
+      for i in range(bc_prefix_length)
+  ]
+  c_num_row_partitions = _get_broadcast_num_row_partitions(a, b)
+
+  c_raw = DynamicRaggedShape.from_row_partitions(c_prefix + tuple(c_suffix))
+  c = c_raw._with_num_row_partitions(c_num_row_partitions)
+  return (c, _Broadcaster(a, c, ac), _Broadcaster(b, c, bc_prefix + bc_suffix))
+
+
+def _broadcast_dynamic_shape_extended_helper(
+    a: DynamicRaggedShape, b: DynamicRaggedShape
+) -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]:
+  """Helper for broadcast_dynamic_shape_extended.
+ + Here, we force: + a.rank <= b.rank + 2 <= b.rank + 1 <= a.rank + Args: + a: a DynamicRaggedShape + b: a DynamicRaggedShape + + Returns: + A triple of a shape and two broadcasters. + """ + assert a.rank <= b.rank + assert 2 <= b.rank + assert 1 <= a.rank + a_rps = a._as_row_partitions() # pylint: disable=protected-access + b_rps = b._as_row_partitions() # pylint: disable=protected-access + + if len(a_rps) < len(b_rps): + # Note: this includes the case where len(a_rps)==0. + # Here we begin at -1, one dimension before a_rps[0]. + # neg_one_a_rp | b_rps[-(len(a_rps)+1)] + # a_rps[0] | b_rps[-len(a_rps)] + # a_rps[1] | b_rps[1-len(a_rps)] + # ... | ... + # a_rps[-1] | b_rps[-1] + + a_nrows = a[0] + a_nrows_static = tensor_util.constant_value(a_nrows) + if a_nrows_static is not None: + a_nrows = a_nrows_static + + neg_one_a_rp = RowPartition.from_uniform_row_length( + uniform_row_length=a_nrows, nrows=1, nvals=a_nrows) + neg_one_b_rp = b_rps[-(len(a_rps) + 1)] + (neg_one_ac, neg_one_bc) = _broadcast_dynamic_shape_first_layer( + constant_op.constant(1, dtype=b_rps[0].dtype), neg_one_b_rp.nrows()) + + # The first part of the solution. + (c_zero, ac_zero, + bc_zero) = _broadcast_dynamic_shape_next_layer(neg_one_ac, neg_one_bc, + neg_one_a_rp, neg_one_b_rp) + b_rps_tail = b_rps[-len(a_rps):] if len(a_rps) >= 1 else [] + + (c_suffix, ac_layers, + bc_layers) = _broadcast_dynamic_shape_from_rps(ac_zero, bc_zero, a_rps, + b_rps_tail) + + return _broadcast_dynamic_shape_extended_complete( + a=a, + b=b, + b_rps=b_rps, + c_suffix=[c_zero] + c_suffix, + ac=[ac_zero] + ac_layers, + bc_suffix=[neg_one_bc, bc_zero] + bc_layers) + + else: + assert len(a_rps) == len(b_rps) + (ac_zero, + bc_zero) = _broadcast_dynamic_shape_first_layer(a_rps[0].nrows(), + b_rps[0].nrows()) + + (c_rps, a_layers, + b_layers) = _broadcast_dynamic_shape_from_rps(ac_zero, bc_zero, a_rps, + b_rps) + return _broadcast_dynamic_shape_extended_complete( + a=a, + b=b, + b_rps=b_rps, + c_suffix=c_rps, + ac=[ac_zero] + a_layers, + bc_suffix=[bc_zero] + b_layers) + + +def _fix_start_index(index, rank, num_row_partitions): + """Slice indexes are always silently truncated.""" + if index < 0: + if rank is None: + raise ValueError( + "Rank must be known to use __getitem__ on a negative index.") + index = rank + index + if index < 0: + index = 0 + if (num_row_partitions > 0 and index <= num_row_partitions + 1): + # The rank is always >= num_row_partitions + 1 if num_row_partitions > 0. + return index + if index == 0: + return index + if rank is None: + raise ValueError("Rank must be known to use __getitem__ on a large index.") + if index >= rank: + index = rank + return index + + +def _fix_stop_index(index, rank): + """Slice indexes are always silently truncated.""" + if index is None: + if rank is None: + raise ValueError("Rank must be known to use __getitem__ without a stop.") + index = rank + if index < 0: + if rank is None: + raise ValueError( + "Rank must be known to use __getitem__ on a negative index.") + index = rank + index + if index < 0: + index = 0 + if rank is not None: + index = min(rank, index) + return index + + +def _first_layer_gather_index(nrows_source, nrows_target): + """Return the first layer gather_index. + + Args: + nrows_source: the number of rows in the source. + nrows_target: the number of rows in the target. + + Returns: + A tensor, usable as a gather_index for a _LayerBroadcaster. 
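+
+  For example (a sketch of the two supported cases, derived from the
+  implementation below):
+
+    nrows_source = 1, nrows_target = 3  ->  [0, 0, 0]   (broadcast)
+    nrows_source = 3, nrows_target = 3  ->  [0, 1, 2]   (identity)
+
+  Any other combination fails the runtime "Cannot broadcast" assertion.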
+  """
+
+  def gi_broadcast_first():
+    return array_ops.zeros(nrows_target, dtype=nrows_target.dtype)
+
+  def gi_no_broadcast_first():
+    gather_index = math_ops.range(nrows_target, dtype=nrows_target.dtype)
+    return gather_index
+
+  do_broadcast = math_ops.equal(nrows_source,
+                                constant_op.constant(1, nrows_source.dtype))
+  nrows_equal = math_ops.equal(nrows_source, nrows_target)
+  can_broadcast = check_ops.assert_equal(
+      math_ops.logical_or(do_broadcast, nrows_equal),
+      True,
+      message="Cannot broadcast")
+
+  gather_index = cond.cond(
+      do_broadcast, true_fn=gi_broadcast_first, false_fn=gi_no_broadcast_first)
+
+  return control_flow_ops.with_dependencies([can_broadcast], gather_index)
+
+
+def _next_layer_gather_index(bc, original_rp, broadcast_rp):
+  r"""Create the next layer gather_index whether or not a broadcast happens.
+
+     *----------bc-------->*
+     |                     |
+  original_rp          broadcast_rp
+     |                     |
+    \|/                   \|/
+     *--next_broadcaster-->*
+
+  Args:
+    bc: the old broadcaster.
+    original_rp: the original row partition.
+    broadcast_rp: the target row partition.
+
+  Returns:
+    the gather_index for next_broadcaster.
+  Raises:
+    InvalidArgumentError if the shapes are incompatible.
+  """
+  old_value_rowids = array_ops.gather(bc.gather_index,
+                                      broadcast_rp.value_rowids())
+
+  def gi_no_broadcast():
+    # TODO(martinz): decide if row_splits or row_starts should be used here.
+    old_row_starts = array_ops.gather(original_rp.row_splits(),
+                                      old_value_rowids)
+    expected_row_lengths = array_ops.gather(
+        params=original_rp.row_lengths(), indices=bc.gather_index)
+    actual_row_lengths = broadcast_rp.row_lengths()
+    check_valid = check_ops.assert_equal(
+        expected_row_lengths, actual_row_lengths, message="Cannot broadcast")
+    gather_index = old_row_starts + broadcast_rp.offsets_in_rows()
+    return control_flow_ops.with_dependencies([check_valid], gather_index)
+
+  def gi_broadcast():
+    # Several optimizations can occur here.
+    # old_row_starts == old_value_rowids, because:
+    #   if you are broadcasting, then the source has uniform row length of 1,
+    #   implying original_rp.row_splits == tf.range(original_rp.nvals + 1)
+    # When broadcasting, there is no need to add offsets to the
+    # source, because the source has size 1.
+    # Also, this is always valid, because we enforce source and destination
+    # have uniform_row_length.
+    return old_value_rowids
+
+  if not original_rp.is_uniform():
+    return gi_no_broadcast()
+
+  do_broadcast = math_ops.equal(original_rp.uniform_row_length(),
+                                constant_op.constant(1, original_rp.dtype))
+  gather_index = cond.cond(
+      do_broadcast, true_fn=gi_broadcast, false_fn=gi_no_broadcast)
+
+  return gather_index
+
+
+def _flat_values_shape(rt):
+  if isinstance(rt, ragged_tensor.RaggedTensor):
+    return array_ops.shape(rt.flat_values)
+  return rt.flat_values.shape
+
+
+def _to_row_partitions_and_nvals_from_lengths(
+    lengths: Sequence[Union[int, Sequence[int]]],
+    dtype=None) -> Tuple[Sequence[RowPartition], int]:
+  """Allow ragged and uniform shapes to be specified.
+
+  For example, [2, [2,1], 2] represents a shape like:
+  [[[0, 0], [0, 0]], [[0, 0]]]
+
+  Args:
+    lengths: a list of integers and lists of integers.
+    dtype: dtype of the shape (tf.int32 or tf.int64)
+
+  Returns:
+    a sequence of RowPartitions, and the number of values of the last partition.
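+
+  For example (a sketch of the return value, traced from the implementation):
+
+    _to_row_partitions_and_nvals_from_lengths([2, [2, 1], 2])
+    # -> ([RowPartition.from_row_lengths([2, 1]),
+    #      RowPartition.from_uniform_row_length(2, nvals=6, nrows=3)], 6)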
+ """ + size_so_far = lengths[0] + result = [] + for current_lengths in lengths[1:]: + if isinstance(current_lengths, int): + nrows = size_so_far + nvals = current_lengths * nrows + size_so_far = nvals + result.append( + RowPartition.from_uniform_row_length( + current_lengths, nvals, nrows=nrows, dtype_hint=dtype)) + else: + if size_so_far != len(current_lengths): + raise ValueError("Shape not consistent.") + result.append( + RowPartition.from_row_lengths(current_lengths, dtype_hint=dtype)) + size_so_far = sum(current_lengths) + return (result, size_so_far) + + +def _element_to_string(x): + """element to a string within a list.""" + if x is Ellipsis: + return "..." + if isinstance(x, str): + return "'" + x + "'" + return str(x) + + +def _list_tail_with_ellipsis(arr): + """Print the tail of a list where the list might have an ellipsis.""" + if not arr: + return "]" + else: + return ", " + _element_to_string(arr[0]) + _list_tail_with_ellipsis(arr[1:]) + + +def _list_with_ellipsis_to_str(arr): + """Print a list that might have ellipsis.""" + if not arr: + return "[]" + return "[" + _element_to_string(arr[0]) + _list_tail_with_ellipsis(arr[1:]) + + +def _is_int_or_tuple_of_ints(x): + if isinstance(x, int): + return True + if not isinstance(x, tuple): + return False + for y in x: + if not isinstance(y, int): + return False + return True + + +def _alt_inner_shape_from_tensor_shape(shape, dtype, new_inner_rank): + """Helper for _alt_inner_shape, used directly in _with_num_row_partitions.""" + if new_inner_rank == 1: + return constant_op.constant([shape.num_elements()], dtype=dtype) + new_inner_rank_tail_length = new_inner_rank - 1 + inner_shape_tail = shape[-new_inner_rank_tail_length:].as_list() + first_dim = shape[:-new_inner_rank_tail_length].num_elements() + return constant_op.constant([first_dim] + inner_shape_tail, dtype=dtype) + + +def _safe_floor_div(dividend: tensor_shape.Dimension, + divisor: tensor_shape.Dimension) -> tensor_shape.Dimension: + if tensor_shape.dimension_value(divisor) == 0: + return None + return dividend // divisor + + +# TODO(b/218932570) +def _reduce_prod_patch(x): + if x.dtype == dtypes.int64: + return math_ops.cast( + math_ops.reduce_prod(math_ops.cast(x, dtypes.int32)), dtypes.int64) + return math_ops.reduce_prod(x) + + +# Type alias for shape encoded as a DynamicRaggedShape or a Tensor. +DenseOrRaggedShape = Union[DynamicRaggedShape, core.TensorLike] + + +def _merge_row_partitions( + row_partitions: Sequence[RowPartition]) -> RowPartition: + # TODO(martinz): handle uniform splits. + # TODO(martinz): consider using value_row_ids if present. + # Note: this probably won't be called with len(row_partitions)==1, so no + # need to optimize. 
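+  # Worked example (sketch): an outer partition with row_splits [0, 2, 3]
+  # over an inner partition with row_splits [0, 1, 3, 6] merges via
+  #   gather([0, 1, 3, 6], [0, 2, 3]) = [0, 3, 6],
+  # i.e. two merged rows covering three values each.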
+ row_splits = row_partitions[0].row_splits() + for rp in row_partitions[1:]: + row_splits = array_ops.gather(rp.row_splits(), row_splits) + return RowPartition.from_row_splits(row_splits) + + +def _merge_inner_shape( + inner_shape: tensor_lib.Tensor, + static_inner_shape: tensor_shape.TensorShape, + outer_axis: int, + inner_axis: int) -> Tuple[tensor_lib.Tensor, tensor_shape.TensorShape]: + """Merge the inner shape of a DynamicRaggedShape.""" + prefix = inner_shape[:outer_axis] + suffix = inner_shape[inner_axis + 1:] + + internal = inner_shape[outer_axis:inner_axis + 1] + internal_value = [_reduce_prod_patch(internal)] + new_internal = array_ops.concat([prefix, internal_value, suffix], axis=0) + prefix_static = static_inner_shape[:outer_axis] + suffix_static = static_inner_shape[inner_axis + 1:] + internal_static = static_inner_shape[outer_axis:inner_axis + 1] + internal_value_static = tensor_shape.TensorShape( + [internal_static.num_elements()]) + new_internal_static = prefix_static + internal_value_static + suffix_static + + return (new_internal, new_internal_static) + + +def _batch_rp_spec(rp_spec: RowPartitionSpec, + batch_size: Optional[int]) -> RowPartitionSpec: + """Batches a RowPartitionSpec. + + Given a RowPartitionSpec and a batch_size, create a RowPartitionSpec that + will be the spec for the concatenation of batch_size RowPartitions. + + A RowPartition can be considered a transformation from a list of a given + length to a list of lists. Assume rp_a is a map from list_a to nlist_a, + And rp_b is a map from list_b to nlist_b. concat(rp_a, rp_b) is a + transform of concat(list_a, list_b) to concat(nlist_a, nlist_b). + + If batch_size is None, then have the spec be able to handle an arbitrary + number of RowPartitions. + + Args: + rp_spec: a RowPartitionSpec for all the RowPartitions to be concatenated. + batch_size: the number of rp_specs to be concatenated. + + Returns: + a batched RowPartitionSpec. + """ + if batch_size is None: + return RowPartitionSpec( + uniform_row_length=rp_spec.uniform_row_length, dtype=rp_spec.dtype) + nrows = None if rp_spec.nrows is None else rp_spec.nrows * batch_size + nvals = None if rp_spec.nvals is None else rp_spec.nvals * batch_size + return RowPartitionSpec( + nrows=nrows, + nvals=nvals, + uniform_row_length=rp_spec.uniform_row_length, + dtype=rp_spec.dtype) + + +def _batch_rp_spec_head(old_head: RowPartitionSpec, + batch_size: Optional[int]) -> RowPartitionSpec: + """Creates a RowPartitionSpec representing the new dimension created.""" + nvals = None if (old_head.nrows is None or + batch_size is None) else batch_size * old_head.nrows + return RowPartitionSpec( + nrows=batch_size, + nvals=nvals, + uniform_row_length=old_head.nrows, + dtype=old_head.dtype) + + +def _batch_static_inner_shape( + old_shape: tensor_shape.TensorShape, + batch_size: Optional[int]) -> tensor_shape.TensorShape: + """Returns a copy of old_shape with axis=0 multiplied by batch_size. + + Only use if this is the inner_shape of a DynamicRaggedShape.Spec with one + or more row partitions. + + Args: + old_shape: the original inner_shape. + batch_size: the batch size. + + Returns: + a new shape. 
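+
+  For example, old_shape=[3, 8] with batch_size=2 yields [6, 8]: only
+  axis 0 is scaled; the remaining static dims are unchanged.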
+  """
+  head_dim = tensor_shape.dimension_at_index(old_shape, 0) * batch_size
+  return head_dim + old_shape[1:]
+
+
+def _batch_tensor_shape(old_shape: tensor_shape.TensorShape,
+                        batch_size: int) -> tensor_shape.TensorShape:
+  return tensor_shape.TensorShape([batch_size]) + old_shape
+
+
+def _unbatch_static_inner_shape(
+    old_shape: tensor_shape.TensorShape,
+    batch_size: Optional[int]) -> tensor_shape.TensorShape:
+  """Unbatch a static_inner_shape when num_row_partitions > 0."""
+  head_dim = tensor_shape.dimension_at_index(old_shape, 0) // batch_size
+  return head_dim + old_shape[1:]
+
+
+# Copied from ragged_array_ops.py
+def ones(shape: DynamicRaggedShape,
+         dtype=dtypes.float32,
+         name: Optional[str] = None) -> ragged_tensor.RaggedOrDense:
+  """Returns a tensor of ones with the given `DynamicRaggedShape`."""
+  flat_values = array_ops.ones(shape.inner_shape, dtype=dtype, name=name)
+  return ragged_tensor.RaggedTensor._from_nested_row_partitions(  # pylint: disable=protected-access
+      flat_values, shape.row_partitions)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_array_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_array_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..6064da237ccea5bd928796d70f73a3e681d5e2c0
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_array_ops.py
@@ -0,0 +1,1300 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== +"""Array operations for RaggedTensors.""" + +from typing import Optional +from typing import Union + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import data_flow_ops +from tensorflow.python.ops import gen_ragged_array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import sort_ops +from tensorflow.python.ops.ragged import dynamic_ragged_shape +from tensorflow.python.ops.ragged import ragged_functional_ops +from tensorflow.python.ops.ragged import ragged_math_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import ragged_util +from tensorflow.python.ops.ragged import segment_id_ops +from tensorflow.python.types import core as core_types +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + +# =============================================================================== +# Masking +# =============================================================================== + + +@tf_export('ragged.boolean_mask') +@dispatch.add_dispatch_support +def boolean_mask(data, mask, name=None): + """Applies a boolean mask to `data` without flattening the mask dimensions. + + Returns a potentially ragged tensor that is formed by retaining the elements + in `data` where the corresponding value in `mask` is `True`. + + * `output[a1...aA, i, b1...bB] = data[a1...aA, j, b1...bB]` + + Where `j` is the `i`th `True` entry of `mask[a1...aA]`. + + Note that `output` preserves the mask dimensions `a1...aA`; this differs + from `tf.boolean_mask`, which flattens those dimensions. + + Args: + data: A potentially ragged tensor. + mask: A potentially ragged boolean tensor. `mask`'s shape must be a prefix + of `data`'s shape. `rank(mask)` must be known statically. + name: A name prefix for the returned tensor (optional). + + Returns: + A potentially ragged tensor that is formed by retaining the elements in + `data` where the corresponding value in `mask` is `True`. + + * `rank(output) = rank(data)`. + * `output.ragged_rank = max(data.ragged_rank, rank(mask) - 1)`. + + Raises: + ValueError: if `rank(mask)` is not known statically; or if `mask.shape` is + not a prefix of `data.shape`. + + #### Examples: + + >>> # Aliases for True & False so data and mask line up. + >>> T, F = (True, False) + + >>> tf.ragged.boolean_mask( # Mask a 2D Tensor. + ... data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], + ... mask=[[T, F, T], [F, F, F], [T, F, F]]).to_list() + [[1, 3], [], [7]] + + >>> tf.ragged.boolean_mask( # Mask a 2D RaggedTensor. + ... tf.ragged.constant([[1, 2, 3], [4], [5, 6]]), + ... tf.ragged.constant([[F, F, T], [F], [T, T]])).to_list() + [[3], [], [5, 6]] + + >>> tf.ragged.boolean_mask( # Mask rows of a 2D RaggedTensor. + ... tf.ragged.constant([[1, 2, 3], [4], [5, 6]]), + ... tf.ragged.constant([True, False, True])).to_list() + [[1, 2, 3], [5, 6]] + """ + with ops.name_scope(name, 'RaggedMask', [data, mask]): + # Convert inputs to tensors. 
+ data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data') + mask = ragged_tensor.convert_to_tensor_or_ragged_tensor( + mask, dtypes.bool, name='mask') + row_splits_dtype, (data, mask) = ragged_tensor.match_row_splits_dtypes( + data, mask, return_dtype=True) + + # Get static rank of mask. + if mask.shape.ndims is None: + raise ValueError('mask.shape.ndims must be known statically.') + elif mask.shape.ndims == 0: + raise ValueError('mask cannot be scalar.') + + # If mask is ragged, then recurse with a non-ragged mask. + if ragged_tensor.is_ragged(mask): + if not ragged_tensor.is_ragged(data): + data = ragged_tensor.RaggedTensor.from_tensor( + data, + ragged_rank=mask.ragged_rank, + row_splits_dtype=mask.row_splits.dtype) + # Check that mask.nested_row_splits is a prefix of + # data.nested_row_splits. + splits_list = [ + mask.nested_row_splits, data.nested_row_splits[:mask.ragged_rank] + ] + with ops.control_dependencies( + ragged_util.assert_splits_match(splits_list)): + # Strip off ragged `splits` until `mask` is non-ragged. Keep the splits + # that we strip off in `splits`, so we can add them back on after + # we recursively mask the non-ragged data. + splits = [] + while ragged_tensor.is_ragged(mask): + if mask.shape.ndims > 2: + splits.append(mask.row_splits) + else: + # Count the number of True mask values in each row to find the + # lengths of the filtered rows; then convert to splits. + int_mask = ragged_functional_ops.map_flat_values( + math_ops.cast, mask, dtype=row_splits_dtype) + masked_row_lengths = ragged_math_ops.reduce_sum(int_mask, axis=1) + splits.append(ragged_util.lengths_to_splits(masked_row_lengths)) + mask = mask.values + data = data.values + + # Recursively apply the nested non-ragged mask to the nested data. + masked_values = boolean_mask(data, mask) + + # Add the ragged `splits` back to the result. + masked_values = ragged_tensor.RaggedTensor.from_nested_row_splits( + masked_values, splits, validate=False) + + return masked_values + + # If mask is non-ragged and has rank 1, and data is ragged, then build a + # ragged tensor with the indicated rows. + elif ragged_tensor.is_ragged(data) and mask.shape.ndims == 1: + # Get the masked splits: first get the length of each row, then filter + # out the rows that we are deleting, and convert that filtered set of + # masks back to a splits tensor. + lengths = data.row_lengths() + masked_lengths = array_ops.boolean_mask(lengths, mask) + masked_splits = ragged_util.lengths_to_splits(masked_lengths) + + # Get the masked values: first get row ids corresponding to each + # value, then use tf.gather to build a boolean mask that's false for + # values that come from rows that we are deleting, and use that mask to + # construct the masked values tensor. + segment_ids = segment_id_ops.row_splits_to_segment_ids(data.row_splits) + segment_mask = array_ops.gather(mask, segment_ids) + masked_values = boolean_mask(data.values, segment_mask) + + return ragged_tensor.RaggedTensor.from_row_splits( + masked_values, masked_splits, validate=False) + + # If mask is non-ragged and has rank>1, then convert it to be ragged, + # with a ragged rank matching data. + if ragged_tensor.is_ragged(data): + mask = ragged_tensor.RaggedTensor.from_tensor( + mask, + ragged_rank=min(data.ragged_rank, mask.shape.ndims - 1), + row_splits_dtype=data.row_splits.dtype) + return boolean_mask(data, mask) + + # Otherwise, data and mask are both `Tensor`s. + else: + # Apply `boolean_mask` to get the masked values. 
+ masked_values = array_ops.boolean_mask(data, mask) + + if mask.shape.ndims >= 2: + # Add the innermost ragged dimension. For each innermost cell, get the + # number of values it contains. Then flatten that to get a list of + # cell lengths, and convert it to splits. Finally, combine the splits + # and values to get the innermost ragged tensor. + masked_lengths = math_ops.count_nonzero( + mask, axis=-1, dtype=row_splits_dtype) + flattened_masked_lengths = array_ops.reshape(masked_lengths, [-1]) + masked_values = ragged_tensor.RaggedTensor.from_row_lengths( + masked_values, flattened_masked_lengths, validate=False) + + # Wrap remaining ragged dimensions. + if mask.shape.ndims > 2: + mask_shape = array_ops.shape(mask, out_type=row_splits_dtype) + split_size = math_ops.cumprod(mask_shape) + 1 + for dim in range(mask.shape.ndims - 3, -1, -1): + elt_size = mask_shape[dim + 1] + masked_splits = math_ops.range(split_size[dim]) * elt_size + masked_values = ragged_tensor.RaggedTensor.from_row_splits( + masked_values, masked_splits, validate=False) + + return masked_values + + +# =============================================================================== +# Tiling +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.tile) +def tile(input: ragged_tensor.Ragged, multiples, name=None): # pylint: disable=redefined-builtin + """Constructs a `RaggedTensor` by tiling a given `RaggedTensor`. + + The values of `input` are replicated `multiples[i]` times along the + `i`th dimension (for each dimension `i`). For every dimension `axis` in + `input`, the length of each output element in that dimension is the + length of corresponding input element multiplied by `multiples[axis]`. + + Args: + input: A `RaggedTensor`. + multiples: A 1-D integer `Tensor`. Length must be the same as the number of + dimensions in `input`. + name: A name for the operation (optional). + + Returns: + A `RaggedTensor` with the same type, rank, and ragged_rank as `input`. + + #### Example: + + >>> rt = tf.ragged.constant([[1, 2], [3]]) + >>> tf.tile(rt, [3, 2]).to_list() + [[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]] + """ + with ops.name_scope(name, 'RaggedTile', [input, multiples]): + input = ragged_tensor.convert_to_tensor_or_ragged_tensor( + input, name='input') + if not ragged_tensor.is_ragged(input): + return array_ops.tile(input, multiples, name) + multiples = ragged_util.convert_to_int_tensor( + multiples, name='multiples', dtype=input.row_splits.dtype) + multiples.shape.assert_has_rank(1) + + # If the constant value of `multiples` is available, then we can use it + # to skip tiling dimensions where `multiples=1`. + const_multiples = tensor_util.constant_value(multiples) + + return ragged_tensor.RaggedTensor.from_nested_row_splits( + _tile_ragged_values(input, multiples, const_multiples), + _tile_ragged_splits(input, multiples, const_multiples), + validate=False) + + +def _tile_ragged_values(rt_input, multiples, const_multiples=None): + """Builds flat_values tensor for a tiled `RaggedTensor`. + + Returns a tensor that repeats the values in + `rt_input.flat_values` in the + appropriate pattern to construct a `RaggedTensor` that tiles `rt_input` as + specified by `multiples`. + + Args: + rt_input: The `RaggedTensor` whose values should be repeated. + multiples: A 1-D integer `tensor`, indicating how many times each dimension + should be repeated. + const_multiples: Optional constant value for multiples. 
Used to skip tiling
+      dimensions where `multiples=1`.
+
+  Returns:
+    A `Tensor` with the same type and rank as `rt_input.flat_values`.
+
+  #### Example:
+
+  >>> rt = tf.ragged.constant([[1, 2], [3]])
+  >>> _tile_ragged_values(rt, tf.constant([3, 2])).numpy()
+  array([1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3], dtype=int32)
+  """
+  ragged_rank = rt_input.ragged_rank
+  nested_splits = rt_input.nested_row_splits
+
+  # Pointers to the values in `rt_input.flat_values`.
+  inner_value_ids = math_ops.range(nested_splits[-1][-1])
+
+  # For each ragged dimension (working from the innermost to outermost),
+  # expand `inner_value_ids` as necessary to tile that dimension.
+  prev_splits = None
+  for axis in range(ragged_rank, 0, -1):
+    # Ragged splits for this dimension.
+    splits = nested_splits[axis - 1]
+
+    # Adjust splits so they point into `inner_value_ids` (instead of just
+    # pointing into the next dimension's values).
+    if prev_splits is not None:  # Not the first pass through the loop.
+      splits = array_ops.gather(prev_splits * multiples[axis + 1], splits)
+
+    # Repeat each element in this ragged dimension `multiples[axis]` times.
+    if const_multiples is None or const_multiples[axis] != 1:
+      inner_value_ids = ragged_util.repeat_ranges(inner_value_ids, splits,
+                                                  multiples[axis])
+
+    prev_splits = splits
+
+  # Gather the tiled inner values.
+  ragged_tiled_values = array_ops.gather(rt_input.flat_values, inner_value_ids)
+
+  # Tile the flat_values for the uniform dimensions (i.e., for `axis=0` plus
+  # `axis=range(ragged_rank, rank)`).
+  inner_repeats = array_ops.concat([multiples[:1], multiples[ragged_rank + 1:]],
+                                   axis=0)
+  return array_ops.tile(ragged_tiled_values, inner_repeats)
+
+
+def _tile_ragged_splits(rt_input, multiples, const_multiples=None):
+  """Builds nested_split tensors for a tiled `RaggedTensor`.
+
+  Returns a list of split tensors that can be used to construct the
+  `RaggedTensor` that tiles `rt_input` as specified by `multiples`.
+
+  Args:
+    rt_input: The `RaggedTensor` that is being tiled.
+    multiples: A 1-D integer `tensor`, indicating how many times each dimension
+      should be repeated.
+    const_multiples: Optional constant value for multiples. Used to skip tiling
+      dimensions where `multiples=1`.
+
+  Returns:
+    A list of 1-D integer `Tensor`s (one for each ragged dimension in
+    `rt_input`).
+
+  #### Example:
+
+  >>> rt = tf.ragged.constant([[1, 2], [3]])
+  >>> _tile_ragged_splits(rt, [3, 2])
+  [<tf.Tensor: shape=(7,), dtype=int64, numpy=array([ 0,  4,  6, 10, 12, 16, 18])>]
+  """
+  ragged_rank = rt_input.ragged_rank
+  nested_splits = rt_input.nested_row_splits
+
+  # projected_splits[src_axis, dst_axis] contains the split points that divide
+  # the rows from src_axis in the list of dst_axis values. E.g.,
+  # projected_splits[i, i] = nested_splits[i], and
+  # projected_splits[i, i+1] = gather(nested_splits[i+1], nested_splits[i]).
+  projected_splits = [{i: nested_splits[i]} for i in range(ragged_rank)]
+  for src_axis in range(ragged_rank):
+    for dst_axis in range(src_axis + 1, ragged_rank - 1):
+      projected_splits[src_axis][dst_axis] = array_ops.gather(
+          nested_splits[dst_axis], projected_splits[src_axis][dst_axis - 1])
+
+  # For each ragged dimension: nested_splits[axis] -> result_splits[axis].
+  result_splits = []
+  for axis in range(ragged_rank):
+    # Get the length of each row for the input tensor for this dimension.
+    input_lengths = nested_splits[axis][1:] - nested_splits[axis][:-1]
+
+    # Multiply those lengths by the `multiples` of dimension axis+1, since
+    # each value will be repeated that number of times.
+    output_lengths = input_lengths * multiples[axis + 1]
+
+    # Repeat ranges of the row lengths as necessary for them to be tiled in
+    # each ragged dimension `d < axis`. (Start with dimension d=axis-1, and
+    # work our way up to dimension d=0.)
+    repeats = 1
+    for d in range(axis - 1, -1, -1):
+      if const_multiples is None or const_multiples[d + 1] != 1:
+        splits = projected_splits[d][axis - 1] * repeats
+        output_lengths = ragged_util.repeat_ranges(output_lengths, splits,
+                                                   multiples[d + 1])
+      repeats *= multiples[d + 1]
+
+    # Tile splits for the outermost (uniform) dimension.
+    output_lengths = array_ops.tile(output_lengths, multiples[:1])
+
+    # Convert to splits.
+    result_splits.append(ragged_util.lengths_to_splits(output_lengths))
+
+  return result_splits
+
+
+# ===============================================================================
+# Reshaping
+# ===============================================================================
+
+
+@dispatch.dispatch_for_api(array_ops.expand_dims_v2)
+def expand_dims(input: ragged_tensor.Ragged, axis, name=None):  # pylint: disable=redefined-builtin
+  """Inserts a dimension with shape 1 into a potentially ragged tensor's shape.
+
+  Given a potentially ragged tensor `input`, this operation inserts a
+  dimension with size 1 at the dimension `axis` of `input`'s shape.
+
+  The following table gives some examples showing how `ragged.expand_dims`
+  impacts the shapes of different input tensors. Ragged dimensions are
+  indicated by enclosing them in parentheses.
+
+  input.shape             | axis | result.shape
+  ----------------------- | ---- | -----------------------------
+  `[D1, D2]`              |  `0` | `[1, D1, D2]`
+  `[D1, D2]`              |  `1` | `[D1, 1, D2]`
+  `[D1, D2]`              |  `2` | `[D1, D2, 1]`
+  `[D1, (D2), (D3), D4]`  |  `0` | `[1, D1, (D2), (D3), D4]`
+  `[D1, (D2), (D3), D4]`  |  `1` | `[D1, 1, (D2), (D3), D4]`
+  `[D1, (D2), (D3), D4]`  |  `2` | `[D1, (D2), 1, (D3), D4]`
+  `[D1, (D2), (D3), D4]`  |  `3` | `[D1, (D2), (D3), 1, D4]`
+  `[D1, (D2), (D3), D4]`  |  `4` | `[D1, (D2), (D3), D4, 1]`
+
+  Args:
+    input: The potentially ragged tensor that should be expanded with a new
+      dimension.
+    axis: An integer constant indicating where the new dimension should be
+      inserted.
+    name: A name for the operation (optional).
+
+  Returns:
+    A tensor with the same values as `input`, with an added dimension of
+    size 1 at `axis`.
+ + #### Examples: + + >>> rt = tf.ragged.constant([[1, 2], [3]]) + >>> print(rt.shape) + (2, None) + + >>> expanded = tf.expand_dims(rt, axis=0) + >>> print(expanded.shape, expanded) + (1, 2, None) + + >>> expanded = tf.expand_dims(rt, axis=1) + >>> print(expanded.shape, expanded) + (2, 1, None) + + >>> expanded = tf.expand_dims(rt, axis=2) + >>> print(expanded.shape, expanded) + (2, None, 1) + """ + with ops.name_scope(name, 'RaggedExpandDims', [input]): + input = ragged_tensor.convert_to_tensor_or_ragged_tensor( + input, name='input') + + if not ragged_tensor.is_ragged(input): + return array_ops.expand_dims(input, axis) + + ndims = None if input.shape.ndims is None else input.shape.ndims + 1 + axis = array_ops.get_positive_axis(axis, ndims, ndims_name='rank(input)') + + if axis == 0: + return ragged_tensor.RaggedTensor.from_uniform_row_length( + input, uniform_row_length=input.nrows(), nrows=1, validate=False) + elif axis == 1: + return ragged_tensor.RaggedTensor.from_uniform_row_length( + input, uniform_row_length=1, nrows=input.nrows(), validate=False) + else: + if ragged_tensor.is_ragged(input.values): + return input.with_values(expand_dims(input.values, axis - 1)) + else: + return input.with_values(array_ops.expand_dims(input.values, axis - 1)) + + +@dispatch.dispatch_for_api(array_ops.expand_dims) +def _ragged_expand_dims_v1( + input: ragged_tensor.Ragged, # pylint: disable=redefined-builtin + axis=None, + name=None, + dim=None): + if dim is not None: + axis = dim + return expand_dims(input=input, axis=axis, name=name) + + +# =============================================================================== +# RaggedTensor Size +# =============================================================================== + + +@dispatch.dispatch_for_api(array_ops.size_v2) +def size(input: ragged_tensor.Ragged, out_type=dtypes.int32, name=None): # pylint: disable=redefined-builtin + """Returns the size of a potentially ragged tensor. + + The size of a ragged tensor is the size of its inner values. + + #### Example: + + >>> tf.size(tf.ragged.constant([[1, 2], [3]])).numpy() + 3 + + Args: + input: A potentially ragged `Tensor`. + out_type: The numeric output type for the operation. + name: A name for the operation (optional). + + Returns: + A Tensor of type `out_type`. + """ + if ragged_tensor.is_ragged(input): + return array_ops.size(input.flat_values, out_type=out_type, name=name) + else: + return array_ops.size(input, out_type=out_type, name=name) + + +@dispatch.dispatch_for_api(array_ops.size) +def _ragged_size_v1( + input: ragged_tensor.Ragged, # pylint: disable=redefined-builtin + name=None, + out_type=dtypes.int32): + return size(input=input, out_type=out_type, name=name) + + +# =============================================================================== +# ragged.rank +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.rank) +def rank(input: ragged_tensor.Ragged, name=None): # pylint: disable=redefined-builtin + """Returns the rank of a RaggedTensor. + + Returns a 0-D `int32` `Tensor` representing the rank of `input`. + + #### Example: + + >>> # shape of tensor 't' is [2, None, None] + >>> t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]]) + >>> tf.rank(t).numpy() + 3 + + Args: + input: A `RaggedTensor` + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int32`. 
+ """ + with ops.name_scope(name, 'RaggedRank', [input]) as name: + if not ragged_tensor.is_ragged(input): + return array_ops.rank(input, name) + + return input.ragged_rank + array_ops.rank(input.flat_values) + + +# =============================================================================== +# ragged.one_hot +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.one_hot) +def ragged_one_hot(indices: ragged_tensor.Ragged, + depth, + on_value=None, + off_value=None, + axis=None, + dtype=None, + name=None): + """Applies tf.one_hot along the values of a RaggedTensor.""" + # Get the adjusted axis value for the call to array_ops.one_hot. + # Note: the only negative `axis` value supported by array_ops.one_hot is -1. + if isinstance(axis, int) and axis >= 0: + if axis <= indices.ragged_rank: + raise ValueError('axis (%d) must be greater than indices.ragged_rank ' + '(%d).' % (axis, indices.ragged_rank)) + axis -= indices.ragged_rank + + with ops.name_scope(name, 'RaggedOneHot', + [indices, depth, on_value, off_value, axis]): + indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( + indices, name='indices') + return indices.with_flat_values( + array_ops.one_hot(indices.flat_values, depth, on_value, off_value, axis, + dtype, name)) + + +# =============================================================================== +# ragged.stack_dynamic_partitions +# =============================================================================== +@tf_export('ragged.stack_dynamic_partitions') +@dispatch.add_dispatch_support +def stack_dynamic_partitions(data, partitions, num_partitions, name=None): + """Stacks dynamic partitions of a Tensor or RaggedTensor. + + Returns a RaggedTensor `output` with `num_partitions` rows, where the row + `output[i]` is formed by stacking all slices `data[j1...jN]` such that + `partitions[j1...jN] = i`. Slices of `data` are stacked in row-major + order. + + If `num_partitions` is an `int` (not a `Tensor`), then this is equivalent to + `tf.ragged.stack(tf.dynamic_partition(data, partitions, num_partitions))`. + + #### Example: + + >>> data = ['a', 'b', 'c', 'd', 'e'] + >>> partitions = [ 3, 0, 2, 2, 3] + >>> num_partitions = 5 + >>> tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions) + + + Args: + data: A `Tensor` or `RaggedTensor` containing the values to stack. + partitions: An `int32` or `int64` `Tensor` or `RaggedTensor` specifying the + partition that each slice of `data` should be added to. `partitions.shape` + must be a prefix of `data.shape`. Values must be greater than or equal to + zero, and less than `num_partitions`. `partitions` is not required to be + sorted. + num_partitions: An `int32` or `int64` scalar specifying the number of + partitions to output. This determines the number of rows in `output`. + name: A name prefix for the returned tensor (optional). + + Returns: + A `RaggedTensor` containing the stacked partitions. The returned tensor + has the same dtype as `data`, and its shape is + `[num_partitions, (D)] + data.shape[partitions.rank:]`, where `(D)` is a + ragged dimension whose length is the number of data slices stacked for + each `partition`. + """ + with ops.name_scope(name, 'SegmentStack', [data, partitions, num_partitions]): + # Convert inputs to tensors. 
+ data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data') + row_splits_dtype = ( + data.row_splits.dtype + if isinstance(data, ragged_tensor.RaggedTensor) else None) + partitions = ragged_tensor.convert_to_tensor_or_ragged_tensor( + partitions, name='partitions', preferred_dtype=row_splits_dtype) + num_partitions = ops.convert_to_tensor( + num_partitions, name='num_partitions', preferred_dtype=partitions.dtype) + if row_splits_dtype is not None: + partitions = math_ops.cast(partitions, row_splits_dtype) + num_partitions = math_ops.cast(num_partitions, partitions.dtype) + + # Sanity-checks for shapes. + partitions_rank = partitions.shape.ndims + if partitions_rank is None: + raise ValueError('partitions must have known rank.') + num_partitions.shape.assert_has_rank(0) + partitions.shape.assert_is_compatible_with(data.shape[:partitions_rank]) + + if partitions_rank == 0: + # If partitions is a scalar, then just create a RaggedTensor containing + # that single the complete `data` value in the specified row. + return ragged_tensor.RaggedTensor.from_value_rowids( + values=array_ops_stack.stack([data]), + value_rowids=array_ops_stack.stack([partitions]), + nrows=num_partitions, + validate=False) + + elif partitions_rank == 1: + # If partitions is a vector (the typical case): we can just use data and + # partitions as the `values` and `value_rowids` for `from_value_rowids`, + # as long as we sort them first. + permutation = sort_ops.argsort(partitions, stable=True) + value_rowids = array_ops.gather(partitions, permutation) + values = array_ops.gather(data, permutation) + checks = [ + check_ops.assert_less( + value_rowids[-1:], num_partitions, + message='partitions must be less than num_partitions'), + check_ops.assert_non_negative( + partitions, message='partitions must be non-negative.') + ] + with ops.control_dependencies(checks): + return ragged_tensor.RaggedTensor.from_value_rowids( + values, value_rowids, nrows=num_partitions, validate=False) + + else: + # Handle higher-dimensional partitions via recursion. + if not isinstance(data, ragged_tensor.RaggedTensor): + data = ragged_tensor.RaggedTensor.from_tensor( + data, row_splits_dtype=partitions.dtype, ragged_rank=1) + if not isinstance(partitions, ragged_tensor.RaggedTensor): + partitions = ragged_tensor.RaggedTensor.from_tensor( + partitions, + row_splits_dtype=partitions.dtype, + ragged_rank=max(data.ragged_rank, partitions_rank - 1)) + check = check_ops.assert_equal( + data.row_splits, + partitions.row_splits, + message='data and partitions have incompatible ragged shapes') + with ops.control_dependencies([check]): + return stack_dynamic_partitions(data.values, partitions.values, + num_partitions) + + +# =============================================================================== +# Reverse +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.reverse) +def reverse(tensor: ragged_tensor.Ragged, axis, name=None): + """Reverses a RaggedTensor along the specified axes. + + #### Example: + + >>> data = tf.ragged.constant([ + ... [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10], [11, 12]]]) + >>> tf.reverse(data, axis=[0, 2]) + + + Args: + tensor: A 'RaggedTensor' to reverse. + axis: A list or tuple of 'int' or a constant 1D 'tf.Tensor'. The indices of + the axes to reverse. + name: A name prefix for the returned tensor (optional). + + Returns: + A 'RaggedTensor'. 
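+
+  In the example above, `axis=0` reverses the order of the three rows and
+  `axis=2` reverses each innermost pair, so the result is
+  `[[[8, 7], [10, 9], [12, 11]], [[6, 5]], [[2, 1], [4, 3]]]`.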
+ """ + type_error_msg = ('`axis` must be a list of int or a constant tensor' + 'when reversing axes in a ragged tensor') + + with ops.name_scope(name, 'Reverse', [tensor, axis]): + if isinstance(axis, tensor_lib.Tensor): + axis = tensor_util.constant_value(axis) + if axis is None: + raise TypeError(type_error_msg) + elif not (isinstance(axis, (list, tuple)) and + all(isinstance(dim, int) for dim in axis)): + raise TypeError(type_error_msg) + + tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor( + tensor, name='tensor') + + # Allow usage of negative values to specify innermost axes. + axis = [ + array_ops.get_positive_axis(dim, tensor.shape.rank, 'axis[%d]' % i, + 'rank(tensor)') + for i, dim in enumerate(axis) + ] + + # We only need to slice up to the max axis. If the axis list + # is empty, it should be 0. + slices = [slice(None)] * (max(axis) + 1 if axis else 0) + + for dim in axis: + slices[dim] = slice(None, None, -1) + + return tensor[tuple(slices)] + + +# =============================================================================== +# Cross +# =============================================================================== + + +@tf_export('ragged.cross') +@dispatch.add_dispatch_support +def cross(inputs, name=None): + """Generates feature cross from a list of tensors. + + The input tensors must have `rank=2`, and must all have the same number of + rows. The result is a `RaggedTensor` with the same number of rows as the + inputs, where `result[row]` contains a list of all combinations of values + formed by taking a single value from each input's corresponding row + (`inputs[i][row]`). Values are combined by joining their strings with '_X_'. + E.g.: + + >>> tf.ragged.cross([tf.ragged.constant([['a'], ['b', 'c']]), + ... tf.ragged.constant([['d'], ['e']]), + ... tf.ragged.constant([['f'], ['g']])]) + + + Args: + inputs: A list of `RaggedTensor` or `Tensor` or `SparseTensor`. + name: Optional name for the op. + + Returns: + A 2D `RaggedTensor` of type `string`. + """ + return _cross_internal(inputs=inputs, hashed_output=False, name=name) + + +@tf_export('ragged.cross_hashed') +@dispatch.add_dispatch_support +def cross_hashed(inputs, num_buckets=0, hash_key=None, name=None): + """Generates hashed feature cross from a list of tensors. + + The input tensors must have `rank=2`, and must all have the same number of + rows. The result is a `RaggedTensor` with the same number of rows as the + inputs, where `result[row]` contains a list of all combinations of values + formed by taking a single value from each input's corresponding row + (`inputs[i][row]`). Values are combined by hashing together their + fingerprints. E.g.: + + >>> tf.ragged.cross_hashed([tf.ragged.constant([['a'], ['b', 'c']]), + ... tf.ragged.constant([['d'], ['e']]), + ... tf.ragged.constant([['f'], ['g']])], + ... num_buckets=100) + + + Args: + inputs: A list of `RaggedTensor` or `Tensor` or `SparseTensor`. + num_buckets: A non-negative `int` that used to bucket the hashed values. If + `num_buckets != 0`, then `output = hashed_value % num_buckets`. + hash_key: Integer hash_key that will be used by the `FingerprintCat64` + function. If not given, a default key is used. + name: Optional name for the op. + + Returns: + A 2D `RaggedTensor` of type `int64`. 
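+
+  The number of values in `result[row]` is the product of the inputs' row
+  lengths for that row: in the example above, row 0 yields 1*1*1 = 1
+  combination and row 1 yields 2*1*1 = 2 combinations.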
+  """
+  return _cross_internal(
+      inputs=inputs,
+      hashed_output=True,
+      num_buckets=num_buckets,
+      hash_key=hash_key,
+      name=name)
+
+
+_DEFAULT_CROSS_HASH_KEY = 0xDECAFCAFFE
+
+
+def _cross_internal(inputs,
+                    hashed_output=False,
+                    num_buckets=0,
+                    hash_key=None,
+                    name=None):
+  """Generates feature cross from a list of ragged and dense tensors."""
+  if not isinstance(inputs, (tuple, list)):
+    raise TypeError('Inputs must be a list')
+
+  if hash_key is None:
+    hash_key = _DEFAULT_CROSS_HASH_KEY
+
+  ragged_inputs = []
+  sparse_inputs = []
+  dense_inputs = []
+  input_order = []
+  with ops.name_scope(name, 'RaggedCross', inputs):
+    for i, t in enumerate(inputs):
+      if sparse_tensor.is_sparse(t):
+        t = sparse_tensor.SparseTensor.from_value(t)
+      else:
+        t = ragged_tensor.convert_to_tensor_or_ragged_tensor(t)
+      if t.dtype.is_integer:
+        t = math_ops.cast(t, dtypes.int64)
+      elif t.dtype != dtypes.string:
+        raise ValueError('Unexpected dtype for inputs[%d]: %s' % (i, t.dtype))
+      if isinstance(t, ragged_tensor.RaggedTensor):
+        if t.ragged_rank != 1:
+          raise ValueError('tf.ragged.cross only supports inputs with rank=2')
+        ragged_inputs.append(t)
+        input_order.append('R')
+      elif isinstance(t, sparse_tensor.SparseTensor):
+        sparse_inputs.append(t)
+        input_order.append('S')
+      else:
+        dense_inputs.append(t)
+        input_order.append('D')
+
+    out_values_type = dtypes.int64 if hashed_output else dtypes.string
+    if ragged_inputs and all(
+        t.row_splits.dtype == dtypes.int32 for t in ragged_inputs):
+      out_row_splits_type = dtypes.int32
+    else:
+      out_row_splits_type = dtypes.int64
+
+    # Convert hash_key from uint64 -> int64, since we need to pass it via
+    # an int64 attr.
+    if hash_key > 2**63:
+      hash_key -= 2**64
+
+    values_out, splits_out = gen_ragged_array_ops.ragged_cross(
+        ragged_values=[rt.values for rt in ragged_inputs],
+        ragged_row_splits=[rt.row_splits for rt in ragged_inputs],
+        sparse_indices=[st.indices for st in sparse_inputs],
+        sparse_values=[st.values for st in sparse_inputs],
+        sparse_shape=[st.dense_shape for st in sparse_inputs],
+        dense_inputs=dense_inputs,
+        input_order=''.join(input_order),
+        hashed_output=hashed_output,
+        num_buckets=num_buckets,
+        hash_key=hash_key,
+        out_values_type=out_values_type.as_datatype_enum,
+        out_row_splits_type=out_row_splits_type.as_datatype_enum,
+        name=name)
+
+    return ragged_tensor.RaggedTensor.from_row_splits(
+        values_out, splits_out, validate=False)
+
+
+def fill_empty_rows(ragged_input, default_value, name=None):
+  """Fills empty rows in a rank-2 `RaggedTensor` with a default value.
+
+  This op adds entries with the specified `default_value` for any row in the
+  input that does not already have a value.
+
+  The op also returns an indicator vector such that
+
+    empty_row_indicator[i] = True iff row i was an empty row.
+
+  Args:
+    ragged_input: A `RaggedTensor` with rank 2.
+    default_value: The value to fill for empty rows, with the same type as
+      `ragged_input`.
+    name: A name prefix for the returned tensors (optional).
+
+  Returns:
+    ragged_ordered_output: A `RaggedTensor` with all empty rows filled in with
+      `default_value`.
+    empty_row_indicator: A bool vector indicating whether each input row was
+      empty.
+
+  Raises:
+    TypeError: If `ragged_input` is not a `RaggedTensor`.
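+
+  #### Example (an illustrative sketch):
+
+    rt = tf.ragged.constant([[1, 2], [], [3]])
+    filled, empty_row_indicator = fill_empty_rows(rt, default_value=0)
+    # filled              -> [[1, 2], [0], [3]]
+    # empty_row_indicator -> [False, True, False]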
+ """ + with ops.name_scope(name, 'RaggedFillEmptyRows', [ragged_input]): + if not isinstance(ragged_input, ragged_tensor.RaggedTensor): + raise TypeError( + 'ragged_input must be RaggedTensor, got' + f' {type(ragged_input)}' + ) + default_value = ops.convert_to_tensor( + default_value, dtype=ragged_input.dtype + ) + ( + output_value_rowids, + output_values, + empty_row_indicator, + unused_reverse_index_map, + ) = gen_ragged_array_ops.ragged_fill_empty_rows( + value_rowids=ragged_input.value_rowids(), + values=ragged_input.values, + nrows=ragged_input.nrows(), + default_value=default_value, + ) + return ( + ragged_tensor.RaggedTensor.from_value_rowids( + values=output_values, + value_rowids=output_value_rowids, + validate=False, + ), + empty_row_indicator, + ) + + +@ops.RegisterGradient('RaggedFillEmptyRows') +def _ragged_fill_empty_rows_grad( + op, + unused_grad_output_indices, + output_grad_values, + unused_grad_empty_row_indicator, + unused_grad_reverse_index_map, +): + """Gradients for RaggedFillEmptyRows.""" + reverse_index_map = op.outputs[3] + + d_values, d_default_value = gen_ragged_array_ops.ragged_fill_empty_rows_grad( + reverse_index_map=reverse_index_map, grad_values=output_grad_values + ) + + # d_value_rowids, d_values, d_nrows, d_default_value. + return [None, d_values, None, d_default_value] + + +# =============================================================================== +# dynamic_partition +# =============================================================================== +@dispatch.dispatch_for_api(data_flow_ops.dynamic_partition) +def dynamic_partition(data: ragged_tensor.RaggedOrDense, + partitions: ragged_tensor.RaggedOrDense, + num_partitions, + name=None): + """RaggedTensor dispatch override for tf.dynamic_partition.""" + if not isinstance(num_partitions, int) or num_partitions < 0: + raise TypeError('num_partitions must be a non-negative integer') + result = stack_dynamic_partitions(data, partitions, num_partitions, name) + return [result[i] for i in range(num_partitions)] + + +# =============================================================================== +# split +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.split) +def split(value: ragged_tensor.Ragged, + num_or_size_splits, + axis=0, + num=None, + name=None): + """Splits a RaggedTensor `value` into a list of sub RaggedTensors. + + If `num_or_size_splits` is an `int`, then it splits `value` along the + dimension `axis` into `num_or_size_splits` smaller RaggedTensors. This + requires that `value.shape[axis]` is divisible by `num_or_size_splits`. + + If `num_or_size_splits` is a 1-D Tensor (or list), then `value` is split into + `len(num_or_size_splits)` elements. The shape of the `i`-th element has the + same size as the `value` except along dimension `axis` where the size is + `num_or_size_splits[i]`. + + Splits along a ragged dimension is not allowed. + + For example: + + >>> rt = tf.RaggedTensor.from_row_lengths( + ... 
np.arange(6 * 3).reshape(6, 3), row_lengths=[1, 2, 2, 1])
+  >>> rt.shape
+  TensorShape([4, None, 3])
+  >>>
+  >>> rt1, rt2 = tf.split(rt, 2)  # uniform splits
+  >>> rt1.shape
+  TensorShape([2, None, 3])
+  >>> rt2.shape
+  TensorShape([2, None, 3])
+  >>>
+  >>> rt3, rt4, rt5 = tf.split(rt, [1, 2, 1])  # ragged splits
+  >>> rt3.shape
+  TensorShape([1, None, 3])
+  >>> rt4.shape
+  TensorShape([2, None, 3])
+  >>> rt5.shape
+  TensorShape([1, None, 3])
+  >>>
+  >>> rt6, rt7 = tf.split(rt, [1, 2], axis=2)  # splits along axis 2
+  >>> rt6.shape
+  TensorShape([4, None, 1])
+  >>> rt7.shape
+  TensorShape([4, None, 2])
+
+  Args:
+    value: The `RaggedTensor` to split.
+    num_or_size_splits: Either an `int` indicating the number of splits
+      along `axis` or a 1-D integer `Tensor` or Python list containing the
+      sizes of each output tensor along `axis`. If a Python int, then it must
+      evenly divide `value.shape[axis]`; otherwise the sum of sizes along the
+      split axis must match that of the `value`.
+    axis: An `int` or scalar `int32` `Tensor`. The dimension along which
+      to split. Must be in the range `[-rank(value), rank(value))`. Defaults
+      to 0.
+    num: An `int` used to specify the number of outputs when
+      `num_or_size_splits` is a 1-D list or `Tensor` and its length is
+      statically unknown, e.g., when specifying `tf.TensorSpec(None)` with
+      the `input_signature` argument of `tf.function` (optional).
+    name: A name for the operation (optional).
+
+  Returns:
+    If `num_or_size_splits` is an `int`, returns a list of
+    `num_or_size_splits` `RaggedTensor` objects; if `num_or_size_splits` is a
+    1-D Tensor, returns `num_or_size_splits.shape[0]` `RaggedTensor` objects
+    resulting from splitting `value`.
+
+  Raises:
+    ValueError: If the dimension `axis` of `value` is a ragged dimension.
+    ValueError: If `num` is unspecified and cannot be inferred.
+    ValueError: If `num` is specified but doesn't match the length of
+      `num_or_size_splits`.
+    ValueError: If `num_or_size_splits` is an `int` and less than 1.
+    TypeError: If `num_or_size_splits` is not an `int` or 1-D
+      list or 1-D `Tensor`.
+    InvalidArgumentError: If the `axis` dimension of `value` cannot be split
+      exactly by `num_or_size_splits`.
+    InvalidArgumentError: If `num_or_size_splits` contains negative integers.
+    InvalidArgumentError: If `num_or_size_splits`'s static shape is unknown
+      and its dynamic shape is inconsistent with `num`.
+    InvalidArgumentError: If `num_or_size_splits`'s static rank is unknown and
+      `axis` is a negative integer.
+  """
+  with ops.name_scope(name, 'RaggedSplit'):
+    value = ragged_tensor.convert_to_tensor_or_ragged_tensor(
+        value, name='value')
+    if isinstance(num_or_size_splits, int) and num_or_size_splits == 1:
+      return [value]
+
+    # static assert
+    check_ops.assert_integer_v2(
+        num_or_size_splits,
+        message=('`num_or_size_splits` must be an `int` or 1-D list or '
+                 '`Tensor` of integers.'))
+    value_shape = dynamic_ragged_shape.DynamicRaggedShape.from_tensor(value)
+    axis = array_ops.get_positive_axis(axis, value_shape.rank)
+    try:
+      dim_size = value_shape[axis]
+    except ValueError:
+      raise ValueError('Cannot split a ragged dimension. Got `value` with '
+                       f'shape {value_shape} and `axis` {axis}.')
+    if isinstance(num_or_size_splits, int):
+      # Uniform split
+      num_splits = num_or_size_splits
+      if num_splits < 1:
+        raise ValueError('`num_or_size_splits` must be >=1 if it is an '
+                         f'`int`. Received {num_or_size_splits}.')
+      split_length = math_ops.floordiv(dim_size, num_splits)
+      split_lengths = array_ops.repeat(split_length, num_splits)
+    else:
+      # Ragged split
+      num_splits = None
+      split_lengths = ops.convert_to_tensor(num_or_size_splits)
+      if split_lengths.shape.ndims is not None:
+        if split_lengths.shape.ndims != 1:
+          raise TypeError('`num_or_size_splits` must be an `int` or 1-D list '
+                          f'or `Tensor`. Received {num_or_size_splits}.')
+        num_splits = tensor_shape.dimension_value(split_lengths.shape[0])
+
+      if num_splits is None:
+        if num is None:
+          raise ValueError('`num` must be specified as an `int` when the '
+                           'size of `num_or_size_split` is statically '
+                           f'unknown. Received `num`: {num} and '
+                           f'`num_or_size_split`: {num_or_size_splits}.')
+        num_splits = num
+      else:
+        if num is not None and num != num_splits:
+          raise ValueError('`num` does not match the size of '
+                           f'`num_or_size_split`. Received `num`: {num} and '
+                           f'size of `num_or_size_split`: {num_splits}.')
+
+    splits = array_ops.concat([[0], math_ops.cumsum(split_lengths)], axis=0)
+    checks = []
+    checks.append(
+        check_ops.assert_non_negative_v2(
+            num_or_size_splits,
+            message='`num_or_size_splits` must be non-negative.'))
+    checks.append(
+        check_ops.assert_equal_v2(
+            num_splits,
+            array_ops.shape(split_lengths)[0],
+            message='`num` is inconsistent with `num_or_size_split.shape[0]`.'))
+    checks.append(
+        check_ops.assert_equal_v2(
+            math_ops.cast(dim_size, splits.dtype),
+            splits[-1],
+            message=('Cannot exactly split the `axis` dimension of `value` '
+                     'with the given `num_or_size_split`.')))
+    splits = control_flow_ops.with_dependencies(checks, splits)
+    split_rts = []
+    slices = [slice(None)] * (axis + 1)
+    for i in range(num_splits):
+      slices[-1] = slice(splits[i], splits[i + 1])
+      split_rts.append(value[tuple(slices)])
+    return split_rts
+
+
+# ===============================================================================
+# RaggedTensor shape operations
+# ===============================================================================
+
+
+@dispatch.dispatch_for_api(array_ops.reshape)
+def ragged_reshape(
+    tensor: ragged_tensor.RaggedOrDense,
+    shape: dynamic_ragged_shape.DenseOrRaggedShape
+) -> Union[ragged_tensor.RaggedTensor, tensor_lib.Tensor]:
+  """Reshapes a tensor or ragged tensor."""
+  tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(
+      tensor, name='tensor')
+  if isinstance(tensor, ragged_tensor.RaggedTensor):
+    tensor = tensor.flat_values
+
+  if isinstance(shape, dynamic_ragged_shape.DynamicRaggedShape):
+    flat_values = array_ops.reshape(tensor, shape.inner_shape)
+    return ragged_tensor.RaggedTensor._from_nested_row_partitions(  # pylint: disable=protected-access
+        flat_values,
+        shape.row_partitions,
+        validate=False)
+  else:
+    shape = ops.convert_to_tensor(shape, name='shape')
+    return array_ops.reshape(tensor, shape)
+
+
+@dispatch.dispatch_for_api(array_ops.broadcast_to)
+def broadcast_to(
+    input: ragged_tensor.RaggedOrDense,  # pylint: disable=redefined-builtin
+    shape: dynamic_ragged_shape.DynamicRaggedShape
+) -> Union[ragged_tensor.RaggedTensor, tensor_lib.Tensor]:
+  """Broadcasts a potentially ragged tensor to a ragged shape.
+
+  Tiles `input` as necessary to match the given shape.
+
+  Behavior is undefined if `input` is not broadcast-compatible with `shape`.
+
+  Args:
+    input: The potentially ragged tensor to broadcast.
+    shape: A `DynamicRaggedShape`
+
+  Returns:
+    A potentially ragged tensor whose values are taken from
+    `input`, and whose shape matches `shape`.
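+
+  #### Example:
+
+  A minimal sketch, assuming eager execution; `tf.shape` on a `RaggedTensor`
+  yields the `DynamicRaggedShape` expected here.
+
+  >>> rt = tf.ragged.constant([[1, 2, 3], [4]])
+  >>> tf.broadcast_to(tf.constant([[10], [20]]), tf.shape(rt))
+  <tf.RaggedTensor [[10, 10, 10], [20]]>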
+ """ + return dynamic_ragged_shape.broadcast_to(input, shape) + + +# Note: default value for out_type needs to be int32, to match the +# default for tf.shape's out_type parameter. +@dispatch.dispatch_for_api(array_ops.shape) +def ragged_shape( + input: ragged_tensor.Ragged, # pylint: disable=redefined-builtin + name: Optional[str] = None, + out_type=dtypes.int32) -> dynamic_ragged_shape.DynamicRaggedShape: + """Returns the shape of a RaggedTensor. + + Args: + input: A `RaggedTensor` + name: A name for the operation (optional). + out_type: dtype used to encode the shape. + + Returns: + A `tf.experimental.DynamicRaggedShape` + """ + with ops.name_scope(name, 'RaggedShape', [input]): + return dynamic_ragged_shape.DynamicRaggedShape.from_tensor(input, out_type) + + +@dispatch.dispatch_for_api(array_ops.broadcast_dynamic_shape) +def broadcast_dynamic_shape( + shape_x: dynamic_ragged_shape.DenseOrRaggedShape, + shape_y: dynamic_ragged_shape.DenseOrRaggedShape +) -> dynamic_ragged_shape.DynamicRaggedShape: + """Returns the shape formed by broadcasting two shapes to be compatible. + + 1. If shape_x and shape_y both have row_partitions, then fail if their dtypes + don't match. + 2. If neither has row_partitions and they have different dtypes, + go with int64. + 3. If one has row_partitions, go with that dtype. + + Args: + shape_x: A `DynamicRaggedShape` + shape_y: A `DynamicRaggedShape` + + Returns: + A `DynamicRaggedShape`. + Raises: + ValueError: If `shape_x` and `shape_y` are not broadcast-compatible. + """ + if not isinstance(shape_x, dynamic_ragged_shape.DynamicRaggedShape): + shape_x = dynamic_ragged_shape.DynamicRaggedShape([], shape_x) + if not isinstance(shape_y, dynamic_ragged_shape.DynamicRaggedShape): + shape_y = dynamic_ragged_shape.DynamicRaggedShape([], shape_y) + return dynamic_ragged_shape.broadcast_dynamic_shape(shape_x, shape_y) + + +@dispatch.dispatch_for_api(array_ops.ones) +def ones( + shape: dynamic_ragged_shape.DynamicRaggedShape, + dtype=dtypes.float32, + name=None, + layout=None, +) -> ragged_tensor.RaggedOrDense: + """Returns ones shaped like x.""" + if layout is not None and not layout.is_fully_replicated(): + raise ValueError( + f'RaggedTensor only allows replicated layout. got {layout}' + ) + flat_values = array_ops.ones( + shape.inner_shape, dtype=dtype, name=name, layout=layout + ) + return shape._add_row_partitions(flat_values) # pylint: disable=protected-access + + +@dispatch.dispatch_for_api(array_ops.zeros) +def zeros( + shape: dynamic_ragged_shape.DynamicRaggedShape, + dtype=dtypes.float32, + name=None, + layout=None, +) -> ragged_tensor.RaggedOrDense: + """Returns ones shaped like x.""" + if layout is not None and not layout.is_fully_replicated(): + raise ValueError( + f'RaggedTensor only allows replicated layout. got {layout}' + ) + flat_values = array_ops.zeros( + shape.inner_shape, dtype=dtype, name=name, layout=layout + ) + return shape._add_row_partitions(flat_values) # pylint: disable=protected-access + + +@dispatch.dispatch_for_api(array_ops.fill) +def fill( + dims: dynamic_ragged_shape.DynamicRaggedShape, + value: core_types.TensorLike, + name: Optional[str] = None, + layout=None, +) -> ragged_tensor.RaggedOrDense: + """Creates a tensor with shape `dims` and fills it with `value`.""" + if layout is not None and not layout.is_fully_replicated(): + raise ValueError( + f'RaggedTensor only allows replicated layout. 
got {layout}' + ) + flat_values = array_ops.fill( + dims.inner_shape, value, name=name, layout=layout + ) + return dims._add_row_partitions(flat_values) # pylint: disable=protected-access + + +# =============================================================================== +# bitcast +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.bitcast) +def bitcast( + input: ragged_tensor.RaggedOrDense, # pylint: disable=redefined-builtin + type, # pylint: disable=redefined-builtin + name=None) -> ragged_tensor.RaggedOrDense: + """RaggedTensor dispatch override for tf.bitcast.""" + type = dtypes.as_dtype(type) + with ops.name_scope(name, 'Bitcast', [input]): + input = ragged_tensor.convert_to_tensor_or_ragged_tensor( + input, name='input') + if (input.dtype.size < type.size and input.flat_values.shape.rank < 2): + raise ValueError('`input.flat_values` is required to have rank >= 2 when ' + 'input.dtype.size < type.size. Actual rank: ' + f'{input.flat_values.shape.rank}') + return input.with_flat_values(array_ops.bitcast(input.flat_values, type)) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_autograph.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_autograph.py new file mode 100644 index 0000000000000000000000000000000000000000..9e366d274069129adfe9b6b52a2eba522f4be237 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_autograph.py @@ -0,0 +1,73 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Autograph-specific overrides for ragged_tensor.""" +from tensorflow.python.autograph.operators import control_flow +from tensorflow.python.ops import cond as tf_cond +from tensorflow.python.ops.ragged import ragged_tensor + + +def _tf_ragged_for_stmt( + iter_, extra_test, body, get_state, set_state, symbol_names, opts +): + """Overload of for_stmt that iterates over TF ragged tensors.""" + init_vars = get_state() + control_flow.verify_loop_init_vars(init_vars, symbol_names) + + # TODO(mdan): Move this into len()? Requires eager support. + if iter_.shape and iter_.shape[0] is not None: + n = iter_.shape[0] + else: + n = iter_.row_lengths()[0] + + iterate_index = 0 + + def aug_get_state(): + return (iterate_index,) + get_state() + + def aug_set_state(aug_loop_vars): + nonlocal iterate_index + # TODO(b/171479293): Drop the lint override. + iterate_index, *loop_vars = aug_loop_vars # pylint:disable=unused-variable + # The iteration index is not "output" by the for loop. If the iteration + # index is used outside the loop, it will appear + # in the loop vars separately. 
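+    # Illustration (hypothetical loop, not from the original source): for
+    # `for row in rt: total += f(row)`, the augmented state is
+    # (iterate_index, total); only `total` is handed back to user code here,
+    # while iterate_index stays internal to the while-loop rewrite.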
+ set_state(loop_vars) + + def aug_body(): + nonlocal iterate_index + body(iter_[iterate_index]) + iterate_index += 1 + + def aug_test(): + main_test = iterate_index < n + if extra_test is not None: + return tf_cond.cond(main_test, extra_test, lambda: False) + return main_test + + control_flow._add_max_iterations_hint(opts, n) # pylint: disable=protected-access + + control_flow._tf_while_stmt( # pylint: disable=protected-access + aug_test, + aug_body, + aug_get_state, + aug_set_state, + ('',) + symbol_names, + opts, + ) + + +control_flow.for_loop_registry.register( + ragged_tensor.RaggedTensor, _tf_ragged_for_stmt +) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_batch_gather_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_batch_gather_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..c9482bc3cc02240fb0b86cb4865044c4181ab7f5 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_batch_gather_ops.py @@ -0,0 +1,60 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Batch gather operations for RaggedTensors.""" + +from tensorflow.python.ops import array_ops +from tensorflow.python.ops.ragged import ragged_gather_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch + + +#=============================================================================== +# ragged.batch_gather +#=============================================================================== +@dispatch.dispatch_for_api(array_ops.batch_gather) +def batch_gather(params: ragged_tensor.RaggedOrDense, + indices: ragged_tensor.RaggedOrDense, + name=None): + """Gathers slices from `params` according to `indices` with batch dims. + + This operation is similar to `gather`, but it assumes that the leading `N` + dimensions of `indices` and `params` are batch dimensions, and performs a + gather within each batch. In particular, when using this operation with `N` + batch dimensions `B1...BN`: + + * `indices` has shape `[B1...BN, I]` + * `params` has shape `[B1...BN, P1...PM]`. + * `result` has shape `[B1...BN, I, P2...PM]`. + * `result[b1...bN, i, p2...pM] = + params[b1...bN, indices[b1...bN, i], p2...pM]` + + Args: + params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`, + `M>0`). + indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`). + name: A name for the operation (optional). + + Returns: + A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`. + `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`. 
+
+  #### Example:
+
+  >>> params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
+  >>> indices = tf.ragged.constant([[1, 2, 0], [], [], [0, 0]])
+  >>> tf.compat.v1.batch_gather(params, indices)
+  <tf.RaggedTensor [[b'b', b'c', b'a'], [], [], [b'e', b'e']]>
+  """
+  return ragged_gather_ops.gather(params, indices, batch_dims=-1, name=name)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c8cfdc583f6801550ba501f4316a052855a0173
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py
@@ -0,0 +1,179 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Array operations for RaggedTensors."""
+
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops.ragged import ragged_array_ops
+from tensorflow.python.ops.ragged import ragged_dispatch  # pylint: disable=unused-import
+from tensorflow.python.ops.ragged import ragged_operators  # pylint: disable=unused-import
+from tensorflow.python.ops.ragged import ragged_tensor
+from tensorflow.python.ops.ragged import ragged_tensor_shape
+from tensorflow.python.ops.ragged import ragged_where_op
+
+
+#===============================================================================
+# ragged.batch_gather_with_default
+#===============================================================================
+def batch_gather_with_default(params,
+                              indices,
+                              default_value='',
+                              name=None):
+  """Same as `batch_gather` but inserts `default_value` for invalid indices.
+
+  This operation is similar to `batch_gather` except that it substitutes
+  `default_value` for any index that is out of bounds.
+  See `batch_gather` for more details.
+
+
+  Args:
+    params: A potentially ragged tensor with shape `[B1...BN, P1...PM]`
+      (`N>=0`, `M>0`).
+    indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`).
+    default_value: A value to be inserted in places where `indices` are out of
+      bounds. Must be the same dtype as params and either a scalar or rank 1.
+    name: A name for the operation (optional).
+
+  Returns:
+    A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`.
+    `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`.
+
+  #### Example:
+
+  >>> params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
+  >>> indices = tf.ragged.constant([[1, 2, -1], [], [], [0, 10]])
+  >>> batch_gather_with_default(params, indices, 'FOO')
+  <tf.RaggedTensor [[b'b', b'c', b'FOO'], [], [], [b'e', b'FOO']]>
+
+  """
+  with ops.name_scope(name, 'RaggedBatchGatherWithDefault'):
+    params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
+        params, name='params',
+    )
+    indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
+        indices, name='indices',
+    )
+    default_value = ragged_tensor.convert_to_tensor_or_ragged_tensor(
+        default_value, name='default_value',
+    )
+    row_splits_dtype, (params, indices, default_value) = (
+        ragged_tensor.match_row_splits_dtypes(params, indices, default_value,
+                                              return_dtype=True))
+    # TODO(hterry): lift this restriction and support default_values
+    # of rank > 1
+    if default_value.shape.ndims not in (0, 1):
+      raise ValueError('"default_value" must be a scalar or vector')
+    upper_bounds = None
+    if indices.shape.ndims is None:
+      raise ValueError('Indices must have a known rank.')
+    if params.shape.ndims is None:
+      raise ValueError('Params must have a known rank.')
+
+    num_batch_dimensions = indices.shape.ndims - 1
+    pad = None
+    # The logic for this works as follows:
+    # - create a padded params, where:
+    #     padded_params[b1...bn, 0] = default_value
+    #     padded_params[b1...bn, i] = params[b1...bn, i-1] (i>0)
+    # - create an `upper_bounds` Tensor that contains the number of elements
+    #   in each innermost rank. Broadcast `upper_bounds` to be the same shape
+    #   as `indices`.
+    # - check to see which index in `indices` are out of bounds and substitute
+    #   it with the index containing `default_value` (the first).
+    # - call batch_gather with the indices adjusted.
+    with ops.control_dependencies([
+        check_ops.assert_greater_equal(array_ops.rank(params),
+                                       array_ops.rank(indices))]):
+      if ragged_tensor.is_ragged(params):
+        row_lengths = ragged_array_ops.expand_dims(
+            params.row_lengths(axis=num_batch_dimensions),
+            axis=-1)
+        upper_bounds = math_ops.cast(row_lengths, indices.dtype)
+
+        pad_shape = _get_pad_shape(params, indices, row_splits_dtype)
+
+        pad = ragged_tensor_shape.broadcast_to(
+            default_value, pad_shape)
+      else:
+        params_shape = array_ops.shape(params)
+        pad_shape = array_ops.concat([
+            params_shape[:num_batch_dimensions],
+            [1],
+            params_shape[num_batch_dimensions + 1:params.shape.ndims]
+        ], 0)
+        upper_bounds = params_shape[num_batch_dimensions]
+        pad = array_ops.broadcast_to(default_value, pad_shape)
+
+      # Add `default_value` as the first value in the innermost (ragged) rank.
+      pad = math_ops.cast(pad, params.dtype)
+      padded_params = array_ops.concat(
+          [pad, params], axis=num_batch_dimensions)
+
+      # Adjust the indices by substituting out-of-bound indices to the
+      # default-value index (which is the first element)
+      shifted_indices = indices + 1
+      is_out_of_bounds = (indices < 0) | (indices > upper_bounds)
+      adjusted_indices = ragged_where_op.where(
+          is_out_of_bounds,
+          x=array_ops.zeros_like(indices), y=shifted_indices,
+      )
+      return array_ops.batch_gather(
+          params=padded_params, indices=adjusted_indices, name=name)
+
+
+def _get_pad_shape(params, indices, row_splits_dtype):
+  """Gets the RaggedTensorDynamicShape for the pad tensor."""
+  num_batch_dimensions = indices.shape.ndims - 1
+  params_shape = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(
+      params, dim_size_dtype=row_splits_dtype)
+
+  # We want to create a pad tensor that can be concatenated with the params.
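+  # Illustration (assumed shapes, not from the original source): if params is
+  # a ragged [B, (ragged), M] and indices is [B, I], the pad computed below
+  # gets shape [B, 1, M], so concatenating pad and params along the gather
+  # axis prepends exactly one default_value slot per batch entry.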
+  if params.shape.ndims == indices.shape.ndims:
+    # When params and indices are the same rank, the shape of the pad tensor
+    # is almost identical to params, except the last dimension which has
+    # size = 1.
+    if params_shape.num_inner_dimensions == 0:
+      pad_dims = params_shape.partitioned_dim_sizes[:-1] + (
+          array_ops.ones_like(params_shape.partitioned_dim_sizes[-1]),)
+      return ragged_tensor_shape.RaggedTensorDynamicShape(
+          pad_dims, [])
+    else:
+      return ragged_tensor_shape.RaggedTensorDynamicShape(
+          params_shape.partitioned_dim_sizes,
+          array_ops.concat([params_shape.inner_dim_sizes[:-1], [1]], axis=0))
+  else:
+    # When the rank of indices < params, the pad has the same dimension as
+    # params up to the 'num_batch_dimensions' rank. Every dimension after that
+    # has size 1.
+    pad_dims = None
+    if num_batch_dimensions == 0:
+      pad_dims = (constant_op.constant(1, dtype=row_splits_dtype),) + (
+          constant_op.constant([1], dtype=row_splits_dtype),) * (
+              params_shape.num_partitioned_dimensions -
+              num_batch_dimensions - 1)
+    else:
+      batch_dimensions = params_shape.partitioned_dim_sizes[
+          :num_batch_dimensions]
+      gather_dimension = params_shape.partitioned_dim_sizes[
+          num_batch_dimensions]
+      pad_dims = batch_dimensions + (
+          array_ops.ones_like(gather_dimension),) * (
+              params_shape.num_partitioned_dimensions - num_batch_dimensions)
+
+    return ragged_tensor_shape.RaggedTensorDynamicShape(
+        pad_dims, params_shape.inner_dim_sizes)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_bincount_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_bincount_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cae73ba66db8c3cabd2a52b4533a98173e1c9eb
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_bincount_ops.py
@@ -0,0 +1,405 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""bincount ops for RaggedTensors."""
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import bincount_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import gen_count_ops
+from tensorflow.python.ops import gen_math_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import sparse_ops
+from tensorflow.python.ops.ragged import ragged_tensor
+from tensorflow.python.util import dispatch
+
+
+@dispatch.dispatch_for_api(bincount_ops.bincount)
+def bincount(arr: ragged_tensor.RaggedTensor,
+             weights=None,
+             minlength=None,
+             maxlength=None,
+             dtype=dtypes.int32,
+             name=None,
+             axis=None,
+             binary_output=False):
+  """Counts the number of occurrences of each value in an integer array.
+
+  If `minlength` and `maxlength` are not given, returns a vector with length
+  `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.
+
+  >>> data = tf.ragged.constant([[1, 1], [2, 3, 2, 4, 4, 5]])
+  >>> tf.math.bincount(data)
+  <tf.Tensor: shape=(6,), dtype=int32, numpy=array([0, 2, 2, 1, 2, 1], dtype=int32)>
+
+  The maximum element in `data` is 5, so the output vector has length
+  5 + 1 = 6.
+
+  Each bin value in the output indicates the number of occurrences of that
+  particular index. Here, index 1 in the output has the value 2, which
+  indicates that the value 1 occurs two times in `data`.
+
+  **Bin-counting with weights**
+
+  >>> data = tf.ragged.constant([[1, 1], [2, 3, 2, 4, 4, 5]])
+  >>> weights = tf.ragged.constant([[1, 5], [0, 1, 0, 5, 4, 5]])
+  >>> tf.math.bincount(data, weights=weights)
+  <tf.Tensor: shape=(6,), dtype=int32, numpy=array([0, 6, 0, 1, 9, 5], dtype=int32)>
+
+  When `weights` is specified, bins are incremented by the corresponding
+  weight instead of 1. Here, index 1 in the output has the value 6. This is
+  the sum of the `weights` corresponding to the value 1 in `data` (i.e. the
+  first two values of `data` are 1, so the first two weights, 1 and 5, are
+  summed).
+
+  There is an equivalence between bin-counting with weights and
+  `unsorted_segment_sum`, where `data` is the weights and `segment_ids` are
+  the values.
+
+  >>> data = tf.ragged.constant([[1, 1], [2, 3, 2, 4, 4, 5]])
+  >>> weights = tf.ragged.constant([[1, 5], [0, 1, 0, 5, 4, 5]])
+  >>> tf.math.unsorted_segment_sum(weights, data, num_segments=6).numpy()
+  array([0, 6, 0, 1, 9, 5], dtype=int32)
+
+  On GPU, `bincount` with weights is only supported when XLA is enabled
+  (typically in a function decorated with `@tf.function(jit_compile=True)`).
+  `unsorted_segment_sum` can be used as a workaround for the non-XLA case on
+  GPU.
+
+  **Bin-counting matrix rows independently**
+
+  This example uses `axis=-1` with a 2-dimensional input and returns a
+  `Tensor` with bincounting where axis 0 is **not** flattened, i.e. an
+  independent bincount for each matrix row.
+
+  >>> data = tf.ragged.constant([[1, 2], [3, 0, 0, 0, 1, 2]], dtype=np.int32)
+  >>> tf.math.bincount(data, axis=-1)
+  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
+  array([[0, 1, 1, 0],
+         [3, 1, 1, 1]], dtype=int32)>
+
+  **Bin-counting with binary_output**
+
+  This example gives binary output instead of counting the occurrences.
+
+  >>> data = tf.ragged.constant([[1, 2], [3, 0, 0, 0, 1, 2]], dtype=np.int32)
+  >>> tf.math.bincount(data, axis=-1, binary_output=True)
+  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
+  array([[0, 1, 1, 0],
+         [1, 1, 1, 1]], dtype=int32)>
+
+  Args:
+    arr: A RaggedTensor whose values should be counted.
+      These tensors must have a rank of 2 if `axis=-1`.
+    weights: If non-None, must be a RaggedTensor with the same row splits as
+      `arr`. For each value in `arr`, the bin will be incremented by the
+      corresponding weight instead of 1. If non-None, `binary_output` must be
+      False.
+    minlength: If given, ensures the output has length at least `minlength`,
+      padding with zeros at the end if necessary.
+    maxlength: If given, skips values in `arr` that are greater than or equal
+      to `maxlength`, ensuring that the output has length at most `maxlength`.
+    dtype: If `weights` is None, determines the type of the output bins.
+    name: A name scope for the associated operations (optional).
+    axis: The axis to slice over. Axes at and below `axis` will be flattened
+      before bin counting. Currently, only `0` and `-1` are supported. If
+      None, all axes will be flattened (identical to passing `0`).
+    binary_output: If True, this op will output 1 instead of the number of
+      times a token appears (equivalent to one_hot + reduce_any instead of
+      one_hot + reduce_add). Defaults to False.
+ + Returns: + A vector with the same dtype as `weights` or the given `dtype` containing + the bincount values. + + Raises: + `InvalidArgumentError` if negative values are provided as an input. + + """ + name = "bincount" if name is None else name + with ops.name_scope(name): + arr = ragged_tensor.convert_to_tensor_or_ragged_tensor(arr, name="arr") + if weights is not None: + if not isinstance(weights, sparse_tensor.SparseTensor): + weights = ragged_tensor.convert_to_tensor_or_ragged_tensor( + weights, name="weights") + + if weights is not None and binary_output: + raise ValueError("Arguments `binary_output` and `weights` are mutually " + "exclusive. Please specify only one.") + + if not arr.dtype.is_integer: + arr = math_ops.cast(arr, dtypes.int32) + if axis is None: + axis = 0 + + if axis not in [0, -1]: + raise ValueError(f"Unsupported value for argument axis={axis}. Only 0 and" + " -1 are currently supported.") + + array_is_nonempty = array_ops.size(arr) > 0 + output_size = math_ops.cast(array_is_nonempty, arr.dtype) * ( + math_ops.reduce_max(arr) + 1) + if minlength is not None: + minlength = ops.convert_to_tensor( + minlength, name="minlength", dtype=arr.dtype) + output_size = gen_math_ops.maximum(minlength, output_size) + if maxlength is not None: + maxlength = ops.convert_to_tensor( + maxlength, name="maxlength", dtype=arr.dtype) + output_size = gen_math_ops.minimum(maxlength, output_size) + + if axis == 0: + # Flatten RaggedTensors with multiple ragged dimensions which use a + # nested RaggedTensor for the values tensor. + while isinstance(arr, ragged_tensor.RaggedTensor): + if weights is not None: + weights = validate_ragged_weights(arr, weights, dtype) + arr = arr.values + + if isinstance(arr, ragged_tensor.RaggedTensor): + weights = validate_ragged_weights(arr, weights, dtype) + return gen_math_ops.ragged_bincount( + splits=arr.row_splits, + values=arr.values, + size=output_size, + weights=weights, + binary_output=binary_output) + else: + weights = bincount_ops.validate_dense_weights(arr, weights, dtype) + return gen_math_ops.dense_bincount( + input=arr, + size=output_size, + weights=weights, + binary_output=binary_output) + + +@dispatch.dispatch_for_api(sparse_ops.sparse_bincount) +def sparse_bincount(values: ragged_tensor.RaggedTensor, + weights=None, + axis=0, + minlength=None, + maxlength=None, + binary_output=False, + name=None): + """Count the number of times an integer value appears in a tensor. + + This op takes an N-dimensional `Tensor`, `RaggedTensor`, or `SparseTensor`, + and returns an N-dimensional int64 SparseTensor where element + `[i0...i[axis], j]` contains the number of times the value `j` appears in + slice `[i0...i[axis], :]` of the input tensor. Currently, only N=0 and + N=-1 are supported. + + Args: + values: A RaggedTensor whose values should be + counted. These tensors must have a rank of 2 if `axis=-1`. + weights: If non-None, must be a RaggedTensor with the same row splits as + `values`. For each value in `value`, the bin will be incremented by the + corresponding weight instead of 1. + axis: The axis to slice over. Axes at and below `axis` will be flattened + before bin counting. Currently, only `0`, and `-1` are supported. If None, + all axes will be flattened (identical to passing `0`). + minlength: If given, ensures the output has length at least `minlength`, + padding with zeros at the end if necessary. 
+ maxlength: If given, skips values in `values` that are equal or greater than + `maxlength`, ensuring that the output has length at most `maxlength`. + binary_output: If True, this op will output 1 instead of the number of times + a token appears (equivalent to one_hot + reduce_any instead of one_hot + + reduce_add). Defaults to False. + name: A name for this op. + + Returns: + A SparseTensor with `output.shape = values.shape[:axis] + [N]`, where `N` is + * `maxlength` (if set); + * `minlength` (if set, and `minlength > reduce_max(values)`); + * `0` (if `values` is empty); + * `reduce_max(values) + 1` otherwise. + + Raises: + `InvalidArgumentError` if negative values are provided as an input. + + Examples: + + **Bin-counting every item in individual batches** + + This example takes an input (which could be a Tensor, RaggedTensor, or + SparseTensor) and returns a SparseTensor where the value of (i,j) is the + number of times value j appears in batch i. + + >>> data = tf.ragged.constant( + ... [[10, 20], [30, 20, 11, 101, 11, 10001]], dtype=np.int64) + >>> tf.sparse.bincount(data, axis=-1) + SparseTensor(indices=tf.Tensor( + [[ 0 10] + [ 0 20] + [ 1 11] + [ 1 20] + [ 1 30] + [ 1 101] + [ 1 10001]], shape=(7, 2), dtype=int64), + values=tf.Tensor([1 1 2 1 1 1 1], shape=(7,), dtype=int64), + dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) + + **Bin-counting with defined output shape** + + This example takes an input (which could be a Tensor, RaggedTensor, or + SparseTensor) and returns a SparseTensor where the value of (i,j) is the + number of times value j appears in batch i. However, all values of j + above 'maxlength' are ignored. The dense_shape of the output sparse tensor + is set to 'minlength'. Note that, while the input is identical to the + example above, the value '10001' in batch item 2 is dropped, and the + dense shape is [2, 500] instead of [2,10002] or [2, 102]. + + >>> minlength = maxlength = 500 + >>> data = tf.ragged.constant( + ... [[10, 20], [30, 20, 11, 101, 11, 10001]], dtype=np.int64) + >>> tf.sparse.bincount( + ... data, axis=-1, minlength=minlength, maxlength=maxlength) + SparseTensor(indices=tf.Tensor( + [[ 0 10] + [ 0 20] + [ 1 11] + [ 1 20] + [ 1 30] + [ 1 101]], shape=(6, 2), dtype=int64), + values=tf.Tensor([1 1 2 1 1 1], shape=(6,), dtype=int64), + dense_shape=tf.Tensor([ 2 500], shape=(2,), dtype=int64)) + + **Binary bin-counting** + + This example takes an input (which could be a Tensor, RaggedTensor, or + SparseTensor) and returns a SparseTensor where (i,j) is 1 if the value j + appears in batch i at least once and is 0 otherwise. Note that, even though + some values (like 20 in batch 1 and 11 in batch 2) appear more than once, + the 'values' tensor is all 1s. + + >>> data = tf.ragged.constant( + ... [[10, 20], [30, 20, 11, 101, 11, 10001]], dtype=np.int64) + >>> tf.sparse.bincount(data, binary_output=True, axis=-1) + SparseTensor(indices=tf.Tensor( + [[ 0 10] + [ 0 20] + [ 1 11] + [ 1 20] + [ 1 30] + [ 1 101] + [ 1 10001]], shape=(7, 2), dtype=int64), + values=tf.Tensor([1 1 1 1 1 1 1], shape=(7,), dtype=int64), + dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) + + **Weighted bin-counting** + + This example takes two inputs - a values tensor and a weights tensor. These + tensors must be identically shaped, and have the same row splits or indices + in the case of RaggedTensors or SparseTensors. 
When performing a weighted + count, the op will output a SparseTensor where the value of (i, j) is the + sum of the values in the weight tensor's batch i in the locations where + the values tensor has the value j. In this case, the output dtype is the + same as the dtype of the weights tensor. + + >>> data = tf.ragged.constant( + ... [[10, 20], [30, 20, 11, 101, 11, 10001]], dtype=np.int64) + >>> weights = tf.ragged.constant( + ... [[2, 0.25], [15, 0.5, 2, 17, 3, 0.9]]) + >>> tf.sparse.bincount(data, weights=weights, axis=-1) + SparseTensor(indices=tf.Tensor( + [[ 0 10] + [ 0 20] + [ 1 11] + [ 1 20] + [ 1 30] + [ 1 101] + [ 1 10001]], shape=(7, 2), dtype=int64), + values=tf.Tensor([ 2. 0.25 5. 0.5 15. 17. 0.9 ], shape=(7,), dtype=float32), + dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) + + """ + with ops.name_scope(name, "count", [values, weights]): + values = ragged_tensor.convert_to_tensor_or_ragged_tensor( + values, name="values") + if weights is not None: + if not isinstance(weights, sparse_tensor.SparseTensor): + weights = ragged_tensor.convert_to_tensor_or_ragged_tensor( + weights, name="weights") + + if weights is not None and binary_output: + raise ValueError("Arguments `binary_output` and `weights` are mutually " + "exclusive. Please specify only one.") + + if axis is None: + axis = 0 + + if axis not in [0, -1]: + raise ValueError(f"Unsupported value for argument axis={axis}. Only 0 and" + " -1 are currently supported.") + + minlength_value = minlength if minlength is not None else -1 + maxlength_value = maxlength if maxlength is not None else -1 + + if axis == 0: + if weights is not None: + weights = validate_ragged_weights(values, weights) + values = values.values + + if isinstance(values, ragged_tensor.RaggedTensor): + weights = validate_ragged_weights(values, weights) + c_ind, c_val, c_shape = gen_count_ops.ragged_count_sparse_output( + values.row_splits, + values.values, + weights, + minlength=minlength_value, + maxlength=maxlength_value, + binary_output=binary_output) + else: + weights = bincount_ops.validate_dense_weights(values, weights) + c_ind, c_val, c_shape = gen_count_ops.dense_count_sparse_output( + values, + weights=weights, + minlength=minlength_value, + maxlength=maxlength_value, + binary_output=binary_output) + + return sparse_tensor.SparseTensor(c_ind, c_val, c_shape) + + +def validate_ragged_weights(values, weights, dtype=None): + """Validates the passed weight tensor or creates an empty one.""" + if weights is None: + if dtype: + return array_ops.constant([], dtype=dtype) + return array_ops.constant([], dtype=values.values.dtype) + + if not isinstance(weights, ragged_tensor.RaggedTensor): + raise ValueError( + "`weights` must be a RaggedTensor if `values` is a RaggedTensor. 
" + f"Received argument weights={weights} of type: " + f"{type(weights).__name__}.") + + checks = [] + if weights.row_splits is not values.row_splits: + checks.append( + check_ops.assert_equal( + weights.row_splits, + values.row_splits, + message="'weights' and 'values' must have the same row splits.")) + if checks: + with ops.control_dependencies(checks): + weights = array_ops.identity(weights.values) + else: + weights = weights.values + + return weights diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_check_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_check_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..6f8b96abc618fcd21ed789750180201e19c1c329 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_check_ops.py @@ -0,0 +1,27 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Asserts and Boolean Checks for RaggedTensors.""" + +from tensorflow.python.ops import check_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch + + +@dispatch.dispatch_for_api(check_ops.assert_type) +def assert_type(tensor: ragged_tensor.Ragged, tf_type, message=None, name=None): + return check_ops.assert_type(tensor.flat_values, tf_type, + message=message, name=name) + + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_concat_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_concat_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..04e9b29d8c3a87b4b7f07563c8fcfae60b448d1e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_concat_ops.py @@ -0,0 +1,330 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Concat and stack operations for RaggedTensors."""
+
+import typing
+
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import array_ops_stack
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops.ragged import ragged_gather_ops
+from tensorflow.python.ops.ragged import ragged_tensor
+from tensorflow.python.ops.ragged import ragged_util
+from tensorflow.python.util import dispatch
+from tensorflow.python.util.tf_export import tf_export
+
+
+@dispatch.dispatch_for_api(array_ops.concat)
+def concat(values: typing.List[ragged_tensor.RaggedOrDense], axis, name=None):
+  """Concatenates potentially ragged tensors along one dimension.
+
+  Given a list of tensors with the same rank `K` (`K >= axis`), returns a
+  rank-`K` `RaggedTensor` `result` such that `result[i0...iaxis]` is the
+  concatenation of `[rt[i0...iaxis] for rt in values]`.
+
+  Args:
+    values: A list of potentially ragged tensors. May not be empty. All
+      `values` must have the same rank and the same dtype; but unlike
+      `tf.concat`, they can have arbitrary shapes.
+    axis: A python integer, indicating the dimension along which to
+      concatenate. (Note: Unlike `tf.concat`, the `axis` parameter must be
+      statically known.) Negative values are supported only if the rank of at
+      least one `values` value is statically known.
+    name: A name prefix for the returned tensor (optional).
+
+  Returns:
+    A `RaggedTensor` with rank `K`.
+    `result.ragged_rank = max(axis, max(rt.ragged_rank for rt in values))`.
+
+  Raises:
+    ValueError: If `values` is empty, if `axis` is out of bounds or if
+      the input tensors have different ranks.
+
+  #### Example:
+
+  >>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]])
+  >>> t2 = tf.ragged.constant([[6], [7, 8, 9]])
+  >>> tf.concat([t1, t2], axis=0)
+  <tf.RaggedTensor [[1, 2], [3, 4, 5], [6], [7, 8, 9]]>
+  >>> tf.concat([t1, t2], axis=1)
+  <tf.RaggedTensor [[1, 2, 6], [3, 4, 5, 7, 8, 9]]>
+  """
+  if not isinstance(values, (list, tuple)):
+    values = [values]
+  with ops.name_scope(name, 'RaggedConcat', values):
+    return _ragged_stack_concat_helper(values, axis, stack_values=False)
+
+
+@tf_export('ragged.stack')
+@dispatch.add_dispatch_support
+@dispatch.dispatch_for_api(array_ops_stack.stack)
+def stack(values: typing.List[ragged_tensor.RaggedOrDense],
+          axis=0,
+          name=None):
+  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` `RaggedTensor`.
+
+  Given a list of tensors or ragged tensors with the same rank `R`
+  (`R >= axis`), returns a rank-`R+1` `RaggedTensor` `result` such that
+  `result[i0...iaxis]` is `[value[i0...iaxis] for value in values]`.
+
+  #### Examples:
+
+  >>> # Stacking two ragged tensors.
+  >>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]])
+  >>> t2 = tf.ragged.constant([[6], [7, 8, 9]])
+  >>> tf.ragged.stack([t1, t2], axis=0)
+  <tf.RaggedTensor [[[1, 2], [3, 4, 5]], [[6], [7, 8, 9]]]>
+  >>> tf.ragged.stack([t1, t2], axis=1)
+  <tf.RaggedTensor [[[1, 2], [6]], [[3, 4, 5], [7, 8, 9]]]>
+
+  >>> # Stacking two dense tensors with different sizes.
+  >>> t3 = tf.constant([[1, 2, 3], [4, 5, 6]])
+  >>> t4 = tf.constant([[5], [6], [7]])
+  >>> tf.ragged.stack([t3, t4], axis=0)
+  <tf.RaggedTensor [[[1, 2, 3], [4, 5, 6]], [[5], [6], [7]]]>
+
+  Args:
+    values: A list of `tf.Tensor` or `tf.RaggedTensor`. May not be empty. All
+      `values` must have the same rank and the same dtype; but unlike
+      `tf.stack`, they can have arbitrary dimension sizes.
+    axis: A python integer, indicating the dimension along which to stack.
+      (Note: Unlike `tf.stack`, the `axis` parameter must be statically
+      known.) Negative values are supported only if the rank of at least one
+      `values` value is statically known.
+    name: A name prefix for the returned tensor (optional).
+
+  Returns:
+    A `RaggedTensor` with rank `R+1` (if `R>0`).
+    If `R==0`, then the result will be returned as a 1D `Tensor`, since
+    `RaggedTensor` can only be used when `rank>1`.
+    `result.ragged_rank = 1 + max(axis, max(rt.ragged_rank for rt in values))`.
+
+  Raises:
+    ValueError: If `values` is empty, if `axis` is out of bounds or if
+      the input tensors have different ranks.
+  """
+  if not isinstance(values, (list, tuple)):
+    values = [values]
+  with ops.name_scope(name, 'RaggedConcat', values):
+    return _ragged_stack_concat_helper(values, axis, stack_values=True)
+
+
+def _ragged_stack_concat_helper(rt_inputs, axis, stack_values):
+  """Helper function to concatenate or stack ragged tensors.
+
+  Args:
+    rt_inputs: A list of RaggedTensors or Tensors to combine.
+    axis: The axis along which to concatenate or stack.
+    stack_values: A boolean -- if true, then stack values; otherwise,
+      concatenate them.
+
+  Returns:
+    A RaggedTensor.
+  Raises:
+    ValueError: If rt_inputs is empty, or if axis is out of range.
+  """
+  # Validate parameters.
+  if not rt_inputs:
+    raise ValueError('rt_inputs may not be empty.')
+
+  # Convert input tensors.
+  rt_inputs = [
+      ragged_tensor.convert_to_tensor_or_ragged_tensor(
+          rt_input, name='rt_input') for rt_input in rt_inputs
+  ]
+  row_splits_dtype, rt_inputs = ragged_tensor.match_row_splits_dtypes(
+      *rt_inputs, return_dtype=True)
+  rt_inputs = list(rt_inputs)
+
+  # Special case: if there's only one input, then return it as-is.
+  if len(rt_inputs) == 1 and not stack_values:
+    return rt_inputs[0]
+
+  # Check the rank (number of dimensions) of the input tensors.
+  ndims = None
+  for rt in rt_inputs:
+    if ndims is None:
+      ndims = rt.shape.ndims
+    else:
+      rt.shape.assert_has_rank(ndims)
+
+  out_ndims = ndims if (ndims is None or not stack_values) else ndims + 1
+  axis = array_ops.get_positive_axis(axis, out_ndims)
+
+  if stack_values and ndims == 1 and axis == 0:
+    return ragged_tensor.RaggedTensor.from_row_lengths(
+        values=array_ops.concat(rt_inputs, axis=0),
+        row_lengths=array_ops.concat([array_ops.shape(r) for r in rt_inputs],
+                                     axis=0))
+
+  # If all the inputs are Tensors, and we're combining the final dimension,
+  # then we can delegate to the tf.stack/tf.concat operation, and return a
+  # Tensor.
+  if all(not ragged_tensor.is_ragged(rt) for rt in rt_inputs):
+    if ndims is not None and (axis == out_ndims - 1 or axis == ndims - 1):
+      if stack_values:
+        return array_ops_stack.stack(rt_inputs, axis)
+      else:
+        return array_ops.concat(rt_inputs, axis)
+
+  # Convert any Tensor inputs to RaggedTensors. This makes it
+  # possible to concatenate Tensors and RaggedTensors together.
+  for i in range(len(rt_inputs)):
+    if not ragged_tensor.is_ragged(rt_inputs[i]):
+      rt_inputs[i] = ragged_tensor.RaggedTensor.from_tensor(
+          rt_inputs[i], ragged_rank=1, row_splits_dtype=row_splits_dtype)
+
+  # Convert the input tensors to all have the same ragged_rank.
+  ragged_rank = max(max(rt.ragged_rank for rt in rt_inputs), 1)
+  rt_inputs = [_increase_ragged_rank_to(rt, ragged_rank, row_splits_dtype)
+               for rt in rt_inputs]
+
+  if axis == 0:
+    return _ragged_stack_concat_axis_0(rt_inputs, stack_values)
+  elif axis == 1:
+    return _ragged_stack_concat_axis_1(rt_inputs, stack_values)
+  else:  # axis > 1: recurse.
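+    # The assertion below verifies that all inputs share the same outer row
+    # splits; we can then recurse on the inner values and re-attach the
+    # shared splits to the combined result.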
+ values = [rt.values for rt in rt_inputs] + splits = [[rt_input.row_splits] for rt_input in rt_inputs] + with ops.control_dependencies(ragged_util.assert_splits_match(splits)): + return ragged_tensor.RaggedTensor.from_row_splits( + _ragged_stack_concat_helper(values, axis - 1, stack_values), + splits[0][0], validate=False) + + +def _ragged_stack_concat_axis_0(rt_inputs, stack_values): + """Helper function to concatenate or stack ragged tensors along axis 0. + + Args: + rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank. + stack_values: Boolean. If true, then stack values; otherwise, concatenate + them. + + Returns: + A RaggedTensor. + """ + # Concatenate the inner values together. + flat_values = [rt.flat_values for rt in rt_inputs] + concatenated_flat_values = array_ops.concat(flat_values, axis=0) + + # Concatenate the splits together for each ragged dimension (adjusting + # split offsets as necessary). + nested_splits = [rt.nested_row_splits for rt in rt_inputs] + ragged_rank = rt_inputs[0].ragged_rank + concatenated_nested_splits = [ + _concat_ragged_splits([ns[dim] + for ns in nested_splits]) + for dim in range(ragged_rank) + ] + + # If we are performing a stack operation, then add another splits. + if stack_values: + stack_lengths = array_ops_stack.stack([rt.nrows() for rt in rt_inputs]) + stack_splits = ragged_util.lengths_to_splits(stack_lengths) + concatenated_nested_splits.insert(0, stack_splits) + + return ragged_tensor.RaggedTensor.from_nested_row_splits( + concatenated_flat_values, concatenated_nested_splits, validate=False) + + +def _ragged_stack_concat_axis_1(rt_inputs, stack_values): + """Helper function to concatenate or stack ragged tensors along axis 1. + + Args: + rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank. + stack_values: Boolean. If true, then stack values; otherwise, concatenate + them. + + Returns: + A RaggedTensor. + """ + num_inputs = len(rt_inputs) + + nrows_checks = [] + rt_nrows = rt_inputs[0].nrows() + for index, rt in enumerate(rt_inputs[1:]): + nrows_checks.append( + check_ops.assert_equal( + rt_nrows, + rt.nrows(), + message=( + f'Input tensors at index 0 (=x) and {index+1} (=y) have' + ' incompatible shapes.' + ), + ) + ) + + with ops.control_dependencies(nrows_checks): + # Concatenate the inputs together to put them in a single ragged tensor. + concatenated_rt = _ragged_stack_concat_axis_0(rt_inputs, stack_values=False) + + # Use ragged.gather to permute the rows of concatenated_rt. In particular, + # permuted_rt = [rt_inputs[0][0], ..., rt_inputs[N][0], + # rt_inputs[0][1], ..., rt_inputs[N][1], + # ..., + # rt_inputs[0][M], ..., rt_input[N][M]] + # where `N=num_inputs-1` and `M=rt_nrows-1`. + row_indices = math_ops.range(rt_nrows * num_inputs) + row_index_matrix = array_ops.reshape(row_indices, [num_inputs, -1]) + transposed_row_index_matrix = array_ops.transpose(row_index_matrix) + row_permutation = array_ops.reshape(transposed_row_index_matrix, [-1]) + permuted_rt = ragged_gather_ops.gather(concatenated_rt, row_permutation) + + if stack_values: + # Add a new splits tensor to group together the values. + stack_splits = math_ops.range(0, rt_nrows * num_inputs + 1, num_inputs) + _copy_row_shape(rt_inputs, stack_splits) + return ragged_tensor.RaggedTensor.from_row_splits( + permuted_rt, stack_splits, validate=False) + else: + # Merge together adjacent rows by dropping the row-split indices that + # separate them. 
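+      # Illustration (hypothetical numbers): with num_inputs=2 and permuted
+      # row_splits [0, 2, 3, 7, 9], keeping every 2nd entry gives [0, 3, 9],
+      # fusing each pair of adjacent rows into a single concatenated row.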
+      concat_splits = permuted_rt.row_splits[::num_inputs]
+      _copy_row_shape(rt_inputs, concat_splits)
+      return ragged_tensor.RaggedTensor.from_row_splits(
+          permuted_rt.values, concat_splits, validate=False)
+
+
+def _copy_row_shape(rt_inputs, splits):
+  """Sets splits.shape to [rt.shape[0] + 1] for each rt in rt_inputs."""
+  for rt in rt_inputs:
+    if rt.shape[0] is not None:
+      splits.set_shape(tensor_shape.TensorShape(rt.shape[0] + 1))
+
+
+def _increase_ragged_rank_to(rt_input, ragged_rank, row_splits_dtype):
+  """Adds ragged dimensions to `rt_input` so it has the desired ragged rank."""
+  if ragged_rank > 0:
+    if not ragged_tensor.is_ragged(rt_input):
+      rt_input = ragged_tensor.RaggedTensor.from_tensor(
+          rt_input, row_splits_dtype=row_splits_dtype)
+    if rt_input.ragged_rank < ragged_rank:
+      rt_input = rt_input.with_values(
+          _increase_ragged_rank_to(rt_input.values, ragged_rank - 1,
+                                   row_splits_dtype))
+  return rt_input
+
+
+def _concat_ragged_splits(splits_list):
+  """Concatenates a list of RaggedTensor splits to form a single splits."""
+  pieces = [splits_list[0]]
+  splits_offset = splits_list[0][-1]
+  for splits in splits_list[1:]:
+    pieces.append(splits[1:] + splits_offset)
+    splits_offset += splits[-1]
+  return array_ops.concat(pieces, axis=0)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_config.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf19c5a62012f771122732ff8f7fb350ff5659f4
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_config.py
@@ -0,0 +1,29 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Configuration parameters for RaggedTensors."""
+
+
+def auto_cast_partition_dtype():
+  """Whether incompatible row-partitioning dtypes should be auto-converted.
+
+  If true, then operations that combine RaggedTensors but have different
+  row-partitioning tensor dtypes will be automatically cast to a
+  compatible dtype (`tf.int64`). If false, then such operations will result
+  in an error.
+
+  Returns:
+    `bool`
+  """
+  return False
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_conversion_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_conversion_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..e71f5cad7929fc034eaa2bad2f6b81110c36b1ed
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_conversion_ops.py
@@ -0,0 +1,180 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Ops to convert between RaggedTensors and other tensor types.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_ragged_conversion_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_tensor + + +def from_tensor(tensor, + lengths=None, + padding=None, + ragged_rank=1, + row_splits_dtype=dtypes.int64, + name=None): + if ragged_tensor.is_ragged(tensor): + return tensor + else: + return ragged_tensor.RaggedTensor.from_tensor( + tensor, + lengths=lengths, + padding=padding, + ragged_rank=ragged_rank, + row_splits_dtype=row_splits_dtype, + name=name) + + +def to_tensor(rt_input, default_value=None, name=None): + if ragged_tensor.is_ragged(rt_input): + return rt_input.to_tensor(default_value, name) + else: + return rt_input + + +def ragged_to_dense(rt_input, default_value=None, shape=None): + """Create a dense tensor from a ragged tensor.""" + return rt_input.to_tensor(default_value=default_value, shape=shape) + + +@ops.RegisterGradient("RaggedTensorToTensor") +def _ragged_tensor_to_tensor_grad(op, grad): + """Gradient for RaggedToTensor op.""" + # Extract inputs from the op. + flat_values = op.inputs[1] + default_value = op.inputs[2] + row_partition_tensors = op.inputs[3:] + row_partition_types = op.get_attr("row_partition_types") + flat_value_shape = array_ops.shape(flat_values) + ragged_rank = sum( + 1 for typ in row_partition_types if typ != b"FIRST_DIM_SIZE") + + # Create two tensors that correspond 1:1 with grad (and op.output): + # * indices[i1...iN] is the index in `flat_values` of the value used to + # populate output[i1...iN] (if the value came from `flat_values`) or + # -1 (if the value came from `default_value`). + # * mask[i1...iN] is true if output[i1...iN] came from `flat_values`, or + # false if it came from `default_value`. + indices = gen_ragged_conversion_ops.ragged_tensor_to_tensor( + shape=array_ops.shape(grad)[:1 + ragged_rank], + values=math_ops.range(flat_value_shape[0]), + default_value=-1, + row_partition_types=row_partition_types, + row_partition_tensors=row_partition_tensors) + mask = math_ops.not_equal(indices, -1) + + # Select out the gradients & indices that came from `flat_values`, and use + # those to construct the gradient for `flat_values` (as an IndexedSlices). + values_grad = indexed_slices.IndexedSlices( + values=array_ops.boolean_mask(grad, mask), + indices=array_ops.boolean_mask(indices, mask), + dense_shape=flat_value_shape) + + # Select out the gradients that came from `default_value`, and sum them to + # get the gradient for the default. Note that the default_value may have + # been broadcast as part of the RaggedTensorToTensor operation, so we also + # need to reduce any dimensions that might have been broadcast. 
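+ # (Illustrative sketch, not upstream code: for rt = tf.ragged.constant(
+ # [[1, 2], [3]]), rt.to_tensor(default_value=-1) yields [[1, 2], [3, -1]].
+ # The cell filled from `default_value` is exactly where `mask` is False,
+ # and the gradient arriving at that cell is what gets summed into the
+ # `default_grad` computed below.)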
+ default_grads = array_ops.boolean_mask(grad, ~mask) + dims_to_reduce = math_ops.range( + array_ops.rank(default_grads) - + _rank_ignoring_leading_dims_with_size_1(default_value)) + default_grad = math_ops.reduce_sum(default_grads, axis=dims_to_reduce) + + # Restore any leading dims with size one. + default_grad = array_ops.reshape(default_grad, array_ops.shape(default_value)) + + return ([None, values_grad, default_grad] + + [None for _ in row_partition_tensors]) + + +def _rank_ignoring_leading_dims_with_size_1(value): + """Returns `rank(value)`, ignoring any leading dimensions with size 1.""" + # Compute the result using static shape, if possible. + if value.shape.rank is not None: + ndims = value.shape.rank + for dim in value.shape.dims: + if dim.value == 1: + ndims -= 1 + elif dim.value is None: + ndims = None # Can't compute the result using static shape. + break + else: + break + if ndims is not None: + return ndims + + # Otherwise, we need to compute the result dynamically. The math we use to + # do this is a bit round-about, so here's an example to illustrate: + # shape = [1, 1, 3, 5, 1, 4] # shape(value) + # dim_is_one = [1, 1, 0, 0, 1, 0] # equal(shape, 1) + # leading_ones = [1, 1, 0, 0, 0, 0] # cumprod(dim_is_one) + # num_leading_ones = 2 # reduce_sum(leading_ones) + # result = 4 # rank(value) - num_leading_ones + shape = array_ops.shape(value) + dim_is_one = math_ops.cast(math_ops.equal(shape, 1), dtypes.int32) + leading_ones = math_ops.cumprod(dim_is_one) + num_leading_ones = math_ops.reduce_sum(leading_ones) + return array_ops.rank(value) - num_leading_ones + + +def to_sparse(rt_input, name=None): + return rt_input.to_sparse(name) + + +def from_sparse(st_input, name=None): + return ragged_tensor.RaggedTensor.from_sparse(st_input, name) + + +@ops.RegisterGradient("RaggedTensorFromVariant") +def _ragged_tensor_from_variant_grad(op, *grads): + """Gradient for RaggedTensorFromVariant op.""" + + variant_rank = op.inputs[0].shape.rank + if variant_rank == 0: + batched_input = False + elif variant_rank == 1: + batched_input = True + elif variant_rank is None: + batched_input = (op.get_attr("output_ragged_rank") > 0) + else: + # TODO(edloper): Add a batch_dims argument to RaggedTensorToVariant, so + # we can support this. 
+ raise ValueError("Unable to compute gradient: RaggedTensorToVariant " + "can currently only generate 0D or 1D output.") + return [ + gen_ragged_conversion_ops.ragged_tensor_to_variant( + rt_nested_splits=op.outputs[:-1], + rt_dense_values=grads[-1], + batched_input=batched_input) + ] + + +@ops.RegisterGradient("RaggedTensorToVariant") +def _ragged_tensor_to_variant_grad(op, encoded_ragged_grad): + """Gradient for RaggedTensorToVariant op.""" + dense_values = op.inputs[-1] + ragged_rank = len(op.inputs) - 1 + row_splits = 0 if ragged_rank == 0 else op.inputs[0] + values_grad = gen_ragged_conversion_ops.ragged_tensor_to_variant_gradient( + encoded_ragged_grad=encoded_ragged_grad, + row_splits=row_splits, + dense_values_shape=array_ops.shape(dense_values), + Tvalues=op.inputs[-1].dtype) + result = [None] * ragged_rank + [values_grad] + return result diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_dispatch.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..176e18be3d7f520a2e648c7ad3b0d5c2a43fca9c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_dispatch.py @@ -0,0 +1,160 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Operator dispatch for RaggedTensors.""" + +from tensorflow.python.ops import logging_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import string_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import ragged_tensor_shape +from tensorflow.python.util import dispatch +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_export +from tensorflow.python.util import tf_inspect + + +@dispatch.dispatch_for_unary_elementwise_apis(ragged_tensor.Ragged) +def ragged_unary_elementwise_op(op, x): + """Unary elementwise api handler for RaggedTensors.""" + x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x) + return x.with_values(op(x.values)) + + +# TODO(martinz): This is deprecated. Delete. +def ragged_binary_elementwise_op(op, x, y): + """Binary elementwise api handler for RaggedTensors.""" + x_is_ragged = ragged_tensor.is_ragged(x) + y_is_ragged = ragged_tensor.is_ragged(y) + + # Convert args to tensors. 
+ x = ragged_tensor.convert_to_tensor_or_ragged_tensor( + x, preferred_dtype=(y.dtype if y_is_ragged else None)) + y = ragged_tensor.convert_to_tensor_or_ragged_tensor( + y, preferred_dtype=x.dtype) + + if x_is_ragged and y_is_ragged: + x, y = ragged_tensor.match_row_splits_dtypes(x, y) + + # Perform broadcasting, when appropriate. + if ((x_is_ragged and y_is_ragged) or + (x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or + (y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)): + # If both x and y are ragged, they must have the same row_splits_dtype now. + if x_is_ragged: + dim_size_dtype = x.row_splits.dtype + else: + dim_size_dtype = y.row_splits.dtype + + shape_x = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor( + x, dim_size_dtype=dim_size_dtype) + shape_y = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor( + y, dim_size_dtype=dim_size_dtype) + bcast_shape = ragged_tensor_shape.broadcast_dynamic_shape(shape_x, shape_y) + x = ragged_tensor_shape.broadcast_to( + x, bcast_shape, broadcast_inner_dimensions=False) + y = ragged_tensor_shape.broadcast_to( + y, bcast_shape, broadcast_inner_dimensions=False) + + x_values = x.flat_values if ragged_tensor.is_ragged(x) else x + y_values = y.flat_values if ragged_tensor.is_ragged(y) else y + mapped_values = op(x_values, y_values) + if isinstance(mapped_values, bool): + return mapped_values # Special case for tensor_equals. + if ragged_tensor.is_ragged(x): + return x.with_flat_values(mapped_values) + else: + return y.with_flat_values(mapped_values) + + +# TODO(edloper): Update the documentation generation tools to automatically +# build lists of which types are supported by which ops (and then delete all +# the following code). + + +# We don't need to register a separate delegation handler for these v1 ops, +# since they delegate to the v2 ops (which already have a handler). But we +# still want to include them in the ragged_op_list() output. +_V2_OPS_THAT_ARE_DELEGATED_TO_FROM_V1_OPS = [ + math_ops.reduce_sum, + math_ops.reduce_prod, + math_ops.reduce_min, + math_ops.reduce_max, + math_ops.reduce_mean, + math_ops.reduce_variance, + math_ops.reduce_std, + math_ops.reduce_any, + math_ops.reduce_all, + string_ops.string_to_number, + string_ops.string_to_hash_bucket, + string_ops.reduce_join_v2, +] + + +def _ragged_op_signature(op, ragged_args, ragged_varargs=False): + """Returns a signature for the given op, marking ragged args in bold.""" + op_name = tf_export.get_canonical_name_for_symbol(op) + argspec = tf_inspect.getfullargspec(op) + arg_names = argspec.args + + # Mark ragged arguments in bold. + for pos in ragged_args: + arg_names[pos] = '**' + arg_names[pos] + '**' + + # Add argument defaults.
+ if argspec.defaults is not None: + for pos in range(-1, -len(argspec.defaults) - 1, -1): + arg_names[pos] += '=`{!r}`'.format(argspec.defaults[pos]) + + # Add varargs and keyword args. + if argspec.varargs: + if ragged_varargs: + arg_names.append('***' + argspec.varargs + '**') + else: + arg_names.append('*' + argspec.varargs) + if argspec.varkw: + arg_names.append('**' + argspec.varkw) + + return '* `tf.{}`({})'.format(op_name, ', '.join(arg_names)) + + +def _op_is_in_tf_version(op, version): + if version == 1: + return (tf_export.get_v1_names(tf_decorator.unwrap(op)[1]) or + op in _V2_OPS_THAT_ARE_DELEGATED_TO_FROM_V1_OPS) + elif version == 2: + return tf_export.get_v2_names(tf_decorator.unwrap(op)[1]) + else: + raise ValueError('Expected version 1 or 2.') + + +def ragged_op_list(tf_version=2): + """Returns a string listing operations that have dispatchers registered.""" + lines = [] + api_signatures = dispatch.type_based_dispatch_signatures_for( + ragged_tensor.RaggedTensor) + for api, signatures in api_signatures.items(): + arg_names = tf_inspect.getargspec(api).args + ragged_args = set() + for signature in signatures: + for arg in signature: + ragged_args.add(arg if isinstance(arg, int) else arg_names.index(arg)) + if _op_is_in_tf_version(api, tf_version): + lines.append(_ragged_op_signature(api, ragged_args)) + + lines.append( + _ragged_op_signature(logging_ops.print_v2, [], ragged_varargs=True)) + return ('\n\n### Additional ops that support `RaggedTensor`\n\n' + 'Arguments that accept `RaggedTensor`s are marked in **bold**.\n\n' + + '\n'.join(sorted(lines)) + '\n') diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_embedding_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_embedding_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ea994bfa242d55b639461ed0f5eb0cc9fd4177 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_embedding_ops.py @@ -0,0 +1,432 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
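Before the next file begins, it may help to see what the dispatchers registered in ragged_dispatch.py buy you in user code: ordinary TensorFlow ops accept `RaggedTensor` arguments directly. A short usage sketch (assumes eager TF 2.x):

```python
import tensorflow as tf

rt = tf.ragged.constant([[1.0, 2.0], [3.0]])
print(tf.math.square(rt))  # unary op: applied to rt.values, splits reused
print(tf.add(rt, rt))      # ragged + ragged: row splits must match
print(tf.add(rt, 10.0))    # ragged + scalar: the scalar broadcasts
```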
+# ============================================================================== +"""Embedding operations.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import embedding_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops import variables +from tensorflow.python.ops.ragged import ragged_array_ops +from tensorflow.python.ops.ragged import ragged_functional_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch + + +@dispatch.dispatch_for_api(embedding_ops.embedding_lookup) +def embedding_lookup( + params, + ids: ragged_tensor.Ragged, + partition_strategy="mod", + name=None, + validate_indices=True, # pylint: disable=unused-argument + max_norm=None, +): + """Look up the ragged ids in a list of embedding tensors. + + Args: + params: A tensor representing the complete embedding tensor having the + shape [e1, ...eM] + ids: A `RaggedTensor` with type `int32` or `int64` containing the ids to + be looked up in `params` of shape [r0, ..rN]. Values must be in the + range `[0, params.shape[0])`. + partition_strategy: A string specifying the partitioning strategy. + name: A name for the operation (optional). + validate_indices: Ignored. + max_norm: If not `None`, each embedding is clipped if its l2-norm is larger + than this value. + + Returns: + A ragged tensor of shape [r0, r1, ...rN, e1, ...eM]. + + Raises: + ValueError: When params is empty or the type of the ids is not int32 or + int64. + """ + if params is None: + raise ValueError("params must be specified.") + if isinstance(params, (list, tuple)) and not params: + raise ValueError("params should not be empty.") + if ids.dtype != dtypes.int32 and ids.dtype != dtypes.int64: + raise ValueError( + "The values contained by the inputs have type " + f"{str(ids.dtype)}" + " and cannot be processed. All values" + " should be indices, either of type `int32` or `int64`." + ) + + with ops.name_scope(name, "embedding_lookup_ragged") as name: + looked_up_ragged = ragged_functional_ops.map_flat_values( + embedding_ops.embedding_lookup, + params=params, + ids=ids, + partition_strategy=partition_strategy, + max_norm=max_norm, + ) + + return looked_up_ragged + + +@dispatch.dispatch_for_api(embedding_ops.embedding_lookup_sparse) +def embedding_lookup_sparse( + params, + sp_ids: ragged_tensor.Ragged, + sp_weights, + partition_strategy="mod", + name=None, + combiner=None, + max_norm=None, + allow_fast_lookup=False, +): + """Looks up embeddings for the given ids and weights from a list of tensors. + + This op assumes that there is at least one id for each row in the dense tensor + represented by sp_ids (i.e. there are no rows with empty features), and that + all the indices of sp_ids are in canonical row-major order. + + `sp_ids` and `sp_weights` (if not None) are `RaggedTensor`s with rank of 2. + Embeddings are always aggregated along the last dimension. + + It also assumes that all id values lie in the range [0, p0), where p0 + is the sum of the size of params along dimension 0. + + Args: + params: A single tensor representing the complete embedding tensor, or a + list of tensors all of same shape except for the first dimension, + representing sharded embedding tensors. Alternatively, a + `PartitionedVariable`, created by partitioning along dimension 0.
Each + element must be appropriately sized for the given `partition_strategy`. + sp_ids: `RaggedTensor` with rank 2. The rank is not verified for performance + reasons. + sp_weights: `RaggedTensor` of same type and shape as `sp_ids`, containing + float / double weights corresponding to `sp_ids`, or `None` if all + weights are assumed to be 1.0. + partition_strategy: A string specifying the partitioning strategy, relevant + if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default + is `"mod"`. See `tf.nn.embedding_lookup` for more details. + name: Optional name for the op. + combiner: A string specifying the reduction op. Currently "mean", "sqrtn" + and "sum" are supported. "sum" computes the weighted sum of the embedding + results for each row. "mean" is the weighted sum divided by the total + weight. "sqrtn" is the weighted sum divided by the square root of the sum + of the squares of the weights. Defaults to `mean`. + max_norm: If not `None`, each embedding is clipped if its l2-norm is larger + than this value, before combining. + allow_fast_lookup: An optional boolean specifying whether to allow + simplified embedding lookups when `params` is a single tensor and + `max_norm` is `None`. Setting this flag to `True` during training can + cause the use of dense gradients with increased memory footprint. + + Returns: + A dense tensor representing the combined embeddings for the + sparse ids. For each row in the dense tensor represented by `sp_ids`, the op + looks up the embeddings for all ids in that row, multiplies them by the + corresponding weight, and combines these embeddings as specified. + + In other words, if + + `shape(combined params) = [p0, p1, ..., pm]` + + and + + `shape(sp_ids) = shape(sp_weights) = [d0, d1]` + + then + + `shape(output) = [d0, p1, ..., pm]`. + + For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are + + ```python + [0, 0]: id 1, weight 2.0 + [0, 1]: id 3, weight 0.5 + [1, 0]: id 0, weight 1.0 + [2, 3]: id 1, weight 3.0 + ``` + + with `combiner`="mean", then the output will be a 3x20 matrix where + + ```python + output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) + output[1, :] = (params[0, :] * 1.0) / 1.0 + output[2, :] = (params[1, :] * 3.0) / 3.0 + ``` + + Raises: + TypeError: If `sp_weights` is neither `None` nor of the same type as + `sp_ids`. + ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}. + """ + rt_ids = sp_ids + rt_weights = sp_weights + if combiner is None: + combiner = "mean" + if combiner not in ("mean", "sqrtn", "sum"): + raise ValueError( + f"combiner must be one of 'mean', 'sqrtn' or 'sum', got {combiner}" + ) + if isinstance(params, variables.PartitionedVariable): + params = list(params) # Iterate to get the underlying Variables. + if not isinstance(params, list): + params = [params] + ignore_weights = rt_weights is None + if not ignore_weights: + if not isinstance(rt_weights, ragged_tensor.RaggedTensor): + raise TypeError( + "sp_ids must be of the same type as sp_weights, " + f"received {type(sp_ids).__name__!r} for sp_ids and " + f"{type(sp_weights).__name__!r} for sp_weights."
+ ) + rt_ids.values.get_shape().assert_is_compatible_with( + rt_weights.values.get_shape() + ) + rt_ids.get_shape().assert_is_compatible_with(rt_weights.get_shape()) + + with ops.name_scope( + name, "embedding_lookup_sparse", params + [rt_ids] + ) as name: + segment_ids = rt_ids.value_rowids() + ids = rt_ids.flat_values + + return embedding_ops.embedding_lookup_sparse_impl( + params, + segment_ids, + sp_weights, + ids, + combiner, + ignore_weights, + max_norm, + allow_fast_lookup, + partition_strategy, + name, + ) + + +@dispatch.dispatch_for_api(embedding_ops.safe_embedding_lookup_sparse) +def safe_embedding_lookup_sparse( + embedding_weights, + sparse_ids: ragged_tensor.Ragged, + sparse_weights=None, + combiner="mean", + default_id=None, + name=None, + partition_strategy="div", + max_norm=None, + allow_fast_lookup=False, +): + """Lookup embedding results, accounting for invalid IDs and empty features. + + The partitioned embedding tensors in `embedding_weights` must all have the + same shape except for the first dimension. The first dimension is allowed to + vary as the vocabulary size is not necessarily a multiple of `P`. + `embedding_weights` may be a `PartitionedVariable` as returned by using + `tf.compat.v1.get_variable()` with a + partitioner. + + Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs + with non-positive weight. For an entry with no features, the embedding vector + for `default_id` is returned, or the 0-vector if `default_id` is not supplied. + + The ids and weights may be multi-dimensional `SparseTensor`s or + `RaggedTensor`s with rank of 2. For `SparseTensor`s with left-aligned non-zero + entries which can be described as `RaggedTensor`s, use of `RaggedTensor`s can + yield higher performance. Embeddings are always aggregated along the last + dimension. + + Args: + embedding_weights: A single tensor representing the complete embedding + tensor, or a list of tensors all of same shape except for the first + dimension, representing sharded embedding tensors. Alternatively, a + `PartitionedVariable`, created by partitioning along dimension 0. Each + element must be appropriately sized for the given `partition_strategy`. + sparse_ids: `RaggedTensor` with rank 2. The rank is not verified for + performance reasons. + sparse_weights: `RaggedTensor` of same type and shape as `sparse_ids`, + containing float weights corresponding to `sparse_ids`, or `None` if all + weights are assumed to be 1.0. + combiner: A string specifying how to combine embedding results for each + entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the + default. + default_id: The id to use for an entry with no features. + name: A name for this operation (optional). + partition_strategy: A string specifying the partitioning strategy. Currently + `"div"` and `"mod"` are supported. Default is `"div"`. + max_norm: If not `None`, all embeddings are l2-normalized to max_norm before + combining. + allow_fast_lookup: An optional boolean specifying whether to allow + simplified embedding lookups when `params` is a single tensor and + `max_norm` is `None`. Setting this flag to `True` during training can + cause the use of dense gradients with increased memory footprint. + + Returns: + A dense tensor representing the combined embeddings for the + sparse ids. For each row in the dense tensor represented by `sparse_ids`, + the op looks up the embeddings for all ids in that row, multiplies them by + the corresponding weight, and combines these embeddings as specified.
+ + In other words, if + + `shape(combined embedding_weights) = [p0, p1, ..., pm]` + + and + + `shape(sparse_ids) = shape(sparse_weights) = [d0, d1, ..., dn]` + + then + + `shape(output) = [d0, d1, ... dn-1, p1, ..., pm]`. + + For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are + + ```python + [0, 0]: id 1, weight 2.0 + [0, 1]: id 3, weight 0.5 + [1, 0]: id -1, weight 1.0 + [2, 3]: id 1, weight 3.0 + ``` + + `default_id` is 0. + + with `combiner`="mean", then the output will be a 3x20 matrix where + + ```python + output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) + output[1, :] = (params[0, :] * 1.0) / 1.0 + output[2, :] = (params[1, :] * 3.0) / 3.0 + ``` + + Raises: + ValueError: if `embedding_weights` is empty. + """ + ragged_ids = sparse_ids + ragged_weights = sparse_weights + if embedding_weights is None: + raise ValueError(f"Missing embedding_weights {embedding_weights}.") + if isinstance(embedding_weights, variables.PartitionedVariable): + embedding_weights = list(embedding_weights) # get underlying Variables. + if not isinstance(embedding_weights, list): + embedding_weights = [embedding_weights] + if len(embedding_weights) < 1: + raise ValueError(f"Missing embedding_weights {embedding_weights}.") + + dtype = ragged_weights.dtype if ragged_weights is not None else None + embedding_weights = [ + w + if ( + resource_variable_ops.is_resource_variable(w) + and dtype in (None, w.dtype) + ) + else ops.convert_to_tensor(w, dtype=dtype) + for w in embedding_weights + ] + + with ops.name_scope( + name, "embedding_lookup", embedding_weights + [ragged_ids, ragged_weights] + ) as scope: + # Prune invalid ids and weights. + ragged_ids, ragged_weights = _prune_invalid_ids_ragged( + ragged_ids, ragged_weights + ) + if combiner != "sum": + ragged_ids, ragged_weights = _prune_invalid_weights_ragged( + ragged_ids, ragged_weights + ) + ragged_ids, is_row_empty = ragged_array_ops.fill_empty_rows( + ragged_ids, default_id or 0 + ) + if ragged_weights is not None: + ragged_weights, _ = ragged_array_ops.fill_empty_rows(ragged_weights, 1.0) + + result = embedding_lookup_sparse( + embedding_weights, + ragged_ids, + ragged_weights, + combiner=combiner, + partition_strategy=partition_strategy, + name=None if default_id is None else scope, + max_norm=max_norm, + allow_fast_lookup=allow_fast_lookup, + ) + + if default_id is None: + # Broadcast is_row_empty to the same shape as embedding_lookup_result, + # for use in Select. + is_row_empty = array_ops.tile( + array_ops.reshape(is_row_empty, [-1, 1]), + array_ops_stack.stack([1, array_ops.shape(result)[1]]), + ) + + result = array_ops.where( + is_row_empty, array_ops.zeros_like(result), result, name=scope + ) + + return result + + +def _prune_invalid_ids_ragged(ids, weights): + """Prune invalid IDs (< 0) from the input ids and weights.""" + is_id_valid = math_ops.greater_equal(ids.values, 0) + nrows = ids.nrows() + # TODO(philipphack): Consider calling ragged_array_ops.boolean_mask once the + # resulting performance is comparable to array_ops.boolean_mask. Currently, + # ragged_array_ops.boolean_mask constructs the returned RaggedTensor by + # calling its from_row_splits method which does not set value_row_ids and + # requires it to be computed on demand. 
+ pruned_values = array_ops.boolean_mask_v2(ids.values, is_id_valid) + pruned_value_rowids = array_ops.boolean_mask_v2( + ids.value_rowids(), is_id_valid + ) + ids = ragged_tensor.RaggedTensor.from_value_rowids( + pruned_values, pruned_value_rowids, nrows=nrows, validate=False + ) + if weights is not None: + pruned_weights_values = array_ops.boolean_mask_v2( + weights.values, is_id_valid + ) + weights = ragged_tensor.RaggedTensor.from_value_rowids( + pruned_weights_values, pruned_value_rowids, nrows=nrows, validate=False + ) + + return ids, weights + + +def _prune_invalid_weights_ragged(ids, weights): + """Prune invalid weights (<= 0) from the input ids and weights.""" + if weights is not None: + is_weights_valid = math_ops.greater(weights.values, 0) + nrows = ids.nrows() + # TODO(philipphack): Consider calling ragged_array_ops.boolean_mask once the + # resulting performance is comparable to array_ops.boolean_mask. Currently, + # ragged_array_ops.boolean_mask constructs the returned RaggedTensor by + # calling its from_row_splits method which does not set value_row_ids and + # requires it to be computed on demand. + pruned_values = array_ops.boolean_mask_v2(ids.values, is_weights_valid) + pruned_value_rowids = array_ops.boolean_mask_v2( + ids.value_rowids(), is_weights_valid + ) + ids = ragged_tensor.RaggedTensor.from_value_rowids( + pruned_values, pruned_value_rowids, nrows=nrows, validate=False + ) + + pruned_weights_values = array_ops.boolean_mask_v2( + weights.values, is_weights_valid + ) + weights = ragged_tensor.RaggedTensor.from_value_rowids( + pruned_weights_values, pruned_value_rowids, nrows=nrows, validate=False + ) + + return ids, weights diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_factory_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_factory_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..9e096e01b56d7a03d3d237846d1a1bf2bd92a8a1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_factory_ops.py @@ -0,0 +1,379 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Operations for constructing RaggedTensors.""" + +from typing import Union + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import ragged_tensor_value +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +#=============================================================================== +# Op to construct a constant RaggedTensor from a nested Python list.
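To close out the embedding file before the factory ops begin: the two pruning helpers above are what make `tf.nn.safe_embedding_lookup_sparse` robust to ragged ids containing invalid entries. A usage sketch (the table values are random; the shapes and the pruning behavior are the point):

```python
import tensorflow as tf

params = tf.random.normal([10, 20])            # 10 x 20 embedding table
ids = tf.ragged.constant([[1, 3], [-1], [1]])  # the -1 id will be pruned
weights = tf.ragged.constant([[2.0, 0.5], [1.0], [3.0]])

out = tf.nn.safe_embedding_lookup_sparse(params, ids, weights, combiner="mean")
print(out.shape)  # (3, 20); row 1 lost its only id, so it gets the 0-vector
```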
+#=============================================================================== +@tf_export("ragged.constant") +@dispatch.add_dispatch_support +def constant( + pylist, + dtype=None, + ragged_rank=None, + inner_shape=None, + name=None, + row_splits_dtype=dtypes.int64, +) -> Union[ragged_tensor.RaggedTensor, ops._EagerTensorBase, ops.Operation]: + """Constructs a constant RaggedTensor from a nested Python list. + + Example: + + >>> tf.ragged.constant([[1, 2], [3], [4, 5, 6]]) + <tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]> + + All scalar values in `pylist` must have the same nesting depth `K`, and the + returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar + values, then `K` is one greater than the maximum depth of empty lists in + `pylist`. All scalar values in `pylist` must be compatible with `dtype`. + + Args: + pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that + is not a `list`, `tuple` or `np.ndarray` must be a scalar value + compatible with `dtype`. + dtype: The type of elements for the returned `RaggedTensor`. If not + specified, then a default is chosen based on the scalar values in + `pylist`. + ragged_rank: An integer specifying the ragged rank of the returned + `RaggedTensor`. Must be nonnegative and less than `K`. Defaults to + `max(0, K - 1)` if `inner_shape` is not specified. Defaults to + `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified. + inner_shape: A tuple of integers specifying the shape for individual inner + values in the returned `RaggedTensor`. Defaults to `()` if `ragged_rank` + is not specified. If `ragged_rank` is specified, then a default is chosen + based on the contents of `pylist`. + name: A name prefix for the returned tensor (optional). + row_splits_dtype: data type for the constructed `RaggedTensor`'s row_splits. + One of `tf.int32` or `tf.int64`. + + Returns: + A potentially ragged tensor with rank `K` and the specified `ragged_rank`, + containing the values from `pylist`. + + Raises: + ValueError: If the scalar values in `pylist` have inconsistent nesting + depth; or if ragged_rank or inner_shape are incompatible with `pylist`. + """ + def ragged_factory(values, row_splits): + row_splits = constant_op.constant(row_splits, dtype=row_splits_dtype) + return ragged_tensor.RaggedTensor.from_row_splits(values, row_splits, + validate=False) + + with ops.name_scope(name, "RaggedConstant"): + return _constant_value(ragged_factory, constant_op.constant, pylist, dtype, + ragged_rank, inner_shape) + + +@tf_export(v1=["ragged.constant_value"]) +@dispatch.add_dispatch_support +def constant_value( + pylist, + dtype=None, + ragged_rank=None, + inner_shape=None, + row_splits_dtype="int64", +) -> Union[ragged_tensor_value.RaggedTensorValue, np.ndarray]: + """Constructs a RaggedTensorValue from a nested Python list. + + Warning: This function returns a `RaggedTensorValue`, not a `RaggedTensor`. + If you wish to construct a constant `RaggedTensor`, use + [`ragged.constant(...)`](constant.md) instead. + + Example: + + >>> tf.compat.v1.ragged.constant_value([[1, 2], [3], [4, 5, 6]]) + tf.RaggedTensorValue(values=array([1, 2, 3, 4, 5, 6]), + row_splits=array([0, 2, 3, 6])) + + All scalar values in `pylist` must have the same nesting depth `K`, and the + returned `RaggedTensorValue` will have rank `K`. If `pylist` contains no + scalar values, then `K` is one greater than the maximum depth of empty lists + in `pylist`. All scalar values in `pylist` must be compatible with `dtype`. + + Args: + pylist: A nested `list`, `tuple` or `np.ndarray`.
Any nested element that + is not a `list` or `tuple` must be a scalar value compatible with `dtype`. + dtype: `numpy.dtype`. The type of elements for the returned `RaggedTensor`. + If not specified, then a default is chosen based on the scalar values in + `pylist`. + ragged_rank: An integer specifying the ragged rank of the returned + `RaggedTensorValue`. Must be nonnegative and less than `K`. Defaults to + `max(0, K - 1)` if `inner_shape` is not specified. Defaults to `max(0, K + - 1 - len(inner_shape))` if `inner_shape` is specified. + inner_shape: A tuple of integers specifying the shape for individual inner + values in the returned `RaggedTensorValue`. Defaults to `()` if + `ragged_rank` is not specified. If `ragged_rank` is specified, then a + default is chosen based on the contents of `pylist`. + row_splits_dtype: data type for the constructed `RaggedTensorValue`'s + row_splits. One of `numpy.int32` or `numpy.int64`. + + Returns: + A `tf.RaggedTensorValue` or `numpy.array` with rank `K` and the specified + `ragged_rank`, containing the values from `pylist`. + + Raises: + ValueError: If the scalar values in `pylist` have inconsistent nesting + depth; or if ragged_rank or inner_shape are incompatible with `pylist`. + """ + if dtype is not None and isinstance(dtype, dtypes.DType): + dtype = dtype.as_numpy_dtype + row_splits_dtype = dtypes.as_dtype(row_splits_dtype).as_numpy_dtype + def _ragged_factory(values, row_splits): + row_splits = np.array(row_splits, dtype=row_splits_dtype) + return ragged_tensor_value.RaggedTensorValue(values, row_splits) + + def _inner_factory(pylist, dtype, shape, name=None): # pylint: disable=unused-argument + return np.reshape(np.array(pylist, dtype=dtype), shape) + + return _constant_value(_ragged_factory, _inner_factory, pylist, dtype, + ragged_rank, inner_shape) + + +def _constant_value(ragged_factory, inner_factory, pylist, dtype, ragged_rank, + inner_shape): + """Constructs a constant RaggedTensor or RaggedTensorValue. + + Args: + ragged_factory: A factory function with the signature: + `ragged_factory(values, row_splits)` + inner_factory: A factory function with the signature: `inner_factory(pylist, + dtype, shape, name)` + pylist: A nested `list`, `tuple` or `np.ndarray`. + dtype: Data type for returned value. + ragged_rank: Ragged rank for returned value. + inner_shape: Inner value shape for returned value. + + Returns: + A value returned by `ragged_factory` or `inner_factory`. + + Raises: + ValueError: If the scalar values in `pylist` have inconsistent nesting + depth; or if ragged_rank or inner_shape are incompatible with `pylist`. + """ + if ragged_tensor.is_ragged(pylist): + raise TypeError("pylist may not be a RaggedTensor or RaggedTensorValue.") + # np.ndim builds an array, so we short-circuit lists and tuples. + if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0: + # Scalar value + if ragged_rank is not None and ragged_rank != 0: + raise ValueError("Invalid pylist=%r: incompatible with ragged_rank=%d" % + (pylist, ragged_rank)) + if inner_shape is not None and inner_shape: + raise ValueError( + "Invalid pylist=%r: incompatible with dim(inner_shape)=%d" % + (pylist, len(inner_shape))) + return inner_factory(pylist, dtype, ()) + + if ragged_rank is not None and ragged_rank < 0: + raise ValueError( + "Invalid ragged_rank=%r: must be nonnegative" % ragged_rank) + + # Find the depth of scalar values in `pylist`. 
+ scalar_depth, max_depth = _find_scalar_and_max_depth(pylist) + if scalar_depth is not None: + if max_depth > scalar_depth: + raise ValueError("Invalid pylist=%r: empty list nesting is greater " + "than scalar value nesting" % pylist) + if ragged_rank is not None and max_depth < ragged_rank: + raise ValueError(f"Invalid pylist={pylist}, max depth smaller than " + f"ragged_rank={ragged_rank}") + + # If both inner_shape and ragged_rank were specified, then check that + # they are compatible with pylist. + if inner_shape is not None and ragged_rank is not None: + expected_depth = ragged_rank + len(inner_shape) + 1 + if ((scalar_depth is not None and expected_depth != scalar_depth) or + (scalar_depth is None and expected_depth < max_depth)): + raise ValueError( + "Invalid pylist=%r: incompatible with ragged_rank=%d " + "and dim(inner_shape)=%d" % (pylist, ragged_rank, len(inner_shape))) + + # Check if the result is a `Tensor`. + if (ragged_rank == 0 or + (ragged_rank is None and + ((max_depth < 2) or + (inner_shape is not None and max_depth - len(inner_shape) < 2)))): + return inner_factory(pylist, dtype, inner_shape) + + # Compute default value for inner_shape. + if inner_shape is None: + if ragged_rank is None: + inner_shape = () + else: + inner_shape = _default_inner_shape_for_pylist(pylist, ragged_rank) + + # Compute default value for ragged_rank. + if ragged_rank is None: + if scalar_depth is None: + ragged_rank = max(1, max_depth - 1) + else: + ragged_rank = max(1, scalar_depth - 1 - len(inner_shape)) + + # Build the splits for each ragged rank, and concatenate the inner values + # into a single list. + nested_splits = [] + values = pylist + for dim in range(ragged_rank): + nested_splits.append([0]) + concatenated_values = [] + for row in values: + nested_splits[dim].append(nested_splits[dim][-1] + len(row)) + concatenated_values.extend(row) + values = concatenated_values + + values = inner_factory( + values, dtype=dtype, shape=(len(values),) + inner_shape, name="values") + for row_splits in reversed(nested_splits): + values = ragged_factory(values, row_splits) + return values + + +def _find_scalar_and_max_depth(pylist): + """Finds nesting depth of scalar values in pylist. + + Args: + pylist: A nested python `list` or `tuple`. + + Returns: + A tuple `(scalar_depth, max_depth)`. `scalar_depth` is the nesting + depth of scalar values in `pylist`, or `None` if `pylist` contains no + scalars. `max_depth` is the maximum depth of `pylist` (including + empty lists). + + Raises: + ValueError: If pylist has inconsistent nesting depths for scalars. + """ + # Check if pylist is not scalar. np.ndim builds an array, so we + # short-circuit lists and tuples. 
+ if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0: + scalar_depth = None + max_depth = 1 + for child in pylist: + child_scalar_depth, child_max_depth = _find_scalar_and_max_depth(child) + if child_scalar_depth is not None: + if scalar_depth is not None and scalar_depth != child_scalar_depth + 1: + raise ValueError("all scalar values must have the same nesting depth") + scalar_depth = child_scalar_depth + 1 + max_depth = max(max_depth, child_max_depth + 1) + return (scalar_depth, max_depth) + return (0, 0) + + +def _default_inner_shape_for_pylist(pylist, ragged_rank): + """Computes a default inner shape for the given python list.""" + + def get_inner_shape(item): + """Returns the inner shape for a python list `item`.""" + if not isinstance(item, (list, tuple)) and np.ndim(item) == 0: + return () + # Note that we need this check here in case `item` is not a Python list but + # fakes as being one (pylist). For a scenario of this, see test added in + # https://github.com/tensorflow/tensorflow/pull/48945 + elif len(item) > 0: # pylint: disable=g-explicit-length-test + return (len(item),) + get_inner_shape(item[0]) + return (0,) + + def check_inner_shape(item, shape): + """Checks that `item` has a consistent shape matching `shape`.""" + is_nested = isinstance(item, (list, tuple)) or np.ndim(item) != 0 + if is_nested != bool(shape): + raise ValueError("inner values have inconsistent shape") + if is_nested: + if shape[0] != len(item): + raise ValueError("inner values have inconsistent shape") + for child in item: + check_inner_shape(child, shape[1:]) + + # Collapse the ragged layers to get the list of inner values. + flat_values = pylist + for dim in range(ragged_rank): + if not all( + isinstance(v, (list, tuple)) or np.ndim(v) != 0 for v in flat_values): + raise ValueError("pylist has scalar values depth %d, but ragged_rank=%d " + "requires scalar value depth greater than %d" % + (dim + 1, ragged_rank, ragged_rank)) + flat_values = sum((list(v) for v in flat_values), []) + + # Compute the inner shape looking only at the leftmost elements; and then + # use check_inner_shape to verify that other elements have the same shape. + inner_shape = get_inner_shape(flat_values) + check_inner_shape(flat_values, inner_shape) + return inner_shape[1:] + + +@tf_export(v1=["ragged.placeholder"]) +@dispatch.add_dispatch_support +def placeholder(dtype, ragged_rank, value_shape=None, name=None): + """Creates a placeholder for a `tf.RaggedTensor` that will always be fed. + + **Important**: This ragged tensor will produce an error if evaluated. + Its value must be fed using the `feed_dict` optional argument to + `Session.run()`, `Tensor.eval()`, or `Operation.run()`. + + + Args: + dtype: The data type for the `RaggedTensor`. + ragged_rank: The ragged rank for the `RaggedTensor` + value_shape: The shape for individual flat values in the `RaggedTensor`. + name: A name for the operation (optional). + + Returns: + A `RaggedTensor` that may be used as a handle for feeding a value, but + not evaluated directly. + + Raises: + RuntimeError: if eager execution is enabled + + @compatibility(TF2) + This API is not compatible with eager execution and `tf.function`. To migrate + to TF2, rewrite the code to be compatible with eager execution. Check the + [migration + guide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls) + on replacing `Session.run` calls. In TF2, you can just pass tensors directly + into ops and layers. 
If you want to explicitly set up your inputs, also see + [Keras functional API](https://www.tensorflow.org/guide/keras/functional) on + how to use `tf.keras.Input` to replace `tf.compat.v1.ragged.placeholder`. + `tf.function` arguments also do the job of `tf.compat.v1.ragged.placeholder`. + For more details please read [Better + performance with tf.function](https://www.tensorflow.org/guide/function). + @end_compatibility + """ + if ragged_rank == 0: + return array_ops.placeholder(dtype, value_shape, name) + + with ops.name_scope(name, "RaggedPlaceholder", []): + flat_shape = tensor_shape.TensorShape([None]).concatenate(value_shape) + result = array_ops.placeholder(dtype, flat_shape, "flat_values") + for i in reversed(range(ragged_rank)): + row_splits = array_ops.placeholder(dtypes.int64, [None], + "row_splits_%d" % i) + result = ragged_tensor.RaggedTensor.from_row_splits(result, row_splits, + validate=False) + return result diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_functional_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_functional_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d011ce570397b32f7330ff68e96d2b0a7ef5a22d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_functional_ops.py @@ -0,0 +1,200 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Support for ragged tensors.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops.ragged import ragged_config +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("ragged.map_flat_values") +@dispatch.add_dispatch_support +def map_flat_values(op, *args, **kwargs): + """Applies `op` to the `flat_values` of one or more RaggedTensors. + + Replaces any `RaggedTensor` in `args` or `kwargs` with its `flat_values` + tensor (which collapses all ragged dimensions), and then calls `op`. Returns + a `RaggedTensor` that is constructed from the input `RaggedTensor`s' + `nested_row_splits` and the value returned by the `op`. + + If the input arguments contain multiple `RaggedTensor`s, then they must have + identical `nested_row_splits`. + + This operation is generally used to apply elementwise operations to each value + in a `RaggedTensor`. + + Warning: `tf.ragged.map_flat_values` does *not* apply `op` to each row of a + ragged tensor. This difference is important for non-elementwise operations, + such as `tf.reduce_sum`. If you wish to apply a non-elementwise operation to + each row of a ragged tensor, use `tf.map_fn` instead. (You may need to + specify an `output_signature` when using `tf.map_fn` with ragged tensors.) 
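+ For example, to sum each row with `tf.map_fn` (note that in current TF the + signature argument is spelled `fn_output_signature`): + + >>> rt = tf.ragged.constant([[1.0, 3.0], [], [3.0, 6.0, 3.0]]) + >>> tf.map_fn(tf.reduce_sum, rt, + ... fn_output_signature=tf.TensorSpec([], tf.float32)) + <tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 4., 0., 12.], dtype=float32)>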
+ + Examples: + + >>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]]) + >>> tf.ragged.map_flat_values(tf.ones_like, rt) + <tf.RaggedTensor [[1, 1, 1], [], [1, 1], [1]]> + >>> tf.ragged.map_flat_values(tf.multiply, rt, rt) + <tf.RaggedTensor [[1, 4, 9], [], [16, 25], [36]]> + >>> tf.ragged.map_flat_values(tf.add, rt, 5) + <tf.RaggedTensor [[6, 7, 8], [], [9, 10], [11]]> + + Example with a non-elementwise operation (note that `map_flat_values` and + `map_fn` return different results): + + >>> rt = tf.ragged.constant([[1.0, 3.0], [], [3.0, 6.0, 3.0]]) + >>> def normalized(x): + ... return x / tf.reduce_sum(x) + >>> tf.ragged.map_flat_values(normalized, rt) + <tf.RaggedTensor [[0.0625, 0.1875], [], [0.1875, 0.375, 0.1875]]> + >>> tf.map_fn(normalized, rt) + <tf.RaggedTensor [[0.25, 0.75], [], [0.25, 0.5, 0.25]]> + + Args: + op: The operation that should be applied to the RaggedTensor `flat_values`. + `op` is typically an element-wise operation (such as math_ops.add), but + any operation that preserves the size of the outermost dimension can be + used. I.e., `shape[0]` of the value returned by `op` must match + `shape[0]` of the `RaggedTensor`s' `flat_values` tensors. + *args: Arguments for `op`. + **kwargs: Keyword arguments for `op`. + + Returns: + A `RaggedTensor` whose `ragged_rank` matches the `ragged_rank` of all + input `RaggedTensor`s. + Raises: + ValueError: If args contains no `RaggedTensors`, or if the `nested_splits` + of the input `RaggedTensor`s are not identical. + """ + # Replace RaggedTensors with their values; and collect the partitions tensors + # from each RaggedTensor. + partition_lists = [] + flat_values_nrows = [] + inner_args = _replace_ragged_with_flat_values(args, partition_lists, + flat_values_nrows) + inner_kwargs = _replace_ragged_with_flat_values(kwargs, partition_lists, + flat_values_nrows) + if not partition_lists: + return op(*args, **kwargs) + + # If we can statically determine that the inputs are incompatible, then raise + # an error. (We can't guarantee full compatibility statically, so we need to + # perform some runtime checks too; but this allows us to fail sooner in some + # cases.) + if flat_values_nrows: + flat_values_nrows = set(flat_values_nrows) + if len(flat_values_nrows) != 1: + raise ValueError("Input RaggedTensors' flat_values must all have the " + "same outer-dimension size. Got sizes: %s" % + flat_values_nrows) + flat_values_nrows = flat_values_nrows.pop() # Get the single element + else: + flat_values_nrows = None + + partition_dtypes = set(p[0].dtype for p in partition_lists) + if len(partition_dtypes) > 1: + if not ragged_config.auto_cast_partition_dtype(): + raise ValueError("Input RaggedTensors have mismatched row partition " + "dtypes; use RaggedTensor.with_row_splits_dtype() to " + "convert them to compatible dtypes.") + + partition_lists = [ + [p.with_dtype(dtypes.int64) + for p in partition_list] # pylint: disable=g-complex-comprehension + for partition_list in partition_lists + ] + + # Delegate to `op` + op_output = op(*inner_args, **inner_kwargs) + # Check that the result has the expected shape (if known). + if flat_values_nrows is not None: + if not op_output.shape[:1].is_compatible_with([flat_values_nrows]): + raise ValueError( + "tf.ragged.map_flat_values requires that the output of `op` have " + "the same outer-dimension size as flat_values of any ragged " + "inputs. (output shape: %s; expected outer dimension size: %s)" % + (op_output.shape, flat_values_nrows)) + # Compose the result from the transformed values and the partitions.
+ return ragged_tensor.RaggedTensor._from_nested_row_partitions( # pylint: disable=protected-access + op_output, + _merge_partition_lists(partition_lists), + validate=False) + + +def _replace_ragged_with_flat_values(value, partition_lists, flat_values_nrows): + """Replace RaggedTensors with their flat_values, and record their partitions. + + Returns a copy of `value`, with any nested `RaggedTensor`s replaced by their + `flat_values` tensor. Looks inside lists, tuples, and dicts. + + Appends each `RaggedTensor`'s `RowPartition`s to `partition_lists`. + + Args: + value: The value that should be transformed by replacing `RaggedTensors`. + partition_lists: An output parameter used to record the row partitions + for any `RaggedTensors` that were replaced. + flat_values_nrows: An output parameter used to record the outer dimension + size for each replacement `flat_values` (when known). Contains a list of + int. + + Returns: + A copy of `value` with nested `RaggedTensors` replaced by their `values`. + """ + # Base case + if ragged_tensor.is_ragged(value): + value = ragged_tensor.convert_to_tensor_or_ragged_tensor(value) + partition_lists.append(value._nested_row_partitions) # pylint: disable=protected-access + nrows = tensor_shape.dimension_at_index(value.flat_values.shape, 0).value + if nrows is not None: + flat_values_nrows.append(nrows) + return value.flat_values + + # Recursion cases + def recurse(v): + return _replace_ragged_with_flat_values(v, partition_lists, + flat_values_nrows) + + if isinstance(value, list): + return [recurse(v) for v in value] + elif isinstance(value, tuple): + return tuple(recurse(v) for v in value) + elif isinstance(value, dict): + return dict((k, recurse(v)) for (k, v) in value.items()) + else: + return value + + +def _merge_partition_lists(partition_lists): + """Merges the given list of lists of RowPartitions. + + Args: + partition_lists: A list of lists of RowPartition. + + Returns: + A list of RowPartitions, where `result[i]` is formed by merging + `partition_lists[j][i]` for all `j`, using + `RowPartition._merge_precomputed_encodings`. + """ + dst = list(partition_lists[0]) + for src in partition_lists[1:]: + if len(src) != len(dst): + raise ValueError("All ragged inputs must have the same ragged_rank.") + for i in range(len(dst)): + # pylint: disable=protected-access + dst[i] = dst[i]._merge_precomputed_encodings(src[i]) + return dst diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_gather_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_gather_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..055b0a3d84c906f67fbe0f2913a479e830ce2705 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_gather_ops.py @@ -0,0 +1,520 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
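One more note before the gather file: the shape check at the end of `map_flat_values` above is what rejects ops that change the outer dimension of `flat_values`. A small sketch of both the accepted and the failing path (assumes eager TF 2.x):

```python
import tensorflow as tf

rt = tf.ragged.constant([[1.0, 2.0], [3.0]])

# OK: softmax maps the 3 flat values to 3 flat values.
print(tf.ragged.map_flat_values(tf.nn.softmax, rt))

# Rejected: reduce_sum collapses the flat_values dimension to a scalar.
try:
    tf.ragged.map_flat_values(tf.reduce_sum, rt)
except ValueError as e:
    print("ValueError:", e)
```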
+# ============================================================================== +"""Gather operations for RaggedTensors.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_ragged_array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_array_ops +from tensorflow.python.ops.ragged import ragged_math_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch + + +#=============================================================================== +# ragged_gather +#=============================================================================== +@dispatch.dispatch_for_api(array_ops.gather_v2) +def gather(params: ragged_tensor.RaggedOrDense, + indices: ragged_tensor.RaggedOrDense, + validate_indices=None, + axis=None, + batch_dims=0, + name=None): + """Gathers ragged slices from `params` axis `0` according to `indices`. + + See `tf.gather` for full documentation. (This version has the same API + as `tf.gather`, but supports ragged `params` and `indices`.) + + Examples: + + >>> params = tf.constant(['a', 'b', 'c', 'd', 'e']) + >>> indices = tf.constant([3, 1, 2, 1, 0]) + >>> ragged_params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']]) + >>> ragged_indices = tf.ragged.constant([[3, 1, 2], [1], [], [0]]) + + >>> tf.gather(params, ragged_indices) + <tf.RaggedTensor [[b'd', b'b', b'c'], [b'b'], [], [b'a']]> + + >>> tf.gather(ragged_params, indices) + <tf.RaggedTensor [[b'e'], [b'd'], [], [b'd'], [b'a', b'b', b'c']]> + + >>> tf.gather(ragged_params, ragged_indices) + <tf.RaggedTensor [[[b'e'], [b'd'], []], [[b'd']], [], [[b'a', b'b', b'c']]]> + + Args: + params: The potentially ragged tensor from which to gather values. Must be + at least rank 1. + indices: The potentially ragged tensor indicating which values to gather. + Must have dtype `int32` or `int64`. Values must be in the range `[0, + params.shape[0])`. + validate_indices: Ignored. + axis: The axis in `params` to gather `indices` from. + batch_dims: The number of batch dimensions. + name: A name for the operation (optional). + + Returns: + A `RaggedTensor`, where `output.dtype=params.dtype` and + `output.shape=indices.shape + params.shape[1:]` and + `output.ragged_rank=indices.shape.ndims + params.ragged_rank`. + + Raises: + ValueError: If indices.shape.ndims is not known statically.
+ """ + del validate_indices + + with ops.name_scope(name, 'RaggedGather', [params, indices]): + params = ragged_tensor.convert_to_tensor_or_ragged_tensor( + params, name='params') + indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( + indices, name='indices') + params, indices = ragged_tensor.match_row_splits_dtypes(params, indices) + + if batch_dims != indices.shape.rank: + batch_dims = array_ops.get_positive_axis( + batch_dims, + indices.shape.rank, + axis_name='batch_dims', + ndims_name='rank(indices)') + if params.shape.rank is not None and batch_dims >= params.shape.rank: + raise ValueError('batch_dims must be less than rank(params)') + if axis is None: + axis = batch_dims + axis = array_ops.get_positive_axis( + axis, params.shape.rank, ndims_name='rank(params)') + if axis < batch_dims: + raise ValueError('axis must be greater than or equal to batch_dims') + if indices.shape.rank is not None: + if not 0 <= batch_dims <= indices.shape.rank: + raise ValueError( + 'batch_dims=%s must be between 0 and rank(indices)=%s' % + (batch_dims, indices.shape.rank)) + + return _gather(params, indices, axis, batch_dims) + + +def _gather(params, indices, axis, batch_dims): + """Helper that implements the body for ragged gather(). + + Assumes that `params` and `indices` have been converted to tensors or + ragged tensors, and that `axis` and `batch_dims` have been normalized to + be positive. (So these conversions & normalizations can be skipped in + recursive calls to _gather). + + Args: + params: The tensor from which to gather values. + indices: The indices of values to gather. + axis: The axis in `params` to gather `indices` from. + batch_dims: The number of batch dimensions. + + Returns: + A potentially ragged tensor. + """ + params_is_ragged = ragged_tensor.is_ragged(params) + indices_is_ragged = ragged_tensor.is_ragged(indices) + + if not (params_is_ragged or indices_is_ragged): + return array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims) + + if batch_dims > 0: + return _batch_gather(params, indices, axis, batch_dims) + + if axis > 0: + return _axis_gather(params, indices, axis) + + if indices_is_ragged: + return indices.with_values(_gather(params, indices.values, 0, 0)) + + if indices.shape.ndims is None: + raise ValueError('rank(indices) must be known statically') + + out_ragged_rank = indices.shape.ndims + len(params.nested_row_splits) - 1 + result = gen_ragged_array_ops.ragged_gather( + indices=indices, + params_dense_values=params.flat_values, + params_nested_splits=params.nested_row_splits, + OUTPUT_RAGGED_RANK=out_ragged_rank) + + result = ragged_tensor.RaggedTensor.from_nested_row_splits( + result.output_dense_values, result.output_nested_splits, validate=False) + + # Inject uniform_row_lengths into the result RaggedTensors for dimensions + # corresponding to dense outer dimensions of `indices`. + # TODO(edloper): Change this to construct the result using RowPartition + # objects instead, so we don't need to modify private variables. + if indices.shape.ndims > 1: + target = result + indices_shape = array_ops.shape(indices, out_type=params.row_splits.dtype) + shape_cumprod = math_ops.cumprod(indices_shape) + for dim in range(indices.shape.ndims - 1): + # pylint: disable=protected-access + target._cached_nrows = shape_cumprod[dim] + target._uniform_row_length = indices_shape[dim + 1] + target = target.values + + return result + + +def _batch_gather(params, indices, axis, batch_dims): + """Helper that implements the body for ragged gather() when batch_dims>0. 
+ + Args: + params: The tensor from which to gather values. + indices: The indices of values to gather. + axis: The axis in `params` to gather `indices` from. + batch_dims: The number of batch dimensions. + + Returns: + A potentially ragged tensor. + """ + # Perform static checks that `params` and `indices` have compatible batch + # dimensions. Note: we do not perform *runtime* checks that `params` and + # `indices` actually have the same row-splits (because we wish to avoid the + # runtime cost of those checks). If `params` and `indices` are + # incompatible, the resulting `RaggedTensor` may be nonsensical. + if not params.shape[:batch_dims].is_compatible_with( + indices.shape[:batch_dims]): + raise ValueError('batch shape from indices %s does not match params ' + 'shape %s' % (indices.shape[:batch_dims], params.shape)) + + if batch_dims > 1: + # Convert params & indices to ragged tensors. + if not isinstance(params, ragged_tensor.RaggedTensor): + if indices.uniform_row_length is None: + raise ValueError( + 'batch shape from indices does not match params shape: ragged ' + 'indices dimension corresponds to uniform params dimension') + params = ragged_tensor.RaggedTensor.from_tensor( + params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype) + if not isinstance(indices, ragged_tensor.RaggedTensor): + if params.uniform_row_length is None: + raise ValueError( + 'batch shape from indices does not match params shape: ragged ' + 'params dimension corresponds to uniform indices dimension') + indices = ragged_tensor.RaggedTensor.from_tensor( + indices, ragged_rank=1, row_splits_dtype=params.row_splits.dtype) + # Flatten the two outer batch dimensions into a single batch dimension, + # and recurse. + return params.with_values( + _gather(params.values, indices.values, axis - 1, batch_dims - 1)) + + if axis > 1: + # Convert an axis dimension into a batch dimension, by adding a dimension + # to `indices`, and tiling it to match `params`. E.g., if `params` + # had shape `[B, P1, P2]`, and `indices` had shape `[B, I1, I2]`, then we + # tile `indices` to have shape `[B, P1, I1, I2]`. That way, we can treat + # the `P1` dimension as a batch dimension. + if not isinstance(indices, ragged_tensor.RaggedTensor): + adjusted_indices = params.with_values( + array_ops.repeat(indices, params.row_lengths(), 0)) + else: + if not isinstance(params, ragged_tensor.RaggedTensor): + params = ragged_tensor.RaggedTensor.from_tensor( + params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype) + adjusted_indices = _gather( + indices, + params.with_values( + array_ops.repeat( + math_ops.range(params.nrows()), params.row_lengths())), 0, 0) + return _batch_gather(params, adjusted_indices, axis, batch_dims + 1) + + if indices.shape.rank is None: + raise ValueError('rank(indices) must be known statically') + + assert batch_dims == 1 + # If params.shape=[B, P1...PN] and indices.shape=[B, I1...IM], then: + # + # output[b, i1...im, p2...pn] = + # params[b, indices[b, i1...im], p2...pn] + # + # We construct `output` by flattening `params`, adjusting the `indices` to + # point into that flattened list, and recursively calling `gather`. 
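A concrete illustration of the `batch_dims=1` case described above, where row `b` of `indices` selects values from row `b` of `params` (a sketch; outputs hand-computed, and the flattening itself follows below):

```python
import tensorflow as tf

params = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']])
indices = tf.ragged.constant([[2, 0], [1]])

# output[b, i] = params[b, indices[b, i]]
print(tf.gather(params, indices, batch_dims=1).to_list())
# [[b'c', b'a'], [b'e']]
```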
+ flat_params = _flatten_dims_0_and_1(params) + adjustments = _row_starts(params, indices.dtype) # offset for each batch + # increase adjustments's rank so it broadcasts w/ the outer dim of indices + adjustments = _increase_rank_to(adjustments, indices.shape.ndims) + adjusted_indices = indices + adjustments + return _gather(flat_params, adjusted_indices, axis - 1, 0) + + +def _axis_gather(params, indices, axis): + """Helper that implements ragged gather when axis>0 and batch_dims==0. + + Args: + params: The tensor from which to gather values. + indices: The indices of values to gather. + axis: The axis in `params` to gather `indices` from. + + Returns: + A potentially ragged tensor. + """ + if axis > 1: + if not isinstance(params, ragged_tensor.RaggedTensor): + params = ragged_tensor.RaggedTensor.from_tensor( + params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype) + # Recurse, using the flattened params (but do not flatten indices). + return params.with_values(_gather(params.values, indices, axis - 1, 0)) + + if indices.shape.rank is None: + raise ValueError('rank(indices) must be known statically') + + # Note: there is no checking of indices. If there is some index + # out of bounds, the results may be nonsensical. + + assert axis == 1 + # If params.shape=[P1...PN] and indices.shape=[I1...IM], then: + # + # output[p1, i1...im, p3...pn] = + # params[p1, indices[i1...im], p3...pn] + # + # We construct `output` by flattening `params`, adjusting the `indices` to + # have one additional dimension, and to point into that flattened list, and + # recursively calling `gather`. + flat_params = _flatten_dims_0_and_1(params) + adjustments = _row_starts(params, indices.dtype) # offset for each batch + adjustments = _increase_rank_to(adjustments, indices.shape.ndims + 1) + adjusted_indices = indices + adjustments + return _gather(flat_params, adjusted_indices, axis - 1, 0) + + +def _flatten_dims_0_and_1(t): + """Returns a copy of `t` with the outer two dimensions merged.""" + if isinstance(t, ragged_tensor.RaggedTensor): + return t.values + else: + t_shape = array_ops.shape(t) + return array_ops.reshape(t, array_ops.concat([[-1], t_shape[2:]], axis=0)) + + +def _row_starts(t, dtype): + """Returns the start indices for the rows in `t`.""" + if isinstance(t, ragged_tensor.RaggedTensor): + return math_ops.cast(t.row_starts(), dtype) + else: + t_shape = array_ops.shape(t, out_type=dtype) + return math_ops.range(t_shape[0]) * t_shape[1] + + +def _increase_rank_to(t, rank): + """Adds *trailing* size-1 dimensions to `t` until it has the given rank.""" + if isinstance(t, ragged_tensor.RaggedTensor): + return t.with_values(_increase_rank_to(t, rank - 1)) + else: + old_dims = array_ops.shape(t) + new_dims = array_ops.ones([rank - array_ops.rank(t)], old_dims.dtype) + new_shape = array_ops.concat([old_dims, new_dims], axis=0) + return array_ops.reshape(t, new_shape) + + +@dispatch.dispatch_for_api(array_ops.gather) +def _ragged_gather_v1(params: ragged_tensor.RaggedOrDense, + indices: ragged_tensor.RaggedOrDense, + validate_indices=None, + name=None, + axis=0, + batch_dims=0): + return gather(params, indices, validate_indices, axis, batch_dims, name) + + +#=============================================================================== +# ragged.gather_nd +#=============================================================================== +@dispatch.dispatch_for_api(array_ops.gather_nd_v2) +def gather_nd(params: ragged_tensor.RaggedOrDense, + indices: ragged_tensor.RaggedOrDense, + batch_dims=0, + 
name=None): + """Gather slices from `params` using `n`-dimensional indices. + + This operation is similar to `gather`, but it uses the innermost dimension + of `indices` to define a slice into `params`. In particular, if: + + * `indices` has shape `[A1...AN, I]` + * `params` has shape `[B1...BM]` + + Then: + + * `result` has shape `[A1...AN, B_{I+1}...BM]`. + * `result[a1...aN] = params[indices[a1...aN, :]]` + + Args: + params: A potentially ragged tensor with shape `[B1...BM]`. + indices: A potentially ragged tensor with shape `[A1...AN, I]`. + batch_dims: Must be zero. + name: A name for the operation (optional). + + Returns: + A potentially ragged tensor with shape `[A1...AN, B_{I+1}...BM]`. + + #### Examples: + + >>> params = tf.ragged.constant( + ... [ [ ['000', '001'], ['010' ] ], + ... [ ['100' ], ['110', '111', '112'], ['120'] ], + ... [ [ ], ['210' ] ] ]) + + >>> # Gather 2D slices from a 3D tensor + >>> tf.gather_nd(params, [[2], [0]]) + + + >>> # Gather 1D slices from a 3D tensor + >>> tf.gather_nd(params, [[2, 1], [0, 0]]) + + + >>> # Gather scalars from a 3D tensor + >>> tf.gather_nd(params, [[0, 0, 1], [1, 1, 2]]).numpy() + array([b'001', b'112'], dtype=object) + """ + if not isinstance(batch_dims, int) or batch_dims != 0: + raise ValueError('batch_dims != 0 is not supported for ragged gather yet.') + if not (ragged_tensor.is_ragged(params) or ragged_tensor.is_ragged(indices)): + return array_ops.gather_nd(params, indices, name) + + with ops.name_scope(name, 'RaggedGatherNd', [params, indices]): + + params = ragged_tensor.convert_to_tensor_or_ragged_tensor( + params, name='params') + indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( + indices, name='indices') + params, indices = ragged_tensor.match_row_splits_dtypes(params, indices) + indices_shape = indices.shape + indices_ndims = indices_shape.ndims + if indices_ndims is None: + raise ValueError('indices.rank must be statically known.') + if indices_ndims == 0: + raise ValueError('indices.rank must be at least 1.') + if (ragged_tensor.is_ragged(indices) and + indices_ndims == indices.ragged_rank + 1): + raise ValueError('The innermost dimension of indices may not be ragged') + + # `index_size` is the "n" in "gather_nd" -- i.e., the number of dimensions + # that each index slices into. + index_size = tensor_shape.dimension_value(indices_shape[-1]) + if index_size is None: + raise ValueError('indices.shape[-1] must be statically known.') + + # If `indices` has more than 2 dimensions, then recurse. If `indices` is + # dense, then we convert it to ragged before recursing, and then convert + # the result back to `dense` if appropriate. + if indices_ndims > 2: + indices_is_dense = not ragged_tensor.is_ragged(indices) + if indices_is_dense: + indices = ragged_tensor.RaggedTensor.from_tensor( + indices, ragged_rank=indices_ndims - 2, + row_splits_dtype=params.row_splits.dtype) + result = indices.with_flat_values(gather_nd(params, indices.flat_values)) + if (indices_is_dense and ragged_tensor.is_ragged(result) and + result.ragged_rank == indices_ndims - 2): + result = ragged_tensor.RaggedTensor.to_tensor(result) + return result + + # indices_ndims <= 2, and the innermost dimension of indices may not be + # ragged, so `indices` must not be ragged. + assert not ragged_tensor.is_ragged(indices) + assert ragged_tensor.is_ragged(params) + + # Handle corner case: An empty index tuple selects the entire `params` + # value. So if `index_size` is zero, then tile `params`.
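Before the corner-case handling below, a compact worked example of the slice-selection semantics described above (outputs hand-computed under eager execution):

```python
import tensorflow as tf

params = tf.ragged.constant([[['000', '001'], ['010']],
                             [['100'], ['110', '111', '112'], ['120']],
                             [[], ['210']]])

# Each length-1 index vector selects a whole 2-D slice (one row of params).
print(tf.gather_nd(params, [[2], [0]]).to_list())
# [[[], [b'210']], [[b'000', b'001'], [b'010']]]
```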
+ if index_size == 0: + params_ndims = params.ragged_rank + array_ops.rank(params.flat_values) + for dim in range(indices_ndims - 1): + params = ragged_array_ops.expand_dims(params, axis=0) + multiples = array_ops.concat([ + array_ops.shape(indices)[:-1], + array_ops.ones([params_ndims], dtypes.int32) + ], + axis=0) + return ragged_array_ops.tile(params, multiples) + + # When index_size=1, we can just flatten the index tuples and use gather. + elif index_size == 1: + flattened_index_tuples = array_ops.reshape(indices, [-1]) + return gather(params, flattened_index_tuples) + + # Otherwise, params is a RaggedTensor, and indices is a 1D or 2D Tensor. + # Flatten both the index tuples and the params, such that the flattened + # index tuples point to the correct values in the flattened params; and + # then use ragged.gather on the flattened index tuples & params. + else: + indices = math_ops.cast(indices, params.row_splits.dtype) + + # Flatten the outermost 2 dimensions of the index tuples & params. + flattened_index_tuples = array_ops.gather(params.row_splits, + indices[..., 0]) + flattened_index_tuples += indices[..., 1] + flattened_params = params.values + + # Flatten any remaining dimensions. + for dim in range(2, index_size): + if not ragged_tensor.is_ragged(flattened_params): + flattened_index_tuples = array_ops.expand_dims( + flattened_index_tuples, axis=1) + flattened_index_tuples = array_ops.concat( + [flattened_index_tuples, indices[..., dim:]], axis=1) + return array_ops.gather_nd(flattened_params, flattened_index_tuples) + + flattened_index_tuples = array_ops.gather( + flattened_params.row_starts(), flattened_index_tuples) + flattened_index_tuples += indices[..., dim] + flattened_params = flattened_params.values + + # Gather using the flattened index tuples and params. + return gather(flattened_params, flattened_index_tuples) + + +@dispatch.dispatch_for_api(array_ops.gather_nd) +def _ragged_gather_nd_v1(params: ragged_tensor.RaggedOrDense, + indices: ragged_tensor.RaggedOrDense, + name=None, + batch_dims=0): + return gather_nd(params, indices, batch_dims, name) + + +#=============================================================================== +# Gradient for the RaggedGather kernel +#=============================================================================== +@ops.RegisterGradient('RaggedGather') +def _ragged_gather_grad(op, *grads): + """Gradient for RaggedGather op.""" + param_nested_splits = op.inputs[:-2] + param_inner_values = op.inputs[-2] + indices = op.inputs[-1] + grad_inner_values = grads[-1] + + # For each row in `params`, find the range of values in `params.inner_values` + # that is covered by that row. In particular, the values in row `i` are + # `param_inner_values[combined_splits[i]:combined_splits[i+1]`. + combined_splits = param_nested_splits[0] + for row_splits in param_nested_splits[1:]: + combined_splits = array_ops.gather(row_splits, combined_splits) + + # The outer dimensions of `indices` correspond 1:1 with the outer dimensions + # of `ragged_grad` that are encoded by `grad_nested_splits`. Thus, the + # flattened `indices` correspond 1:1 with `grad_inner_values`. + flat_indices = array_ops.reshape(indices, [-1]) + + # Build an IndexedSlices where the values are taken from `flat_grad`. 
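The `index_size >= 2` flattening strategy above can be checked end to end, e.g. with two-element index tuples into a rank-2 ragged `params` (a sketch; outputs hand-computed):

```python
import tensorflow as tf

params = tf.ragged.constant([['a', 'b', 'c'], ['d'], ['e', 'f']])

# index_size == 2: each [row, col] tuple becomes a single offset into
# params.values (row_splits[row] + col) before a plain gather.
print(tf.gather_nd(params, [[0, 2], [2, 0]]).numpy())
# [b'c' b'e']
```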
+ grad_indices = ragged_math_ops.range( + array_ops.gather(combined_splits, flat_indices), + array_ops.gather(combined_splits[1:], flat_indices)).values + + param_inner_values_grad = indexed_slices.IndexedSlices( + values=grad_inner_values, indices=grad_indices, + dense_shape=array_ops.shape(param_inner_values)) + return [None for _ in param_nested_splits] + [param_inner_values_grad, None] diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_getitem.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_getitem.py new file mode 100644 index 0000000000000000000000000000000000000000..b7ceb9240be243fcdbe471c431337a712ea34a0c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_getitem.py @@ -0,0 +1,477 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python-style indexing and slicing for RaggedTensors.""" + +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_gather_ops +from tensorflow.python.ops.ragged import ragged_math_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("__operators__.ragged_getitem", v1=[]) +@dispatch.add_dispatch_support +def ragged_tensor_getitem(rt_input, key): + """Returns the specified piece of this RaggedTensor. + + Supports multidimensional indexing and slicing, with one restriction: + indexing into a ragged inner dimension is not allowed. This case is + problematic because the indicated value may exist in some rows but not + others. In such cases, it's not obvious whether we should (1) report an + IndexError; (2) use a default value; or (3) skip that value and return a + tensor with fewer rows than we started with. Following the guiding + principles of Python ("In the face of ambiguity, refuse the temptation to + guess"), we simply disallow this operation. + + Args: + rt_input: The RaggedTensor to slice. + key: Indicates which piece of the RaggedTensor to return, using standard + Python semantics (e.g., negative values index from the end). 
`key` + may have any of the following types: + + * `int` constant + * Scalar integer `Tensor` + * `slice` containing integer constants and/or scalar integer + `Tensor`s + * `Ellipsis` + * `tf.newaxis` + * `tuple` containing any of the above (for multidimensional indexing) + + Returns: + A `Tensor` or `RaggedTensor` object. Values that include at least one + ragged dimension are returned as `RaggedTensor`. Values that include no + ragged dimensions are returned as `Tensor`. See above for examples of + expressions that return `Tensor`s vs `RaggedTensor`s. + + Raises: + ValueError: If `key` is out of bounds. + ValueError: If `key` is not supported. + TypeError: If the indices in `key` have an unsupported type. + + Examples: + + >>> # A 2-D ragged tensor with 1 ragged dimension. + >>> rt = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e'], ['f'], ['g']]) + >>> rt[0].numpy() # First row (1-D `Tensor`) + array([b'a', b'b', b'c'], dtype=object) + >>> rt[:3].to_list() # First three rows (2-D RaggedTensor) + [[b'a', b'b', b'c'], [b'd', b'e'], [b'f']] + >>> rt[3, 0].numpy() # 1st element of 4th row (scalar) + b'g' + + >>> # A 3-D ragged tensor with 2 ragged dimensions. + >>> rt = tf.ragged.constant([[[1, 2, 3], [4]], + ... [[5], [], [6]], + ... [[7]], + ... [[8, 9], [10]]]) + >>> rt[1].to_list() # Second row (2-D RaggedTensor) + [[5], [], [6]] + >>> rt[3, 0].numpy() # First element of fourth row (1-D Tensor) + array([8, 9], dtype=int32) + >>> rt[:, 1:3].to_list() # Items 1-3 of each row (3-D RaggedTensor) + [[[4]], [[], [6]], [], [[10]]] + >>> rt[:, -1:].to_list() # Last item of each row (3-D RaggedTensor) + [[[4]], [[6]], [[7]], [[10]]] + """ + if not isinstance(rt_input, ragged_tensor.RaggedTensor): + raise TypeError("Ragged __getitem__ expects a ragged_tensor.") + scope_tensors = [rt_input] + list(_tensors_in_key_list(key)) + if isinstance(key, (list, tuple)): + key = list(key) + else: + key = [key] + with ops.name_scope(None, "RaggedGetItem", scope_tensors): + return _ragged_getitem(rt_input, key) + + +def _ragged_getitem(rt_input, key_list): + """Helper for indexing and slicing ragged tensors with __getitem__(). + + Extracts the specified piece of the `rt_input`. See + `RaggedTensor.__getitem__` for examples and restrictions. + + Args: + rt_input: The `RaggedTensor` from which a piece should be returned. + key_list: The list of keys specifying which piece to return. Each key + corresponds with a separate dimension. + + Returns: + The indicated piece of rt_input. + + Raises: + ValueError: If `key_list` is not supported. + TypeError: If any keys in `key_list` have an unsupported type. + """ + if not key_list: + return rt_input + row_key = key_list[0] + inner_keys = key_list[1:] + + if row_key is Ellipsis: + expanded_key_list = _expand_ellipsis(key_list, rt_input.shape.ndims) + return _ragged_getitem(rt_input, expanded_key_list) + + # Adding a new axis: Get rt_input[inner_keys], and wrap it in a RaggedTensor + # that puts all values in a single row. 
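Row indexing (handled below) follows Python semantics: in eager mode an out-of-range row raises `IndexError`, which Python's iteration protocol converts into `StopIteration`, so plain iteration over a `RaggedTensor` terminates cleanly. A small sketch:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1., 2.], [3., 4., 5.], [6.]])

for row in rt:
    print(row.numpy())
# [1. 2.]
# [3. 4. 5.]
# [6.]
```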
+ if row_key is array_ops.newaxis: + inner_rt = _ragged_getitem(rt_input, inner_keys) + nsplits = tensor_shape.dimension_at_index(inner_rt.row_splits.shape, 0) + if nsplits.value is not None: + nsplits = nsplits.value + else: + nsplits = array_ops.shape(inner_rt.row_splits, + out_type=inner_rt.row_splits.dtype)[0] + return ragged_tensor.RaggedTensor.from_uniform_row_length( + inner_rt, nsplits - 1, nrows=1, validate=False) + + # Slicing a range of rows: first slice the outer dimension, and then + # call `_ragged_getitem_inner_dimensions` to handle the inner keys. + if isinstance(row_key, slice): + sliced_rt_input = _slice_ragged_row_dimension(rt_input, row_key) + if rt_input.uniform_row_length is not None: + # If the inner dimension has uniform_row_length, then preserve it (by + # re-wrapping the values in a new RaggedTensor). Note that the row + # length won't have changed, since we're slicing a range of rows (and not + # slicing the rows themselves). + sliced_rt_input = ragged_tensor.RaggedTensor.from_uniform_row_length( + sliced_rt_input.values, rt_input.uniform_row_length, + nrows=sliced_rt_input.nrows()) + return _ragged_getitem_inner_dimensions(sliced_rt_input, inner_keys) + + # Indexing a single row: slice values to get the indicated row, and then + # use a recursive call to __getitem__ to handle the inner keys. + else: + starts = rt_input.row_splits[:-1] + limits = rt_input.row_splits[1:] + if context.executing_eagerly(): + # In python, __getitem__ should throw IndexError for out of bound + # indices. This will allow iteration run correctly as python will + # translate IndexError into StopIteration for next()/__next__(). + # Below is an example: + # import tensorflow as tf + # r = tf.ragged.constant([[1., 2.], [3., 4., 5.], [6.]]) + # for elem in r: + # print(elem) + # In non eager mode, the exception is thrown when session runs + # so we don't know if out of bound happens before. + # In eager mode, however, it is possible to find out when to + # throw out of bound IndexError. + # In the following row_key >= len(starts) is checked. In case of + # TypeError which happens when row_key is not an integer, the exception + # will simply be ignored as it will be processed later anyway. + try: + if int(row_key) >= len(starts): + raise IndexError("Row key {} out of bounds".format(row_key)) + except (TypeError, ValueError): + pass + row = rt_input.values[starts[row_key]:limits[row_key]] + return row.__getitem__(inner_keys) + + +def _slice_ragged_row_dimension(rt_input, row_key): + """Slice the outer dimension of `rt_input` according to the given `slice`. + + Args: + rt_input: The `RaggedTensor` to slice. + row_key: The `slice` object that should be used to slice `rt_input`. + + Returns: + A `RaggedTensor` containing the indicated slice of `rt_input`. + """ + if row_key.start is None and row_key.stop is None and row_key.step is None: + return rt_input + + # Use row_key to slice the starts & limits. + new_starts = rt_input.row_splits[:-1][row_key] + new_limits = rt_input.row_splits[1:][row_key] + zero_pad = array_ops.zeros([1], rt_input.row_splits.dtype) + + # If there's no slice step, then we can just select a single continuous + # span of `ragged.values(rt_input)`. + if row_key.step is None or row_key.step == 1: + # Construct the new splits. If new_starts and new_limits are empty, + # then this reduces to [0]. 
Otherwise, this reduces to: + # concat([[new_starts[0]], new_limits]) + new_splits = array_ops.concat( + [zero_pad[array_ops.size(new_starts):], new_starts[:1], new_limits], + axis=0) + values_start = new_splits[0] + values_limit = new_splits[-1] + return ragged_tensor.RaggedTensor.from_row_splits( + rt_input.values[values_start:values_limit], new_splits - values_start, + validate=False) + + # If there is a slice step (aka a strided slice), then use ragged_gather to + # collect the necessary elements of `ragged.values(rt_input)`. + else: + return _build_ragged_tensor_from_value_ranges(new_starts, new_limits, 1, + rt_input.values) + + +def _ragged_getitem_inner_dimensions(rt_input, key_list): + """Retrieve inner dimensions, keeping outermost dimension unchanged. + + Args: + rt_input: The `RaggedTensor` or `Tensor` from which a piece should be + extracted. + key_list: The __getitem__ keys for slicing the inner dimensions. + + Returns: + A `RaggedTensor`. + + Raises: + ValueError: If key_list is not supported. + """ + if not key_list: + return rt_input + + if not isinstance(rt_input, ragged_tensor.RaggedTensor): + return rt_input.__getitem__([slice(None, None, None)] + key_list) + + column_key = key_list[0] + if column_key is Ellipsis: + expanded_key_list = _expand_ellipsis(key_list, rt_input.values.shape.ndims) + return _ragged_getitem_inner_dimensions(rt_input, expanded_key_list) + + # Adding a new axis to a ragged inner dimension: recursively get the inner + # dimensions of rt_input with key_list[1:], and then wrap the result in a + # RaggedTensor that puts each value in its own row. + if column_key is array_ops.newaxis: + inner_rt = _ragged_getitem_inner_dimensions(rt_input, key_list[1:]) + nsplits = tensor_shape.dimension_at_index(inner_rt.row_splits.shape, 0) + if nsplits.value is not None: + nsplits = nsplits.value + else: + nsplits = array_ops.shape( + inner_rt.row_splits, out_type=inner_rt.row_splits.dtype + )[0] + return ragged_tensor.RaggedTensor.from_uniform_row_length( + inner_rt, 1, nrows=nsplits - 1, validate=False) + + # Slicing a range of columns in a ragged inner dimension. We use a + # recursive call to process the values, and then assemble a RaggedTensor + # with those values. + if isinstance(column_key, slice): + if (column_key.start is None and column_key.stop is None and + column_key.step is None): + # Trivial slice: recursively process all values, & splits is unchanged. + return rt_input.with_values( + _ragged_getitem_inner_dimensions(rt_input.values, key_list[1:])) + else: + if not ( + isinstance(column_key.start, (tensor_lib.Tensor, int, type(None))) + and isinstance(column_key.stop, (tensor_lib.Tensor, int, type(None))) + ): + raise TypeError("slice offsets must be integers or None") + + # Nontrivial slice: use ragged_gather to extract the indicated slice as + # a new RaggedTensor (inner_rt), and then recursively process its values. + starts = rt_input.row_splits[:-1] + limits = rt_input.row_splits[1:] + step = 1 if column_key.step is None else column_key.step + lower_bound = _if_ge_zero(step, lambda: starts, lambda: starts - 1) + upper_bound = _if_ge_zero(step, lambda: limits, lambda: limits - 1) + # inner_rt_starts[i] = index to start gathering for row i. 
+ if column_key.start is None: + inner_rt_starts = _if_ge_zero(step, lambda: starts, lambda: limits - 1) + else: + start_offset = math_ops.cast(column_key.start, starts.dtype) + inner_rt_starts = _if_ge_zero( + column_key.start, + lambda: math_ops.minimum(starts + start_offset, upper_bound), + lambda: math_ops.maximum(limits + start_offset, lower_bound)) + # inner_rt_limits[i] = index to stop gathering for row i. + if column_key.stop is None: + inner_rt_limits = _if_ge_zero(step, lambda: limits, lambda: starts - 1) + else: + stop_offset = math_ops.cast(column_key.stop, starts.dtype) + inner_rt_limits = _if_ge_zero( + column_key.stop, + lambda: math_ops.minimum(starts + stop_offset, upper_bound), + lambda: math_ops.maximum(limits + stop_offset, lower_bound)) + inner_rt = _build_ragged_tensor_from_value_ranges( + inner_rt_starts, inner_rt_limits, column_key.step, rt_input.values) + # If the row dimension is uniform, then calculate the new + # uniform_row_length, and rebuild inner_rt using that uniform_row_lengths. + if rt_input.uniform_row_length is not None: + new_row_length = _slice_length(rt_input.uniform_row_length, column_key) + inner_rt = ragged_tensor.RaggedTensor.from_uniform_row_length( + inner_rt.values, new_row_length, rt_input.nrows()) + return inner_rt.with_values( + _ragged_getitem_inner_dimensions(inner_rt.values, key_list[1:])) + + # Indexing a single column in a ragged inner dimension: raise an Exception. + # See RaggedTensor.__getitem__.__doc__ for an explanation of why indexing + # into a ragged inner dimension is problematic. + if rt_input.uniform_row_length is None: + raise ValueError("Cannot index into an inner ragged dimension.") + + # Indexing a single column in a uniform inner dimension: check that the + # given index is in-bounds, and then use a strided slice over rt_input.values + # to take the indicated element from each row. + row_length = rt_input.uniform_row_length + column_key = math_ops.cast(column_key, row_length.dtype) + oob_err_msg = "Index out of bounds when indexing into a ragged tensor" + oob_checks = [ + check_ops.assert_greater_equal( + column_key, -row_length, message=oob_err_msg), + check_ops.assert_less(column_key, row_length, message=oob_err_msg), + ] + with ops.control_dependencies(oob_checks): + offset = _if_ge_zero(column_key, lambda: column_key, + lambda: row_length + column_key) + sliced_rt = rt_input.values[offset::row_length] + return _ragged_getitem_inner_dimensions(sliced_rt, key_list[1:]) + + +def _slice_length(value_length, slice_key): + """Computes the number of elements in a slice of a value with a given length. + + Returns the equivalent of: `len(range(value_length)[slice_key])` + + Args: + value_length: Scalar int `Tensor`: the length of the value being sliced. + slice_key: A `slice` object used to slice elements from the value. + + Returns: + The number of elements in the sliced value. + """ + # Note: we could compute the slice length without creating a zeros tensor + # with some variant of (stop-start)//step, but doing so would require more + # ops (for checking bounds, handling negative indices, negative step sizes, + # etc); and we expect this to be an uncommon operation, so we use this + # simpler implementation. + zeros = array_ops.zeros(value_length, dtype=dtypes.bool) + return array_ops.size(zeros[slice_key], out_type=value_length.dtype) + + +def _expand_ellipsis(key_list, num_remaining_dims): + """Expands the ellipsis at the start of `key_list`. + + Assumes that the first element of `key_list` is Ellipsis. 
This will either + remove the Ellipsis (if it corresponds to zero indices) or prepend a new + `slice(None, None, None)` (if it corresponds to more than zero indices). + + Args: + key_list: The arguments to `__getitem__()`. + num_remaining_dims: The number of dimensions remaining. + + Returns: + A copy of `key_list` with the ellipsis expanded. + Raises: + ValueError: If `num_remaining_dims` is `None` (i.e., the rank of the + RaggedTensor is not statically known). + IndexError: If there are too many elements in `key_list`. + """ + if num_remaining_dims is None: + raise ValueError("Ellipsis not supported for unknown shape RaggedTensors") + num_indices = sum(1 for idx in key_list if idx is not array_ops.newaxis) + if num_indices > num_remaining_dims + 1: + raise IndexError("Too many indices for RaggedTensor") + elif num_indices == num_remaining_dims + 1: + return key_list[1:] + else: + return [slice(None, None, None)] + key_list + + +def _tensors_in_key_list(key_list): + """Generates all Tensors in the given slice spec.""" + if isinstance(key_list, tensor_lib.Tensor): + yield key_list + if isinstance(key_list, (list, tuple)): + for v in key_list: + for tensor in _tensors_in_key_list(v): + yield tensor + if isinstance(key_list, slice): + for tensor in _tensors_in_key_list(key_list.start): + yield tensor + for tensor in _tensors_in_key_list(key_list.stop): + yield tensor + for tensor in _tensors_in_key_list(key_list.step): + yield tensor + + +def _build_ragged_tensor_from_value_ranges(starts, limits, step, values): + """Returns a `RaggedTensor` containing the specified sequences of values. + + Returns a RaggedTensor `output` where: + + ```python + output.shape[0] = starts.shape[0] + output[i] = values[starts[i]:limits[i]:step] + ``` + + Requires that `starts.shape == limits.shape` and + `0 <= starts[i] <= limits[i] <= values.shape[0]`. + + Args: + starts: 1D integer Tensor specifying the start indices for the sequences of + values to include. + limits: 1D integer Tensor specifying the limit indices for the sequences of + values to include. + step: Integer value specifying the step size for strided slices. + values: The set of values to select from. + + Returns: + A `RaggedTensor`. + + Raises: + TypeError: If `step` is not an integer or `None`. + """ + # Use `ragged_range` to get the index of each value we should include. + if step is None: + step = 1 + step = ops.convert_to_tensor(step, name="step") + if step.dtype.is_integer: + step = math_ops.cast(step, starts.dtype) + else: + raise TypeError("slice strides must be integers or None") + value_indices = ragged_math_ops.range(starts, limits, step, + row_splits_dtype=starts.dtype) + + # Use `ragged_gather` or `array_ops.gather` to collect the values. + if isinstance(values, ragged_tensor.RaggedTensor): + gathered_values = ragged_gather_ops.gather( + params=values, indices=value_indices.values) + else: + gathered_values = array_ops.gather( + params=values, indices=value_indices.values) + + # Assemble the RaggedTensor from splits & values. + return value_indices.with_values(gathered_values) + + +def _if_ge_zero(value, true_fn, false_fn): + """Returns `true_fn() if value >= 0 else false_fn()`.""" + # If `value` is statically known, then don't use a control flow op.
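Inner-dimension slicing, as implemented above, clamps per row just like Python's list slicing; a short worked example (outputs hand-computed):

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2, 3], [4], [5, 6]])

print(rt[:, :2].to_list())   # [[1, 2], [4], [5, 6]]
print(rt[:, -1:].to_list())  # [[3], [4], [6]]
```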
+ if isinstance(value, tensor_lib.Tensor): + const_value = tensor_util.constant_value(value) + if const_value is None: + return cond.cond(value >= 0, true_fn, false_fn) + else: + value = const_value + if value >= 0: + return true_fn() + else: + return false_fn() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_image_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_image_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f139b97f55eb52955cd9807094c3dc85b7cb51e0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_image_ops.py @@ -0,0 +1,98 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Image operations for RaggedTensors.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_spec +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import image_ops +from tensorflow.python.ops import map_fn +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch + + +@dispatch.dispatch_for_api(image_ops.resize_images_v2) +def resize_images_v2(images: ragged_tensor.RaggedTensor, + size, + method=image_ops.ResizeMethod.BILINEAR, + preserve_aspect_ratio=False, + antialias=False, + name=None): + """RaggedTensor dispatcher for tf.image.resize (tf-v2).""" + with ops.name_scope(name, "RaggedResizeImages", [images, size]): + return _resize_images( + image_ops.resize_images_v2, + images, + size, + method=method, + preserve_aspect_ratio=preserve_aspect_ratio, + antialias=antialias) + + +@dispatch.dispatch_for_api(image_ops.resize_images) +def resize_images_v1(images: ragged_tensor.RaggedTensor, + size, + method=image_ops.ResizeMethodV1.BILINEAR, + align_corners=False, + preserve_aspect_ratio=False, + name=None): + """RaggedTensor dispatcher for tf.image.resize (tf-v1).""" + with ops.name_scope(name, "RaggedResizeImages", [images, size]): + return _resize_images( + image_ops.resize_images, + images, + size, + method=method, + preserve_aspect_ratio=preserve_aspect_ratio, + align_corners=align_corners) + + +def _resize_images(resize_op, images, size, **kwargs): + """RaggedTensor dispatcher for tf.image.resize.""" + if images.shape.rank != 4: + raise ValueError( + "tf.image.resize: images.shape.rank must be 4 if images is ragged.") + + # Determine the output shape (excluding the batch dimension). 
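A sketch of the public behavior of the ragged resize dispatchers above, assuming TF 2.x (the ragged batch here is built with `tf.ragged.stack`; the helper below maps a per-image resize over it):

```python
import tensorflow as tf

# Two images with different spatial sizes in a single ragged batch.
images = tf.ragged.stack([tf.zeros([2, 3, 1]), tf.zeros([4, 5, 1])])

# Each image is densified and resized independently; the result is a
# uniform (dense) float32 batch.
out = tf.image.resize(images, size=[8, 8])
print(out.shape)  # (2, 8, 8, 1)
```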
+ static_batch_size = tensor_shape.dimension_value(images.shape[0]) + size = ops.convert_to_tensor(size, dtypes.int32, "size") + size_as_shape = tensor_util.constant_value_as_shape(size).with_rank(2) + out_shape = size_as_shape + images.shape[-1:] + out_spec = tensor_spec.TensorSpec(out_shape, dtypes.float32) + + def resize_one(image): + if isinstance(image, ragged_tensor.RaggedTensor): + image = image.to_tensor() + return resize_op(image, size, **kwargs) + + def resize_with_map(): + return map_fn.map_fn_v2(resize_one, images, fn_output_signature=out_spec) + + def empty_result(): + channels = array_ops.shape(images.flat_values)[-1:] + return array_ops.zeros(array_ops.concat([[0], size, channels], axis=0)) + + if static_batch_size == 0: + return empty_result() + elif static_batch_size is not None: + return resize_with_map() + else: + empty_batch = math_ops.equal(images.nrows(), 0) + return cond.cond(empty_batch, empty_result, resize_with_map) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_map_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_map_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..06d9f0624d08306ecb1b3b4e838116f6fd86b0ed --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_map_ops.py @@ -0,0 +1,174 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functional operations for RaggedTensors.""" + +from tensorflow.python.ops import map_fn as map_fn_lib +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import nest + + +def map_fn(fn, + elems, + dtype=None, + parallel_iterations=None, + back_prop=True, + swap_memory=False, + infer_shape=True, + name=None): + """map on the list of tensors unpacked from `elems` on dimension 0. + + The simplest version of `map_fn` repeatedly applies the callable `fn` to a + sequence of elements from first to last. The elements are made of the + tensors unpacked from `elems`. `dtype` is the data type of the return + value of `fn`. Users must provide `dtype` if it is different from + the data type of `elems`. + + Suppose that `elems` is unpacked into `values`, a list of tensors. The shape + of the result tensor is `[values.shape[0]] + fn(values[0]).shape`. + + This method also allows multi-arity `elems` and output of `fn`. If `elems` + is a (possibly nested) list or tuple of tensors, then each of these tensors + must have a matching first (unpack) dimension. The signature of `fn` may + match the structure of `elems`. That is, if `elems` is + `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is: + `fn = lambda (t1, [t2, t3, [t4, t5]]):`. + + Furthermore, `fn` may emit a different structure than its input. For example, + `fn` may look like: `fn = lambda t1: return (t1 + 1, t1 - 1)`. 
In this case, + the `dtype` parameter is not optional: `dtype` must be a type or (possibly + nested) tuple of types matching the output of `fn`. + + To apply a functional operation to the nonzero elements of a SparseTensor + one of the following methods is recommended. First, if the function is + expressible as TensorFlow ops, use + + ```python + result = SparseTensor(input.indices, fn(input.values), input.dense_shape) + ``` + + If, however, the function is not expressible as a TensorFlow op, then use + + ```python + result = SparseTensor( + input.indices, map_fn(fn, input.values), input.dense_shape) + ``` + + instead. + + When executing eagerly, map_fn does not execute in parallel even if + `parallel_iterations` is set to a value > 1. You can still get the + performance benefits of running a function in parallel by using the + `tf.contrib.eager.defun` decorator, + + ```python + # Assume the function being used in map_fn is fn. + # To ensure map_fn calls fn in parallel, use the defun decorator. + @tf.contrib.eager.defun + def func(tensor): + return tf.map_fn(fn, tensor) + ``` + + Note that if you use the defun decorator, any non-TensorFlow Python code + that you may have written in your function won't get executed. See + `tf.contrib.eager.defun` for more details. The recommendation would be to + debug without defun but switch to defun to get performance benefits of + running map_fn in parallel. + + Args: + fn: The callable to be performed. It accepts one argument, which will have + the same (possibly nested) structure as `elems`. Its output must have the + same structure as `dtype` if one is provided, otherwise it must have the + same structure as `elems`. + elems: A tensor or (possibly nested) sequence of tensors, each of which will + be unpacked along their first dimension. The nested sequence of the + resulting slices will be applied to `fn`. + dtype: (optional) The output type(s) of `fn`. If `fn` returns a structure + of Tensors differing from the structure of `elems`, then `dtype` is not + optional and must have the same structure as the output of `fn`. Use + `RaggedTensorType` to declare an output of type `RaggedTensor`. + parallel_iterations: (optional) The number of iterations allowed to run in + parallel. When graph building, the default value is 10. While executing + eagerly, the default value is set to 1. + back_prop: (optional) True enables support for back propagation. + swap_memory: (optional) True enables GPU-CPU memory swapping. + infer_shape: (optional) False disables tests for consistent output shapes. + name: (optional) Name prefix for the returned tensors. + + Returns: + A possibly nested sequence of potentially ragged tensors. Each + tensor packs the results of applying `fn` to tensors unpacked from `elems` + along the first dimension, from first to last. + + Raises: + TypeError: if `fn` is not callable or the structure of the output of + `fn` and `dtype` do not match, or if elems is a SparseTensor. + ValueError: if the lengths of the output of `fn` and `dtype` do not match. 
+ + #### Examples: + + ```python + elems = np.array([1, 2, 3, 4, 5, 6]) + squares = map_fn(lambda x: x * x, elems) + # squares == [1, 4, 9, 16, 25, 36] + ``` + + ```python + elems = (np.array([1, 2, 3]), np.array([-1, 1, -1])) + alternate = map_fn(lambda x: x[0] * x[1], elems, dtype=tf.int64) + # alternate == [-1, 2, -3] + ``` + + ```python + elems = np.array([1, 2, 3]) + alternates = map_fn(lambda x: (x, -x), elems, dtype=(tf.int64, tf.int64)) + # alternates[0] == [1, 2, 3] + # alternates[1] == [-1, -2, -3] + ``` + + ```python + elems = ragged.constant([[1, 2, 3], [4, 5], [6, 7]]) + mean = map_fn(tf.reduce_mean, elems) + # mean == [2, 4, 6] + ``` + + ```python + elems = ragged.constant([[1, 2, 3], [4, 5], [6, 7]], dtype=tf.int64) + out = map_fn(lambda x: x + 1, elems, + dtype=ragged.RaggedTensorType(type=tf.int64, ragged_rank=0)) + # out = tf.ragged.constant([[2, 3, 4], [5, 6], [7, 8]]) + ``` + """ + if dtype is None: + dtype = nest.map_structure(lambda e: e.dtype, elems) + dtype = nest.map_structure(_ragged_type_to_spec, dtype) + return map_fn_lib.map_fn(fn, + elems, + dtype, + parallel_iterations, + back_prop, + swap_memory, + infer_shape, + name) + + +def _ragged_type_to_spec(t): + if isinstance(t, ragged_tensor.RaggedTensorType): + # Note: need to adjust ragged_rank by 1, since RaggedTensorSpec gives the + # type for the mapped `fn` output, but RaggedTensorType gives the type for + # the result of stacking the mapped `fn` outputs. + return ragged_tensor.RaggedTensorSpec( + None, t.dtype, t.ragged_rank - 1, t.row_splits_dtype) + else: + return t diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_math_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_math_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..fac49983845728e307f94a8a53be8d367e6212ab --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_math_ops.py @@ -0,0 +1,1261 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
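The `dtype`/`RaggedTensorType` plumbing above predates TF 2.x's `fn_output_signature`; the same row-wise mapping is now usually written with an explicit output spec. A sketch:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2, 3], [4, 5], [6]])

# Each ragged row maps to a scalar, so the stacked result is dense.
means = tf.map_fn(tf.reduce_mean, rt,
                  fn_output_signature=tf.TensorSpec([], tf.int32))
print(means.numpy())  # [2 4 6]
```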
+# ============================================================================== +"""Support for ragged tensors.""" + +import functools +import typing + +import numpy as np + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import gen_ragged_math_ops +from tensorflow.python.ops import map_fn +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn_ops +from tensorflow.python.ops.ragged import ragged_functional_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import segment_id_ops +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +# =============================================================================== +# ragged.range +# =============================================================================== +# pylint: disable=redefined-builtin +@tf_export('ragged.range') +@dispatch.add_dispatch_support +def range(starts, + limits=None, + deltas=1, + dtype=None, + name=None, + row_splits_dtype=dtypes.int64): + """Returns a `RaggedTensor` containing the specified sequences of numbers. + + Each row of the returned `RaggedTensor` contains a single sequence: + + ```python + ragged.range(starts, limits, deltas)[i] == + tf.range(starts[i], limits[i], deltas[i]) + ``` + + If `starts[i] >= limits[i] and deltas[i] > 0`, then `output[i]` will be an + empty list. Similarly, if `starts[i] <= limits[i] and deltas[i] < 0`, then + `output[i]` will be an empty list. This behavior is consistent with the + Python `range` function, but differs from the `tf.range` op, which returns + an error for these cases. + + Examples: + + >>> tf.ragged.range([3, 5, 2]).to_list() + [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]] + >>> tf.ragged.range([0, 5, 8], [3, 3, 12]).to_list() + [[0, 1, 2], [], [8, 9, 10, 11]] + >>> tf.ragged.range([0, 5, 8], [3, 3, 12], 2).to_list() + [[0, 2], [], [8, 10]] + + The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors. + The vector inputs must all have the same size. Scalar inputs are broadcast + to match the size of the vector inputs. + + Args: + starts: Vector or scalar `Tensor`. Specifies the first entry for each range + if `limits` is not `None`; otherwise, specifies the range limits, and the + first entries default to `0`. + limits: Vector or scalar `Tensor`. Specifies the exclusive upper limits for + each range. + deltas: Vector or scalar `Tensor`. Specifies the increment for each range. + Defaults to `1`. + dtype: The type of the elements of the resulting tensor. If not specified, + then a value is chosen based on the other args. + name: A name for the operation. + row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits` + tensor. One of `tf.int32` or `tf.int64`. + + Returns: + A `RaggedTensor` of type `dtype` with `ragged_rank=1`.
+ """ + row_splits_dtype = dtypes.as_dtype(row_splits_dtype) + if limits is None: + starts, limits = 0, starts + + with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name: + starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts') + limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits') + deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas') + + # infer dtype if not explicitly provided + if dtype is None: + starts, limits, deltas = _infer_matching_dtype( + [starts, limits, deltas], + [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]) + + result = gen_ragged_math_ops.ragged_range( + starts, limits, deltas, Tsplits=row_splits_dtype, name=name) + return ragged_tensor.RaggedTensor.from_row_splits( + result.rt_dense_values, result.rt_nested_splits, validate=False) + + +def _infer_matching_dtype(tensors, dtype_hierarchy): + """Infers a matching dtype for tensors, and casts them to that dtype.""" + assert all(t.dtype in dtype_hierarchy for t in tensors) + inferred_dtype = max([t.dtype for t in tensors], key=dtype_hierarchy.index) + return [math_ops.cast(t, inferred_dtype) for t in tensors] + + +ops.no_gradient('RaggedRange') + +# =============================================================================== +# ragged_segment_ +# =============================================================================== + +# Docstring template used for the raggged_segment_ ops. +_RAGGED_SEGMENT_DOCSTRING = """\ +Computes the %(combination)s along segments of a RaggedTensor. + + Returns a RaggedTensor `output` with `num_segments` rows, where the row + `output[i]` is formed by taking the %(combination)s of all rows of `data` + whose corresponding `segment_id` is `i`. + + The length of the row `output[i]` will be the maximum of the lengths of + all rows of `data` whose corresponding `segment_id` is `i`. If no `data` + rows correspond to a given segment ID, then the output row for that segment + ID will be empty. + + Args: + data: A `RaggedTensor` containing the values to combine. + segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or + `int32`. `segment_ids.shape` must be a prefix of `data.shape`. + Must be greater than or equal to zero, and less than `num_segments`. + `segment_ids` is not required to be sorted. + num_segments: An `int32` or `int64` scalar specifying the number of + distinct segment ids. + name: A name prefix for the returned tensor (optional). + Returns: + A `RaggedTensor` containing the %(combined)s values. The returned tensor + has the same dtype as `data`, and its shape is + `[num_segments] + data.shape[segment_ids.rank:]`. + Raises: + ValueError: If `segment_ids.shape` is not a prefix of `data.shape`. +""" + + +def _ragged_segment_aggregate(unsorted_segment_op, + data, + segment_ids, + num_segments, + separator=None, + name=None): + """Aggregates along segments of a RaggedTensor using `unsorted_segment_op`. + + Returns a RaggedTensor `output` with `num_segments` rows, where the row + `output[i]` is formed by combining all rows of `data` whose corresponding + `segment_id` is `i`. The values in each row are combined using + `unsorted_segment_op`. + + The length of the row `output[i]` will be the maximum of the lengths of + all rows of `data` whose corresponding `segment_id` is `i`. If no `data` + rows correspond to a given segment ID, then the output row for that segment + ID will be empty. + + Args: + unsorted_segment_op: The tensorflow `op` that should be used to combine + values in each row. 
Must have the same signature and basic behavior as + `unsorted_segment_sum`, `unsorted_segment_max`, etc. + data: A `RaggedTensor` containing the values to be combined. + segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or + `int32`. `segment_ids.shape` must be a prefix of `data.shape`. + `segment_ids` is not required to be sorted. + num_segments: An `int32` or `int64` scalar. + separator: An optional string. Defaults to None. The separator to use when + joining. Only used for string types. + name: A name prefix for the returned tensor (optional). + + Returns: + A `RaggedTensor` containing the aggregated values. The returned tensor + has the same dtype as `data`, and its shape is + `[num_segments] + data.shape[segment_ids.rank:]`. + Raises: + ValueError: If segment_ids.shape is not a prefix of data.shape. + """ + if not (ragged_tensor.is_ragged(data) or + ragged_tensor.is_ragged(segment_ids)): + if separator is not None: + # It uses unsorted_segment_join. + return unsorted_segment_op(data, segment_ids, num_segments, separator, + name) + else: + return unsorted_segment_op(data, segment_ids, num_segments, name) + + with ops.name_scope(name, 'RaggedSegment', + [data, segment_ids, num_segments]) as name: + data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data') + segment_ids = ragged_tensor.convert_to_tensor_or_ragged_tensor( + segment_ids, name='segment_ids') + data, segment_ids = ragged_tensor.match_row_splits_dtypes(data, segment_ids) + if segment_ids.dtype not in (dtypes.int32, dtypes.int64): + raise ValueError('segment_ids must have dtype int32 or int64.') + + if ragged_tensor.is_ragged(segment_ids): + if not ragged_tensor.is_ragged(data): + raise ValueError('segment_ids.shape must be a prefix of data.shape, ' + 'but segment_ids is ragged and data is not.') + check_splits = check_ops.assert_equal( + segment_ids.row_splits, + data.row_splits, + message='segment_ids.shape must be a prefix of data.shape') + with ops.control_dependencies([check_splits]): + return _ragged_segment_aggregate(unsorted_segment_op, data.values, + segment_ids.values, num_segments, + separator) + + # Find the length of each row in data. (shape=[data_nrows]) + data_row_lengths = data.row_splits[1:] - data.row_splits[:-1] + + # Find the length that each output row will have. The length of the row + # corresponding to segment `id` is `max(data_row_lengths[i])` where + # `segment_ids[i]=id`. (shape=[output_nrows]) + output_row_lengths = math_ops.maximum( + math_ops.unsorted_segment_max(data_row_lengths, segment_ids, + num_segments), 0) + + # Build the splits tensor for the output RaggedTensor. + output_splits = array_ops.concat([ + array_ops.zeros([1], output_row_lengths.dtype), + math_ops.cumsum(output_row_lengths) + ], + axis=0) + + # For each row in `data`, find the start & limit position where that row's + # values will be aggregated in output.values. + data_row_to_out_row_start = array_ops.gather(output_splits, segment_ids) + data_row_to_out_row_limit = data_row_to_out_row_start + data_row_lengths + + # For each value in `data.values`, find the position where it will + # aggregated in `output.values`. + # Get the target output values index for each data values index. + data_val_to_out_val_index = range(data_row_to_out_row_start, + data_row_to_out_row_limit).values + + # Recursively aggregate the values. 
+ output_values = _ragged_segment_aggregate(unsorted_segment_op, data.values, + data_val_to_out_val_index, + output_splits[-1], separator) + return ragged_tensor.RaggedTensor.from_row_splits( + output_values, output_splits, validate=False) + + +@dispatch.dispatch_for_api(math_ops.unsorted_segment_sum) +def segment_sum(data: ragged_tensor.RaggedOrDense, + segment_ids: ragged_tensor.RaggedOrDense, + num_segments, + name=None): + # For docs, see: _RAGGED_SEGMENT_DOCSTRING + return _ragged_segment_aggregate( + math_ops.unsorted_segment_sum, + data=data, + segment_ids=segment_ids, + num_segments=num_segments, + name=(name or 'RaggedSegmentSum')) + + +@dispatch.dispatch_for_api(math_ops.unsorted_segment_prod) +def segment_prod(data: ragged_tensor.RaggedOrDense, + segment_ids: ragged_tensor.RaggedOrDense, + num_segments, + name=None): + # For docs, see: _RAGGED_SEGMENT_DOCSTRING + return _ragged_segment_aggregate( + math_ops.unsorted_segment_prod, + data=data, + segment_ids=segment_ids, + num_segments=num_segments, + name=(name or 'RaggedSegmentProd')) + + +@dispatch.dispatch_for_api(math_ops.unsorted_segment_min) +def segment_min(data: ragged_tensor.RaggedOrDense, + segment_ids: ragged_tensor.RaggedOrDense, + num_segments, + name=None): + # For docs, see: _RAGGED_SEGMENT_DOCSTRING + return _ragged_segment_aggregate( + math_ops.unsorted_segment_min, + data=data, + segment_ids=segment_ids, + num_segments=num_segments, + name=(name or 'RaggedSegmentMin')) + + +@dispatch.dispatch_for_api(math_ops.unsorted_segment_max) +def segment_max(data: ragged_tensor.RaggedOrDense, + segment_ids: ragged_tensor.RaggedOrDense, + num_segments, + name=None): + # For docs, see: _RAGGED_SEGMENT_DOCSTRING + return _ragged_segment_aggregate( + math_ops.unsorted_segment_max, + data=data, + segment_ids=segment_ids, + num_segments=num_segments, + name=(name or 'RaggedSegmentMax')) + + +@dispatch.dispatch_for_api(math_ops.unsorted_segment_mean) +def segment_mean(data: ragged_tensor.RaggedOrDense, + segment_ids: ragged_tensor.RaggedOrDense, + num_segments, + name=None): + """For docs, see: _RAGGED_SEGMENT_DOCSTRING.""" + with ops.name_scope(name, 'RaggedSegmentMean', + [data, segment_ids, num_segments]): + total = segment_sum(data, segment_ids, num_segments) + ones = ragged_tensor.RaggedTensor.from_nested_row_splits( + array_ops.ones_like(data.flat_values), + data.nested_row_splits, + validate=False) + count = segment_sum(ones, segment_ids, num_segments) + if ragged_tensor.is_ragged(total): + return total.with_flat_values(total.flat_values / count.flat_values) + else: + return total / count + + +@dispatch.dispatch_for_api(math_ops.unsorted_segment_sqrt_n) +def segment_sqrt_n(data: ragged_tensor.RaggedOrDense, + segment_ids: ragged_tensor.RaggedOrDense, + num_segments, + name=None): + """For docs, see: _RAGGED_SEGMENT_DOCSTRING.""" + with ops.name_scope(name, 'RaggedSegmentSqrtN', + [data, segment_ids, num_segments]): + total = segment_sum(data, segment_ids, num_segments) + ones = ragged_tensor.RaggedTensor.from_nested_row_splits( + array_ops.ones_like(data.flat_values), + data.nested_row_splits, + validate=False) + count = segment_sum(ones, segment_ids, num_segments) + if ragged_tensor.is_ragged(total): + return total.with_flat_values(total.flat_values / + math_ops.sqrt(count.flat_values)) + else: + return total / math_ops.sqrt(count) + + +def _set_ragged_segment_docstring(func, combination, combined): + func.__doc__ = _RAGGED_SEGMENT_DOCSTRING % dict( + combination=combination, combined=combined) + + 
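A minimal usage sketch of the segment ops wired up above (assuming an ordinary TensorFlow 2.x runtime; `rt` and `segment_ids` are illustrative names, not part of the library source):

import tensorflow as tf

rt = tf.ragged.constant([[1, 2, 3], [4], [5, 6]])
segment_ids = tf.constant([0, 1, 0])  # rows 0 and 2 -> segment 0; row 1 -> segment 1

# tf.math.unsorted_segment_sum dispatches to segment_sum above. Output row 0
# has length max(3, 2) = 3; positions with a single contributing value keep
# that value (3), and the rest combine elementwise (1+5, 2+6).
print(tf.math.unsorted_segment_sum(rt, segment_ids, num_segments=2))
# <tf.RaggedTensor [[6, 8, 3], [4]]>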
+_set_ragged_segment_docstring(segment_sum, 'sum', 'summed') +_set_ragged_segment_docstring(segment_prod, 'product', 'multiplied') +_set_ragged_segment_docstring(segment_min, 'minimum', 'minimized') +_set_ragged_segment_docstring(segment_max, 'maximum', 'maximized') +_set_ragged_segment_docstring(segment_mean, 'mean', 'averaged') +_set_ragged_segment_docstring(segment_sqrt_n, 'sum divided by sqrt(N)', + 'summed') + +# =============================================================================== +# ragged_reduce_<AGGREGATE> +# =============================================================================== + +# Docstring template used for ragged_reduce_<AGGREGATE> ops. +_RAGGED_REDUCE_DOCSTRING = """\ +Computes the %(combination)s of elements across dimensions of a `RaggedTensor`. + + Reduces `input_tensor` along the dimensions given in `axis` by taking the + %(combination)s of values. If a reduced dimension has no elements for + some index, then the value for that index will be %(default)s. + + The rank of the tensor is reduced by `1` for each entry in `axis`. If + `axis` is not specified, then all dimensions are reduced, and a scalar + value is returned. + Args: + input_tensor: A `RaggedTensor` containing the values to be %(combined)s. + axis: The dimensions to reduce. May be `None` (to reduce all axes), an + `int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce + a given set of axes), or a `Tensor` with a constant value. Must be in + the range `[0, input_tensor.rank]`. + name: A name prefix for the returned tensor (optional). + Returns: + A `RaggedTensor` containing the %(combined)s values. The returned tensor + has the same dtype as `data`, and its shape is given by removing the + dimensions specified in `axis` from `input_tensor.shape`. The `ragged_rank` + of the returned tensor is given by subtracting any ragged dimensions + specified in `axis` from `input_tensor.ragged_rank`. + Raises: + ValueError: If `axis` contains a `Tensor` whose value is not constant. + ####Example: + %(example)s +""" +_RAGGED_REDUCE_SUM_EXAMPLE = """ + >>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]]) + >>> tf.reduce_sum(rt, axis=0).numpy() # = [3+1+9+2, 1+5+6, 4] + array([15, 12, 4], dtype=int32) + >>> tf.reduce_sum(rt, axis=1).numpy() # = [3+1+4, 1+5, 9, 2+6] + array([8, 6, 9, 8], dtype=int32) +""" +_RAGGED_REDUCE_PROD_EXAMPLE = """ + >>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]]) + >>> tf.reduce_prod(rt, axis=0).numpy() # = [3*1*9*2, 1*5*6, 4] + array([54, 30, 4], dtype=int32) + >>> tf.reduce_prod(rt, axis=1).numpy() # = [3*1*4, 1*5, 9, 2*6] + array([12, 5, 9, 12], dtype=int32) +""" +_RAGGED_REDUCE_MIN_EXAMPLE = """ + >>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]]) + >>> tf.reduce_min(rt, axis=0).numpy() + array([1, 1, 4], dtype=int32) + >>> tf.reduce_min(rt, axis=1).numpy() + array([1, 1, 9, 2], dtype=int32) +""" +_RAGGED_REDUCE_MAX_EXAMPLE = """ + >>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]]) + >>> tf.reduce_max(rt, axis=0).numpy() + array([9, 6, 4], dtype=int32) + >>> tf.reduce_max(rt, axis=1).numpy() + array([4, 5, 9, 6], dtype=int32) +""" +_RAGGED_REDUCE_MEAN_EXAMPLE = """ + >>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]]) + >>> tf.reduce_mean(rt, axis=0).numpy() + array([3.75, 4. , 4. ]) + >>> tf.reduce_mean(rt, axis=1).numpy() + array([2.66666667, 3. , 9. , 4. ]) +""" +_RAGGED_REDUCE_VARIANCE_EXAMPLE = """ + >>> rt = tf.ragged.constant([[1, 1, 4], [2, 1], [3], [4, 1]], + ... dtype=tf.float64) + >>> tf.math.reduce_variance(rt, axis=0).numpy() + array([1.25, 0., 0.]) + >>> tf.math.reduce_variance(rt, axis=1).numpy() + array([2., 0.25, 0., 2.25]) +""" +_RAGGED_REDUCE_STD_EXAMPLE = """ + >>> rt = tf.ragged.constant([[1, 0], [2, 1], [3], [4, 1]], + ... dtype=tf.float64) + >>> tf.math.reduce_std(rt, axis=0).numpy() + array([1.11803399, 0.47140452]) + >>> tf.math.reduce_std(rt, axis=1).numpy() + array([0.5, 0.5, 0., 1.5]) +""" +_RAGGED_REDUCE_ALL_EXAMPLE = """ + >>> rt = tf.ragged.constant([[True, True], [True, True, False, True], [False, True]]) + >>> tf.reduce_all(rt, axis=0).numpy() + array([False, True, False, True]) + >>> tf.reduce_all(rt, axis=1).numpy() + array([ True, False, False]) +""" +_RAGGED_REDUCE_ANY_EXAMPLE = """ + >>> rt = tf.ragged.constant([[True, True], [True, True, False, True], [False, True]]) + >>> tf.reduce_any(rt, axis=0).numpy() + array([ True, True, False, True]) + >>> tf.reduce_any(rt, axis=1).numpy() + array([ True, True, True]) +""" + + +def ragged_reduce_aggregate(reduce_op, + unsorted_segment_op, + rt_input, + axis, + keepdims, + separator=None, + name=None): + """Aggregates across axes of a RaggedTensor using the given `Tensor` ops. + + Reduces `rt_input` along the dimensions given in `axis`. The rank of the + tensor is reduced by 1 for each entry in `axis`. If `axis` is not specified, + then all dimensions are reduced, and a scalar value is returned. + + This op assumes that `reduce_op` and `unsorted_segment_op` are associative; + if not, then reducing multiple axes will return incorrect results. (In + particular, reducing multiple axes is currently implemented by reducing the + axes one at a time.) + + Args: + reduce_op: The tensorflow `op` that should be used to reduce values in + uniform dimensions. Must have the same signature and basic behavior as + `reduce_sum`, `reduce_max`, etc. + unsorted_segment_op: The tensorflow `op` that should be used to combine + values in ragged dimensions. Must have the same signature and basic + behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc. + rt_input: A `Tensor` or `RaggedTensor` containing the values to be reduced. + axis: The axis or axes to reduce. May be `None` (to reduce all axes), an + `int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce a + given set of axes), or a `Tensor` with a constant value. Must be in the + range `[0, rt_input.rank)`. + keepdims: If true, retains reduced dimensions with length 1. + separator: An optional string. Defaults to None. The separator to use when + joining. The separator must not be set for non-string data types. (i.e. if + separator is not None then string ops are used) + name: A name prefix for the returned tensor (optional). + + Returns: + A `RaggedTensor` containing the reduced values. The returned tensor + has the same dtype as `data`, and its shape is given by removing the + dimensions specified in `axis` from `rt_input.shape`. The `ragged_rank` + of the returned tensor is given by subtracting any ragged dimensions + specified in `axis` from `rt_input.ragged_rank`. + Raises: + ValueError: If `axis` contains a `Tensor` whose value is not constant. + """ + # When separator is not None, we infer that dtype is string and + # reduce_join will be called.
+ if separator is None: + maybe_separator = {} + else: + maybe_separator = {'separator': separator} + + if not ragged_tensor.is_ragged(rt_input): + return reduce_op( + rt_input, axis, keepdims=keepdims, name=name, **maybe_separator) + + if isinstance(axis, tensor.Tensor): + axis = tensor_util.constant_value(axis) + if axis is None: + raise ValueError('axis must be known at graph construction time.') + if isinstance(axis, np.ndarray): + axis = axis.tolist() + + # When reducing all axes, just ignore splits & reduce the inner values. + if axis is None: + result = reduce_op(rt_input.flat_values, None, keepdims=keepdims, + name=name, **maybe_separator) + if keepdims: + # Expand the result to the input number of dimensions. + for _ in rt_input.shape[1:]: + result = array_ops.expand_dims(result, axis=0) + return result + + with ops.name_scope(name, 'RaggedReduce', [rt_input, axis]): + if isinstance(axis, (tuple, list)): + if not axis: + return rt_input + elif len(axis) == 1: + axis = axis[0] + else: + # When reducing multiple axes, as we reduce one at a time (see below), + # the negative axis has to be converted to positive at the first run + # as the sort with negative axis will have different orders. + # See GitHub issue 27497. + axis = [ + array_ops.get_positive_axis(a, rt_input.shape.ndims, 'axis[%s]' % i, + 'rank(input_tensor)') + for i, a in enumerate(axis) + ] + # When reducing multiple axes, just reduce one at a time. This is less + # efficient, and only works for associative ops. (In particular, it + # does not work for reduce_mean.) However, reducing multiple axes at + # once will probably require a nontrivial c++ op. + axis = sorted(axis) + inner_reduced = ragged_reduce_aggregate(reduce_op, unsorted_segment_op, + rt_input, axis[-1], keepdims, + separator) + return ragged_reduce_aggregate(reduce_op, unsorted_segment_op, + inner_reduced, axis[:-1], keepdims, + separator) + + rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor( + rt_input, name='rt_input') + + axis = array_ops.get_positive_axis( + axis, rt_input.shape.ndims, ndims_name='rank(input_tensor)') + + if axis == 0: + # out[i_1, i_2, ..., i_N] = sum_{j} rt_input[j, i_1, i_2, ..., i_N] + row_lengths = rt_input.row_splits[1:] - rt_input.row_splits[:-1] + num_segments = math_ops.maximum(math_ops.reduce_max(row_lengths), 0) + segment_ids = range(row_lengths).values + result = _ragged_segment_aggregate(unsorted_segment_op, rt_input.values, + segment_ids, num_segments, separator) + if keepdims: + result = array_ops.expand_dims(result, axis=0) + return result + elif axis == 1: + # out[i_0, i_1, i_2, ..., i_N] = sum_{j} rt_input[i_0, j, i_2, ..., i_N] + num_segments = array_ops.shape(rt_input.row_splits)[0] - 1 + segment_ids = segment_id_ops.row_splits_to_segment_ids( + rt_input.row_splits) + result = _ragged_segment_aggregate(unsorted_segment_op, rt_input.values, + segment_ids, num_segments, separator) + if keepdims: + result = array_ops.expand_dims(result, axis=1) + return result + else: + # out[i_0, ..., i_[axis-1], i_axis+1], ..., i_N] = + # sum_{j} rt_input [i_0, ..., i_[axis-1], j, i_axis+1], ..., i_N] + return rt_input.with_values( + ragged_reduce_aggregate(reduce_op, unsorted_segment_op, + rt_input.values, axis - 1, keepdims, + separator)) + + +@dispatch.dispatch_for_api(math_ops.reduce_sum) +def reduce_sum(input_tensor: ragged_tensor.Ragged, + axis=None, + keepdims=None, + name=None): + """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" + + return ragged_reduce_aggregate( + reduce_op=math_ops.reduce_sum, + 
unsorted_segment_op=math_ops.unsorted_segment_sum, + rt_input=input_tensor, + axis=axis, + keepdims=keepdims, + name=(name or 'RaggedReduceSum')) + + +@dispatch.dispatch_for_api(math_ops.reduce_prod) +def reduce_prod(input_tensor: ragged_tensor.Ragged, + axis=None, + keepdims=None, + name=None): + """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" + return ragged_reduce_aggregate( + reduce_op=math_ops.reduce_prod, + unsorted_segment_op=math_ops.unsorted_segment_prod, + rt_input=input_tensor, + axis=axis, + keepdims=keepdims, + name=(name or 'RaggedReduceProd')) + + +@dispatch.dispatch_for_api(math_ops.reduce_min) +def reduce_min(input_tensor: ragged_tensor.Ragged, + axis=None, + keepdims=None, + name=None): + """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" + return ragged_reduce_aggregate( + reduce_op=math_ops.reduce_min, + unsorted_segment_op=math_ops.unsorted_segment_min, + rt_input=input_tensor, + axis=axis, + keepdims=keepdims, + name=(name or 'RaggedReduceMin')) + + +@dispatch.dispatch_for_api(math_ops.reduce_max) +def reduce_max(input_tensor: ragged_tensor.Ragged, + axis=None, + keepdims=None, + name=None): + """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" + return ragged_reduce_aggregate( + reduce_op=math_ops.reduce_max, + unsorted_segment_op=math_ops.unsorted_segment_max, + rt_input=input_tensor, + axis=axis, + keepdims=keepdims, + name=(name or 'RaggedReduceMax')) + + +@dispatch.dispatch_for_api(math_ops.reduce_mean) +def reduce_mean(input_tensor: ragged_tensor.Ragged, + axis=None, + keepdims=None, + name=None): + """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" + with ops.name_scope(name, 'RaggedReduceMean', [input_tensor, axis]): + total = reduce_sum(input_tensor, axis, keepdims) + if ragged_tensor.is_ragged(input_tensor): + ones = ragged_tensor.RaggedTensor.from_nested_row_splits( + array_ops.ones_like(input_tensor.flat_values), + input_tensor.nested_row_splits, + validate=False) + else: + ones = array_ops.ones_like(input_tensor) + count = reduce_sum(ones, axis, keepdims) + if ragged_tensor.is_ragged(total): + return ragged_tensor.RaggedTensor.from_nested_row_splits( + total.flat_values / count.flat_values, + total.nested_row_splits, + validate=False) + else: + return total / count + + +@dispatch.dispatch_for_api(math_ops.reduce_variance) +def reduce_variance(input_tensor: ragged_tensor.Ragged, + axis=None, + keepdims=False, + name=None): + """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" + with ops.name_scope(name, 'RaggedReduceVariance', [input_tensor, axis]): + input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor( + input_tensor, name='input_tensor') + if input_tensor.dtype.is_complex: + raise ValueError( + 'reduce_variance is not supported for RaggedTensors with complex' + ' dtypes.' + ) + square_of_input = math_ops.square(input_tensor) + mean_of_square = reduce_mean(square_of_input, axis=axis, keepdims=keepdims) + mean = reduce_mean(input_tensor, axis=axis, keepdims=keepdims) + square_of_mean = math_ops.square(mean) + # Note: the above method of computing variance is not numerically stable, + # and can result in negative variances. Here we clip to >= 0. 
+ return math_ops.maximum(mean_of_square - square_of_mean, 0) + + +@dispatch.dispatch_for_api(math_ops.reduce_std) +def reduce_std(input_tensor: ragged_tensor.Ragged, + axis=None, + keepdims=False, + name=None): + """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" + with ops.name_scope(name, 'RaggedReduceStd', [input_tensor, axis]): + variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims) + return math_ops.sqrt(variance) + + +def _cast(input_tensor, dtype): + return ragged_functional_ops.map_flat_values(math_ops.cast, input_tensor, + dtype) + + +@dispatch.dispatch_for_api(math_ops.reduce_all) +def reduce_all(input_tensor: ragged_tensor.Ragged, + axis=None, + keepdims=None, + name=None): + """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" + with ops.name_scope(name, 'RaggedReduceAll', [input_tensor, axis]): + return _cast( + reduce_prod(_cast(input_tensor, dtypes.int32), axis, keepdims), + dtypes.bool) + + +@dispatch.dispatch_for_api(math_ops.reduce_any) +def reduce_any(input_tensor: ragged_tensor.Ragged, + axis=None, + keepdims=None, + name=None): + """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" + with ops.name_scope(name, 'RaggedReduceAny', [input_tensor, axis]): + return _cast( + reduce_sum(_cast(input_tensor, dtypes.int32), axis, keepdims), + dtypes.bool) + + +def _set_ragged_reduce_docstring(func, combination, combined, default, example): + func.__doc__ = _RAGGED_REDUCE_DOCSTRING % dict( + combination=combination, + combined=combined, + default=default, + example=example) + + +_set_ragged_reduce_docstring(reduce_sum, 'sum', 'summed', '0', + _RAGGED_REDUCE_SUM_EXAMPLE) +_set_ragged_reduce_docstring(reduce_prod, 'product', 'multiplied', '1', + _RAGGED_REDUCE_PROD_EXAMPLE) +_set_ragged_reduce_docstring(reduce_min, 'minimum', 'minimized', + '`input_tensor.dtype.min`', + _RAGGED_REDUCE_MIN_EXAMPLE) +_set_ragged_reduce_docstring(reduce_max, 'maximum', 'maximized', + '`input_tensor.dtype.max`', + _RAGGED_REDUCE_MAX_EXAMPLE) +_set_ragged_reduce_docstring(reduce_mean, 'mean', 'averaged', 'NaN', + _RAGGED_REDUCE_MEAN_EXAMPLE) +_set_ragged_reduce_docstring(reduce_variance, 'variance', 'averaged', 'NaN', + _RAGGED_REDUCE_VARIANCE_EXAMPLE) +_set_ragged_reduce_docstring(reduce_std, 'std', 'averaged', 'NaN', + _RAGGED_REDUCE_STD_EXAMPLE) +_set_ragged_reduce_docstring(reduce_all, 'logical and', 'and-ed', 'True', + _RAGGED_REDUCE_ALL_EXAMPLE) +_set_ragged_reduce_docstring(reduce_any, 'logical or', 'or-ed', 'False', + _RAGGED_REDUCE_ANY_EXAMPLE) + + +# =============================================================================== +# ragged.matmul +# =============================================================================== +@dispatch.dispatch_for_api(math_ops.matmul) +def matmul( + a: ragged_tensor.RaggedOrDense, + b: ragged_tensor.RaggedOrDense, + transpose_a=False, + transpose_b=False, + adjoint_a=False, + adjoint_b=False, + a_is_sparse=False, + b_is_sparse=False, + output_type=None, + grad_a=False, + grad_b=False, + name=None, +): + """Multiplies matrix `a` by matrix `b`. + + If all transpose or adjoint attributes are `False` then: + + ``` + output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j]), for all indices i, j. + ``` + + The inputs `a` and `b` must have `rank >= 2`, where the outermost `rank - 2` + dimensions are batch dimensions. The inputs must have the same dtype. See + `tf.matmul` for more information. + + Args: + a: `tf.Tensor` or `RaggedTensor` with `rank > 1`. + b: `tf.Tensor` or `RaggedTensor` with same type and rank as `a`. 
+ transpose_a: If `True`, `a` is transposed before multiplication. + transpose_b: If `True`, `b` is transposed before multiplication. + adjoint_a: If `True`, `a` is conjugated & transposed before multiplication. + adjoint_b: If `True`, `b` is conjugated & transposed before multiplication. + a_is_sparse: If `True`, optimize assuming `a` is mostly zero. + b_is_sparse: If `True`, optimize assuming `b` is mostly zero. + output_type: The output datatype (optional). + grad_a: Unused. + grad_b: Unused. + name: Name for the operation (optional). + + Returns: + A `Tensor` or `RaggedTensor` with the same rank and shape as `a`, where + each inner-most matrix is the product of the corresponding matrices in `a` + and `b`. + """ + del grad_a + del grad_b + if transpose_a and adjoint_a: + raise ValueError('Only one of transpose_a and adjoint_a can be True.') + if transpose_b and adjoint_b: + raise ValueError('Only one of transpose_b and adjoint_b can be True.') + + kwargs = dict( + transpose_a=transpose_a, + transpose_b=transpose_b, + adjoint_a=adjoint_a, + adjoint_b=adjoint_b, + a_is_sparse=a_is_sparse, + b_is_sparse=b_is_sparse, + output_type=output_type) + + with ops.name_scope(name, 'RaggedMatMul', [a, b]) as name: + a = ragged_tensor.convert_to_tensor_or_ragged_tensor(a, name='a') + b = ragged_tensor.convert_to_tensor_or_ragged_tensor(b, name='b') + + a_is_ragged = isinstance(a, ragged_tensor.RaggedTensor) + b_is_ragged = isinstance(b, ragged_tensor.RaggedTensor) + if not (a_is_ragged or b_is_ragged): + return math_ops.matmul(a, b, **kwargs) + + if a.dtype != b.dtype: + raise ValueError('`a` and `b` must have the same dtype.') + + # TODO(edloper): Support broadcasting inputs. (Broadcast support is not + # documented by https://www.tensorflow.org/api_docs/python/tf/linalg/matmul, + # but it is supported by the op.) + + # Find the rank of the input tensors. + if a.shape.rank is None: + if b.shape.rank is None: + raise ValueError('matmul requires at least one input to have known ' + 'rank if either input is ragged.') + rank = b.shape.rank + else: + if b.shape.rank is not None and a.shape.rank != b.shape.rank: + raise ValueError('`a` and `b` must have the same rank.') + rank = a.shape.rank + + # At least one of `a` and `b` is ragged; and ragged tensors always have + # rank>=2. + if rank < 2: + # This can happen if e.g. `a` is a 1D dense tensor and `b` is a + # ragged tensor with unknown rank. Since ragged tensors always have + # `rank>=2`, this implies that `a` and `b` have different ranks. + raise ValueError('`a` and `b` must have the same rank.') + + # Rank>3: We have multiple batch dimensions. Merge them into a single + # batch dimension, recursively call `matmul`, and then restore the original + # batch dimension (using a.row_splits). + if rank > 3: + shape_err = 'Batch dimensions of `a` and `b` do not have the same size.' + if not a_is_ragged: + a = ragged_tensor.RaggedTensor.from_tensor(a, ragged_rank=1) + if not b_is_ragged: + b = ragged_tensor.RaggedTensor.from_tensor(b, ragged_rank=1) + with ops.control_dependencies([ + check_ops.assert_equal(a.row_splits, b.row_splits, message=shape_err) + ]): + flat_result = matmul(a.values, b.values, **kwargs) + return a.with_values(flat_result) + + if rank == 2: + return _matmul_2d(a, b, **kwargs) + + assert rank == 3 # I.e., we have a single batch dimension. 
+ + a_ragged_rank = a.ragged_rank if a_is_ragged else 0 + if a_ragged_rank == 1 and not (b_is_ragged or transpose_a or adjoint_a): + # If `a.shape=[B, (I), J]` and `b.shape=[B, J, K]`, then we can compute + # the result with a single dense `matmul`. + return _matmul_3d_with_batch_dim_folding(a, b, **kwargs) + else: + # Otherwise, fall back on using `map_fn`. + return _matmul_3d_with_map_fn(a, b, **kwargs) + + +def _matmul_2d(a, b, **kwargs): + """Multiplies potentially ragged 2D tensors. + + Args: + a: A 2D Tensor or RaggedTensor with `shape=[I, J]` + b: A 2D Tensor or RaggedTensor with `shape=[J, K]` + **kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a). + + Returns: + A 2D Tensor with `shape=[I, K]`. + """ + # multiplying `a` and `b` is only well-defined if `a` and `b` are + # actually uniform (and just happened to be stored as ragged tensors). + # Check that they're uniform, convert them to tf.Tensor. + ragged_err = ('The matrices in `a` and `b` may not be ' + 'ragged in their innermost dimension.') + checks = [] + if isinstance(a, ragged_tensor.RaggedTensor): + original_size = array_ops.size(a.flat_values) + a = a.to_tensor() + checks.append( + check_ops.assert_equal( + original_size, array_ops.size(a), message=ragged_err)) + if isinstance(b, ragged_tensor.RaggedTensor): + original_size = array_ops.size(b.flat_values) + b = b.to_tensor() + checks.append( + check_ops.assert_equal( + original_size, array_ops.size(b), message=ragged_err)) + with ops.control_dependencies(checks): + return math_ops.matmul(a, b, **kwargs) + + +def _matmul_3d_with_map_fn(a, b, **kwargs): + """Multiplies batches of 2D matrices using map_fn. + + `output[n, i, k]` = `sum_j (a[n, i, j] * b[n, j, k])` (for all `n`, `i`, `k`). + + Requires that `a[n, i].nrows()` == `b[n].nrows()` (for all `n` and `i`). + + Args: + a: A 3D Tensor or RaggedTensor with `shape=[B, I, J]`, where dimensions `I` + and `J` may be ragged. + b: A 3D Tensor or RaggedTensor with `shape=[B, J, K]`, where dimensions `J` + and `K` may be ragged. + **kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a). + + Returns: + A 3D RaggedTensor with `shape=[B, (I), (K)]`. + """ + # Determine the ragged rank of the result. In the normal case, we have: + # [B, I, J] * [B, J, K] -> [B, I, K] + # Or if we're using transpose_b, then we have: + # [B, I, J] * [B, K, J] -> [B, I, K] + # In either case, output_ragged_rank=2 iff the K dimension is ragged. + if (isinstance(b, ragged_tensor.RaggedTensor) and + (b.ragged_rank == 2 or kwargs.get('transpose_b') or + kwargs.get('adjoint_b'))): + output_ragged_rank = 2 + else: + output_ragged_rank = 1 + + def single_batch_matmul(x): + out = _matmul_2d(x[0], x[1], **kwargs) + if output_ragged_rank == 2: + out = ragged_tensor.RaggedTensor.from_tensor(out) + return out + + fn_out_shape = None # Figure out proper shape. + row_splits_dtype = ( + a.row_splits.dtype + if isinstance(a, ragged_tensor.RaggedTensor) else b.row_splits.dtype) + output_type = kwargs['output_type'] + if output_type is None: + output_type = a.dtype + spec = ragged_tensor.RaggedTensorSpec( + shape=fn_out_shape, + dtype=output_type, + ragged_rank=output_ragged_rank - 1, + row_splits_dtype=row_splits_dtype) + result = map_fn.map_fn( + single_batch_matmul, elems=(a, b), fn_output_signature=spec) + + # map_fn loses shape information; restore it, where possible.
+ # pylint: disable=protected-access + if kwargs.get('transpose_a') or kwargs.get('adjoint_a'): + result._set_shape(a.shape[:-2] + a.shape[-1:] + [None]) + else: + result._set_shape(a.shape[:-2] + a.shape[-2:-1] + [None]) + if kwargs.get('transpose_b') or kwargs.get('adjoint_b'): + result._set_shape(b.shape[:-2] + [None] + b.shape[-2:-1]) + else: + result._set_shape(b.shape[:-2] + [None] + b.shape[-1:]) + + return result + + +def _matmul_3d_with_batch_dim_folding(a, b, **kwargs): + """Multiply batches of 2D matrices where only `a.shape[1]` is ragged. + + Args: + a: A RaggedTensor with `shape=[B, (I), J]`. (ragged_rank must be 1.) + b: A Tensor with `shape=[B, J, K]` + **kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a). + transpose_a and adjoint_a must not be true. + + Returns: + A RaggedTensor with `shape=[B, (I), K]`. + """ + # reshaped_a.shape = [sum(i_1, i_2, ..., i_B), 1, J] + reshaped_a = array_ops.expand_dims(a.values, 1) + # reshaped_b.shape = [sum(i_1, i_2, ..., i_B), J, K] + reshaped_b = array_ops.repeat(b, a.row_lengths(), axis=0) + # flat_result.shape = [sum(i_1, i_2, ..., i_B), 1, K] + flat_result = math_ops.matmul(reshaped_a, reshaped_b, **kwargs) + # result.shape = [B, (I), K] + return a.with_values(array_ops.squeeze(flat_result, axis=1)) + + +# =============================================================================== +# ragged.softmax +# =============================================================================== +@dispatch.dispatch_for_api(nn_ops.softmax_v2) +def softmax(logits: ragged_tensor.Ragged, axis=None, name=None): + """Computes softmax activations. + + Used for multi-class predictions. The sum of all outputs generated by softmax + is 1. + + This function performs the equivalent of + + softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis) + + Example usage: + + >>> softmax = tf.nn.softmax([-1, 0., 1.]) + >>> softmax + <tf.Tensor: shape=(3,), dtype=float32, + numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)> + >>> sum(softmax) + <tf.Tensor: shape=(), dtype=float32, numpy=1.0> + + Args: + logits: A non-empty `Tensor`. Must be one of the following types: `half`, + `float32`, `float64`. + axis: The dimension softmax would be performed on. The default is -1 which + indicates the last dimension. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type and shape as `logits`. + + Raises: + InvalidArgumentError: if `logits` is empty or `axis` is beyond the last + dimension of `logits`.
+ """ + if axis is None: + axis = -1 + + with ops.name_scope(name, 'RaggedSoftmax', [logits]) as name: + max_input = reduce_max(logits, axis=axis, keepdims=True) + logits_exp = math_ops.exp(math_ops.subtract(logits, max_input)) + denominator = reduce_sum(logits_exp, axis=axis, keepdims=True) + return math_ops.divide(logits_exp, denominator) + + +# =============================================================================== +# ragged.add_n +# =============================================================================== +@dispatch.dispatch_for_api(math_ops.add_n) +def add_n(inputs: typing.List[ragged_tensor.RaggedOrDense], name=None): + """RaggedTensor implementation for tf.math.add_n.""" + if len(inputs) < 0: + raise ValueError('tf.add_n: expected at least one input.') + with ops.name_scope(name, 'RaggedAddN', inputs): + return ragged_functional_ops.map_flat_values(math_ops.add_n, inputs) + + +# =============================================================================== +# Ragged version of nn_ops.dropout +# =============================================================================== +@dispatch.dispatch_for_api(nn_ops.dropout) +def dropout_v1(x: ragged_tensor.Ragged, + keep_prob=None, + noise_shape=None, + seed=None, + name=None, + rate=None): + """Ragged dispatch target for tf.nn.dropout.""" + if noise_shape is not None: + raise ValueError('noise_shape is not supported yet for RaggedTensor x') + with ops.name_scope(name, 'RaggedNNDropout', [x, rate]): + x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x') + return x.with_flat_values( + nn_ops.dropout( + x.flat_values, keep_prob=keep_prob, seed=seed, rate=rate)) + + +@dispatch.dispatch_for_api(nn_ops.dropout_v2) +def dropout_v2(x: ragged_tensor.Ragged, + rate, + noise_shape=None, + seed=None, + name=None): + """Ragged dispatch target for tf.nn.dropout.""" + if noise_shape is not None: + raise ValueError('noise_shape is not supported yet for RaggedTensor x') + with ops.name_scope(name, 'RaggedNNDropout', [x, rate]): + x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x') + return x.with_flat_values( + nn_ops.dropout_v2(x.flat_values, rate=rate, seed=seed)) + + +@dispatch.dispatch_for_api(nn_ops.stateless_dropout) +def stateless_dropout(x: ragged_tensor.Ragged, + rate, + seed, + rng_alg=None, + noise_shape=None, + name=None): + """Ragged dispatch target for tf.nn.experimental.stateless_dropout.""" + if noise_shape is not None: + raise ValueError('noise_shape is not supported yet for RaggedTensor x') + with ops.name_scope(name, 'RaggedNNStatelessDropout', [x, rate]): + x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x') + return x.with_flat_values( + nn_ops.stateless_dropout( + x.flat_values, rate=rate, seed=seed, rng_alg=rng_alg)) + + +# =============================================================================== +# Ragged version of Tensor.__eq__ and Tensor.__ne__ +# =============================================================================== +@dispatch.dispatch_for_api(math_ops.tensor_equals) +def tensor_equals(self: ragged_tensor.RaggedOrDense, + other: ragged_tensor.RaggedOrDense): + """Ragged version of the operation invoked by `Tensor.__eq__`.""" + if other is None: + return False + elif _use_legacy_mode_for_tensor_equality(self): + return self is other + else: + try: + return math_ops.equal(self, other) + except (errors.InvalidArgumentError, ValueError): + return False # values are not broadcast-compatbile. 
+ + +@dispatch.dispatch_for_api(math_ops.tensor_not_equals) +def tensor_not_equals(self: ragged_tensor.RaggedOrDense, + other: ragged_tensor.RaggedOrDense): + """Ragged version of the operation invoked by `Tensor.__ne__`.""" + if other is None: + return False + elif _use_legacy_mode_for_tensor_equality(self): + return self is not other + else: + try: + return math_ops.not_equal(self, other) + except (errors.InvalidArgumentError, ValueError): + return True # values are not broadcast-compatible. + + +def _use_legacy_mode_for_tensor_equality(self): + g = getattr(self, 'graph', None) + return not (tensor.Tensor._USE_EQUALITY and # pylint: disable=protected-access + ops.executing_eagerly_outside_functions() and + (g is None or g.building_function)) + + +def _cumsum_flat_values_at_ragged_rank(last_rp, flat_values, exclusive=False, + reverse=False): + """Calculate flat_values for math_ops.cumsum when axis==ragged_rank.""" + if not exclusive: + partial = _cumsum_flat_values_at_ragged_rank( + last_rp, flat_values, exclusive=True, reverse=reverse) + return partial + flat_values + + if reverse: + youngest_sibling = array_ops.gather( + params=last_rp.row_splits(), indices=last_rp.value_rowids() + 1) - 1 + new_flat_values = math_ops.cumsum(flat_values, exclusive=True, reverse=True) + initial_values = array_ops.gather(params=new_flat_values, + indices=youngest_sibling) + + return new_flat_values - initial_values + else: + eldest_sibling = array_ops.gather( + params=last_rp.row_splits(), indices=last_rp.value_rowids()) + new_flat_values = math_ops.cumsum(flat_values, exclusive=True) + initial_values = array_ops.gather(params=new_flat_values, + indices=eldest_sibling) + return new_flat_values - initial_values + + +@dispatch.dispatch_for_api(math_ops.cumsum) +def ragged_cumsum(x: ragged_tensor.Ragged, + axis: int = 0, + exclusive: bool = False, + reverse: bool = False, + name: typing.Optional[str] = None): + """Calculate math_ops.cumsum for a RaggedTensor. + + Given a ragged tensor `x`, the `result` is a ragged tensor with the same + shape. One can calculate the value of `result[i_1...i_k]` as follows: + ``` + dense_result=tf.math.cumsum(rt.to_tensor(), axis=axis, exclusive=exclusive, + reverse=reverse) + result[i_1...i_k]=dense_result[i_1...i_k] + ``` + + Args: + x: the original ragged tensor to sum. + axis: the axis along which to sum, can range -rank<=axis<rank. + exclusive: is the sum exclusive or inclusive? If True, the first element of + each cumulative sum is `0` and the current element is excluded. + reverse: If True, sum from the end of the dimension. + name: the name of the op. + + Returns: + the cumulative sum of the ragged tensor. + """ + with ops.name_scope(name, 'RaggedCumSum', [x, axis, exclusive, reverse]): + x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x') + if axis < 0: + axis = axis + x.shape.rank + if axis == x.ragged_rank: + last_rp = x._nested_row_partitions[-1] # pylint: disable=protected-access + return x.with_flat_values( + _cumsum_flat_values_at_ragged_rank(last_rp, x.flat_values, + exclusive=exclusive, + reverse=reverse)) + elif axis > x.ragged_rank: + new_axis = axis - x.ragged_rank + cumsum_bound = functools.partial( + math_ops.cumsum, axis=new_axis, exclusive=exclusive, reverse=reverse) + return ragged_functional_ops.map_flat_values(cumsum_bound, x) + else: + dense_version = x.to_tensor() + result = math_ops.cumsum( + dense_version, axis, exclusive=exclusive, reverse=reverse, name=name) + return ragged_tensor.RaggedTensor.from_tensor( + result, lengths=x.nested_row_lengths()) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_operators.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_operators.py new file mode 100644 index 0000000000000000000000000000000000000000..479442c3f4055a53045247a5a4ce9ef977a84b10 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_operators.py @@ -0,0 +1,342 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Operator overloads for `RaggedTensor`.""" + +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_getitem +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import tf_decorator + + +# ============================================================================= +# Equality Docstring +# ============================================================================= +def ragged_eq(self, other): # pylint: disable=g-doc-args + """Returns result of elementwise `==` or False if not broadcast-compatible. + + Compares two ragged tensors elementwise for equality if they are + broadcast-compatible; or returns False if they are not + [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + + Note that this behavior differs from `tf.math.equal`, which raises an + exception if the two ragged tensors are not broadcast-compatible. + + For example: + + >>> rt1 = tf.ragged.constant([[1, 2], [3]]) + >>> rt1 == rt1 + <tf.RaggedTensor [[True, True], [True]]> + + >>> rt2 = tf.ragged.constant([[1, 2], [4]]) + >>> rt1 == rt2 + <tf.RaggedTensor [[True, True], [False]]> + + >>> rt3 = tf.ragged.constant([[1, 2], [3, 4]]) + >>> # rt1 and rt3 are not broadcast-compatible. + >>> rt1 == rt3 + False + + >>> # You can also compare a `tf.RaggedTensor` to a `tf.Tensor`. + >>> t = tf.constant([[1, 2], [3, 4]]) + >>> rt1 == t + False + >>> t == rt1 + False + >>> rt4 = tf.ragged.constant([[1, 2], [3, 4]]) + >>> rt4 == t + <tf.RaggedTensor [[True, True], [True, True]]> + >>> t == rt4 + <tf.RaggedTensor [[True, True], [True, True]]> + + Args: + other: The right-hand side of the `==` operator. + + Returns: + The ragged tensor result of the elementwise `==` operation, or `False` if + the arguments are not broadcast-compatible. + """ + return math_ops.tensor_equals(self, other) + + +# ============================================================================= +# Ordering Docstring +# ============================================================================= +def ragged_ge(self, other): # pylint: disable=g-doc-args + """Elementwise `>=` comparison of two convertible-to-ragged-tensor values. + + Computes the elementwise `>=` comparison of two values that are convertible to + ragged tensors, with [broadcasting] + (http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) support. + Raises an exception if two values are not broadcast-compatible. + + For example: + + >>> rt1 = tf.ragged.constant([[1, 2], [3]]) + >>> rt1 >= rt1 + <tf.RaggedTensor [[True, True], [True]]> + + >>> rt2 = tf.ragged.constant([[2, 1], [3]]) + >>> rt1 >= rt2 + <tf.RaggedTensor [[False, True], [True]]> + + >>> rt3 = tf.ragged.constant([[1, 2], [3, 4]]) + >>> # rt1 and rt3 are not broadcast-compatible. + >>> rt1 >= rt3 + Traceback (most recent call last): + ... + InvalidArgumentError: ... + + >>> # You can also compare a `tf.RaggedTensor` to a `tf.Tensor`. + >>> rt4 = tf.ragged.constant([[1, 2],[3, 4]]) + >>> t1 = tf.constant([[2, 1], [4, 3]]) + >>> rt4 >= t1 + <tf.RaggedTensor [[False, True], [False, True]]> + >>> t1 >= rt4 + <tf.RaggedTensor [[True, False], [True, False]]> + + >>> # Compares a `tf.RaggedTensor` to a `tf.Tensor` with broadcasting. + >>> t2 = tf.constant([[2]]) + >>> rt4 >= t2 + <tf.RaggedTensor [[False, True], [True, True]]> + >>> t2 >= rt4 + <tf.RaggedTensor [[True, True], [False, False]]> + + Args: + other: The right-hand side of the `>=` operator. + + Returns: + A `tf.RaggedTensor` of dtype `tf.bool` with the shape that `self` and + `other` broadcast to. + + Raises: + InvalidArgumentError: If `self` and `other` are not broadcast-compatible. + """ + return math_ops.greater_equal(self, other) + + +# ============================================================================= +# Logical Docstring +# ============================================================================= + + +# ============================================================================= +# Arithmetic Docstring +# ============================================================================= +def ragged_abs(self, name=None): # pylint: disable=g-doc-args + r"""Computes the absolute value of a ragged tensor. + + Given a ragged tensor of integer or floating-point values, this operation + returns a ragged tensor of the same type, where each element contains the + absolute value of the corresponding element in the input. + + Given a ragged tensor `x` of complex numbers, this operation returns a tensor + of type `float32` or `float64` that is the absolute value of each element in + `x`. For a complex number \\(a + bj\\), its absolute value is computed as + \\(\sqrt{a^2 + b^2}\\). + + For example: + + >>> # real number + >>> x = tf.ragged.constant([[-2.2, 3.2], [-4.2]]) + >>> tf.abs(x) + <tf.RaggedTensor [[2.2, 3.2], [4.2]]> + + >>> # complex number + >>> x = tf.ragged.constant([[-2.2 + 4.7j], [-3.2 + 5.7j], [-4.2 + 6.7j]]) + >>> tf.abs(x) + <tf.RaggedTensor [[5.1894123], [6.5368188], [7.9075912]]> + + Args: + name: A name for the operation (optional). + + Returns: + A `RaggedTensor` of the same size and type as `x`, with absolute values. + Note, for `complex64` or `complex128` input, the returned `RaggedTensor` + will be of type `float32` or `float64`, respectively. + """ + return math_ops.abs(self, name=name) + + +# =========================================================================== +def ragged_and(self, y, name=None): # pylint: disable=g-doc-args + r"""Returns the truth value of elementwise `x & y`. + + Logical AND function. + + Requires that `x` and `y` have the same shape or have + [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + shapes. For example, `y` can be: + + - A single Python boolean, where the result will be calculated by applying + logical AND with the single element to each element in `x`. + - A `tf.Tensor` object of dtype `tf.bool` of the same shape or + [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + shape. In this case, the result will be the element-wise logical AND of + `x` and `y`. + - A `tf.RaggedTensor` object of dtype `tf.bool` of the same shape or + [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + shape. In this case, the result will be the element-wise logical AND of + `x` and `y`. + + For example: + + >>> # `y` is a Python boolean + >>> x = tf.ragged.constant([[True, False], [True]]) + >>> y = True + >>> x & y + <tf.RaggedTensor [[True, False], [True]]> + >>> tf.math.logical_and(x, y) # Equivalent of x & y + <tf.RaggedTensor [[True, False], [True]]> + >>> y & x + <tf.RaggedTensor [[True, False], [True]]> + >>> tf.math.reduce_all(x & y) # Reduce to a scalar bool Tensor. + <tf.Tensor: shape=(), dtype=bool, numpy=False> + + >>> # `y` is a tf.Tensor of the same shape. + >>> x = tf.ragged.constant([[True, False], [True, False]]) + >>> y = tf.constant([[True, False], [False, True]]) + >>> x & y + <tf.RaggedTensor [[True, False], [False, False]]> + + >>> # `y` is a tf.Tensor of a broadcast-compatible shape. + >>> x = tf.ragged.constant([[True, False], [True]]) + >>> y = tf.constant([[True], [False]]) + >>> x & y + <tf.RaggedTensor [[True, False], [False]]> + + >>> # `y` is a `tf.RaggedTensor` of the same shape. + >>> x = tf.ragged.constant([[True, False], [True]]) + >>> y = tf.ragged.constant([[False, True], [True]]) + >>> x & y + <tf.RaggedTensor [[False, False], [True]]> + + >>> # `y` is a `tf.RaggedTensor` of a broadcast-compatible shape. + >>> x = tf.ragged.constant([[[True, True, False]], [[]], [[True, False]]]) + >>> y = tf.ragged.constant([[[True]], [[True]], [[False]]], ragged_rank=1) + >>> x & y + <tf.RaggedTensor [[[True, True, False]], [[]], [[False, False]]]> + + Args: + y: A Python boolean or a `tf.Tensor` or `tf.RaggedTensor` of dtype + `tf.bool`. + name: A name for the operation (optional). + + Returns: + A `tf.RaggedTensor` of dtype `tf.bool` with the shape that `x` and `y` + broadcast to. + """ + return math_ops.logical_and(self, y, name) + + +# Helper Methods. +def _right(operator): + """Right-handed version of an operator: swap args x and y.""" + return tf_decorator.make_decorator(operator, lambda y, x: operator(x, y)) + + +def ragged_hash(self): + """The operation invoked by the `RaggedTensor.__hash__` operator.""" + g = getattr(self.row_splits, "graph", None) + # pylint: disable=protected-access + if ( + tensor.Tensor._USE_EQUALITY + and ops.executing_eagerly_outside_functions() + and (g is None or g.building_function) + ): + raise TypeError("RaggedTensor is unhashable.") + else: + return id(self) + + +# Indexing +ragged_tensor.RaggedTensor.__getitem__ = ragged_getitem.ragged_tensor_getitem + +# Equality +ragged_tensor.RaggedTensor.__eq__ = ragged_eq +ragged_tensor.RaggedTensor.__ne__ = math_ops.tensor_not_equals +ragged_tensor.RaggedTensor.__hash__ = ragged_hash + +# Ordering operators +ragged_tensor.RaggedTensor.__ge__ = ragged_ge +ragged_tensor.RaggedTensor.__gt__ = math_ops.greater +ragged_tensor.RaggedTensor.__le__ = math_ops.less_equal +ragged_tensor.RaggedTensor.__lt__ = math_ops.less + +# Logical operators +ragged_tensor.RaggedTensor.__and__ = ragged_and +ragged_tensor.RaggedTensor.__rand__ = _right(ragged_and) + +ragged_tensor.RaggedTensor.__invert__ = math_ops.logical_not +ragged_tensor.RaggedTensor.__ror__ = _right(math_ops.logical_or) +ragged_tensor.RaggedTensor.__or__ = math_ops.logical_or +ragged_tensor.RaggedTensor.__xor__ = math_ops.logical_xor +ragged_tensor.RaggedTensor.__rxor__ = _right(math_ops.logical_xor) + +# Arithmetic operators +ragged_tensor.RaggedTensor.__abs__ = ragged_abs +ragged_tensor.RaggedTensor.__add__ = math_ops.add +ragged_tensor.RaggedTensor.__radd__ = _right(math_ops.add) +ragged_tensor.RaggedTensor.__div__ = math_ops.div +ragged_tensor.RaggedTensor.__rdiv__ = _right(math_ops.div) +ragged_tensor.RaggedTensor.__floordiv__ = math_ops.floordiv +ragged_tensor.RaggedTensor.__rfloordiv__ = _right(math_ops.floordiv) +ragged_tensor.RaggedTensor.__mod__ = math_ops.floormod +ragged_tensor.RaggedTensor.__rmod__ = _right(math_ops.floormod) +ragged_tensor.RaggedTensor.__mul__ = math_ops.multiply +ragged_tensor.RaggedTensor.__rmul__ = _right(math_ops.multiply) +ragged_tensor.RaggedTensor.__neg__ = math_ops.negative +ragged_tensor.RaggedTensor.__pow__ = math_ops.pow +ragged_tensor.RaggedTensor.__rpow__ = _right(math_ops.pow) +ragged_tensor.RaggedTensor.__sub__ = math_ops.subtract +ragged_tensor.RaggedTensor.__rsub__ = _right(math_ops.subtract) +ragged_tensor.RaggedTensor.__truediv__ = math_ops.truediv +ragged_tensor.RaggedTensor.__rtruediv__ = _right(math_ops.truediv) + + +def ragged_bool(self): # pylint: disable=g-doc-args + """Raises TypeError when a RaggedTensor is used as a Python bool.
+ + To prevent RaggedTensor from being used as a bool, this function always raises + TypeError when called. + + For example: + + >>> x = tf.ragged.constant([[1, 2], [3]]) + >>> result = True if x else False # Evaluate x as a bool value. + Traceback (most recent call last): + ... + TypeError: RaggedTensor may not be used as a boolean. + + >>> x = tf.ragged.constant([[1]]) + >>> r = (x == 1) # tf.RaggedTensor [[True]] + >>> if r: # Evaluate r as a bool value. + ... pass + Traceback (most recent call last): + ... + TypeError: RaggedTensor may not be used as a boolean. + """ + raise TypeError("RaggedTensor may not be used as a boolean.") + + +ragged_tensor.RaggedTensor.__bool__ = ragged_bool # Python3 bool conversion. +ragged_tensor.RaggedTensor.__nonzero__ = ragged_bool # Python2 bool conversion. diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0809d152bac3230f0bac37a0333cafde7130416e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_ops.py @@ -0,0 +1,51 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Import all modules in the `ragged` package that define exported symbols. + +Additionally, import ragged_dispatch (which has the side-effect of registering +dispatch handlers for many standard TF ops) and ragged_operators (which has the +side-effect of overriding RaggedTensor operators, such as RaggedTensor.__add__). + +We don't import these modules from ragged/__init__.py, since we want to avoid +circular dependencies.
+""" + + +# pylint: disable=unused-import +from tensorflow.python.ops.ragged import ragged_array_ops +from tensorflow.python.ops.ragged import ragged_autograph +from tensorflow.python.ops.ragged import ragged_batch_gather_ops +from tensorflow.python.ops.ragged import ragged_batch_gather_with_default_op +from tensorflow.python.ops.ragged import ragged_bincount_ops +from tensorflow.python.ops.ragged import ragged_check_ops +from tensorflow.python.ops.ragged import ragged_concat_ops +from tensorflow.python.ops.ragged import ragged_conversion_ops +from tensorflow.python.ops.ragged import ragged_dispatch +from tensorflow.python.ops.ragged import ragged_embedding_ops +from tensorflow.python.ops.ragged import ragged_factory_ops +from tensorflow.python.ops.ragged import ragged_functional_ops +from tensorflow.python.ops.ragged import ragged_gather_ops +from tensorflow.python.ops.ragged import ragged_getitem +from tensorflow.python.ops.ragged import ragged_image_ops +from tensorflow.python.ops.ragged import ragged_map_ops +from tensorflow.python.ops.ragged import ragged_math_ops +from tensorflow.python.ops.ragged import ragged_operators +from tensorflow.python.ops.ragged import ragged_squeeze_op +from tensorflow.python.ops.ragged import ragged_string_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import ragged_tensor_shape +from tensorflow.python.ops.ragged import ragged_tensor_value +from tensorflow.python.ops.ragged import ragged_where_op +from tensorflow.python.ops.ragged import segment_id_ops diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_squeeze_op.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_squeeze_op.py new file mode 100644 index 0000000000000000000000000000000000000000..6d35ffd493ecfb9823eaca70c349cc2b22a947b5 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_squeeze_op.py @@ -0,0 +1,133 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Operator Squeeze for RaggedTensors.""" + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor +from tensorflow.python.util import deprecation +from tensorflow.python.util import dispatch + + +@dispatch.dispatch_for_api(array_ops.squeeze_v2) +def squeeze(input: ragged_tensor.Ragged, axis=None, name=None): # pylint: disable=redefined-builtin + """Ragged compatible squeeze. 
+ + If `input` is a `tf.Tensor`, then this calls `tf.squeeze`. + + If `input` is a `tf.RaggedTensor`, then this operation takes `O(N)` time, + where `N` is the number of elements in the squeezed dimensions. + + Args: + input: A potentially ragged tensor. The input to squeeze. + axis: An optional list of ints. Defaults to `None`. If the `input` is + ragged, it only squeezes the dimensions listed. It fails if `input` is + ragged and axis is []. If `input` is not ragged it calls tf.squeeze. Note + that it is an error to squeeze a dimension that is not 1. It must be in + the range of [-rank(input), rank(input)). + name: A name for the operation (optional). + + Returns: + A potentially ragged tensor. Contains the same data as input, + but has one or more dimensions of size 1 removed. + """ + with ops.name_scope(name, 'RaggedSqueeze', [input]): + input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input) + if isinstance(input, tensor.Tensor): + return array_ops.squeeze(input, axis, name) + + if axis is None: + raise ValueError('Ragged.squeeze must have an axis argument.') + if isinstance(axis, int): + axis = [axis] + elif ((not isinstance(axis, (list, tuple))) or + (not all(isinstance(d, int) for d in axis))): + raise TypeError('Axis must be a list or tuple of integers.') + + dense_dims = [] + ragged_dims = [] + # Normalize all the dims in axis to be positive + axis = [ + array_ops.get_positive_axis(d, input.shape.ndims, 'axis[%d]' % i, + 'rank(input)') for i, d in enumerate(axis) + ] + for dim in axis: + if dim > input.ragged_rank: + dense_dims.append(dim - input.ragged_rank) + else: + ragged_dims.append(dim) + + # Make sure the specified ragged dimensions are squeezable. + assertion_list = [] + scalar_tensor_one = constant_op.constant(1, dtype=input.row_splits.dtype) + for i, r in enumerate(input.nested_row_lengths()): + if i + 1 in ragged_dims: + assertion_list.append( + control_flow_assert.Assert( + math_ops.reduce_all(math_ops.equal(r, scalar_tensor_one)), + ['the given axis (axis = %d) is not squeezable!' % (i + 1)])) + if 0 in ragged_dims: + scalar_tensor_two = constant_op.constant(2, dtype=dtypes.int32) + assertion_list.append( + control_flow_assert.Assert( + math_ops.equal( + array_ops.size(input.row_splits), scalar_tensor_two), + ['the given axis (axis = 0) is not squeezable!'])) + + # At this point, we are sure that the ragged dimensions are squeezable. + squeezed_rt = control_flow_ops.with_dependencies(assertion_list, + input.flat_values) + + if dense_dims: + # Raises an error if the dense dimension is not squeezable. + squeezed_rt = array_ops.squeeze(squeezed_rt, dense_dims) + + remaining_row_splits = [] + for i, row_split in enumerate(input.nested_row_splits): + # each row_splits tensor is for dimension #(i+1). + if (i + 1) not in ragged_dims: + remaining_row_splits.append(row_split) + # Take care of the first row if it is to be squeezed. + if remaining_row_splits and 0 in ragged_dims: + remaining_row_splits.pop(0) + + squeezed_rt = RaggedTensor.from_nested_row_splits(squeezed_rt, + remaining_row_splits) + + # Corner case: when removing all the ragged dimensions and the output is + # a scalar tensor e.g. ragged.squeeze(ragged.constant([[[1]]])).
+ if set(range(0, input.ragged_rank + 1)).issubset(set(ragged_dims)): + squeezed_rt = array_ops.squeeze(squeezed_rt, [0], name) + + return squeezed_rt + + +@dispatch.dispatch_for_api(array_ops.squeeze) +def _ragged_squeeze_v1(input: ragged_tensor.Ragged, # pylint: disable=redefined-builtin + axis=None, + name=None, + squeeze_dims=None): + axis = deprecation.deprecated_argument_lookup('axis', axis, 'squeeze_dims', + squeeze_dims) + return squeeze(input, axis, name) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_string_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_string_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..41f79781c7b4a8895d0ec050e7361edfb791371b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_string_ops.py @@ -0,0 +1,948 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Ragged operations for working with string Tensors.""" + +import typing + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import cond +from tensorflow.python.ops import gen_string_ops +from tensorflow.python.ops import map_fn as map_fn_lib +from tensorflow.python.ops import string_ops +from tensorflow.python.ops.ragged import ragged_array_ops +from tensorflow.python.ops.ragged import ragged_functional_ops +from tensorflow.python.ops.ragged import ragged_math_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import compat as util_compat +from tensorflow.python.util import deprecation +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("strings.bytes_split") +@dispatch.add_dispatch_support +def string_bytes_split(input, name=None): # pylint: disable=redefined-builtin + """Split string elements of `input` into bytes. + + Examples: + + >>> tf.strings.bytes_split('hello').numpy() + array([b'h', b'e', b'l', b'l', b'o'], dtype=object) + >>> tf.strings.bytes_split(['hello', '123']) + <tf.RaggedTensor [[b'h', b'e', b'l', b'l', b'o'], [b'1', b'2', b'3']]> + + Note that this op splits strings into bytes, not unicode characters. To + split strings into unicode characters, use `tf.strings.unicode_split`. + + See also: `tf.io.decode_raw`, `tf.strings.split`, `tf.strings.unicode_split`. + + Args: + input: A string `Tensor` or `RaggedTensor`: the strings to split. Must + have a statically known rank (`N`). + name: A name for the operation (optional). + + Returns: + A `RaggedTensor` of rank `N+1`: the bytes that make up the source strings.
+ """ + with ops.name_scope(name, "StringsByteSplit", [input]): + input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, + name="input") + if isinstance(input, ragged_tensor.RaggedTensor): + return input.with_flat_values(string_bytes_split(input.flat_values)) + + rank = input.shape.ndims + if rank is None: + raise ValueError("input must have a statically-known rank.") + + if rank == 0: + return string_bytes_split(array_ops_stack.stack([input]))[0] + elif rank == 1: + indices, values, shape = gen_string_ops.string_split( + input, delimiter="", skip_empty=False) + return ragged_tensor.RaggedTensor.from_value_rowids( + values=values, value_rowids=indices[:, 0], nrows=shape[0], + validate=False) + else: + return string_bytes_split(ragged_tensor.RaggedTensor.from_tensor(input)) + + +# pylint: disable=redefined-builtin +@tf_export("strings.unicode_encode") +@dispatch.add_dispatch_support +def unicode_encode(input, + output_encoding, + errors="replace", + replacement_char=65533, + name=None): + r"""Encodes each sequence of Unicode code points in `input` into a string. + + `result[i1...iN]` is the string formed by concatenating the Unicode + codepoints `input[1...iN, :]`, encoded using `output_encoding`. + + Args: + input: An `N+1` dimensional potentially ragged integer tensor with shape + `[D1...DN, num_chars]`. + output_encoding: Unicode encoding that should be used to encode each + codepoint sequence. Can be `"UTF-8"`, `"UTF-16-BE"`, or `"UTF-32-BE"`. + errors: Specifies the response when an invalid codepoint is encountered + (optional). One of: + * `'replace'`: Replace invalid codepoint with the + `replacement_char`. (default) + * `'ignore'`: Skip invalid codepoints. + * `'strict'`: Raise an exception for any invalid codepoint. + replacement_char: The replacement character codepoint to be used in place of + any invalid input when `errors='replace'`. Any valid unicode codepoint may + be used. The default value is the default unicode replacement character + which is 0xFFFD (U+65533). + name: A name for the operation (optional). + + Returns: + A `N` dimensional `string` tensor with shape `[D1...DN]`. + + #### Example: + + >>> input = tf.ragged.constant( + ... [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]) + >>> print(unicode_encode(input, 'UTF-8')) + tf.Tensor([b'G\xc3\xb6\xc3\xb6dnight' b'\xf0\x9f\x98\x8a'], + shape=(2,), dtype=string) + """ + with ops.name_scope(name, "UnicodeEncode", [input]): + input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(input) + if input_tensor.shape.ndims is None: + raise ValueError("Rank of input_tensor must be statically known.") + if ragged_tensor.is_ragged(input_tensor): + if input_tensor.flat_values.shape.ndims > 1: + # If the flat_values of our ragged tensor is multi-dimensional, we can + # process it separately and our output will have the same nested splits + # as our input. + return input_tensor.with_flat_values( + unicode_encode(input_tensor.flat_values, output_encoding, errors, + replacement_char)) + elif input_tensor.ragged_rank > 1: + # Recursively process the values of the ragged tensor. + return input_tensor.with_values( + unicode_encode(input_tensor.values, output_encoding, errors, + replacement_char)) + else: + # Our ragged tensor is of the correct shape (rank 1 flat_values tensor + # with ragged_rank of 1) so we can process it as normal. 
+ return gen_string_ops.unicode_encode( + input_values=input_tensor.values, + input_splits=input_tensor.row_splits, + output_encoding=output_encoding, + errors=errors, + replacement_char=replacement_char) + else: + if input_tensor.shape.ndims == 2: + # The input tensor is of the correct 2-D shape, it's just not ragged. + return unicode_encode( + ragged_tensor.RaggedTensor.from_tensor(input_tensor), + output_encoding, errors, replacement_char) + elif input_tensor.shape.ndims > 2: + # We need to initially flatten the input tensor to 2-D, and then can + # reshape the output of our processed flattened tensor. + flat_input_tensor = array_ops.reshape( + input_tensor, + array_ops_stack.stack([-1, array_ops.shape(input_tensor)[-1]])) + flat_output_tensor = unicode_encode(flat_input_tensor, output_encoding, + errors, replacement_char) + return array_ops.reshape(flat_output_tensor, input_tensor.shape[:-1]) + elif input_tensor.shape.ndims == 0: + raise ValueError("input_tensor's rank must be at least 1.") + else: + # Our input tensor is rank 1, so we create a ragged tensor with an added + # dimension to create the correct input shape & type, and then remove + # the additional dimension from the output and return the string scalar. + ragged_input_tensor = ragged_tensor.RaggedTensor.from_row_splits( + input_tensor, + array_ops_stack.stack( + [0, array_ops.shape(input_tensor, out_type=dtypes.int32)[0]]), + validate=False) + output_tensor = unicode_encode(ragged_input_tensor, output_encoding, + errors, replacement_char) + return array_ops.reshape(output_tensor, []) + + +# pylint: disable=redefined-builtin +@tf_export("strings.unicode_decode") +@dispatch.add_dispatch_support +def unicode_decode(input, + input_encoding, + errors="replace", + replacement_char=0xFFFD, + replace_control_characters=False, + name=None): + r"""Decodes each string in `input` into a sequence of Unicode code points. + + `result[i1...iN, j]` is the Unicode codepoint for the `j`th character in + `input[i1...iN]`, when decoded using `input_encoding`. + + Args: + input: An `N` dimensional potentially ragged `string` tensor with shape + `[D1...DN]`. `N` must be statically known. + input_encoding: String name for the unicode encoding that should be used to + decode each string. + errors: Specifies the response when an input string can't be converted + using the indicated encoding. One of: + * `'strict'`: Raise an exception for any illegal substrings. + * `'replace'`: Replace illegal substrings with `replacement_char`. + * `'ignore'`: Skip illegal substrings. + replacement_char: The replacement codepoint to be used in place of invalid + substrings in `input` when `errors='replace'`; and in place of C0 control + characters in `input` when `replace_control_characters=True`. + replace_control_characters: Whether to replace the C0 control characters + `(U+0000 - U+001F)` with the `replacement_char`. + name: A name for the operation (optional). + + Returns: + A `N+1` dimensional `int32` tensor with shape `[D1...DN, (num_chars)]`. + The returned tensor is a `tf.Tensor` if `input` is a scalar, or a + `tf.RaggedTensor` otherwise. 
+ + #### Example: + + >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] + >>> tf.strings.unicode_decode(input, 'UTF-8').to_list() + [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]] + """ + with ops.name_scope(name, "UnicodeDecode", [input]): + return _unicode_decode(input, input_encoding, errors, replacement_char, + replace_control_characters, with_offsets=False) + + +@tf_export("strings.unicode_decode_with_offsets") +@dispatch.add_dispatch_support +def unicode_decode_with_offsets(input, + input_encoding, + errors="replace", + replacement_char=0xFFFD, + replace_control_characters=False, + name=None): + r"""Decodes each string into a sequence of code points with start offsets. + + This op is similar to `tf.strings.decode(...)`, but it also returns the + start offset for each character in its respective string. This information + can be used to align the characters with the original byte sequence. + + Returns a tuple `(codepoints, start_offsets)` where: + + * `codepoints[i1...iN, j]` is the Unicode codepoint for the `j`th character + in `input[i1...iN]`, when decoded using `input_encoding`. + * `start_offsets[i1...iN, j]` is the start byte offset for the `j`th + character in `input[i1...iN]`, when decoded using `input_encoding`. + + Args: + input: An `N` dimensional potentially ragged `string` tensor with shape + `[D1...DN]`. `N` must be statically known. + input_encoding: String name for the unicode encoding that should be used to + decode each string. + errors: Specifies the response when an input string can't be converted + using the indicated encoding. One of: + * `'strict'`: Raise an exception for any illegal substrings. + * `'replace'`: Replace illegal substrings with `replacement_char`. + * `'ignore'`: Skip illegal substrings. + replacement_char: The replacement codepoint to be used in place of invalid + substrings in `input` when `errors='replace'`; and in place of C0 control + characters in `input` when `replace_control_characters=True`. + replace_control_characters: Whether to replace the C0 control characters + `(U+0000 - U+001F)` with the `replacement_char`. + name: A name for the operation (optional). + + Returns: + A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`. + + * `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`. + * `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`. + + The returned tensors are `tf.Tensor`s if `input` is a scalar, or + `tf.RaggedTensor`s otherwise. + + #### Example: + + >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] + >>> result = tf.strings.unicode_decode_with_offsets(input, 'UTF-8') + >>> result[0].to_list() # codepoints + [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]] + >>> result[1].to_list() # offsets + [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]] + + """ + with ops.name_scope(name, "UnicodeDecodeWithOffsets", [input]): + return _unicode_decode(input, input_encoding, errors, replacement_char, + replace_control_characters, with_offsets=True) + + +@tf_export("strings.unicode_split") +@dispatch.add_dispatch_support +def unicode_split(input, + input_encoding, + errors="replace", + replacement_char=0xFFFD, + name=None): + r"""Splits each string in `input` into a sequence of Unicode code points. + + `result[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its + `j`th character, when decoded using `input_encoding`. + + Args: + input: An `N` dimensional potentially ragged `string` tensor with shape + `[D1...DN]`. 
`N` must be statically known. + input_encoding: String name for the unicode encoding that should be used to + decode each string. + errors: Specifies the response when an input string can't be converted + using the indicated encoding. One of: + * `'strict'`: Raise an exception for any illegal substrings. + * `'replace'`: Replace illegal substrings with `replacement_char`. + * `'ignore'`: Skip illegal substrings. + replacement_char: The replacement codepoint to be used in place of invalid + substrings in `input` when `errors='replace'`. + name: A name for the operation (optional). + + Returns: + A `N+1` dimensional `int32` tensor with shape `[D1...DN, (num_chars)]`. + The returned tensor is a `tf.Tensor` if `input` is a scalar, or a + `tf.RaggedTensor` otherwise. + + #### Example: + + >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] + >>> tf.strings.unicode_split(input, 'UTF-8').to_list() + [[b'G', b'\xc3\xb6', b'\xc3\xb6', b'd', b'n', b'i', b'g', b'h', b't'], + [b'\xf0\x9f\x98\x8a']] + """ + with ops.name_scope(name, "UnicodeSplit", [input]): + codepoints = _unicode_decode(input, input_encoding, errors, + replacement_char, False, with_offsets=False) + return unicode_encode( + ragged_array_ops.expand_dims(codepoints, -1), + output_encoding=input_encoding, + errors=errors, + replacement_char=replacement_char) + + +@tf_export("strings.unicode_split_with_offsets") +@dispatch.add_dispatch_support +def unicode_split_with_offsets(input, + input_encoding, + errors="replace", + replacement_char=0xFFFD, + name=None): + r"""Splits each string into a sequence of code points with start offsets. + + This op is similar to `tf.strings.decode(...)`, but it also returns the + start offset for each character in its respective string. This information + can be used to align the characters with the original byte sequence. + + Returns a tuple `(chars, start_offsets)` where: + + * `chars[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its + `j`th character, when decoded using `input_encoding`. + * `start_offsets[i1...iN, j]` is the start byte offset for the `j`th + character in `input[i1...iN]`, when decoded using `input_encoding`. + + Args: + input: An `N` dimensional potentially ragged `string` tensor with shape + `[D1...DN]`. `N` must be statically known. + input_encoding: String name for the unicode encoding that should be used to + decode each string. + errors: Specifies the response when an input string can't be converted + using the indicated encoding. One of: + * `'strict'`: Raise an exception for any illegal substrings. + * `'replace'`: Replace illegal substrings with `replacement_char`. + * `'ignore'`: Skip illegal substrings. + replacement_char: The replacement codepoint to be used in place of invalid + substrings in `input` when `errors='replace'`. + name: A name for the operation (optional). + + Returns: + A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`. + + * `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`. + * `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`. + + The returned tensors are `tf.Tensor`s if `input` is a scalar, or + `tf.RaggedTensor`s otherwise. 
+ + #### Example: + + >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] + >>> result = tf.strings.unicode_split_with_offsets(input, 'UTF-8') + >>> result[0].to_list() # character substrings + [[b'G', b'\xc3\xb6', b'\xc3\xb6', b'd', b'n', b'i', b'g', b'h', b't'], + [b'\xf0\x9f\x98\x8a']] + >>> result[1].to_list() # offsets + [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]] + + """ + with ops.name_scope(name, "UnicodeSplitWithOffsets", [input]): + codepoints, offsets = _unicode_decode(input, input_encoding, errors, + replacement_char, False, + with_offsets=True) + chars = unicode_encode( + ragged_array_ops.expand_dims(codepoints, -1), + output_encoding=input_encoding, + errors=errors, + replacement_char=replacement_char) + return chars, offsets + + +def _unicode_decode(input, input_encoding, errors, replacement_char, + replace_control_characters, with_offsets): + """Decodes each string into a sequence of codepoints.""" + input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, name="input") + input_ndims = input.shape.ndims + if input_ndims is None: + raise ValueError("Rank of `input` must be statically known.") + + if input_ndims > 1: + # Convert to a ragged tensor with ragged_rank = input_ndims - 1. + if not ragged_tensor.is_ragged(input): + input = ragged_tensor.RaggedTensor.from_tensor( + input, ragged_rank=input_ndims - 1) + elif input.ragged_rank < input_ndims - 1: + input = input.with_flat_values( + ragged_tensor.RaggedTensor.from_tensor( + input.flat_values, + ragged_rank=input_ndims - input.ragged_rank - 1)) + + # Reshape the input to a flat vector, and apply the gen_string_ops op. + if ragged_tensor.is_ragged(input): + flat_input = array_ops.reshape(input.flat_values, [-1]) + else: + flat_input = array_ops.reshape(input, [-1]) + + if with_offsets: + decode_op = gen_string_ops.unicode_decode_with_offsets + else: + decode_op = gen_string_ops.unicode_decode + flat_result = decode_op( + input=flat_input, + input_encoding=input_encoding, + errors=errors, + replacement_char=replacement_char, + replace_control_characters=replace_control_characters) + + if input_ndims == 0: + codepoints = flat_result.char_values + if with_offsets: + offsets = flat_result.char_to_byte_starts + else: + codepoints = ragged_tensor.RaggedTensor.from_row_splits( + flat_result.char_values, flat_result.row_splits, validate=False) + if input_ndims > 1: + codepoints = input.with_flat_values(codepoints) + if with_offsets: + offsets = ragged_tensor.RaggedTensor.from_row_splits( + flat_result.char_to_byte_starts, flat_result.row_splits, + validate=False) + if input_ndims > 1: + offsets = input.with_flat_values(offsets) + + if with_offsets: + return codepoints, offsets + else: + return codepoints + + +@tf_export("strings.split", v1=[]) +@dispatch.add_dispatch_support +def string_split_v2(input, sep=None, maxsplit=-1, name=None): # pylint: disable=redefined-builtin + """Split elements of `input` based on `sep` into a `RaggedTensor`. + + Let N be the size of `input` (typically N will be the batch size). Split each + element of `input` based on `sep` and return a `RaggedTensor` containing the + split tokens. Empty tokens are ignored. + + Example: + + >>> tf.strings.split('hello world').numpy() + array([b'hello', b'world'], dtype=object) + >>> tf.strings.split(['hello world', 'a b c']) + + + If `sep` is given, consecutive delimiters are not grouped together and are + deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and + `sep` of `"<>"` returns `["1", "2", "", "3"]`. 
If `sep` is None or an empty + string, consecutive whitespace are regarded as a single separator, and the + result will contain no empty strings at the start or end if the string has + leading or trailing whitespace. + + Note that the above mentioned behavior matches python's str.split. + + Args: + input: A string `Tensor` of rank `N`, the strings to split. If + `rank(input)` is not known statically, then it is assumed to be `1`. + sep: `0-D` string `Tensor`, the delimiter string. + maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result. + name: A name for the operation (optional). + + Raises: + ValueError: If sep is not a string. + + Returns: + A `RaggedTensor` of rank `N+1`, the strings split according to the + delimiter. + """ + with ops.name_scope(name, "StringSplit", [input]): + input = ragged_tensor.convert_to_tensor_or_ragged_tensor( + input, dtype=dtypes.string, name="input") + if isinstance(input, ragged_tensor.RaggedTensor): + return input.with_flat_values( + string_split_v2(input.flat_values, sep, maxsplit)) + + rank = input.shape.ndims + if rank == 0: + return string_split_v2(array_ops_stack.stack([input]), sep, maxsplit)[0] + elif rank == 1 or rank is None: + sparse_result = string_ops.string_split_v2( + input, sep=sep, maxsplit=maxsplit) + return ragged_tensor.RaggedTensor.from_value_rowids( + values=sparse_result.values, + value_rowids=sparse_result.indices[:, 0], + nrows=sparse_result.dense_shape[0], + validate=False) + else: + return string_split_v2( + ragged_tensor.RaggedTensor.from_tensor(input), sep, maxsplit) + + +@tf_export(v1=["string_split"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, + "delimiter is deprecated, please use sep instead.", + "delimiter") +def string_split(source, sep=None, skip_empty=True, delimiter=None, + result_type="SparseTensor", name=None): # pylint: disable=invalid-name + """Split elements of `source` based on `delimiter`. + + Let N be the size of `source` (typically N will be the batch size). Split each + element of `source` based on `delimiter` and return a `SparseTensor` + or `RaggedTensor` containing the split tokens. Empty tokens are ignored. + + If `sep` is an empty string, each element of the `source` is split + into individual strings, each containing one byte. (This includes splitting + multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is + treated as a set of delimiters with each considered a potential split point. + + Examples: + + >>> print(tf.compat.v1.string_split(['hello world', 'a b c'])) + SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [1 0] [1 1] [1 2]], ...), + values=tf.Tensor([b'hello' b'world' b'a' b'b' b'c'], ...), + dense_shape=tf.Tensor([2 3], shape=(2,), dtype=int64)) + + >>> print(tf.compat.v1.string_split(['hello world', 'a b c'], + ... result_type="RaggedTensor")) + + + Args: + source: `1-D` string `Tensor`, the strings to split. + sep: `0-D` string `Tensor`, the delimiter character, the string should + be length 0 or 1. Default is ' '. + skip_empty: A `bool`. If `True`, skip the empty strings from the result. + delimiter: deprecated alias for `sep`. + result_type: The tensor type for the result: one of `"RaggedTensor"` or + `"SparseTensor"`. + name: A name for the operation (optional). + + Raises: + ValueError: If delimiter is not a string. + + Returns: + A `SparseTensor` or `RaggedTensor` of rank `2`, the strings split according + to the delimiter. 
The first column of the indices corresponds to the row + in `source` and the second column corresponds to the index of the split + component in this row. + """ + with ops.name_scope(name, "StringSplit", [source]): + sparse_result = string_ops.string_split( + source, sep=sep, skip_empty=skip_empty, delimiter=delimiter) + if result_type == "SparseTensor": + return sparse_result + elif result_type == "RaggedTensor": + return ragged_tensor.RaggedTensor.from_value_rowids( + values=sparse_result.values, + value_rowids=sparse_result.indices[:, 0], + nrows=sparse_result.dense_shape[0], + validate=False) + else: + raise ValueError("result_type must be 'RaggedTensor' or 'SparseTensor'.") + + +# In TensorFlow 1.x, "tf.strings.split" uses the new signature (with maxsplit), +# but we need to add the result_type argument. +@tf_export(v1=["strings.split"]) +@dispatch.add_dispatch_support +def strings_split_v1(input=None, sep=None, maxsplit=-1, # pylint: disable=redefined-builtin + result_type="SparseTensor", source=None, name=None): + """Split elements of `input` based on `sep`. + + Let N be the size of `input` (typically N will be the batch size). Split each + element of `input` based on `sep` and return a `SparseTensor` or + `RaggedTensor` containing the split tokens. Empty tokens are ignored. + + Examples: + + >>> print(tf.compat.v1.strings.split(['hello world', 'a b c'])) + SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [1 0] [1 1] [1 2]], ...), + values=tf.Tensor([b'hello' b'world' b'a' b'b' b'c'], ...), + dense_shape=tf.Tensor([2 3], shape=(2,), dtype=int64)) + + >>> print(tf.compat.v1.strings.split(['hello world', 'a b c'], + ... result_type="RaggedTensor")) + + + If `sep` is given, consecutive delimiters are not grouped together and are + deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and + `sep` of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty + string, consecutive whitespace are regarded as a single separator, and the + result will contain no empty strings at the start or end if the string has + leading or trailing whitespace. + + Note that the above mentioned behavior matches python's str.split. + + Args: + input: A string `Tensor` of rank `N`, the strings to split. If + `rank(input)` is not known statically, then it is assumed to be `1`. + sep: `0-D` string `Tensor`, the delimiter character. + maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result. + result_type: The tensor type for the result: one of `"RaggedTensor"` or + `"SparseTensor"`. + source: alias for "input" argument. + name: A name for the operation (optional). + + Raises: + ValueError: If sep is not a string. + + Returns: + A `SparseTensor` or `RaggedTensor` of rank `N+1`, the strings split + according to the delimiter. 
+ """ + input = deprecation.deprecated_argument_lookup( + "input", input, "source", source) + with ops.name_scope(name, "StringSplit", [input]): + input = ragged_tensor.convert_to_tensor_or_ragged_tensor( + input, dtype=dtypes.string, name="input") + + if input.shape.rank == 0: + input = array_ops.expand_dims(input, 0) + + if result_type == "SparseTensor": + if input.shape.rank == 1: + return string_ops.string_split_v2(input, sep=sep, maxsplit=maxsplit) + else: + return string_split_v2(input, sep=sep, maxsplit=maxsplit).to_sparse() + elif result_type == "RaggedTensor": + return string_split_v2(input, sep=sep, maxsplit=maxsplit) + else: + raise ValueError("result_type must be 'RaggedTensor' or 'SparseTensor'.") + + +@dispatch.dispatch_for_api(string_ops.reduce_join_v2) +def reduce_join(inputs: ragged_tensor.Ragged, + axis=None, + keepdims=None, + separator="", + name=None): + """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" + return ragged_math_ops.ragged_reduce_aggregate( + string_ops.reduce_join, string_ops.unsorted_segment_join, inputs, axis, + keepdims, separator, name or "RaggedSegmentJoin") + + +@tf_export("strings.ngrams") +@dispatch.add_dispatch_support +def ngrams(data, + ngram_width, + separator=" ", + pad_values=None, + padding_width=None, + preserve_short_sequences=False, + name=None): + """Create a tensor of n-grams based on `data`. + + Creates a tensor of n-grams based on `data`. The n-grams are created by + joining windows of `width` adjacent strings from the inner axis of `data` + using `separator`. + + The input data can be padded on both the start and end of the sequence, if + desired, using the `pad_values` argument. If set, `pad_values` should contain + either a tuple of strings or a single string; the 0th element of the tuple + will be used to pad the left side of the sequence and the 1st element of the + tuple will be used to pad the right side of the sequence. The `padding_width` + arg controls how many padding values are added to each side; it defaults to + `ngram_width-1`. + + If this op is configured to not have padding, or if it is configured to add + padding with `padding_width` set to less than ngram_width-1, it is possible + that a sequence, or a sequence plus padding, is smaller than the ngram + width. In that case, no ngrams will be generated for that sequence. This can + be prevented by setting `preserve_short_sequences`, which will cause the op + to always generate at least one ngram per non-empty sequence. + + Examples: + + >>> tf.strings.ngrams(["A", "B", "C", "D"], 2).numpy() + array([b'A B', b'B C', b'C D'], dtype=object) + >>> tf.strings.ngrams(["TF", "and", "keras"], 1).numpy() + array([b'TF', b'and', b'keras'], dtype=object) + + Args: + data: A Tensor or RaggedTensor containing the source data for the ngrams. + ngram_width: The width(s) of the ngrams to create. If this is a list or + tuple, the op will return ngrams of all specified arities in list order. + Values must be non-Tensor integers greater than 0. + separator: The separator string used between ngram elements. Must be a + string constant, not a Tensor. + pad_values: A tuple of (left_pad_value, right_pad_value), a single string, + or None. If None, no padding will be added; if a single string, then that + string will be used for both left and right padding. Values must be Python + strings. + padding_width: If set, `padding_width` pad values will be added to both + sides of each sequence. Defaults to `ngram_width`-1. Must be greater than + 0. 
(Note that 1-grams are never padded, regardless of this value.) + preserve_short_sequences: If true, then ensure that at least one ngram is + generated for each input sequence. In particular, if an input sequence is + shorter than `min(ngram_width) + 2*pad_width`, then generate a single + ngram containing the entire sequence. If false, then no ngrams are + generated for these short input sequences. + name: The op name. + + Returns: + A RaggedTensor of ngrams. If `data.shape=[D1...DN, S]`, then + `output.shape=[D1...DN, NUM_NGRAMS]`, where + `NUM_NGRAMS=S-ngram_width+1+2*padding_width`. + + Raises: + TypeError: if `pad_values` is set to an invalid type. + ValueError: if `pad_values`, `padding_width`, or `ngram_width` is set to an + invalid value. + """ + + with ops.name_scope(name, "StringNGrams", [data]): + if pad_values is None: + left_pad = "" + right_pad = "" + elif isinstance(pad_values, (list, tuple)): + if (not isinstance(pad_values[0], util_compat.bytes_or_text_types) or + not isinstance(pad_values[1], util_compat.bytes_or_text_types)): + raise TypeError( + "pad_values must be a string, tuple of strings, or None.") + left_pad = pad_values[0] + right_pad = pad_values[1] + else: + if not isinstance(pad_values, util_compat.bytes_or_text_types): + raise TypeError( + "pad_values must be a string, tuple of strings, or None.") + left_pad = pad_values + right_pad = pad_values + + if padding_width is not None and padding_width < 1: + raise ValueError("padding_width must be greater than 0.") + + if padding_width is not None and pad_values is None: + raise ValueError("pad_values must be provided if padding_width is set.") + + data = ragged_tensor.convert_to_tensor_or_ragged_tensor( + data, name="data", dtype=dtypes.string) + + # preserve the shape of the data if it is a tensor + to_tensor = False + if isinstance(data, tensor_lib.Tensor): + dense_shape = array_ops.concat([array_ops.shape(data)[:-1], [-1]], axis=0) + to_tensor = True + + if not isinstance(data, ragged_tensor.RaggedTensor): + if data.shape.ndims is None: + raise ValueError("Rank of data must be known.") + elif data.shape.ndims == 0: + raise ValueError("Data must have rank>0") + elif data.shape.ndims == 1: + rt = ragged_tensor.RaggedTensor.from_row_starts( + data, [0], validate=False) + return ngrams(rt, ngram_width, separator, pad_values, padding_width, + preserve_short_sequences, name)[0] + else: + data = ragged_tensor.RaggedTensor.from_tensor( + data, ragged_rank=data.shape.ndims - 1) + + if data.ragged_rank > 1: + output = data.with_values( + ngrams(data.values, ngram_width, separator, pad_values, padding_width, + preserve_short_sequences, name)) + return array_ops.reshape(output.flat_values, + dense_shape) if to_tensor else output + + if pad_values is None: + padding_width = 0 + + if pad_values is not None and padding_width is None: + padding_width = -1 + + if not isinstance(ngram_width, (list, tuple)): + ngram_widths = [ngram_width] + else: + ngram_widths = ngram_width + for width in ngram_widths: + if width < 1: + raise ValueError("All ngram_widths must be greater than 0. 
Got %s" % + ngram_width) + + output, output_splits = gen_string_ops.string_n_grams( + data=data.flat_values, + data_splits=data.row_splits, + separator=separator, + ngram_widths=ngram_widths, + left_pad=left_pad, + right_pad=right_pad, + pad_width=padding_width, + preserve_short_sequences=preserve_short_sequences) + + # if the input is Dense tensor, the output should also be a dense tensor + output = ragged_tensor.RaggedTensor.from_row_splits( + values=output, row_splits=output_splits, validate=False) + return array_ops.reshape(output.flat_values, + dense_shape) if to_tensor else output + + +@dispatch.dispatch_for_api(string_ops.string_format) +def string_format( + template: str, + inputs: typing.Union[ragged_tensor.Ragged, + typing.List[ragged_tensor.RaggedOrDense]], + placeholder="{}", + summarize=3, + name=None): + """Version of tf.strings.format that handles RaggedTensors.""" + if tensor_util.is_tf_type(inputs) or ragged_tensor.is_ragged(inputs): + inputs = [inputs] + + split_template = template.split(placeholder) + if len(inputs) != len(split_template) - 1: + raise ValueError("num placeholders in template and num inputs must match" + ": {} vs {}".format(len(split_template) - 1, len(inputs))) + + with ops.name_scope(name, "StringFormat", [inputs]): + output_pieces = [constant_op.constant(split_template[0])] + for i, input in enumerate(inputs): + if ragged_tensor.is_ragged(input): + output_pieces.append(ragged_tensor_to_string(input, summarize)) + else: + output_pieces.append(string_ops.string_format( + "{}", [input], summarize=summarize)) + output_pieces.append(constant_op.constant(split_template[i + 1])) + if len(output_pieces) == 1: + return output_pieces[0] + else: + return string_ops.reduce_join(output_pieces) + + +def ragged_tensor_to_string(rt, summarize=None): + """Returns a scalar string tensor with the contents of a RaggedTensor. + + Requires that `rt.shape.rank` is not `None`. + + Note: this converts the entire `RaggedTensor` into a single string scalar. + If you want to convert individual elements, use `tf.strings.as_string(rt)`. + + >>> rt1 = tf.ragged.constant([[1, 2, 3], [4, 5]]) + >>> ragged_tensor_to_string(rt1).numpy() + b'[[1, 2, 3], [4, 5]]' + + >>> rt2 = tf.ragged.constant([[['a'], ['b', 'c']], [['d', 'e', 'f'], []]]) + >>> ragged_tensor_to_string(rt2).numpy() + b"[[['a'], ['b', 'c']], [['d', 'e', 'f'], []]]" + + >>> rt3 = tf.ragged.constant([[1], [2, 3, 4, 5, 6], [], [], [7], [8, 9]]) + >>> ragged_tensor_to_string(rt3, summarize=2).numpy() + b'[[1], [2, 3, ..., 5, 6], ..., [7], [8, 9]]' + + Args: + rt: The RaggedTensor that should be converted to a string. + summarize: If specified, then only the first and last `summarize` elements + within each dimension are included in the string. If `-1` or `None`, then + all elements are included. + """ + if (summarize is not None and summarize != -1 and + not (isinstance(summarize, int) and summarize > 0)): + raise ValueError("Expected summarize to be -1 or a positive int, got %r" % + summarize) + with ops.name_scope(None, "AsString", [rt]): + rt = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt) + if rt.shape.rank is None: + raise ValueError("RaggedTensor to_string requires that rt.shape.rank " + "is not None.") + # Convert all elements of `rt` to strings. 
+    if rt.dtype == dtypes.string:
+      escaped = string_ops.regex_replace(rt.flat_values, r"(['\\])", r"\\\1")
+      str_t = rt.with_flat_values("'" + escaped + "'")
+    else:
+      str_t = rt.with_flat_values(string_ops.as_string(rt.flat_values))
+
+    return _ragged_tensor_to_string(str_t, summarize)
+
+
+def _ragged_tensor_to_string(string_tensor, summarize):
+  """Returns a scalar string tensor with the contents of `string_tensor`.
+
+  Args:
+    string_tensor: A potentially ragged tensor with dtype=string.
+    summarize: Include only the first and last `summarize` elements of each
+      dimension. If `-1` or `None`, then include all elements.
+
+  Returns:
+    A scalar string Tensor.
+  """
+  if string_tensor.shape.rank == 1:
+    pieces = string_tensor
+  else:
+    pieces = map_fn_lib.map_fn(
+        lambda s: _ragged_tensor_to_string(s, summarize),
+        string_tensor,
+        fn_output_signature=tensor_lib.TensorSpec(None, dtypes.string))
+  if summarize not in (-1, None):
+    pieces = cond.cond(
+        _nrows(string_tensor) <= 2 * summarize,
+        lambda: pieces,
+        lambda: array_ops.concat(  # pylint: disable=g-long-lambda
+            [pieces[:summarize], ["..."], pieces[-summarize:]],
+            axis=0))
+  return "[" + string_ops.reduce_join(pieces, separator=", ") + "]"
+
+
+def _nrows(tensor, out_type=dtypes.int32):
+  if isinstance(tensor, ragged_tensor.RaggedTensor):
+    return tensor.nrows(out_type=out_type)
+  else:
+    return array_ops.shape(tensor, out_type=out_type)[0]
+
+
+@dispatch.dispatch_for_api(string_ops.string_join)
+def string_join(inputs: typing.List[ragged_tensor.RaggedOrDense],
+                separator="",
+                name=None):
+  """RaggedTensor implementation for tf.strings.join."""
+  if len(inputs) < 1:
+    raise ValueError("tf.strings.join: expected at least one input.")
+  with ops.name_scope(name, "RaggedStringJoin", inputs):
+    return ragged_functional_ops.map_flat_values(string_ops.string_join,
+                                                 inputs, separator)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor.py
new file mode 100644
index 0000000000000000000000000000000000000000..a92d425a4c748eab10ff468a432272912a0689b7
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor.py
@@ -0,0 +1,3149 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== +"""Classes for storing ragged tensors and their values.""" + +import functools +import operator + +import typing +import numpy as np + +from tensorflow.core.protobuf import struct_pb2 +from tensorflow.python import tf2 +from tensorflow.python.client import session +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import composite_tensor_gradient +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.framework import type_spec +from tensorflow.python.framework import type_spec_registry +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import gen_ragged_conversion_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_config +from tensorflow.python.ops.ragged import ragged_tensor_value +from tensorflow.python.ops.ragged import ragged_util +from tensorflow.python.ops.ragged.row_partition import RowPartition +from tensorflow.python.saved_model import nested_structure_coder +from tensorflow.python.types import core as core_types +from tensorflow.python.types import internal as internal_types +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export +from tensorflow.tools.docs import doc_controls + +# pylint: disable=protected-access +_convert_row_partition = RowPartition._convert_row_partition +# pylint: enable=protected-access + +# =============================================================================== +# RaggedTensor +# =============================================================================== + + +@tf_export("RaggedTensor") +class RaggedTensor( + composite_tensor.CompositeTensor, + internal_types.NativeObject, + internal_types.RaggedTensor, +): + """Represents a ragged tensor. + + A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are + dimensions whose slices may have different lengths. For example, the inner + (column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, + since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths. + Dimensions whose slices all have the same length are called *uniform + dimensions*. The outermost dimension of a `RaggedTensor` is always uniform, + since it consists of a single slice (and so there is no possibility for + differing slice lengths). + + The total number of dimensions in a `RaggedTensor` is called its *rank*, + and the number of ragged dimensions in a `RaggedTensor` is called its + *ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation + time: it can't depend on the runtime values of `Tensor`s, and can't vary + dynamically for different session runs. + + Note that the `__init__` constructor is private. 
Please use one of the + following methods to construct a `RaggedTensor`: + + * `tf.RaggedTensor.from_row_lengths` + * `tf.RaggedTensor.from_value_rowids` + * `tf.RaggedTensor.from_row_splits` + * `tf.RaggedTensor.from_row_starts` + * `tf.RaggedTensor.from_row_limits` + * `tf.RaggedTensor.from_nested_row_splits` + * `tf.RaggedTensor.from_nested_row_lengths` + * `tf.RaggedTensor.from_nested_value_rowids` + + ### Potentially Ragged Tensors + + Many ops support both `Tensor`s and `RaggedTensor`s + (see [tf.ragged](https://www.tensorflow.org/api_docs/python/tf/ragged) for a + full listing). The term "potentially ragged tensor" may be used to refer to a + tensor that might be either a `Tensor` or a `RaggedTensor`. The ragged-rank + of a `Tensor` is zero. + + ### Documenting RaggedTensor Shapes + + When documenting the shape of a RaggedTensor, ragged dimensions can be + indicated by enclosing them in parentheses. For example, the shape of + a 3-D `RaggedTensor` that stores the fixed-size word embedding for each + word in a sentence, for each sentence in a batch, could be written as + `[num_sentences, (num_words), embedding_size]`. The parentheses around + `(num_words)` indicate that dimension is ragged, and that the length + of each element list in that dimension may vary for each item. + + ### Component Tensors + + Internally, a `RaggedTensor` consists of a concatenated list of values that + are partitioned into variable-length rows. In particular, each `RaggedTensor` + consists of: + + * A `values` tensor, which concatenates the variable-length rows into a + flattened list. For example, the `values` tensor for + `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`. + + * A `row_splits` vector, which indicates how those flattened values are + divided into rows. In particular, the values for row `rt[i]` are stored + in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`. + + Example: + + >>> print(tf.RaggedTensor.from_row_splits( + ... values=[3, 1, 4, 1, 5, 9, 2, 6], + ... row_splits=[0, 4, 4, 7, 8, 8])) + + + ### Alternative Row-Partitioning Schemes + + In addition to `row_splits`, ragged tensors provide support for five other + row-partitioning schemes: + + * `row_lengths`: a vector with shape `[nrows]`, which specifies the length + of each row. + + * `value_rowids` and `nrows`: `value_rowids` is a vector with shape + `[nvals]`, corresponding one-to-one with `values`, which specifies + each value's row index. In particular, the row `rt[row]` consists of the + values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an + integer scalar that specifies the number of rows in the + `RaggedTensor`. (`nrows` is used to indicate trailing empty rows.) + + * `row_starts`: a vector with shape `[nrows]`, which specifies the start + offset of each row. Equivalent to `row_splits[:-1]`. + + * `row_limits`: a vector with shape `[nrows]`, which specifies the stop + offset of each row. Equivalent to `row_splits[1:]`. + + * `uniform_row_length`: A scalar tensor, specifying the length of every + row. This row-partitioning scheme may only be used if all rows have + the same length. + + Example: The following ragged tensors are equivalent, and all represent the + nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`. + + >>> values = [3, 1, 4, 1, 5, 9, 2, 6] + >>> RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8]) + + >>> RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0]) + + >>> RaggedTensor.from_value_rowids( + ... 
values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5) + + >>> RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8]) + + >>> RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8]) + + >>> RaggedTensor.from_uniform_row_length(values, uniform_row_length=2) + + + ### Multiple Ragged Dimensions + + `RaggedTensor`s with multiple ragged dimensions can be defined by using + a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor` + adds a single ragged dimension. + + >>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above + ... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) + >>> outer_rt = RaggedTensor.from_row_splits( + ... values=inner_rt, row_splits=[0, 3, 3, 5]) + >>> print(outer_rt.to_list()) + [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]] + >>> print(outer_rt.ragged_rank) + 2 + + The factory function `RaggedTensor.from_nested_row_splits` may be used to + construct a `RaggedTensor` with multiple ragged dimensions directly, by + providing a list of `row_splits` tensors: + + >>> RaggedTensor.from_nested_row_splits( + ... flat_values=[3, 1, 4, 1, 5, 9, 2, 6], + ... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list() + [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]] + + ### Uniform Inner Dimensions + + `RaggedTensor`s with uniform inner dimensions can be defined + by using a multidimensional `Tensor` for `values`. + + >>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3], tf.int32), + ... row_splits=[0, 2, 5]) + >>> print(rt.to_list()) + [[[1, 1, 1], [1, 1, 1]], + [[1, 1, 1], [1, 1, 1], [1, 1, 1]]] + >>> print(rt.shape) + (2, None, 3) + + ### Uniform Outer Dimensions + + `RaggedTensor`s with uniform outer dimensions can be defined by using + one or more `RaggedTensor` with a `uniform_row_length` row-partitioning + tensor. For example, a `RaggedTensor` with shape `[2, 2, None]` can be + constructed with this method from a `RaggedTensor` values with shape + `[4, None]`: + + >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]]) + >>> print(values.shape) + (4, None) + >>> rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2) + >>> print(rt6) + + >>> print(rt6.shape) + (2, 2, None) + + Note that `rt6` only contains one ragged dimension (the innermost + dimension). In contrast, if `from_row_splits` is used to construct a similar + `RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions: + + >>> rt7 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4]) + >>> print(rt7.shape) + (2, None, None) + + Uniform and ragged outer dimensions may be interleaved, meaning that a + tensor with any combination of ragged and uniform dimensions may be created. + For example, a RaggedTensor `t4` with shape `[3, None, 4, 8, None, 2]` could + be constructed as follows: + + ```python + t0 = tf.zeros([1000, 2]) # Shape: [1000, 2] + t1 = RaggedTensor.from_row_lengths(t0, [...]) # [160, None, 2] + t2 = RaggedTensor.from_uniform_row_length(t1, 8) # [20, 8, None, 2] + t3 = RaggedTensor.from_uniform_row_length(t2, 4) # [5, 4, 8, None, 2] + t4 = RaggedTensor.from_row_lengths(t3, [...]) # [3, None, 4, 8, None, 2] + ``` + + """ + + #============================================================================= + # Constructor (private) + #============================================================================= + @doc_controls.do_not_generate_docs + def __init__(self, values, row_partition, internal=False): + """Creates a `RaggedTensor` with a specified partitioning for `values`. 
+ + This constructor is private -- please use one of the following ops to + build `RaggedTensor`s: + + * `tf.RaggedTensor.from_row_lengths` + * `tf.RaggedTensor.from_value_rowids` + * `tf.RaggedTensor.from_row_splits` + * `tf.RaggedTensor.from_row_starts` + * `tf.RaggedTensor.from_row_limits` + * `tf.RaggedTensor.from_nested_row_splits` + * `tf.RaggedTensor.from_nested_row_lengths` + * `tf.RaggedTensor.from_nested_value_rowids` + + Args: + values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`. + row_partition: A `RowPartition` object, representing the arrangement of + the lists at the top level. + internal: True if the constructor is being called by one of the factory + methods. If false, an exception will be raised. + + Raises: + ValueError: If internal = False. Note that this method is intended only + for internal use. + TypeError: If values is not a `RaggedTensor` or `Tensor`, or + row_partition is not a `RowPartition`. + """ + + if not internal: + raise ValueError("RaggedTensor constructor is private; please use one " + "of the factory methods instead (e.g., " + "RaggedTensor.from_row_lengths())") + _assert_is_supported_ragged_values_type(values) + if not isinstance(row_partition, RowPartition): + raise TypeError(f"Argument `row_partition` must be a RowPartition. " + f"Received {row_partition}.") + + # Validate shapes. + values.shape.with_rank_at_least(1) + if isinstance(values, RaggedTensor): + # pylint: disable=protected-access + assert row_partition.dtype == values._row_partition.dtype + + self._values = values + self._row_partition = row_partition + + #============================================================================= + # Factory Methods + #============================================================================= + + @classmethod + def _from_row_partition(cls, values, row_partition, validate=True): + """Creates a `RaggedTensor` with a row partition. + + This is used as a way for RaggedTensors to share row partitions. + + The outer dimension of values must be equal to `partition.nvals()`. + + Args: + values: A potentially ragged tensor. + row_partition: a `RowPartition`: can be shared between tensors. + validate: If true, then use assertions to check that the arguments form a + valid `RaggedTensor`. + + Returns: + A `RaggedTensor`. `result.rank = values.rank + 1`. + `result.ragged_rank = values.ragged_rank + 1`. + + Raises: + ValueError: If partition.nvals() != _nrows(values) + """ + if not isinstance(row_partition, RowPartition): + raise TypeError(f"Argument `row_partition` must be a RowPartition. " + f"Received {row_partition}.") + if not isinstance(validate, bool): + raise TypeError(f"Argument `validate` must have type bool. 
" + f"Received {validate}.") + values, row_partition = cls._convert_values_and_partition( + values, row_partition, "partition") + if row_partition._has_precomputed_value_rowids(): # pylint: disable=protected-access + value_rowids_shape = row_partition.value_rowids().shape + values.shape[:1].assert_is_compatible_with(value_rowids_shape) + if validate: + msg = "Arguments to _from_row_partition do not form a valid RaggedTensor" + nvals = _nrows(values, row_partition.dtype) + checks = [ + check_ops.assert_equal( + math_ops.cast(row_partition.nvals(), row_partition.dtype), + nvals, + message=msg), + ] + if not isinstance(values, RaggedTensor): + checks.append(check_ops.assert_rank_at_least(values, 1)) + row_partition = row_partition._with_dependencies(checks) # pylint: disable=protected-access + return cls(values=values, internal=True, row_partition=row_partition) + + @classmethod + @dispatch.add_dispatch_support + def from_value_rowids(cls, + values, + value_rowids, + nrows=None, + name=None, + validate=True): + """Creates a `RaggedTensor` with rows partitioned by `value_rowids`. + + The returned `RaggedTensor` corresponds with the python list defined by: + + ```python + result = [[values[i] for i in range(len(values)) if value_rowids[i] == row] + for row in range(nrows)] + ``` + + Args: + values: A potentially ragged tensor with shape `[nvals, ...]`. + value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds + one-to-one with `values`, and specifies each value's row index. Must be + nonnegative, and must be sorted in ascending order. + nrows: An integer scalar specifying the number of rows. This should be + specified if the `RaggedTensor` may containing empty training rows. Must + be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty). + Defaults to `value_rowids[-1] + 1` (or zero if `value_rowids` is empty). + name: A name prefix for the RaggedTensor (optional). + validate: If true, then use assertions to check that the arguments form + a valid `RaggedTensor`. Note: these assertions incur a runtime cost, + since they must be checked for each tensor value. + + Returns: + A `RaggedTensor`. `result.rank = values.rank + 1`. + `result.ragged_rank = values.ragged_rank + 1`. + + Raises: + ValueError: If `nrows` is incompatible with `value_rowids`. + + #### Example: + + >>> print(tf.RaggedTensor.from_value_rowids( + ... values=[3, 1, 4, 1, 5, 9, 2, 6], + ... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], + ... nrows=5)) + + + """ + if not isinstance(validate, bool): + raise TypeError(f"Argument `validate` must have type bool. " + f"Received {validate}.") + + with ops.name_scope(name, "RaggedFromValueRowIds", + [values, value_rowids, nrows]): + row_partition = RowPartition.from_value_rowids( + value_rowids=value_rowids, + nrows=nrows, + validate=validate, + dtype_hint=_get_optional_partition_dtype(values)) + return cls._from_row_partition(values, row_partition, validate=validate) + + @classmethod + @dispatch.add_dispatch_support + def from_row_splits(cls, values, row_splits, name=None, validate=True): + """Creates a `RaggedTensor` with rows partitioned by `row_splits`. + + The returned `RaggedTensor` corresponds with the python list defined by: + + ```python + result = [values[row_splits[i]:row_splits[i + 1]] + for i in range(len(row_splits) - 1)] + ``` + + Args: + values: A potentially ragged tensor with shape `[nvals, ...]`. + row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be + empty, and must be sorted in ascending order. 
`row_splits[0]` must be + zero and `row_splits[-1]` must be `nvals`. + name: A name prefix for the RaggedTensor (optional). + validate: If true, then use assertions to check that the arguments form + a valid `RaggedTensor`. Note: these assertions incur a runtime cost, + since they must be checked for each tensor value. + + Returns: + A `RaggedTensor`. `result.rank = values.rank + 1`. + `result.ragged_rank = values.ragged_rank + 1`. + + Raises: + ValueError: If `row_splits` is an empty list. + + #### Example: + + >>> print(tf.RaggedTensor.from_row_splits( + ... values=[3, 1, 4, 1, 5, 9, 2, 6], + ... row_splits=[0, 4, 4, 7, 8, 8])) + + + """ + if not isinstance(validate, bool): + raise TypeError(f"Argument `validate` must have type bool. " + f"Received {validate}.") + + with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]): + row_partition = RowPartition.from_row_splits( + row_splits=row_splits, + validate=validate, + dtype_hint=_get_optional_partition_dtype(values)) + return cls._from_row_partition(values, row_partition, validate=validate) + + @classmethod + @dispatch.add_dispatch_support + def from_row_lengths(cls, values, row_lengths, name=None, validate=True): + """Creates a `RaggedTensor` with rows partitioned by `row_lengths`. + + The returned `RaggedTensor` corresponds with the python list defined by: + + ```python + result = [[values.pop(0) for i in range(length)] + for length in row_lengths] + ``` + + Args: + values: A potentially ragged tensor with shape `[nvals, ...]`. + row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be + nonnegative. `sum(row_lengths)` must be `nvals`. + name: A name prefix for the RaggedTensor (optional). + validate: If true, then use assertions to check that the arguments form + a valid `RaggedTensor`. Note: these assertions incur a runtime cost, + since they must be checked for each tensor value. + + Returns: + A `RaggedTensor`. `result.rank = values.rank + 1`. + `result.ragged_rank = values.ragged_rank + 1`. + + #### Example: + + >>> print(tf.RaggedTensor.from_row_lengths( + ... values=[3, 1, 4, 1, 5, 9, 2, 6], + ... row_lengths=[4, 0, 3, 1, 0])) + + + """ + if not isinstance(validate, bool): + raise TypeError(f"Argument `validate` must have type bool. " + f"Received {validate}.") + + with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]): + row_partition = RowPartition.from_row_lengths( + row_lengths=row_lengths, + validate=validate, + dtype_hint=_get_optional_partition_dtype(values)) + return cls._from_row_partition(values, row_partition, validate=validate) + + @classmethod + @dispatch.add_dispatch_support + def from_row_starts(cls, values, row_starts, name=None, validate=True): + """Creates a `RaggedTensor` with rows partitioned by `row_starts`. + + Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`. + + Args: + values: A potentially ragged tensor with shape `[nvals, ...]`. + row_starts: A 1-D integer tensor with shape `[nrows]`. Must be + nonnegative and sorted in ascending order. If `nrows>0`, then + `row_starts[0]` must be zero. + name: A name prefix for the RaggedTensor (optional). + validate: If true, then use assertions to check that the arguments form + a valid `RaggedTensor`. Note: these assertions incur a runtime cost, + since they must be checked for each tensor value. + + Returns: + A `RaggedTensor`. `result.rank = values.rank + 1`. + `result.ragged_rank = values.ragged_rank + 1`. + + #### Example: + + >>> print(tf.RaggedTensor.from_row_starts( + ... 
values=[3, 1, 4, 1, 5, 9, 2, 6], + ... row_starts=[0, 4, 4, 7, 8])) + + + """ + if not isinstance(validate, bool): + raise TypeError(f"Argument `validate` must have type bool. " + f"Received {validate}.") + with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]): + values = _convert_to_ragged_tensor_values(values) + row_partition = RowPartition.from_row_starts( + row_starts=row_starts, + nvals=_nrows(values), + validate=validate, + dtype_hint=_get_optional_partition_dtype(values)) + return cls._from_row_partition(values, row_partition, validate=validate) + + @classmethod + @dispatch.add_dispatch_support + def from_row_limits(cls, values, row_limits, name=None, validate=True): + """Creates a `RaggedTensor` with rows partitioned by `row_limits`. + + Equivalent to: `from_row_splits(values, concat([0, row_limits]))`. + + Args: + values: A potentially ragged tensor with shape `[nvals, ...]`. + row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in + ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`. + name: A name prefix for the RaggedTensor (optional). + validate: If true, then use assertions to check that the arguments form + a valid `RaggedTensor`. Note: these assertions incur a runtime cost, + since they must be checked for each tensor value. + + Returns: + A `RaggedTensor`. `result.rank = values.rank + 1`. + `result.ragged_rank = values.ragged_rank + 1`. + + #### Example: + + >>> print(tf.RaggedTensor.from_row_limits( + ... values=[3, 1, 4, 1, 5, 9, 2, 6], + ... row_limits=[4, 4, 7, 8, 8])) + + + """ + if not isinstance(validate, bool): + raise TypeError(f"Argument `validate` must have type bool. " + f"Received {validate}.") + with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]): + values = _convert_to_ragged_tensor_values(values) + row_partition = RowPartition.from_row_limits( + row_limits=row_limits, + validate=validate, + dtype_hint=_get_optional_partition_dtype(values)) + return cls._from_row_partition(values, row_partition, validate=validate) + + @classmethod + @dispatch.add_dispatch_support + def from_uniform_row_length(cls, + values, + uniform_row_length, + nrows=None, + validate=True, + name=None): + """Creates a `RaggedTensor` with rows partitioned by `uniform_row_length`. + + This method can be used to create `RaggedTensor`s with multiple uniform + outer dimensions. For example, a `RaggedTensor` with shape `[2, 2, None]` + can be constructed with this method from a `RaggedTensor` values with shape + `[4, None]`: + + >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]]) + >>> print(values.shape) + (4, None) + >>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2) + >>> print(rt1) + + >>> print(rt1.shape) + (2, 2, None) + + Note that `rt1` only contains one ragged dimension (the innermost + dimension). In contrast, if `from_row_splits` is used to construct a similar + `RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions: + + >>> rt2 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4]) + >>> print(rt2.shape) + (2, None, None) + + Args: + values: A potentially ragged tensor with shape `[nvals, ...]`. + uniform_row_length: A scalar integer tensor. Must be nonnegative. The + size of the outer axis of `values` must be evenly divisible by + `uniform_row_length`. + nrows: The number of rows in the constructed RaggedTensor. If not + specified, then it defaults to `nvals/uniform_row_length` (or `0` if + `uniform_row_length==0`). 
`nrows` only needs to be specified if + `uniform_row_length` might be zero. `uniform_row_length*nrows` must be + `nvals`. + validate: If true, then use assertions to check that the arguments form + a valid `RaggedTensor`. Note: these assertions incur a runtime cost, + since they must be checked for each tensor value. + name: A name prefix for the RaggedTensor (optional). + + Returns: + A `RaggedTensor` that corresponds with the python list defined by: + + ```python + result = [[values.pop(0) for i in range(uniform_row_length)] + for _ in range(nrows)] + ``` + + `result.rank = values.rank + 1`. + `result.ragged_rank = values.ragged_rank + 1`. + """ + if not isinstance(validate, bool): + raise TypeError(f"Argument `validate` must have type bool. " + f"Received {validate}.") + with ops.name_scope(name, "RaggedFromUniformRowLength", + [values, uniform_row_length, nrows]): + values = _convert_to_ragged_tensor_values(values) + uniform_row_length = _convert_row_partition( + uniform_row_length, "UniformRowLength", + _get_optional_partition_dtype(values)) + nvals = _nvals_uniform_row_length(values, uniform_row_length) + row_partition = RowPartition.from_uniform_row_length( + uniform_row_length=uniform_row_length, + nvals=nvals, + nrows=nrows, + validate=validate, + dtype_hint=_get_optional_partition_dtype(values)) + return cls._from_row_partition(values, row_partition, validate=validate) + + @classmethod + @dispatch.add_dispatch_support + def from_nested_value_rowids(cls, + flat_values, + nested_value_rowids, + nested_nrows=None, + name=None, + validate=True): + """Creates a `RaggedTensor` from a nested list of `value_rowids` tensors. + + Equivalent to: + + ```python + result = flat_values + for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)): + result = from_value_rowids(result, rowids, nrows) + ``` + + Args: + flat_values: A potentially ragged tensor. + nested_value_rowids: A list of 1-D integer tensors. The `i`th tensor is + used as the `value_rowids` for the `i`th ragged dimension. + nested_nrows: A list of integer scalars. The `i`th scalar is used as the + `nrows` for the `i`th ragged dimension. + name: A name prefix for the RaggedTensor (optional). + validate: If true, then use assertions to check that the arguments form + a valid `RaggedTensor`. Note: these assertions incur a runtime cost, + since they must be checked for each tensor value. + + Returns: + A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty). + + Raises: + ValueError: If `len(nested_values_rowids) != len(nested_nrows)`. + """ + if not isinstance(validate, bool): + raise TypeError(f"Argument `validate` must have type bool. " + f"Received {validate}.") + if isinstance(nested_value_rowids, tensor_lib.Tensor): + raise TypeError(f"Argument `nested_value_rowids` must be a list of " + f"Tensors. Received {nested_value_rowids}.") + if nested_nrows is None: + nested_nrows = [None] * len(nested_value_rowids) + else: + if isinstance(nested_nrows, tensor_lib.Tensor): + raise TypeError(f"Argument `nested_nrows` must be a list of " + f"Tensors. Received {nested_nrows}.") + if len(nested_nrows) != len(nested_value_rowids): + raise ValueError( + f"Argument `nested_nrows` must have the same length as " + f"argument `nested_value_rowids`. len(nested_nrows) = " + f"{len(nested_nrows)} vs. 
len(nested_values_rowids) = " + f"{len(nested_value_rowids)}.") + + with ops.name_scope(name, "RaggedFromNestedValueRowIds", [flat_values] + + list(nested_value_rowids) + list(nested_nrows)): + result = flat_values + for value_rowids, nrows in reversed( + list(zip(nested_value_rowids, nested_nrows))): + result = cls.from_value_rowids( + result, value_rowids, nrows, validate=validate) + return result + + @classmethod + @dispatch.add_dispatch_support + def from_nested_row_splits(cls, + flat_values, + nested_row_splits, + name=None, + validate=True): + """Creates a `RaggedTensor` from a nested list of `row_splits` tensors. + + Equivalent to: + + ```python + result = flat_values + for row_splits in reversed(nested_row_splits): + result = from_row_splits(result, row_splits) + ``` + + Args: + flat_values: A potentially ragged tensor. + nested_row_splits: A list of 1-D integer tensors. The `i`th tensor is + used as the `row_splits` for the `i`th ragged dimension. + name: A name prefix for the RaggedTensor (optional). + validate: If true, then use assertions to check that the arguments form + a valid `RaggedTensor`. Note: these assertions incur a runtime cost, + since they must be checked for each tensor value. + + Returns: + A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty). + """ + if not isinstance(validate, bool): + raise TypeError(f"Argument `validate` must have type bool. " + f"Received {validate}.") + if isinstance(nested_row_splits, tensor_lib.Tensor): + raise TypeError(f"Argument `nested_row_splits` must be a list of " + f"Tensors. Received {nested_row_splits}.") + with ops.name_scope(name, "RaggedFromNestedRowSplits", + [flat_values] + list(nested_row_splits)): + result = flat_values + for splits in reversed(nested_row_splits): + result = cls.from_row_splits(result, splits, validate=validate) + return result + + @classmethod + @dispatch.add_dispatch_support + def from_nested_row_lengths(cls, + flat_values, + nested_row_lengths, + name=None, + validate=True): + """Creates a `RaggedTensor` from a nested list of `row_lengths` tensors. + + Equivalent to: + + ```python + result = flat_values + for row_lengths in reversed(nested_row_lengths): + result = from_row_lengths(result, row_lengths) + ``` + + Args: + flat_values: A potentially ragged tensor. + nested_row_lengths: A list of 1-D integer tensors. The `i`th tensor is + used as the `row_lengths` for the `i`th ragged dimension. + name: A name prefix for the RaggedTensor (optional). + validate: If true, then use assertions to check that the arguments form + a valid `RaggedTensor`. Note: these assertions incur a runtime cost, + since they must be checked for each tensor value. + + Returns: + A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty). + """ + if not isinstance(validate, bool): + raise TypeError(f"Argument `validate` must have type bool. " + f"Received {validate}.") + if isinstance(nested_row_lengths, tensor_lib.Tensor): + raise TypeError(f"Argument `nested_row_lengths` must be a list of " + f"Tensors. Received {nested_row_lengths}.") + with ops.name_scope(name, "RaggedFromNestedRowlengths", + [flat_values] + list(nested_row_lengths)): + result = flat_values + for lengths in reversed(nested_row_lengths): + result = cls.from_row_lengths(result, lengths, validate=validate) + return result + + @classmethod + def _from_nested_row_partitions(cls, + flat_values, + nested_row_partitions, + name=None, + validate=True): + """Creates a `RaggedTensor` from a nested list of row partitions. 
+
+    Equivalent to:
+
+    ```python
+    result = flat_values
+    for row_partition in reversed(nested_row_partitions):
+      result = _from_row_partition(result, row_partition)
+    ```
+
+    Args:
+      flat_values: A potentially ragged tensor.
+      nested_row_partitions: A list of row partitions.  The `i`th element is
+        used as the row partition for the `i`th ragged dimension.
+      name: A name prefix for the RaggedTensor (optional).
+      validate: If true, then use assertions to check that the arguments form
+        a valid `RaggedTensor`.  Note: these assertions incur a runtime cost,
+        since they must be checked for each tensor value.
+
+    Returns:
+      A `RaggedTensor` (or `flat_values` if `nested_row_partitions` is empty).
+    """
+    if not isinstance(validate, bool):
+      raise TypeError(f"Argument `validate` must have type bool. "
+                      f"Received {validate}.")
+    if isinstance(nested_row_partitions, RowPartition):
+      raise TypeError(f"Argument `nested_row_partitions` must be a list of "
+                      f"RowPartitions. Received {nested_row_partitions}.")
+    if isinstance(nested_row_partitions, tensor_lib.Tensor):
+      raise TypeError(f"Argument `nested_row_partitions` must be a list of "
+                      f"RowPartitions. Received {nested_row_partitions}.")
+    with ops.name_scope(name, "RaggedFromNestedRowPartitions",
+                        [flat_values] + list(nested_row_partitions)):
+      result = flat_values
+      for partition in reversed(nested_row_partitions):
+        result = cls._from_row_partition(result, partition, validate=validate)
+      return result
+
+  @classmethod
+  def _convert_values_and_partition(cls, values, row_partition, name):
+    """Converts `values` and `partition` to Tensors.
+
+    If `values` is a `RaggedTensor`, then converts `values` and `partition`
+    to have compatible row-partitioning dtypes.  In particular, if any of the
+    row partitioning tensors are `int64`, then all of the other row
+    partitioning tensors will be cast to `int64` (if
+    auto_cast_partition_dtype() is true) or an error will be raised (if
+    auto_cast_partition_dtype() is false).
+
+    Args:
+      values: The `values` for the `RaggedTensor` being constructed.
+      row_partition: A RowPartition object for the `RaggedTensor` being
+        constructed.
+      name: The name of the RowPartition object.
+
+    Returns:
+      A tuple (values, partition).
+    """
+    if not isinstance(row_partition, RowPartition):
+      raise TypeError(f"Argument `row_partition` must be a RowPartition. "
+                      f"Received {row_partition}.")
+    if isinstance(values, RaggedTensor):
+      # pylint: disable=protected-access
+      if values._row_partition.dtype != row_partition.dtype:
+        if not ragged_config.auto_cast_partition_dtype():
+          # pylint: disable=protected-access
+          # TODO(edloper): get rid of the `name` parameter.
+          raise ValueError(
+              f"Argument `row_partition` of RaggedTensor with name: {name} "
+              f"must have same dtype as Argument `values`. "
+              f"({row_partition.dtype} vs. {values._row_partition.dtype}).")
+        values = values.with_row_splits_dtype(row_partition.dtype)
+    else:
+      values = _convert_to_ragged_tensor_values(values)
+
+    return (values, row_partition)
+
+  #=============================================================================
+  # Accessors
+  #=============================================================================
+
+  @property
+  def dtype(self):
+    """The `DType` of values in this tensor."""
+    return self._values.dtype
+
+  @property
+  def shape(self):
+    """The statically known shape of this ragged tensor.
+
+    Returns:
+      A `TensorShape` containing the statically known shape of this ragged
+      tensor.  Ragged dimensions have a size of `None`.
+ + Examples: + + >>> tf.ragged.constant([[0], [1, 2]]).shape + TensorShape([2, None]) + + >>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape + TensorShape([2, None, 2]) + + """ + nrows = self._row_partition.static_nrows + ncols = self._row_partition.static_uniform_row_length + value_shape = self._values.shape[1:] + return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape) + + def get_shape(self) -> tensor_shape.TensorShape: + """The statically known shape of this ragged tensor. + + Returns: + A `TensorShape` containing the statically known shape of this ragged + tensor. Ragged dimensions have a size of `None`. + + Alias for `shape` property. + + Examples: + + >>> tf.ragged.constant([[0], [1, 2]]).get_shape() + TensorShape([2, None]) + + >>> tf.ragged.constant( + ... [[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).get_shape() + TensorShape([2, None, 2]) + + """ + return self.shape + + @property + def ragged_rank(self): + """The number of times the RaggedTensor's flat_values is partitioned. + + Examples: + + >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]]) + >>> values.ragged_rank + 1 + + >>> rt = tf.RaggedTensor.from_uniform_row_length(values, 2) + >>> rt.ragged_rank + 2 + + Returns: + A Python `int` indicating the number of times the underlying `flat_values` + Tensor has been partitioned to add a new dimension. + I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`. + """ + values_is_ragged = isinstance(self._values, RaggedTensor) + return self._values.ragged_rank + 1 if values_is_ragged else 1 + + @property + def values(self): + """The concatenated rows for this ragged tensor. + + `rt.values` is a potentially ragged tensor formed by flattening the two + outermost dimensions of `rt` into a single dimension. + + `rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the + number of items in the outer two dimensions of `rt`). + + `rt.ragged_rank = self.ragged_rank - 1` + + Returns: + A potentially ragged tensor. + + #### Example: + + >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) + >>> print(rt.values) + tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32) + + """ + return self._values + + @property + def _nested_row_partitions(self): + """Returns the row partitions for this `RaggedTensor`.""" + partitions = [self._row_partition] + rt_values = self.values + while isinstance(rt_values, RaggedTensor): + # pylint: disable=protected-access + partitions.append(rt_values._row_partition) + rt_values = rt_values.values + return tuple(partitions) + + @property + def row_splits(self): + """The row-split indices for this ragged tensor's `values`. + + `rt.row_splits` specifies where the values for each row begin and end in + `rt.values`. In particular, the values for row `rt[i]` are stored in + the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`. + + Returns: + A 1-D integer `Tensor` with shape `[self.nrows+1]`. + The returned tensor is non-empty, and is sorted in ascending order. + `self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to + `self.values.shape[0]`. + + #### Example: + + >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) + >>> print(rt.row_splits) # indices of row splits in rt.values + tf.Tensor([0 4 4 7 8 8], shape=(6,), dtype=int64) + + """ + return self._row_partition.row_splits() + + @property + def uniform_row_length(self): + """The length of each row in this ragged tensor, or None if rows are ragged. 
+
+    >>> rt1 = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
+    >>> print(rt1.uniform_row_length)  # rows are ragged.
+    None
+
+    >>> rt2 = tf.RaggedTensor.from_uniform_row_length(
+    ...     values=rt1, uniform_row_length=2)
+    >>> print(rt2)
+    <tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
+    >>> print(rt2.uniform_row_length)  # rows are not ragged (all have size 2).
+    tf.Tensor(2, shape=(), dtype=int64)
+
+    A RaggedTensor's rows are only considered to be uniform (i.e. non-ragged)
+    if it can be determined statically (at graph construction time) that the
+    rows all have the same length.
+
+    Returns:
+      A scalar integer `Tensor`, specifying the length of every row in this
+      ragged tensor (for ragged tensors whose rows are uniform); or `None`
+      (for ragged tensors whose rows are ragged).
+    """
+    return self._row_partition.uniform_row_length()
+
+  @property
+  def flat_values(self):
+    """The innermost `values` tensor for this ragged tensor.
+
+    Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is
+    `rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.
+
+    Conceptually, `flat_values` is the tensor formed by flattening the
+    outermost dimension and all of the ragged dimensions into a single
+    dimension.
+
+    `rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`
+    (where `nvals` is the number of items in the flattened dimensions).
+
+    Returns:
+      A `Tensor`.
+
+    #### Example:
+
+    >>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
+    >>> print(rt.flat_values)
+    tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
+
+    """
+    rt_values = self.values
+    while isinstance(rt_values, RaggedTensor):
+      rt_values = rt_values.values
+    return rt_values
+
+  @property
+  def nested_row_splits(self):
+    """A tuple containing the row_splits for all ragged dimensions.
+
+    `rt.nested_row_splits` is a tuple containing the `row_splits` tensors for
+    all ragged dimensions in `rt`, ordered from outermost to innermost.  In
+    particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits`
+    where:
+
+    * `value_splits = ()` if `rt.values` is a `Tensor`.
+    * `value_splits = rt.values.nested_row_splits` otherwise.
+
+    Returns:
+      A `tuple` of 1-D integer `Tensor`s.
+
+    #### Example:
+
+    >>> rt = tf.ragged.constant(
+    ...     [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
+    >>> for i, splits in enumerate(rt.nested_row_splits):
+    ...   print('Splits for dimension %d: %s' % (i+1, splits.numpy()))
+    Splits for dimension 1: [0 3]
+    Splits for dimension 2: [0 3 3 5]
+    Splits for dimension 3: [0 4 4 7 8 8]
+
+    """
+    rt_nested_splits = [self.row_splits]
+    rt_values = self.values
+    while isinstance(rt_values, RaggedTensor):
+      rt_nested_splits.append(rt_values.row_splits)
+      rt_values = rt_values.values
+    return tuple(rt_nested_splits)
+
+  def value_rowids(self, name=None):
+    """Returns the row indices for the `values` in this ragged tensor.
+
+    `rt.value_rowids()` corresponds one-to-one with the outermost dimension of
+    `rt.values`, and specifies the row containing each value.  In particular,
+    the row `rt[row]` consists of the values `rt.values[j]` where
+    `rt.value_rowids()[j] == row`.
+
+    Args:
+      name: A name prefix for the returned tensor (optional).
+
+    Returns:
+      A 1-D integer `Tensor` with shape `self.values.shape[:1]`.
+      The returned tensor is nonnegative, and is sorted in ascending order.
+
+    #### Example:
+
+    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
+    >>> print(rt.values)
+    tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
+    >>> print(rt.value_rowids())  # corresponds 1:1 with rt.values
+    tf.Tensor([0 0 0 0 2 2 2 3], shape=(8,), dtype=int64)
+
+    """
+    with ops.name_scope(name, "RaggedValueRowIds", [self]):
+      return self._row_partition.value_rowids()
+
+  def nested_value_rowids(self, name=None):
+    """Returns a tuple containing the value_rowids for all ragged dimensions.
+
+    `rt.nested_value_rowids` is a tuple containing the `value_rowids` tensors
+    for all ragged dimensions in `rt`, ordered from outermost to innermost.
+    In particular, `rt.nested_value_rowids = (rt.value_rowids(),) + value_ids`
+    where:
+
+    * `value_ids = ()` if `rt.values` is a `Tensor`.
+    * `value_ids = rt.values.nested_value_rowids` otherwise.
+
+    Args:
+      name: A name prefix for the returned tensors (optional).
+
+    Returns:
+      A `tuple` of 1-D integer `Tensor`s.
+
+    #### Example:
+
+    >>> rt = tf.ragged.constant(
+    ...     [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
+    >>> for i, ids in enumerate(rt.nested_value_rowids()):
+    ...   print('row ids for dimension %d: %s' % (i+1, ids.numpy()))
+    row ids for dimension 1: [0 0 0]
+    row ids for dimension 2: [0 0 0 2 2]
+    row ids for dimension 3: [0 0 0 0 2 2 2 3]
+
+    """
+    with ops.name_scope(name, "RaggedNestedValueRowIds", [self]):
+      rt_nested_ids = [self.value_rowids()]
+      rt_values = self.values
+      while isinstance(rt_values, RaggedTensor):
+        rt_nested_ids.append(rt_values.value_rowids())
+        rt_values = rt_values.values
+      return tuple(rt_nested_ids)
+
+  def nrows(self, out_type=None, name=None):
+    """Returns the number of rows in this ragged tensor.
+
+    I.e., the size of the outermost dimension of the tensor.
+
+    Args:
+      out_type: `dtype` for the returned tensor.  Defaults to
+        `self.row_splits.dtype`.
+      name: A name prefix for the returned tensor (optional).
+
+    Returns:
+      A scalar `Tensor` with dtype `out_type`.
+
+    #### Example:
+
+    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
+    >>> print(rt.nrows())  # rt has 5 rows.
+    tf.Tensor(5, shape=(), dtype=int64)
+
+    """
+    with ops.name_scope(name, "RaggedNRows", [self]):
+      if out_type is None:
+        return self._row_partition.nrows()
+      else:
+        return math_ops.cast(self._row_partition.nrows(), dtype=out_type)
+
+  def row_starts(self, name=None):
+    """Returns the start indices for rows in this ragged tensor.
+
+    These indices specify where the values for each row begin in
+    `self.values`.  `rt.row_starts()` is equal to `rt.row_splits[:-1]`.
+
+    Args:
+      name: A name prefix for the returned tensor (optional).
+
+    Returns:
+      A 1-D integer Tensor with shape `[nrows]`.
+      The returned tensor is nonnegative, and is sorted in ascending order.
+
+    #### Example:
+
+    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
+    >>> print(rt.values)
+    tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
+    >>> print(rt.row_starts())  # indices of row starts in rt.values
+    tf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64)
+
+    """
+    with ops.name_scope(name, "RaggedRowStarts", [self]):
+      return self._row_partition.row_starts()
+
+  def row_limits(self, name=None):
+    """Returns the limit indices for rows in this ragged tensor.
+
+    These indices specify where the values for each row end in
+    `self.values`.  `rt.row_limits()` is equal to `rt.row_splits[1:]`.
+
+    Args:
+      name: A name prefix for the returned tensor (optional).
+
+    Returns:
+      A 1-D integer Tensor with shape `[nrows]`.
+      The returned tensor is nonnegative, and is sorted in ascending order.
+
+    #### Example:
+
+    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
+    >>> print(rt.values)
+    tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
+    >>> print(rt.row_limits())  # indices of row limits in rt.values
+    tf.Tensor([4 4 7 8 8], shape=(5,), dtype=int64)
+
+    """
+    with ops.name_scope(name, "RaggedRowLimits", [self]):
+      return self._row_partition.row_limits()
+
+  def row_lengths(self, axis=1, name=None):
+    """Returns the lengths of the rows in this ragged tensor.
+
+    `rt.row_lengths()[i]` indicates the number of values in the
+    `i`th row of `rt`.
+
+    Args:
+      axis: An integer constant indicating the axis whose row lengths should
+        be returned.
+      name: A name prefix for the returned tensor (optional).
+
+    Returns:
+      A potentially ragged integer Tensor with shape `self.shape[:axis]`.
+
+    Raises:
+      ValueError: If `axis` is out of bounds.
+
+    #### Example:
+
+    >>> rt = tf.ragged.constant(
+    ...     [[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
+    >>> print(rt.row_lengths())  # lengths of rows in rt
+    tf.Tensor([2 0 2 1 0], shape=(5,), dtype=int64)
+    >>> print(rt.row_lengths(axis=2))  # lengths of axis=2 rows.
+    <tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]>
+
+    """
+    if axis == 0:
+      return self._row_partition.nrows()
+
+    if axis == 1:
+      return self._row_partition.row_lengths()
+
+    with ops.name_scope(name, "RaggedRowLengths", [self]):
+      axis = array_ops.get_positive_axis(
+          axis, self.shape.rank, ndims_name="rank(self)")
+      if axis == 0:
+        return self.nrows()
+      elif axis == 1:
+        splits = self.row_splits
+        return splits[1:] - splits[:-1]
+      elif isinstance(self.values, RaggedTensor):
+        return self.with_values(self.values.row_lengths(axis - 1))
+      else:
+        shape = array_ops.shape(self.values, out_type=self._row_partition.dtype)
+        return self.with_values(
+            array_ops.ones(shape[:axis - 1], self._row_partition.dtype) *
+            shape[axis - 1])
+
+  def nested_row_lengths(self, name=None):
+    """Returns a tuple containing the row_lengths for all ragged dimensions.
+
+    `rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors
+    for all ragged dimensions in `rt`, ordered from outermost to innermost.
+
+    Args:
+      name: A name prefix for the returned tensors (optional).
+
+    Returns:
+      A `tuple` of 1-D integer `Tensors`.  The length of the tuple is equal to
+      `self.ragged_rank`.
+    """
+    with ops.name_scope(name, "RaggedNestedRowLengths", [self]):
+      rt_nested_row_lengths = []
+      rt = self
+      while isinstance(rt, RaggedTensor):
+        rt_nested_row_lengths.append(rt.row_lengths())
+        rt = rt.values
+      return tuple(rt_nested_row_lengths)
+
+  def bounding_shape(self, axis=None, name=None, out_type=None):
+    """Returns the tight bounding box shape for this `RaggedTensor`.
+
+    Args:
+      axis: An integer scalar or vector indicating which axes to return the
+        bounding box for.  If not specified, then the full bounding box is
+        returned.
+      name: A name prefix for the returned tensor (optional).
+      out_type: `dtype` for the returned tensor.  Defaults to
+        `self.row_splits.dtype`.
+
+    Returns:
+      An integer `Tensor` (`dtype=self.row_splits.dtype`).  If `axis` is not
+      specified, then `output` is a vector with
+      `output.shape=[self.shape.ndims]`.  If `axis` is a scalar, then the
+      `output` is a scalar.  If `axis` is a vector, then `output` is a vector,
+      where `output[i]` is the bounding size for dimension `axis[i]`.
+ + #### Example: + + >>> rt = tf.ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]]) + >>> rt.bounding_shape().numpy() + array([5, 4]) + + """ + if out_type is None: + out_type = self._row_partition.dtype + else: + out_type = dtypes.as_dtype(out_type) + with ops.name_scope(name, "RaggedBoundingBox", [self, axis]): + nested_splits = self.nested_row_splits + rt_flat_values = self.flat_values + + # Optimized special cases for when axis=0 or axis=1: + if isinstance(axis, int): + if axis == 0: + return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1 + elif axis == 1: + result = math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0) + if out_type != self._row_partition.dtype: + result = math_ops.cast(result, out_type) + return result + + splits_shape = array_ops.shape(self.row_splits, out_type=out_type) + flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type) + + ragged_dimensions = [splits_shape[0] - 1] + [ + math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0) + for splits in nested_splits + ] + inner_dimensions = flat_values_shape[1:] + + if out_type != self._row_partition.dtype: + ragged_dimensions = [ + math_ops.cast(d, out_type) for d in ragged_dimensions + ] + bbox = array_ops.concat( + [array_ops_stack.stack(ragged_dimensions), inner_dimensions], axis=0) + return bbox if axis is None else array_ops.gather(bbox, axis) + + #============================================================================= + # Transformation + #============================================================================= + + def with_values(self, new_values): + """Returns a copy of `self` with `values` replaced by `new_value`. + + Preserves cached row-partitioning tensors such as `self.cached_nrows` and + `self.cached_value_rowids` if they have values. + + Args: + new_values: Potentially ragged tensor to use as the `values` for the + returned `RaggedTensor`. Must have `rank > 0`, and must have the same + number of rows as `self.values`. + + Returns: + A `RaggedTensor`. `result.rank = 1 + new_values.rank`. + `result.ragged_rank = 1 + new_values.ragged_rank` + """ + new_values = _convert_to_ragged_tensor_values(new_values) + new_values.shape.with_rank_at_least(1) + self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1]) + if (isinstance(new_values, RaggedTensor) and + self._row_partition.dtype != new_values.row_splits.dtype): + if not ragged_config.auto_cast_partition_dtype(): + raise ValueError("self and new_values have mismatched row_splits " + "dtypes; use RaggedTensor.with_row_splits_dtype() to " + "convert them to compatible dtypes.") + new_values = new_values.with_row_splits_dtype(dtypes.int64) + return self.with_row_splits_dtype(dtypes.int64).with_values(new_values) + return RaggedTensor( + values=new_values, row_partition=self._row_partition, internal=True) + + def with_flat_values(self, new_values): + """Returns a copy of `self` with `flat_values` replaced by `new_value`. + + Preserves cached row-partitioning tensors such as `self.cached_nrows` and + `self.cached_value_rowids` if they have values. + + Args: + new_values: Potentially ragged tensor that should replace + `self.flat_values`. Must have `rank > 0`, and must have the same number + of rows as `self.flat_values`. + + Returns: + A `RaggedTensor`. + `result.rank = self.ragged_rank + new_values.rank`. + `result.ragged_rank = self.ragged_rank + new_values.ragged_rank`. 
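+
+    #### Example:
+
+    A minimal sketch (values chosen for illustration; the printed form
+    assumes eager execution):
+
+    >>> rt = tf.ragged.constant([[1, 2], [3]])
+    >>> print(rt.with_flat_values(rt.flat_values * 2))
+    <tf.RaggedTensor [[2, 4], [6]]>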
+ """ + if isinstance(self._values, RaggedTensor): + return self.with_values(self.values.with_flat_values(new_values)) + else: + new_values = _convert_to_ragged_tensor_values(new_values) + return self.with_values(new_values) + + def with_row_splits_dtype(self, dtype): + """Returns a copy of this RaggedTensor with the given `row_splits` dtype. + + For RaggedTensors with multiple ragged dimensions, the `row_splits` for all + nested `RaggedTensor` objects are cast to the given dtype. + + Args: + dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`. + + Returns: + A copy of this RaggedTensor, with the `row_splits` cast to the given + type. + """ + dtype = dtypes.as_dtype(dtype) + if dtype not in (dtypes.int32, dtypes.int64): + raise ValueError(f"Argument `row_splits` dtype must be int32 or int64. " + f"Received {dtype}.") + if self._row_partition.dtype == dtype: + return self + current_values = self._values + if isinstance(current_values, RaggedTensor): + return RaggedTensor( + values=current_values.with_row_splits_dtype(dtype), + row_partition=self._row_partition.with_dtype(dtype), + internal=True) + else: + return RaggedTensor( + values=current_values, + row_partition=self._row_partition.with_dtype(dtype), + internal=True) + + def merge_dims(self, outer_axis, inner_axis): + """Merges outer_axis...inner_axis into a single dimension. + + Returns a copy of this RaggedTensor with the specified range of dimensions + flattened into a single dimension, with elements in row-major order. + + #### Examples: + + >>> rt = tf.ragged.constant([[[1, 2], [3]], [[4, 5, 6]]]) + >>> print(rt.merge_dims(0, 1)) + + >>> print(rt.merge_dims(1, 2)) + + >>> print(rt.merge_dims(0, 2)) + tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32) + + To mimic the behavior of `np.flatten` (which flattens all dimensions), use + `rt.merge_dims(0, -1). To mimic the behavior of `tf.layers.Flatten` (which + flattens all dimensions except the outermost batch dimension), use + `rt.merge_dims(1, -1)`. + + Args: + outer_axis: `int`: The first dimension in the range of dimensions to + merge. May be negative if `self.shape.rank` is statically known. + inner_axis: `int`: The last dimension in the range of dimensions to merge. + May be negative if `self.shape.rank` is statically known. + + Returns: + A copy of this tensor, with the specified dimensions merged into a + single dimension. The shape of the returned tensor will be + `self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N` + is the total number of slices in the merged dimensions. + """ + outer_axis = array_ops.get_positive_axis( + outer_axis, + self.shape.rank, + axis_name="outer_axis", + ndims_name="rank(self)") + inner_axis = array_ops.get_positive_axis( + inner_axis, + self.shape.rank, + axis_name="inner_axis", + ndims_name="rank(self)") + if not outer_axis <= inner_axis: + raise ValueError(f"Expected outer_axis ({outer_axis}) to be less than or " + f"equal to inner_axis ({inner_axis}).") + return merge_dims(self, outer_axis, inner_axis) + + def _set_shape(self, shape): + """Updates the static shape of `self` to be `shape`. + + * If a dimension of `shape` has known rank, and is encoded via + partitioning, then this will update the corresponding partition to + define `_uniform_row_length` and `nrows`. + * If a dimension of `shape` has a known rank, and is encoded as one + of the `flat_values` dimensions, then `flat_values.set_shape()` will + be used to update its shape. 
+
+    Warning: Using this method to assert an incorrect shape for a
+    RaggedTensor (i.e., one that's not consistent with its actual shape) can
+    cause segmentation faults and very difficult-to-diagnose behavior.  Only
+    use this method if you are certain that the shape is correct.
+
+    Args:
+      shape: `tf.TensorShape` specifying the shape for this `RaggedTensor`.
+    """
+    # TODO(edloper): Refactor this to not directly access private members
+    # of RowPartition.
+    # pylint: disable=protected-access
+
+    shape = tensor_shape.as_shape(shape)
+    if shape.rank is None:
+      return  # Nothing to do.
+
+    shape = shape.as_list()
+
+    # Outermost dimension
+    if shape[0] is not None:
+      self._row_partition._row_splits.set_shape(shape[0] + 1)
+
+    # Partitioned dimensions
+    dtype = self._row_partition.dtype
+    for i, partition in enumerate(self._nested_row_partitions):
+      size = shape[i + 1]
+      if size is not None:
+        if partition._uniform_row_length is not None:
+          old_row_length = tensor_util.constant_value(
+              partition._uniform_row_length)
+          if old_row_length is not None:
+            if size == old_row_length:
+              continue  # already have shape info for this axis.
+            else:
+              raise ValueError(f"Inconsistent size for axis {i + 1}: "
+                               f"{old_row_length} vs. {size}.")
+        partition._uniform_row_length = ops.convert_to_tensor(size, dtype)
+        if partition._nrows is None:
+          partition._nrows = array_ops.size(
+              partition._row_splits, out_type=dtype) - 1
+
+    # self.flat_values could be a CompositeTensor and doesn't have set_shape.
+    if hasattr(self.flat_values, "set_shape"):
+      # Inner dimensions
+      flat_shape = tensor_shape.as_shape([None] + shape[self.ragged_rank + 1:])
+      self.flat_values.set_shape(flat_shape)
+
+  #=============================================================================
+  # Tensor Type Conversions
+  #=============================================================================
+
+  @classmethod
+  @dispatch.add_dispatch_support
+  def from_tensor(cls,
+                  tensor,
+                  lengths=None,
+                  padding=None,
+                  ragged_rank=1,
+                  name=None,
+                  row_splits_dtype=dtypes.int64):
+    """Converts a `tf.Tensor` into a `RaggedTensor`.
+
+    The set of absent/default values may be specified using a vector of
+    lengths or a padding value (but not both).  If `lengths` is specified,
+    then the output tensor will satisfy
+    `output[row] = tensor[row][:lengths[row]]`.  If `lengths` is a list of
+    lists or tuple of lists, those lists will be used as nested row lengths.
+    If `padding` is specified, then any row *suffix* consisting entirely of
+    `padding` will be excluded from the returned `RaggedTensor`.  If neither
+    `lengths` nor `padding` is specified, then the returned `RaggedTensor`
+    will have no absent/default values.
+
+    Examples:
+
+    >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
+    >>> tf.RaggedTensor.from_tensor(dt)
+    <tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]>
+    >>> tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3])
+    <tf.RaggedTensor [[5], [], [6, 0, 0]]>
+
+    >>> tf.RaggedTensor.from_tensor(dt, padding=0)
+    <tf.RaggedTensor [[5, 7], [0, 3], [6]]>
+
+    >>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]],
+    ...                   [[0, 0], [3, 0], [0, 0]],
+    ...                   [[6, 0], [0, 0], [0, 0]]])
+    >>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1]))
+    <tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]>
+
+    Args:
+      tensor: The `Tensor` to convert.  Must have rank `ragged_rank + 1` or
+        higher.
+      lengths: An optional set of row lengths, specified using a 1-D integer
+        `Tensor` whose length is equal to `tensor.shape[0]` (the number of
+        rows in `tensor`).  If specified, then `output[row]` will contain
+        `tensor[row][:lengths[row]]`.  Negative lengths are treated as zero.
You + may optionally pass a list or tuple of lengths to this argument, which + will be used as nested row lengths to construct a ragged tensor with + multiple ragged dimensions. + padding: An optional padding value. If specified, then any row suffix + consisting entirely of `padding` will be excluded from the returned + RaggedTensor. `padding` is a `Tensor` with the same dtype as `tensor` + and with `shape=tensor.shape[ragged_rank + 1:]`. + ragged_rank: Integer specifying the ragged rank for the returned + `RaggedTensor`. Must be greater than zero. + name: A name prefix for the returned tensors (optional). + row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits` + tensor. One of `tf.int32` or `tf.int64`. + + Returns: + A `RaggedTensor` with the specified `ragged_rank`. The shape of the + returned ragged tensor is compatible with the shape of `tensor`. + + Raises: + ValueError: If both `lengths` and `padding` are specified. + ValueError: If the rank of `tensor` is 0 or 1. + """ + row_splits_dtype = dtypes.as_dtype(row_splits_dtype) + if lengths is not None and padding is not None: + raise ValueError("Specify argument `lengths` or `padding`, but not both.") + if not isinstance(ragged_rank, int): + raise TypeError(f"Argument `ragged_rank` must be an int. " + f"Received {ragged_rank}.") + if ragged_rank <= 0: + raise ValueError(f"Argument `ragged_rank` must be greater than 0. " + f"Received {ragged_rank}.") + + with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]): + tensor = ops.convert_to_tensor(tensor, name="tensor") + if tensor.shape.rank is not None and tensor.shape.rank < 2: + raise ValueError(f"The rank of a RaggedTensor must be greater than 1, " + f"i.e., a list of scalars won't have ragged " + f"dimensions. Received argument `tensor` with rank " + f"{tensor.shape.rank}.") + tensor.shape.with_rank_at_least(ragged_rank + 1) + input_shape = array_ops.shape(tensor, out_type=row_splits_dtype) + ncols = input_shape[1] + + # Handle nested row lengths. + if (lengths is not None and isinstance(lengths, (list, tuple)) and + len(lengths) and not isinstance(lengths[0], (int, float))): + if ragged_rank not in (1, len(lengths)): + # Note: we accept `ragged_rank=1` here because it's the default value; + # i.e., if the user passes in a tuple of lengths, but doesn't specify + # ragged_rank, then we should use that tuple to determine ragged_rank. + # We only want to complain if they pass in an explicit ragged_rank + # that doesn't match len(lengths). + raise ValueError(f"If Argument `lengths` is a tuple of row_lengths, " + f"argument `ragged_rank` must be " + f"len(lengths): {len(lengths)}. Received " + f"ragged_rank: {ragged_rank}.") + # Rather than reconstructing the tensor mask directly, we can + # recreate it as a boolean RaggedTensor, then densify that and use + # that as the mask to clear out the unused data in the passed tensor. 
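+        # Illustrative walk-through (values from the docstring example above,
+        # not computed here): with lengths=([2, 0, 3], [1, 1, 2, 0, 1]), the
+        # boolean RaggedTensor built below has nested row lengths [2, 0, 3]
+        # and [1, 1, 2, 0, 1]; densified, it becomes a [3, 3, 2] mask whose
+        # True entries select exactly the values that survive in the output.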
+ tensor.shape.with_rank_at_least(len(lengths) + 1) + num_tokens = math_ops.reduce_sum(lengths[-1]) + ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool) + ragged_mask = cls.from_nested_row_lengths( + ones_mask, lengths, validate=False) + dense_ragged_mask = ragged_mask.to_tensor(default_value=False) + masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask) + return cls.from_nested_row_lengths(masked_data, lengths, validate=False) + + # Handle ragged_rank>1 via recursion: + # If the output should have multiple ragged dimensions, then first + # flatten the tensor to eliminate all but the last ragged dimension, + # and recursively convert that flattened tensor. Then add on the splits + # for the dimensions that we flattened out. + if ragged_rank > 1: + if tensor.shape.is_fully_defined(): + input_shape = tensor.shape.as_list() + # The total number of elements in each dimension. E.g., if + # input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total. + dim_size = np.cumprod(input_shape) + new_shape = [dim_size[ragged_rank - 1]] + input_shape[ragged_rank:] + else: + dim_size = math_ops.cumprod(input_shape) + new_shape = array_ops.concat( + [[dim_size[ragged_rank - 1]], input_shape[ragged_rank:]], axis=0) + flattened = array_ops.reshape(tensor, new_shape) + result = cls.from_tensor( + flattened, lengths, padding, row_splits_dtype=row_splits_dtype) + + for axis in range(ragged_rank - 1, 0, -1): + dim_len = tensor_shape.dimension_at_index(tensor.shape, axis).value + if dim_len is None: + dim_len = input_shape[axis] + else: + dim_len = constant_op.constant(dim_len, row_splits_dtype) + result = RaggedTensor.from_uniform_row_length( + values=result, + uniform_row_length=dim_len, + nrows=dim_size[axis - 1], + validate=False) + return result + + # If padding was specified, then use it to find row lengths. + if padding is not None: + padding = ops.convert_to_tensor( + padding, name="padding", dtype=tensor.dtype) + padding.shape.assert_is_compatible_with(tensor.shape[2:]) + + # Find places where the padding is equal to the tensor. (This will + # broadcast `padding` across the outermost 2 dimensions of `tensor`, + # so `has_default_value.shape = tensor.shape`.) + has_default_value = math_ops.equal(padding, tensor) + + # If the padding isn't a scalar, then require that all values in the + # padding match each item in the tensor. After this block of code, + # `has_default.shape = tensor.shape[:2]`. (Unfortunately, we can't just + # use reduce_all for both cases, becaue when you pass an empty `axis` + # list to reduce_all, it reduces all axes; but we want it to reduce no + # axes -- i.e., to be a no-op.) + tensor_rank = array_ops.rank(tensor) + reduce_axis = math_ops.range(2, tensor_rank) + has_default = cond.cond( + tensor_rank > 2, + lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis), + lambda: has_default_value) + has_default.set_shape(tensor_shape.TensorShape([None, None])) + has_default.set_shape(tensor.shape[:2]) + + # Use has_default to find the length of each row: for each + # non-default item in a row, calculate the length that the row needs to + # have to include that item; and then take the max of those values + # (across each row). 
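+        # Illustrative micro-example (not computed here): for one row whose
+        # padding flags are has_default = [False, False, True, True],
+        # has_nondefault becomes [1, 1, 0, 0]; multiplying by [1, 2, 3, 4]
+        # gives [1, 2, 0, 0], and the row-wise max (2) is that row's length.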
+        has_nondefault = math_ops.logical_not(has_default)
+        has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype)
+        length_for_nondefault_value = (
+            has_nondefault *
+            array_ops.expand_dims(math_ops.range(1, ncols + 1), 0))
+        lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)
+
+      if lengths is not None:
+        # If we have lengths (either directly supplied, or computed from
+        # paddings), then use those to construct splits; and then use masking
+        # to get the corresponding values.
+        lengths = ragged_util.convert_to_int_tensor(lengths, "lengths",
+                                                    row_splits_dtype)
+        lengths.shape.assert_has_rank(1)
+        lengths = math_ops.minimum(lengths, ncols)
+        lengths = math_ops.maximum(lengths, 0)
+        limits = math_ops.cumsum(lengths)
+        splits = array_ops.concat(
+            [array_ops.zeros([1], row_splits_dtype), limits], axis=0)
+        mask = array_ops.sequence_mask(lengths, maxlen=ncols)
+        values = array_ops.boolean_mask(tensor, mask)
+        return cls.from_row_splits(values, splits, validate=False)
+
+      # If neither padding nor lengths were specified, then create a splits
+      # vector that contains no default values, and reshape the input tensor
+      # to form the values for the RaggedTensor.
+      values_shape = array_ops.concat(
+          [[input_shape[0] * input_shape[1]], input_shape[2:]], axis=0)
+      values = array_ops.reshape(tensor, values_shape)
+      const_nrows = tensor_shape.dimension_at_index(tensor.shape, 0).value
+      const_ncols = tensor_shape.dimension_at_index(tensor.shape, 1).value
+      if const_nrows is not None:
+        nrows = constant_op.constant(const_nrows, row_splits_dtype)
+      else:
+        nrows = input_shape[0]
+      if const_ncols is not None:
+        ncols = constant_op.constant(const_ncols, row_splits_dtype)
+      else:
+        ncols = input_shape[1]
+      return RaggedTensor.from_uniform_row_length(
+          values=values, uniform_row_length=ncols, nrows=nrows, validate=False)
+
+  def to_tensor(self, default_value=None, name=None, shape=None):
+    """Converts this `RaggedTensor` into a `tf.Tensor`.
+
+    If `shape` is specified, then the result is padded and/or truncated to
+    the specified shape.
+
+    Examples:
+
+    >>> rt = tf.ragged.constant([[9, 8, 7], [], [6, 5], [4]])
+    >>> print(rt.to_tensor())
+    tf.Tensor(
+    [[9 8 7]
+     [0 0 0]
+     [6 5 0]
+     [4 0 0]], shape=(4, 3), dtype=int32)
+    >>> print(rt.to_tensor(shape=[5, 2]))
+    tf.Tensor(
+    [[9 8]
+     [0 0]
+     [6 5]
+     [4 0]
+     [0 0]], shape=(5, 2), dtype=int32)
+
+    Args:
+      default_value: Value to set for indices not specified in `self`.
+        Defaults to zero.  `default_value` must be broadcastable to
+        `self.shape[self.ragged_rank + 1:]`.
+      name: A name prefix for the returned tensors (optional).
+      shape: The shape of the resulting dense tensor.  In particular,
+        `result.shape[i]` is `shape[i]` (if `shape[i]` is not None), or
+        `self.bounding_shape(i)` (otherwise).  `shape.rank` must be `None`
+        or equal to `self.rank`.
+
+    Returns:
+      A `Tensor` with shape `ragged.bounding_shape(self)` and the
+      values specified by the non-empty values in `self`.  Empty values are
+      assigned `default_value`.
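+
+    A further sketch (same `rt` as above; the `-1` fill value is illustrative):
+
+    >>> print(rt.to_tensor(default_value=-1))
+    tf.Tensor(
+    [[ 9  8  7]
+     [-1 -1 -1]
+     [ 6  5 -1]
+     [ 4 -1 -1]], shape=(4, 3), dtype=int32)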
+ """ + with ops.name_scope(name, "RaggedToTensor", [self, default_value, shape]): + if default_value is not None: + default_value = ops.convert_to_tensor( + default_value, name="default_value", dtype=self.dtype) + type_tensor_pairs = _get_row_partition_type_tensor_pairs(self) + row_partition_types = [x[0] for x in type_tensor_pairs] + row_partition_tensors = [x[1] for x in type_tensor_pairs] + if default_value is None: + default_value = array_ops.zeros((), self.dtype) + + if (isinstance(shape, (list, tuple)) and + any(isinstance(v, tensor_lib.Tensor) for v in shape) and + all(isinstance(v, (int, tensor_lib.Tensor)) for v in shape)): + shape = array_ops_stack.stack(shape) + + shape_tensor = _shape_as_tensor(shape, row_partition_tensors[0].dtype) + tensor = gen_ragged_conversion_ops.ragged_tensor_to_tensor( + shape=shape_tensor, + values=self.flat_values, + default_value=default_value, + row_partition_types=row_partition_types, + row_partition_tensors=row_partition_tensors, + ) + + ragged_shape = self.shape + + if ragged_shape.rank is not None and not isinstance( + shape, tensor_lib.Tensor + ): + # Merged self.shape and shape, favoring the second one as it takes + # into account potential padding added to the output. + shape = tensor_shape.as_shape(shape) + if shape.rank is None: + output_shape = ragged_shape + else: + # At this point we can assume that hshape.rank == ragged_shape.rank + # because otherwise it would have failed earlier. + output_shape = [ + s1 if s1 is not None else s2 + for (s1, s2) in zip(shape.as_list(), ragged_shape.as_list()) + ] + tensor.set_shape(output_shape) + + return tensor + + @classmethod + @dispatch.add_dispatch_support + def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64): + """Converts a 2D `tf.sparse.SparseTensor` to a `RaggedTensor`. + + Each row of the `output` `RaggedTensor` will contain the explicit values + from the same row in `st_input`. `st_input` must be ragged-right. If not + it is not ragged-right, then an error will be generated. + + Example: + + >>> indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]] + >>> st = tf.sparse.SparseTensor(indices=indices, + ... values=[1, 2, 3, 4, 5], + ... dense_shape=[4, 3]) + >>> tf.RaggedTensor.from_sparse(st).to_list() + [[1, 2, 3], [4], [], [5]] + + Currently, only two-dimensional `SparseTensors` are supported. + + Args: + st_input: The sparse tensor to convert. Must have rank 2. + name: A name prefix for the returned tensors (optional). + row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits` + tensor. One of `tf.int32` or `tf.int64`. + + Returns: + A `RaggedTensor` with the same values as `st_input`. + `output.ragged_rank = rank(st_input) - 1`. + `output.shape = [st_input.dense_shape[0], None]`. + Raises: + ValueError: If the number of dimensions in `st_input` is not known + statically, or is not two. 
+ """ + row_splits_dtype = dtypes.as_dtype(row_splits_dtype) + if not sparse_tensor.is_sparse(st_input): + raise TypeError(f"Argument `st_input` must be of type SparseTensor, but " + f"is of type {type(st_input).__name__}.") + with ops.name_scope(name, "RaggedFromSparse", [st_input]): + st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor( + st_input, name="st_input") + + if st_input.dense_shape.shape.ndims is None: + static_rank_from_dense_shape = None + else: + static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value + + if st_input.indices.shape.ndims is None: + static_rank_from_indices = None + else: + static_rank_from_indices = st_input.indices.shape.dims[1].value + + if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2: + raise ValueError("rank(st_input) must be 2.") + + with ops.control_dependencies( + _assert_sparse_indices_are_ragged_right(st_input.indices)): + # Treat sparse row indices as segment ids to generate a splits tensor + # thta we can pair with the sparse tensor values. (Ignore sparse column + # indices.) + segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype) + num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype) + return cls.from_value_rowids( + st_input.values, segment_ids, num_segments, validate=False) + + def to_sparse(self, name=None): + """Converts this `RaggedTensor` into a `tf.sparse.SparseTensor`. + + Example: + + >>> rt = tf.ragged.constant([[1, 2, 3], [4], [], [5, 6]]) + >>> print(rt.to_sparse()) + SparseTensor(indices=tf.Tensor( + [[0 0] [0 1] [0 2] [1 0] [3 0] [3 1]], + shape=(6, 2), dtype=int64), + values=tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32), + dense_shape=tf.Tensor([4 3], shape=(2,), dtype=int64)) + + Args: + name: A name prefix for the returned tensors (optional). + + Returns: + A SparseTensor with the same values as `self`. + """ + with ops.name_scope(name, "RaggedToSparse", [self]): + result = gen_ragged_conversion_ops.ragged_tensor_to_sparse( + self.nested_row_splits, self.flat_values, name=name) + return sparse_tensor.SparseTensor(result.sparse_indices, + result.sparse_values, + result.sparse_dense_shape) + + @classmethod + def _from_variant(cls, + variant, + dtype, + output_ragged_rank, + input_ragged_rank=None, + row_splits_dtype=dtypes.int64, + name=None): + """Converts a `variant` Tensor into a `RaggedTensor`. + + The input `variant` could be a scalar, meaning it encodes a single + `RaggedTensor` with ragged_rank `output_ragged_rank`. Alternatively it could + have an arbitrary rank, in which case each element is decoded into a + `RaggedTensor` with ragged_rank `input_ragged_rank` and these are then + stacked according to the input shape to output a single `RaggedTensor` + with ragged_rank `output_ragged_rank`. If `input_ragged_rank` is not + provided, it is inferred dynamically as `output_ragged_rank` - + `rank(variant)`. If `input_ragged_rank` is provided, the following must be + true: `output_ragged_rank` = `input_ragged_rank` + `rank(variant)`. + + Example: + + >>> rt = tf.ragged.constant([[0], [1, 2]]) + >>> et = rt._to_variant() + >>> stacked_et = tf.stack([et, et]) + >>> tf.RaggedTensor._from_variant( # scalar input. + ... et, dtype=tf.int32, output_ragged_rank=1).to_list() + [[0], [1, 2]] + >>> tf.RaggedTensor._from_variant( # batched input. + ... stacked_et, dtype=tf.int32, output_ragged_rank=2).to_list() + [[[0], [1, 2]], [[0], [1, 2]]] + + Args: + variant: A `variant` Tensor representing an encoded (possibly + nested-batched) `RaggedTensor`. 
+ dtype: The dtype of the encoded `RaggedTensor`. + output_ragged_rank: The expected ragged rank of the output `RaggedTensor`. + input_ragged_rank: The ragged rank of each encoded `RaggedTensor`. This is + optional and inferred dynamically if not provided. + row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One + of `tf.int32` or `tf.int64`. + name: A name prefix for the returned tensors (optional). + + Returns: + A `RaggedTensor` of dtype `dtype` and ragged rank `output_ragged_rank`. + + Raises: + ValueError: If the input rank is known, `input_ragged_rank` is provided + and `output_ragged_rank` = `input_ragged_rank` + `rank(variant)` does + not hold. + """ + variant = ops.convert_to_tensor( + variant, name="variant", dtype=dtypes.variant) + if (variant.shape.ndims is not None and input_ragged_rank is not None and + output_ragged_rank != input_ragged_rank + variant.shape.ndims): + raise ValueError( + f"Argument `output_ragged_rank` ({output_ragged_rank}) must be equal " + f"to `input_ragged_rank` + `variant.shape.ndims` " + f"({input_ragged_rank} + {variant.shape.ndims}).") + input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank + with ops.name_scope( + name, "RaggedFromVariant", + [variant, dtype, input_ragged_rank, output_ragged_rank]): + result = gen_ragged_conversion_ops.ragged_tensor_from_variant( + variant, input_ragged_rank, max(output_ragged_rank, 0), dtype, + row_splits_dtype, name) + return cls.from_nested_row_splits( + result.output_dense_values, + result.output_nested_splits, + validate=False) + + def _to_variant(self, batched_input=False, name=None): + """Converts this `RaggedTensor` into a `variant` Tensor. + + If `batched_input` is `True`, then the `RaggedTensor` is unbatched along the + zero-th dimension, each component `RaggedTensor` is encoded into a scalar + `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor. + If `batched_input` is `False`, then the `RaggedTensor` is encoded as is and + a scalar `variant` Tensor is returned. + + Example: + >>> rt = tf.ragged.constant([[[0]], [[1]], [[2]]]) + >>> rt._to_variant().shape.as_list() + [] + >>> rt._to_variant(batched_input=True).shape.as_list() + [3] + + Args: + batched_input: If `True`, the `RaggedTensor` is unbatched and converted to + a `variant` vector. Set to `False` by default. + name: A name prefix for the returned tensors (optional). + + Returns: + A `variant` Tensor that encodes this `RaggedTensor`. + """ + with ops.name_scope(name, "RaggedToVariant", [self, batched_input]): + return gen_ragged_conversion_ops.ragged_tensor_to_variant( + self.nested_row_splits, self.flat_values, batched_input, name) + + #============================================================================= + # String Encoding + #============================================================================= + def __repr__(self): + if self._is_eager(): + # The np.array2string in _formatter provides a separator argument, but + # doesn't handle recursive calls correctly. The np.printoptions handles + # recursive calls correctly, but doesn't provide a separator argument. + # Combines them together to print elements separated by comma, while + # avoiding the redundant array prefixes and dtypes. 
For example,
+      # the value of tf.ragged.constant([[1, 2], [3, 4]]) will look like
+      #
+      #     [[1, 2],
+      #      [3, 4]]
+      with np.printoptions(formatter={"all": _formatter}):
+        value_text = _formatter(self.numpy())
+      return f"<tf.RaggedTensor {value_text}>"
+    else:
+      return "tf.RaggedTensor(values=%s, row_splits=%s)" % (self.values,
+                                                            self.row_splits)
+
+  #=============================================================================
+  # Eager Execution Mode
+  #=============================================================================
+
+  def numpy(self):
+    """Returns a numpy `array` with the values for this `RaggedTensor`.
+
+    Requires that this `RaggedTensor` was constructed in eager execution mode.
+
+    Ragged dimensions are encoded using numpy `arrays` with `dtype=object`
+    and `rank=1`, where each element is a single row.
+
+    #### Examples
+
+    In the following example, the value returned by `RaggedTensor.numpy()`
+    contains three numpy `array` objects: one for each row (with `rank=1` and
+    `dtype=int64`), and one to combine them (with `rank=1` and
+    `dtype=object`):
+
+    >>> tf.ragged.constant([[1, 2, 3], [4, 5]], dtype=tf.int64).numpy()
+    array([array([1, 2, 3]), array([4, 5])], dtype=object)
+
+    Uniform dimensions are encoded using multidimensional numpy `array`s.  In
+    the following example, the value returned by `RaggedTensor.numpy()`
+    contains a single numpy `array` object, with `rank=2` and `dtype=int64`:
+
+    >>> tf.ragged.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64).numpy()
+    array([[1, 2, 3], [4, 5, 6]])
+
+    Returns:
+      A numpy `array`.
+    """
+    if not self._is_eager():
+      raise ValueError("RaggedTensor.numpy() is only supported in eager mode.")
+    values = self.values.numpy()
+    splits = self.row_splits.numpy()
+    rows = [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]
+    if not rows:
+      return np.zeros((0, 0) + values.shape[1:], dtype=values.dtype)
+    # Note: if `rows` have ragged lengths, then they will be stored in a
+    # np.ndarray with dtype=object and rank=1.  If they have uniform lengths,
+    # they will be combined into a single np.ndarray with dtype=row.dtype and
+    # rank=row.rank+1.
+    #
+    # Manually set dtype as numpy now complains when given ragged rows.
+    has_variable_length_rows = any(len(row) != len(rows[0]) for row in rows)
+    dtype = np.object_ if has_variable_length_rows else None
+    return np.array(rows, dtype=dtype)
+
+  def to_list(self):
+    """Returns a nested Python `list` with the values for this `RaggedTensor`.
+
+    Requires that `rt` was constructed in eager execution mode.
+
+    Returns:
+      A nested Python `list`.
+    """
+    if not isinstance(self.row_splits, ops.EagerTensor):
+      raise ValueError("to_list can only be used in eager mode.")
+    row_splits = self.row_splits.numpy().tolist()
+    values = self.values
+
+    if isinstance(values, RaggedTensor):
+      return [
+          values[row_splits[i]:row_splits[i + 1]].to_list()
+          for i in range(len(row_splits) - 1)
+      ]
+    else:
+      # Convert values to a Python list.
+      if hasattr(values, "numpy"):
+        values_as_list = values.numpy().tolist()
+      elif hasattr(values, "to_list"):
+        values_as_list = values.to_list()
+      else:
+        raise ValueError("values must be convertible to a list")
+
+      return [
+          values_as_list[row_splits[i]:row_splits[i + 1]]
+          for i in range(len(row_splits) - 1)
+      ]
+
+  def _eager_value(self):
+    """Returns a RaggedTensorValue for self.
Requires self._is_eager()=true.""" + value = self.flat_values.numpy() + for row_splits in reversed(self.nested_row_splits): + value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy()) + return value + + def _is_eager(self): + """Returns True if values & row_splits Tensors are all `EagerTensor`s.""" + rt = self + while isinstance(rt, RaggedTensor): + if not isinstance(rt.row_splits, ops.EagerTensor): + return False + rt = rt.values + return isinstance(rt, ops.EagerTensor) + + #============================================================================= + # Operators + #============================================================================= + # To avoid circular dependencies, we define stub methods for operators here, + # and then override them when the ragged_operators module is imported. + + def _overloaded_operator(name): # pylint: disable=no-self-argument + + def stub(*args, **kwargs): + del args, kwargs + raise ValueError( + f"You must import 'tensorflow.python.ops.ragged.ragged_ops' " + f"before using RaggedTensor.{name}.") + + return stub + + __getitem__ = _overloaded_operator("__getitem__") + __ge__ = _overloaded_operator("__ge__") + __gt__ = _overloaded_operator("__gt__") + __le__ = _overloaded_operator("__le__") + __lt__ = _overloaded_operator("__lt__") + __and__ = _overloaded_operator("__and__") + __rand__ = _overloaded_operator("__rand__") + __invert__ = _overloaded_operator("__invert__") + __ror__ = _overloaded_operator("__ror__") + __or__ = _overloaded_operator("__or__") + __xor__ = _overloaded_operator("__xor__") + __rxor__ = _overloaded_operator("__rxor__") + __abs__ = _overloaded_operator("__abs__") + __add__ = _overloaded_operator("__add__") + __radd__ = _overloaded_operator("__radd__") + __div__ = _overloaded_operator("__div__") + __rdiv__ = _overloaded_operator("__rdiv__") + __floordiv__ = _overloaded_operator("__floordiv__") + __rfloordiv__ = _overloaded_operator("__rfloordiv__") + __mod__ = _overloaded_operator("__mod__") + __rmod__ = _overloaded_operator("__rmod__") + __mul__ = _overloaded_operator("__mul__") + __rmul__ = _overloaded_operator("__rmul__") + __neg__ = _overloaded_operator("__neg__") + __pow__ = _overloaded_operator("__pow__") + __rpow__ = _overloaded_operator("__rpow__") + __sub__ = _overloaded_operator("__sub__") + __rsub__ = _overloaded_operator("__rsub__") + __truediv__ = _overloaded_operator("__truediv__") + __rtruediv__ = _overloaded_operator("__rtruediv__") + del _overloaded_operator + + #============================================================================= + # Name Scope + #============================================================================= + + # This private function is used by ops.name_scope to ensure that all of the + # input tensors for the scope belong to the same graph. Defining this means + # that you may include `RaggedTensor` objects in the name_scope `values` + # list. 
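+  # For example (sketch): in graph mode,
+  #     with ops.name_scope(None, "MyOp", [rt]):
+  #       ...
+  # works because name_scope calls rt._as_graph_element() and receives the
+  # innermost `values` Tensor, which carries the graph.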
+ def _as_graph_element(self): + """Convert `self` to a graph element.""" + values = self.values + while isinstance(values, RaggedTensor): + values = values.values + return values + + #============================================================================= + # Composite Tensor + #============================================================================= + + @property + def _type_spec(self): + return RaggedTensorSpec.from_value(self) + + def _shape_invariant_to_type_spec(self, shape): + return RaggedTensorSpec(shape, self.dtype, self.ragged_rank, + self.row_splits.dtype) + + def consumers(self): + return self._consumers() + + __composite_gradient__ = ( + composite_tensor_gradient.WithValuesCompositeTensorGradient()) + + +def is_ragged(value): + """Returns true if `value` is a ragged tensor or ragged tensor value.""" + return isinstance(value, + (RaggedTensor, ragged_tensor_value.RaggedTensorValue)) + + +def match_row_splits_dtypes(*tensors, **kwargs): + """Return a copy of `tensors` with row_splits all having the same dtype. + + Args: + *tensors: A list of Tensors or RaggedTensors. + **kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors), + where `dtype` is the data type used by row-splits, and `tensors` is the + converted list of `Tensors` and `RaggedTensors`. + + Returns: + The converted list of `Tensors` and `RaggedTensors`. + """ + return_dtype = kwargs.pop("return_dtype", False) + if kwargs: + raise ValueError(f"Unexpected keyword args {kwargs}.") + + has_int32 = False + has_int64 = False + for tensor in tensors: + if isinstance(tensor, RaggedTensor): + if tensor.row_splits.dtype == dtypes.int32: + has_int32 = True + else: + has_int64 = True + + if has_int32 and has_int64: + if not ragged_config.auto_cast_partition_dtype(): + raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; " + "use RaggedTensor.with_row_splits_dtype() to convert " + "them to compatible dtypes.") + dtype = dtypes.int64 + tensors = tuple( + t.with_row_splits_dtype(dtypes.int64) if isinstance(t, RaggedTensor + ) else t + for t in tensors) + + elif has_int32: + dtype = dtypes.int32 + else: + dtype = dtypes.int64 + + if return_dtype: + return (dtype, tensors) + else: + return tensors + + +# =============================================================================== +# RaggedTensorSpec +# =============================================================================== +@tf_export("RaggedTensorSpec") +@type_spec_registry.register("tf.RaggedTensorSpec") +class RaggedTensorSpec( + type_spec.BatchableTypeSpec, internal_types.RaggedTensorSpec): + """Type specification for a `tf.RaggedTensor`.""" + + __slots__ = [ + "_shape", "_dtype", "_ragged_rank", "_row_splits_dtype", + "_flat_values_spec" + ] + + @property + def dtype(self): + """The `tf.dtypes.DType` specified by this type for the RaggedTensor. + + Examples: + + >>> rt = tf.ragged.constant([["a"], ["b", "c"]], dtype=tf.string) + >>> tf.type_spec_from_value(rt).dtype + tf.string + + Returns: + A `tf.dtypes.DType` of the values in the RaggedTensor. + """ + return self._dtype + + @property + def shape(self): + """The statically known shape of the RaggedTensor. + + Examples: + + >>> rt = tf.ragged.constant([[0], [1, 2]]) + >>> tf.type_spec_from_value(rt).shape + TensorShape([2, None]) + + >>> rt = tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1) + >>> tf.type_spec_from_value(rt).shape + TensorShape([2, None, 2]) + + Returns: + A `tf.TensorShape` containing the statically known shape of the + RaggedTensor. 
Ragged dimensions have a size of `None`. + """ + return self._shape + + @property + def ragged_rank(self): + """The number of times the RaggedTensor's flat_values is partitioned. + + Defaults to `shape.ndims - 1`. + + Examples: + + >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]]) + >>> tf.type_spec_from_value(values).ragged_rank + 1 + + >>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2) + >>> tf.type_spec_from_value(rt1).ragged_rank + 2 + + Returns: + A Python `int` indicating the number of times the underlying `flat_values` + Tensor has been partitioned to add a new dimension. + I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`. + """ + return self._ragged_rank + + @property + def row_splits_dtype(self): + """The `tf.dtypes.DType` of the RaggedTensor's `row_splits`. + + Examples: + + >>> rt = tf.ragged.constant([[1, 2, 3], [4]], row_splits_dtype=tf.int64) + >>> tf.type_spec_from_value(rt).row_splits_dtype + tf.int64 + + Returns: + A `tf.dtypes.DType` for the RaggedTensor's `row_splits` tensor. One + of `tf.int32` or `tf.int64`. + """ + return self._row_splits_dtype + + @property + def flat_values_spec(self): + """The `TypeSpec` of the flat_values of the RaggedTensor. + + Returns: + - The TypeSpec of flat_values. + - None when the flat_values is a Tensor. + """ + return self._flat_values_spec + + @property + def value_type(self): + return RaggedTensor if self._ragged_rank > 0 else tensor_lib.Tensor + + def __init__(self, + shape=None, + dtype=dtypes.float32, + ragged_rank=None, + row_splits_dtype=dtypes.int64, + flat_values_spec=None): + """Constructs a type specification for a `tf.RaggedTensor`. + + Args: + shape: The shape of the RaggedTensor, or `None` to allow any shape. If a + shape is specified, then all ragged dimensions must have size `None`. + dtype: `tf.DType` of values in the RaggedTensor. + ragged_rank: Python integer, the number of times the RaggedTensor's + flat_values is partitioned. Defaults to `shape.ndims - 1`. + row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One + of `tf.int32` or `tf.int64`. + flat_values_spec: TypeSpec for the flat_values of the RaggedTensor. It must + be provided when the flat_values is a CompositeTensor rather than a + Tensor. If both `dtype` and `flat_values_spec` are provided, `dtype` must + be the same as `flat_values_spec.dtype`. (experimental) + """ + self._shape = tensor_shape.as_shape(shape) + self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype) + if flat_values_spec is not None: + if dtype is None: + dtype = flat_values_spec.dtype + elif dtype != flat_values_spec.dtype: + raise ValueError("dtype must be the same as flat_values_spec.dtype") + elif dtype is None: + raise ValueError( + "At least one of dtype or flat_values_spec must be provided") + self._dtype = dtypes.as_dtype(dtype) + self._flat_values_spec = flat_values_spec + + rank = self._shape.ndims + if ragged_rank is None: + if rank is None: + raise ValueError("Must specify ragged_rank or " + "a shape with a known rank.") + ragged_rank = rank - 1 + self._ragged_rank = ragged_rank + if not isinstance(self._ragged_rank, int): + raise TypeError(f"Argument `ragged_rank` must be an int. " + f"Received {ragged_rank}.") + + if rank is not None: + if ragged_rank >= rank: + raise ValueError(f"Argument `ragged_rank` ({ragged_rank}) must be less " + f"than rank ({rank}).") + + def is_compatible_with(self, spec_or_value): + # RaggedTensor with ragged_rank 0 can be compatible with raw flat_values.
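+ # For example (an illustrative sketch): a ragged_rank-0 spec accepts a
+ # plain dense tensor with a compatible shape and dtype:
+ #
+ #   spec = RaggedTensorSpec(shape=[3], dtype=dtypes.float32, ragged_rank=0)
+ #   spec.is_compatible_with(constant_op.constant([1.0, 2.0, 3.0]))  # True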
+ if self._ragged_rank == 0: + if self._flat_values_spec is None: + if isinstance( + spec_or_value, (tensor_lib.Tensor, tensor_lib.TensorSpec)): + return tensor_lib.TensorSpec( + self._shape, self._dtype).is_compatible_with(spec_or_value) + elif not isinstance(spec_or_value, (RaggedTensor, RaggedTensorSpec)): + return self._flat_values_spec.is_compatible_with(spec_or_value) + return super(RaggedTensorSpec, self).is_compatible_with(spec_or_value) + + def _serialize(self): + if self._flat_values_spec is None: + return (self._shape, self._dtype, self._ragged_rank, + self._row_splits_dtype) + else: + return (self._shape, self._dtype, self._ragged_rank, + self._row_splits_dtype, self._flat_values_spec) + + @property + def _component_specs(self): + if self._ragged_rank <= 0: + if self._flat_values_spec is not None: + return [self._flat_values_spec] + else: + return [tensor_lib.TensorSpec(self._shape, self._dtype)] + + flat_values_spec = self._flat_values_spec + if flat_values_spec is None: + flat_values_shape = tensor_shape.TensorShape([None]).concatenate( + self._shape[self._ragged_rank + 1:]) + flat_values_spec = tensor_lib.TensorSpec(flat_values_shape, self._dtype) + outer_dim = tensor_shape.dimension_at_index(self._shape, 0) + outer_splits_shape = [None if outer_dim is None else outer_dim + 1] + inner_splits_spec = tensor_lib.TensorSpec([None], self._row_splits_dtype) + + specs = ([ + flat_values_spec, + tensor_lib.TensorSpec(outer_splits_shape, self._row_splits_dtype) + ] + [inner_splits_spec for _ in range(self._ragged_rank - 1)]) + return specs + + def _to_components(self, value): + if is_ragged(value): + return [value.flat_values] + list(value.nested_row_splits) + else: + return [value] + + def _from_components(self, tensor_list): + result = tensor_list[0] + if (all(isinstance(t, np.ndarray) for t in tensor_list) and + not tf2.enabled()): + for row_splits in reversed(tensor_list[1:]): + result = ragged_tensor_value.RaggedTensorValue(result, row_splits) + else: + if isinstance(tensor_list[0], np.ndarray): + tensor_list = [ops.convert_to_tensor(t) for t in tensor_list] + result = tensor_list[0] + for row_splits in reversed(tensor_list[1:]): + result = RaggedTensor( + result, + RowPartition.from_row_splits(row_splits, validate=False), + internal=True) + if self._shape.ndims is not None: + if isinstance(result, RaggedTensor): + result._set_shape(self._shape) # pylint: disable=protected-access + # TODO(xjun): MaskedTensor doesn't implement set_shape. + if self.flat_values_spec is not None and hasattr(result.flat_values, + "set_shape"): + result.flat_values.set_shape(self.flat_values_spec.shape) + elif isinstance(result, tensor_lib.Tensor): + result.set_shape(self._shape) + return result + + # The RaggedTensorSpec tensor_list encoding uses to/from_variant ops + # to (un)box the component tensors in a way that allows for batching & + # unbatching. + @property + def _flat_tensor_specs(self): + # NOTE(mishragaurav): The default flat shape of a boxed `RaggedTensor` is + # `[]` (scalar), but a `RaggedTensorSpec` can also represent a batch of + # boxed `RaggedTensor` objects with shape `(...)` (and batches of batches, + # etc.), so the flat shape must be unknown. + return [tensor_lib.TensorSpec(None, dtypes.variant)] + + def _to_tensor_list(self, value): + # TODO(edloper): Update gen_ragged_conversion_ops that convert to and + # from variant to include all of the row-partitioning tensors. 
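+ # Informally (a sketch of the encoding, not a specification): each
+ # RaggedTensor is boxed into DT_VARIANT tensor(s) carrying both the
+ # flat_values and the row-partitioning tensors. With batched_input=False
+ # the encoding is a single scalar variant; with batched_input=True it is
+ # a vector of variants, one per row, which is what lets tf.data batch and
+ # unbatch ragged components.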
+ if self._flat_values_spec is not None: + raise ValueError("Customized value_type is not supported.") + if isinstance(value, RaggedTensor): + if value.ragged_rank != self._ragged_rank: + raise ValueError( + f"Ragged rank of value {value.ragged_rank} does not match " + f"ragged rank of type {self._ragged_rank}.") + # pylint: disable=protected-access + return [value._to_variant(batched_input=False)] + else: + if self._ragged_rank > 0: + raise ValueError( + f"Expected a RaggedTensor if ragged rank={self._ragged_rank}" + f" but got {type(value).__name__}." + ) + return [ + gen_ragged_conversion_ops.ragged_tensor_to_variant( + (), value, batched_input=False) + ] + + def _to_batched_tensor_list(self, value): + if self._flat_values_spec is not None: + raise ValueError("Customized value_type is not supported.") + if isinstance(value, RaggedTensor): + if value.ragged_rank != self._ragged_rank: + raise ValueError( + f"Ragged rank of value {value.ragged_rank} does not match " + f"ragged rank of type {self._ragged_rank}.") + # pylint: disable=protected-access + return [value._to_variant(batched_input=True)] + else: + if self._ragged_rank > 0: + raise ValueError( + f"Expected a RaggedTensor if ragged rank={self._ragged_rank}" + f" but got {type(value).__name__}." + ) + return [ + gen_ragged_conversion_ops.ragged_tensor_to_variant( + rt_nested_splits=(), rt_dense_values=value, batched_input=True) + ] + + def _from_compatible_tensor_list(self, tensor_list): + if self._flat_values_spec is not None: + raise ValueError("Customized value_type is not supported.") + result = RaggedTensor._from_variant( # pylint: disable=protected-access + tensor_list[0], + dtype=self._dtype, + row_splits_dtype=self._row_splits_dtype, + output_ragged_rank=self._ragged_rank) + if self._shape.ndims is not None: + if isinstance(result, RaggedTensor): + result._set_shape(self._shape) # pylint: disable=protected-access + # TODO(xjun): MaskedTensor doesn't implement set_shape. + if self.flat_values_spec is not None and hasattr(self.flat_values, + "set_shape"): + result.flat_values.set_shape(self.flat_values_spec.shape) + else: + result.set_shape(self._shape) + return result + + def _batch(self, batch_size): + if self._flat_values_spec is not None: + raise ValueError("Customized value_type is not supported.") + return RaggedTensorSpec( + tensor_shape.TensorShape([batch_size]).concatenate(self._shape), + self._dtype, self._ragged_rank + 1, self._row_splits_dtype) + + def _unbatch(self): + if self._flat_values_spec is not None: + raise ValueError("Customized value_type is not supported.") + # Note: Negative ragged_rank is allowed here because the dataset could be + # subsequently batched again. If ragged_rank > 1, assume row_splits_dtype is + # consistent. 
Errors are handled in + # RaggedTensorSpec._from_compatible_tensor_list() + return RaggedTensorSpec(self._shape[1:], self._dtype, self._ragged_rank - 1, + self._row_splits_dtype) + + def _to_legacy_output_types(self): + return self._dtype + + def _to_legacy_output_shapes(self): + return self._shape + + def _to_legacy_output_classes(self): + return self + + @classmethod + def from_value(cls, value): + if (isinstance(value, ragged_tensor_value.RaggedTensorValue) or + isinstance(value.flat_values, tensor_lib.Tensor)): + return cls( + shape=value.shape, + dtype=value.values.dtype, + ragged_rank=value.ragged_rank, + row_splits_dtype=value.row_splits.dtype) + else: + flat_values_spec = type_spec.type_spec_from_value(value.flat_values) + # Relax shape[0] to None, as it is connected to dynamic ragged shapes. + flat_values_spec = flat_values_spec._unbatch()._batch(None) # pylint: disable=protected-access + return cls( + shape=value.shape, + dtype=value.values.dtype, + ragged_rank=value.ragged_rank, + row_splits_dtype=value.row_splits.dtype, + flat_values_spec=flat_values_spec) + + +nested_structure_coder.register_codec( + nested_structure_coder.BuiltInTypeSpecCodec( + RaggedTensorSpec, struct_pb2.TypeSpecProto.RAGGED_TENSOR_SPEC + ) +) + + +type_spec.register_type_spec_from_value_converter( + ragged_tensor_value.RaggedTensorValue, RaggedTensorSpec.from_value) + + +# =============================================================================== +# Convert value -> tensor +# =============================================================================== +def convert_to_tensor_or_ragged_tensor(value, + dtype=None, + preferred_dtype=None, + name=None): + """Converts value to a `RaggedTensor` or `Tensor`. + + * If `value` is a `RaggedTensor`, then return it as-is. + * If `value` is a `RaggedTensorValue`, return a corresponding constant + `RaggedTensor`. + * Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`. + + Args: + value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has + a registered `Tensor` conversion function. + dtype: Optional element type for the returned tensor. If missing the type + is inferred from the type of `value`. + preferred_dtype: Optional element type for the returned tensor, used when + dtype is None. This argument has no effect if `value` is already a + tensor, or when conversion is not possible. + name: Optional name to use if a new `Tensor` is created. + + Returns: + A `Tensor` or `RaggedTensor`. + """ + if isinstance(value, RaggedTensor): + if dtype and not dtype.is_compatible_with(value.dtype): + raise ValueError(f"Tensor conversion requested dtype {dtype.name} for " + f"RaggedTensor with dtype {value.dtype.name}: {value}.") + return value + elif isinstance(value, ragged_tensor_value.RaggedTensorValue): + with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []): + flat_values = ops.convert_to_tensor( + value=value.flat_values, + dtype=dtype, + dtype_hint=preferred_dtype, + name="flat_values") + return RaggedTensor.from_nested_row_splits( + flat_values, value.nested_row_splits, validate=False) + else: + return tensor_conversion.convert_to_tensor_v2_with_dispatch( + value=value, dtype=dtype, dtype_hint=preferred_dtype, name=name + ) + + +def _convert_to_ragged_tensor_values(value): + """Converts value to supported RaggedTensor value. + + * If `value` is an object of supported value type, then return it as-is. + * Otherwise convert it to Tensor or RaggedTensor. 
+ + Args: + value: An object of `Tensor`, `RaggedTensor` or registered RaggedTensor + value types, or an object whose type has a registered `Tensor` conversion + function. + + Returns: + An object of `Tensor`, `RaggedTensor` or registered RaggedTensor + value types. + """ + if _is_supported_ragged_values_type(value): + return value + else: + return convert_to_tensor_or_ragged_tensor(value, name="values") + + +# =============================================================================== +# Register RaggedTensor for use with session.run. +# =============================================================================== +def _ragged_tensor_value_from_components(components): + components = list(components) + value = components.pop() + while components: + value = ragged_tensor_value.RaggedTensorValue(value, components.pop()) + return value + + +def _ragged_tensor_session_fetch(rt): + components = rt.nested_row_splits + (rt.flat_values,) + return (components, _ragged_tensor_value_from_components) + + +def _ragged_tensor_session_feed(feed_key, feed_val): + key_components = feed_key.nested_row_splits + (feed_key.flat_values,) + val_components = feed_val.nested_row_splits + (feed_val.flat_values,) + return zip(key_components, val_components) + + +def _ragged_tensor_session_feed_for_partial_run(feed_key): + return feed_key.nested_row_splits + (feed_key.flat_values,) + + +session.register_session_run_conversion_functions( + RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed, + _ragged_tensor_session_feed_for_partial_run) + + +# =============================================================================== +# RaggedTensorType +# =============================================================================== +class RaggedTensorType: + """Encoding of a static type for a `RaggedTensor`. + + Use this type to express/declare that an output must have the type of + `RaggedTensor`. + """ + + def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64): + """Initializes a RaggedTensorType object. + + Args: + dtype: data type of the `RaggedTensor`'s inner values. + ragged_rank: ragged_rank of the declared `RaggedTensor`. + row_splits_dtype: data type for the `RaggedTensor`'s row splits. + One of: `tf.int32` or `tf.int64`. + """ + row_splits_dtype = dtypes.as_dtype(row_splits_dtype) + self._dtype = dtype + self._ragged_rank = ragged_rank + self._row_splits_dtype = row_splits_dtype + + dtype = property(lambda self: self._dtype) + ragged_rank = property(lambda self: self._ragged_rank) + row_splits_dtype = property(lambda self: self._row_splits_dtype) + + def __repr__(self): + return "RaggedTensorType(%r, %r, %r)" % (self.dtype, self.ragged_rank, + self.row_splits_dtype) + + +# =============================================================================== +# Helper Functions +# =============================================================================== +def _assert_sparse_indices_are_ragged_right(indices): + """Checks that the given SparseTensor.indices tensor is ragged-right. + + Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged-right + because the entry `[3, 1]` skips a cell. + + Args: + indices: The SparseTensor indices to check. + + Returns: + A list of control dependency op tensors. + """ + index_prefix = indices[:, :-1] + index_suffix = indices[:, -1] + + # Check whether each index is starting a new row in the innermost dimension + # (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
+ # (Note: this skips the first index; we will check that separately below.) + index_prefix_changed = math_ops.reduce_any( + math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1) + + # Check two cases: + # * For indices that start a new row: index_suffix[i] must be zero. + # * For indices that continue a row: index_suffix[i] must be equal to + # index_suffix[i-1]+1. + index_ok = array_ops.where( + index_prefix_changed, math_ops.equal(index_suffix[1:], 0), + math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1)) + + # Also check that the very first index didn't skip any cells. The first + # index starts a new row (by definition), so its suffix should be zero. + sparse_indices_are_ragged_right = math_ops.logical_and( + math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)), + math_ops.reduce_all(index_ok)) + + message = [ + "SparseTensor is not right-ragged", "SparseTensor.indices =", indices + ] + return [control_flow_assert.Assert(sparse_indices_are_ragged_right, message)] + + +@ops.RegisterGradient("RaggedTensorToSparse") +def _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad, + sparse_values_grad, + unused_sparse_shape_grad): + """Gradient for RaggedTensorToSparse.""" + op_inputs_nested_row_splits = op.inputs[:-1] + op_inputs_flat_values = op.inputs[-1] + + # No gradient for the RaggedTensor's nested_row_splits. + nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits) + + # Gradient for the RaggedTensor's flat_values is formed by reshaping + # the gradient for the SparseTensor's values. + flat_values_shape = array_ops.shape(op_inputs_flat_values) + flat_values_gradient = array_ops.reshape(sparse_values_grad, + flat_values_shape) + + return nested_row_splits_gradient + [flat_values_gradient] + + +def _assert_monotonic_increasing(tensor, message=None): + return check_ops.assert_non_negative( + tensor[1:] - tensor[:-1], message=message) + + +def _assert_zero(tensor, message=None): + return check_ops.assert_equal( + tensor, constant_op.constant(0, dtype=tensor.dtype), message=message) + + +def _nrows(tensor, out_type=dtypes.int32): + if isinstance(tensor, RaggedTensor): + return tensor.nrows(out_type=out_type) + else: + return array_ops.shape(tensor, out_type=out_type)[0] + + +def merge_dims(value, outer_axis, inner_axis): + """Merges value[outer_axis...inner_axis] into a single dimension. + + See `RaggedTensor.merge_dims()` for more details. This helper differs from + `RaggedTensor.merge_dims()` in that `value` may be a dense or ragged tensor. + + Args: + value: A `RaggedTensor` or `Tensor` + outer_axis: `int` + inner_axis: `int` + + Returns: + A flattened `RaggedTensor` or `Tensor`. + """ + if outer_axis == inner_axis: + return value + + # Flatten outer dimensions of a RaggedTensor by just taking its values. + while outer_axis == 0 and isinstance(value, RaggedTensor): + value = value.values + inner_axis -= 1 + if inner_axis == 0: + return value + + # Flatten non-Ragged tensors using tf.reshape(). + if not isinstance(value, RaggedTensor): + if value.shape.is_fully_defined(): + old_shape = value.shape.as_list() + new_shape = old_shape[:outer_axis] + [-1] + old_shape[inner_axis + 1:] + else: + old_shape = array_ops.shape(value) + new_shape = array_ops.concat( + [old_shape[:outer_axis], [-1], old_shape[inner_axis + 1:]], axis=0) + return array_ops.reshape(value, new_shape) + + # Handle outer_axis>1 via recursion. 
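+ # E.g. (illustrative): merge_dims(rt, 2, 3) is rewritten as
+ # rt.with_values(merge_dims(rt.values, 1, 2)), peeling off one outer
+ # dimension per recursive step until outer_axis == 1.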
+ if outer_axis > 1: + return value.with_values( + merge_dims(value.values, outer_axis - 1, inner_axis - 1)) + + # At this point, we know outer_axis == 1, and value is a RaggedTensor. + # So we need to flatten the values and build a corresponding splits tensor. + new_values = value.values + new_splits = value.row_splits + for axis in range(outer_axis, inner_axis): + if isinstance(new_values, RaggedTensor): + # Flatten a single ragged dimension. + new_splits = array_ops.gather(new_values.row_splits, new_splits) + new_values = new_values.values + else: + # Flatten all remaining dense dimensions. + shape_split = inner_axis - axis + 1 + if new_values.shape.is_fully_defined(): + old_shape = new_values.shape.as_list() + new_shape = [-1] + old_shape[shape_split:] + flat_size = _prod(old_shape[1:shape_split]) + else: + old_shape = array_ops.shape(new_values) + new_shape = array_ops.concat([[-1], old_shape[shape_split:]], axis=0) + flat_size = math_ops.cast( + math_ops.reduce_prod(old_shape[1:shape_split]), new_splits.dtype) + new_values = array_ops.reshape(new_values, new_shape) + new_splits = new_splits * flat_size + break + return RaggedTensor.from_row_splits(new_values, new_splits) + + +def _prod(lst): + """Returns the product of the numbers in a list.""" + return functools.reduce(operator.mul, lst, 1) + + +def _get_row_partition_type_tensor_pairs_tail(partition): + """Gets a row partition type tensor pair for the tail. + + If value_rowids is defined, then it is used. Otherwise, row_splits + are used. + + Args: + partition: a RowPartition. + + Returns: + A list of (row_partition_type, row_partition_tensor) pairs. + """ + if partition._has_precomputed_value_rowids(): # pylint: disable=protected-access + return ("VALUE_ROWIDS", partition.value_rowids()) + else: + return ("ROW_SPLITS", partition.row_splits()) + + +def _get_row_partition_type_tensor_pairs(rt_input): + """Gets a list of the row partitions for rt_input. + + If value_rowids are defined, then they are used. Otherwise, row_splits + are used. If the outermost level has value_rowids defined, then nrows is + also added. + + Args: + rt_input: a ragged tensor. + + Returns: + A list of (row_partition_type, row_partition_tensor) pairs. + """ + partitions = rt_input._nested_row_partitions # pylint: disable=protected-access + tail = [_get_row_partition_type_tensor_pairs_tail(x) for x in partitions[1:]] + + if partitions[0]._value_rowids is not None: # pylint: disable=protected-access + return [("FIRST_DIM_SIZE", partitions[0].nrows()), + ("VALUE_ROWIDS", partitions[0].value_rowids())] + tail + else: + return [("ROW_SPLITS", partitions[0].row_splits())] + tail + + +def _shape_as_tensor(shape, dtype): + """Takes a shape and coerces it to a shape tensor. + + If the object is already a tensor, simply passes it on (result is guaranteed + to be int64 or int32, but not necessarily dtype). + If not, creates a tensor of type dtype. + + The result is a scalar equal to -1 if the shape has unknown rank; otherwise + it is a vector, where unknown dimensions are represented with a value of -1. + + In C++, see TensorShapeFromTensor for parsing shapes in kernels, and + InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape, for + use in the shape inference function. + + Args: + shape: input to coerce from TensorShape, Tensor, None, List[Optional[Int]], + Tuple[Optional[Int]]. + dtype: tf.int64 or tf.int32 + + Returns: + a scalar or vector tensor of dtype tf.int32 or tf.int64.
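+ For example (illustrative): `_shape_as_tensor([2, None, 3], dtypes.int64)`
+ evaluates to the vector `[2, -1, 3]`, while an unknown-rank
+ `TensorShape(None)` yields the scalar `-1`.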
+ """ + if dtype != dtypes.int64 and dtype != dtypes.int32: + raise ValueError(f"Expected int64 or int32 for dtype: got {dtype}.") + + if isinstance(shape, tensor_lib.Tensor): + if shape.dtype != dtypes.int64 and shape.dtype != dtypes.int32: + return math_ops.cast(shape, dtype) + return shape + shape = tensor_shape.as_shape(shape) + if not shape: + # Imply rank is unknown using a -1 scalar. + return constant_op.constant(-1, dtype=dtype) + shape = [(-1 if x is None else x) for x in shape.as_list()] + # At this point, shape is List[Int]. + return constant_op.constant(shape, dtype=dtype) + + +def _nvals_uniform_row_length(values, uniform_row_length): + """Get the number of values for uniform row length constructor.""" + const_nvals = tensor_shape.dimension_at_index(values.shape, 0).value + if const_nvals is not None: + nvals = constant_op.constant(const_nvals, uniform_row_length.dtype) + elif isinstance(values, RaggedTensor): + nvals = values.nrows(out_type=uniform_row_length.dtype) + else: + nvals = array_ops.shape(values, out_type=uniform_row_length.dtype)[0] + return nvals + + +def _get_optional_partition_dtype(values): + """Returns the partition dtype, or None if None exists.""" + if isinstance(values, RaggedTensor): + # pylint: disable=protected-access + return values._row_partition.dtype + return None + + +_SUPPORTED_RAGGED_VALUE_TYPES = (tensor_lib.Tensor, RaggedTensor) + + +# TODO(edloper): Consider whether we should change the registry to be on +# TypeSpecs rather than ValueTypes. +def _add_supported_value_type(cls): + """Register the `cls` as supported value type of RaggedTenosr. + + The cls must be a subclass of CompositeTensor, and must support: + - Spec: + The Spec must be a `BatchableTypeSpec` + - Properties: + - x.shape + - x.dtype + - Methods: + - x.__getitem__(idx) (method: returns a supported value type) + - x.set_shape(shape) + - Ops: + - tf.shape(x) -- tf.shape(x)[0] must be a tf.Tensor. + - tf.tile(x) + - assert_rank_at_least(x) + - tf.ones_like(x) + - tf.gather(params=x, indices=Tensor) + - tf.add(x, y) + - tf.boolean_mask(x, ...) + - @TODO(edloper): Complete this list + + Note: the following RaggedTensor, RaggedTensorSpec methods & ops are not + currently supported unless `rt.values` is a RaggedTensor or a tf.Tensor: + - rt.to_tensor() + - rt.to_sparse_tensor() + - rt._to_variant() + - rt._from_variant() + - tf.ragged.cross([rt]) + - tf.gather(params=x, indices=rt) # rt used for indices + - RaggedTensorSpec methods: + - _batch + - _unbatch + - _to_tensor_list + - _to_batched_tensor_list + - _from_compatible_tensor_list + + Args: + cls: The type to be added to supported value types. 
+ """ + if not issubclass(cls, composite_tensor.CompositeTensor): + raise ValueError(f"cls ({cls}) must be a subclass of CompositeTensor.") + if not hasattr(cls, "shape"): + raise ValueError("cls must support the `shape` property.") + if not hasattr(cls, "dtype"): + raise ValueError("cls must support the `dtype` property.") + global _SUPPORTED_RAGGED_VALUE_TYPES + _SUPPORTED_RAGGED_VALUE_TYPES += (cls,) + + +def _is_supported_ragged_values_type(value): + return isinstance(value, _SUPPORTED_RAGGED_VALUE_TYPES) + + +def _assert_is_supported_ragged_values_type(value): + if not _is_supported_ragged_values_type(value): + ok_types = ", ".join(cls.__name__ for cls in _SUPPORTED_RAGGED_VALUE_TYPES) + raise TypeError(f"type(values) must be one of: {ok_types}, got {value}.") + + +def _formatter(x): + """Separate Numpy array elements with comma.""" + if isinstance(x, np.ndarray): + if x.size != 0: + return np.array2string(x, separator=", ") + else: + # When x.size==0, np.array2string always returns `[]`. This isn't always + # what we want. E.g., if `x.shape=[0, 3]`, then we want `[[], [], []]`. + return repr(x.tolist()) + else: + return str(x) + +# Type annotation indicating that a value is ragged. Includes RaggedTensor +# as well as the (deprecated) RaggedTensorValue class from TF 1.x. +Ragged = typing.Union[RaggedTensor, ragged_tensor_value.RaggedTensorValue] + +# Type annotation indicating that a value is a ragged tensor, a dense tensor, +# or a value that can be converted to a tensor (e.g. np.array). +# TODO(edloper): Add Variable to TensorLike, and remove it from here. +RaggedOrDense = typing.Union[Ragged, core_types.TensorLike] + +# RaggedTensor must import ragged_ops to ensure that all dispatched ragged ops +# are registered. Ragged ops import RaggedTensor, so import at bottom of the +# file to avoid a partially-initialized module error. +from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import, g-bad-import-order, g-import-not-at-top diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_shape.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_shape.py new file mode 100644 index 0000000000000000000000000000000000000000..8fb0c56e8ed6447fff091fc7ff2a1dc81d14c01c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_shape.py @@ -0,0 +1,628 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Shapes & broadcasting for RaggedTensors.""" + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_array_ops +from tensorflow.python.ops.ragged import ragged_config +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import ragged_util + + +class RaggedTensorDynamicShape: + """A collection of tensors encoding the shape of a potentially ragged tensor. + + Each `RaggedTensorDynamicShape` consists of an ordered list of dimension + sizes. There are two dimension types: + + * "Uniform dimensions" are dimensions where all slices have the same + length. `RaggedTensorDynamicShape` records the size of each uniform + dimension using a single scalar integer. + + * "Ragged dimensions" are dimensions whose slices may have different + lengths. `RaggedTensorDynamicShape` records the size of each ragged + dimension using an integer vector containing the slice lengths for all + the slices across that dimension. + + Furthermore, there are two ways a dimension might be encoded: + + * "Partitioned dimensions" are dimensions that are encoded using a + `RaggedTensor`'s `nested_row_splits`. The outermost partitioned + dimension must be uniform, and the innermost partitioned dimension must + be ragged. + + * "Inner dimensions" are dimensions that are encoded using a + `RaggedTensor`'s `flat_values`. Inner dimensions are always uniform. + + The sizes of partitioned dimensions are recorded using `partitioned_dim_sizes` + and `inner_dim_sizes`: + + * `partitioned_dim_sizes` is a list of tensors (one for each partitioned + dimension). + + * For uniform dimensions, the tensor is an integer scalar specifying the + size of all slices across that dimension. + * For ragged dimensions, the tensor is an integer vector specifying the + size of each slice across that dimension. + + * `inner_dim_sizes` is a single integer vector, where each element + specifies the size of a single inner dimension. + + Examples: + + Tensor | Ragged | Partitioned Dim Sizes | Inner Dim + : Rank : : Sizes + ------------------------------ | ------ | ---------------------- | ---------- + `[[1, 2, 3], [4, 5, 6]]` | 0 | | `2, 3` + `[[1, 2], [], [3, 4, 5]]` | 1 | `3, (2, 0, 3)` | + `[[[1, 2], [3, 4]], [[5, 6]]]` | 1 | `2, (2, 1)` | 2 + `[[[1, 2], [3]], [[4, 5]]]` | 2 | `2, (2, 1), (2, 1, 2)` | + """ + + def __init__(self, partitioned_dim_sizes, inner_dim_sizes, + dim_size_dtype=None): + """Creates a RaggedTensorDynamicShape. + + Args: + partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for + each partitioned dimension. If dimension `d` is uniform, then + `partitioned_dim_sizes[d]` must be an integer scalar, specifying the + size of all slices across dimension `d`. If dimension `d` is ragged, + then `partitioned_dim_sizes[d]` must be an integer vector, specifying + the size of each slice across dimension `d`. + inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the + number of inner dimensions.
`inner_dim_sizes[n]` is the size of all + slices across the `n`th inner dimension (which is the + `(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor. + dim_size_dtype: dtype for dimension sizes. If not specified, then it + is chosen based on the dtypes of `partitioned_dim_sizes` and + `inner_dim_sizes`. + """ + assert isinstance(partitioned_dim_sizes, (list, tuple)) + + with ops.name_scope(None, 'RaggedTensorDynamicShape', + (partitioned_dim_sizes, inner_dim_sizes)): + partitioned_dim_sizes = tuple( + ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i) + for (i, size) in enumerate(partitioned_dim_sizes)) + inner_dim_sizes = ops.convert_to_tensor( + inner_dim_sizes, name='inner_dim_sizes') + + # Validate shapes. + if partitioned_dim_sizes: + for axis, dimension_size in enumerate(partitioned_dim_sizes): + if dimension_size.shape.ndims is None: + raise ValueError( + 'rank of partitioned_dim_sizes[%d] is unknown' % axis) + dimension_size.shape.with_rank_at_most(1) + if partitioned_dim_sizes[0].shape.ndims == 1: + raise ValueError('outermost partitioned dimension must be uniform') + if partitioned_dim_sizes[-1].shape.ndims == 0: + raise ValueError('innermost partitioned dimension must be ragged') + inner_dim_sizes.shape.assert_has_rank(1) + + # Convert dimension size tensors to a single dtype. + if dim_size_dtype is None: + dim_size_dtypes = set( + p.dtype for p in partitioned_dim_sizes if p.shape.ndims == 1) + if not dim_size_dtypes: + dim_size_dtype = dtypes.int64 + elif len(dim_size_dtypes) == 1: + dim_size_dtype = dim_size_dtypes.pop() + else: + if not ragged_config.auto_cast_partition_dtype(): + raise ValueError('partitioned_dim_sizes must have matching dtypes') + dim_size_dtype = dtypes.int64 + partitioned_dim_sizes = tuple(math_ops.cast(p, dim_size_dtype) + for p in partitioned_dim_sizes) + inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype) + + self._partitioned_dim_sizes = partitioned_dim_sizes + self._inner_dim_sizes = inner_dim_sizes + + def __repr__(self): + return ('RaggedTensorDynamicShape' + '(partitioned_dim_sizes=%r, inner_dim_sizes=%r)' % + (self._partitioned_dim_sizes, self._inner_dim_sizes)) + + @staticmethod + def from_dim_sizes(dim_sizes): + """Constructs a ragged shape from a list of dimension sizes. + + This list contains a single tensor for each dimension, where the tensor + is a scalar if the dimension is uniform, or a vector if the dimension is + ragged. + + Args: + dim_sizes: List of int32 or int64 scalars or vectors. + + Returns: + A RaggedTensorDynamicShape. + """ + with ops.name_scope(None, 'RaggedTensorDynamicShapeFromDimensionSizes', + [dim_sizes]): + dim_sizes = tuple( + ops.convert_to_tensor(size, preferred_dtype=dtypes.int64, + name='dim_sizes') for size in dim_sizes) + # Split the dimensions into partitioned & inner dimensions. 
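+ # E.g. (illustrative): dim_sizes=[2, (2, 0, 3), 4] splits after the last
+ # vector-valued entry, giving partitioned dims `[2, (2, 0, 3)]` and inner
+ # dims `[4]`.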
+ inner_split = 0 + for dim, dim_size in enumerate(dim_sizes): + if dim_size.shape.ndims == 1: + inner_split = dim + 1 + elif dim_size.shape.ndims != 0: + raise ValueError('Each dim_size must be a scalar or a vector') + return RaggedTensorDynamicShape(dim_sizes[:inner_split], + dim_sizes[inner_split:]) + + @classmethod + def from_tensor(cls, rt_input, dim_size_dtype=None): + """Constructs a ragged shape for a potentially ragged tensor.""" + with ops.name_scope(None, 'RaggedTensorDynamicShapeFromTensor', [rt_input]): + rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input) + if not ragged_tensor.is_ragged(rt_input): + return cls([], array_ops.shape(rt_input), dim_size_dtype=dim_size_dtype) + else: + partitioned_dim_sizes = ( + (rt_input.nrows(),) + rt_input.nested_row_lengths()) + return RaggedTensorDynamicShape( + partitioned_dim_sizes, + array_ops.shape(rt_input.flat_values)[1:], + dim_size_dtype=dim_size_dtype) + + def dimension_size(self, axis): + """Returns the size of slices across the specified dimension.""" + if not isinstance(axis, int): + raise TypeError('axis must be an integer') + partitioned_ndims = len(self._partitioned_dim_sizes) + if axis < partitioned_ndims: + return self._partitioned_dim_sizes[axis] + else: + return self._inner_dim_sizes[axis - partitioned_ndims] + + def is_ragged(self, axis): + """Returns true if the indicated dimension is ragged.""" + if not isinstance(axis, int): + raise TypeError('axis must be an integer') + rank = self.rank + if axis < 0: + raise ValueError('Negative axis values are not supported') + elif rank is not None and axis >= rank: + raise ValueError('Expected axis=%s < rank=%s' % (axis, rank)) + else: + return (axis > 0 and axis < len(self._partitioned_dim_sizes) and + self._partitioned_dim_sizes[axis].shape.ndims == 1) + + @property + def rank(self): + """The number of dimensions in this shape, or None if unknown.""" + inner_ndims = tensor_shape.dimension_value(self._inner_dim_sizes.shape[0]) + if inner_ndims is None: + return None + else: + return len(self._partitioned_dim_sizes) + inner_ndims + + @property + def partitioned_dim_sizes(self): + """The partitioned dimension sizes for this shape. + + Returns: + A `list` of 0-D or 1-D integer `Tensor`. + """ + return self._partitioned_dim_sizes + + @property + def inner_dim_sizes(self): + """The inner dimension sizes for this shape. + + Returns: + A 1-D integer `Tensor`. + """ + return self._inner_dim_sizes + + @property + def num_partitioned_dimensions(self): + """The number of partitioned dimensions in this shape.""" + return len(self._partitioned_dim_sizes) + + @property + def num_inner_dimensions(self): + """The number of inner dimensions, or `None` if not statically known.""" + return tensor_shape.dimension_value(self._inner_dim_sizes.shape[0]) + + @property + def dim_size_dtype(self): + """DType used by this shape for dimension sizes.""" + return self._inner_dim_sizes.dtype + + def broadcast_to_rank(self, rank): + """Adds leading size-1 dimensions to broadcast `self` to the given rank. + + E.g., if `shape1` is `[3, (D2), 4]`, then `shape1.broadcast_to_rank(5)` + is `[1, 1, 3, (D2), 4]`. + + Args: + rank: The rank for the returned shape. + + Returns: + A RaggedTensorDynamicShape with `rank` dimensions, whose inner dimensions + have the same size as `self` and whose outer dimensions have size `1`. + + Raises: + ValueError: If `self.rank` is unknown or greater than `rank`. 
+ """ + if self.rank is None: + raise ValueError('Unable to broadcast: self.rank is unknown') + dims_to_add = rank - self.rank + if dims_to_add < 0: + raise ValueError('Unable to broadcast: rank=%d must be greater than ' + 'self.rank=%d.' % (rank, self.rank)) + elif dims_to_add == 0: + return self + elif self._partitioned_dim_sizes: + partitioned_dims = (1,) * dims_to_add + self._partitioned_dim_sizes + return RaggedTensorDynamicShape(partitioned_dims, self.inner_dim_sizes, + self.dim_size_dtype) + else: + inner_dims = array_ops.concat( + [array_ops.ones([dims_to_add], self.dim_size_dtype), + self.inner_dim_sizes], + axis=0) + return RaggedTensorDynamicShape([], inner_dims, self.dim_size_dtype) + + def broadcast_dimension(self, axis, lengths): + """Returns a shape that is broadcast-compatible with self & lengths. + + * If dimension[axis] is uniform and lengths is a scalar, the check + that either lengths==1 or axis==1 or lengths==axis, and tile + dimension[axis] with tf.where(lengths==axis, 1, axis) repeats. + + * If dimension[axis] is uniform and lengths is a vector, then check + that dimension[axis]==1, and raggedly tile dimension[axis] with + lengths repeats. (we can skip tiling if we statically know that + slice_lengths == 1??) + + * If dimension[axis] is ragged and lengths is a scalar, then check + that lengths==1. + + * If dimension[axis] is ragged and lengths is a vector, then check + that self.dimension_size(axis) == lengths. + + Args: + axis: `int`. The dimension to broadcast. + lengths: 0-D or 1-D integer `Tensor`. + + Returns: + A `RaggedTensorDynamicShape`. + """ + lengths = ragged_util.convert_to_int_tensor( + lengths, name='lengths', dtype=self.dim_size_dtype) + # Check whether lengths is a scalar (for uniform dimensions) or + # vector (for ragged dimensions). + if lengths.shape.ndims is None: + raise ValueError('lengths must have a known rank.') + elif lengths.shape.ndims > 1: + raise ValueError('lengths must be a scalar or vector') + else: + lengths_is_scalar = (lengths.shape.ndims == 0) + + # Verify that the shapes are compatible. + if self.is_ragged(axis): + if lengths_is_scalar: + condition = math_ops.equal(lengths, 1) + else: + condition = math_ops.reduce_all( + math_ops.equal(lengths, self.dimension_size(axis))) + else: + axis_dim_size = self.dimension_size(axis) + if lengths_is_scalar: + condition = ( + math_ops.equal(lengths, 1) | math_ops.equal(axis_dim_size, 1) + | math_ops.equal(axis_dim_size, lengths)) + else: + condition = math_ops.equal(axis_dim_size, 1) + broadcast_err = [ + 'Unable to broadcast: dimension size mismatch in dimension', axis, + 'lengths=', lengths, 'dim_size=', + self.dimension_size(axis) + ] + broadcast_check = control_flow_assert.Assert( + condition, data=broadcast_err, summarize=10) + + with ops.control_dependencies([broadcast_check]): + # Partitioned dimensions: + if axis < self.num_partitioned_dimensions: + if self.is_ragged(axis): + # Use an identity op to make sure the check actually gets run. 
+ return RaggedTensorDynamicShape( + self._partitioned_dim_sizes, + array_ops.identity(self.inner_dim_sizes), self.dim_size_dtype) + else: + return self._broadcast_uniform_partitioned_dimension(axis, lengths) + + # Inner dimensions: + else: + if lengths_is_scalar: + return self._broadcast_inner_dimension_to_uniform(axis, lengths) + else: + if axis == 0: + raise ValueError('Unable to broadcast: ' + 'outermost dimension must be uniform.') + return self._broadcast_inner_dimension_to_ragged(axis, lengths) + + def num_slices_in_dimension(self, axis): + """Returns the total number of slices across the indicated dimension.""" + if axis < 0: + return constant_op.constant(1, dtype=self.dim_size_dtype) + elif self.is_ragged(axis): + return math_ops.reduce_sum(self._partitioned_dim_sizes[axis]) + else: + return self.dimension_size(axis) * self.num_slices_in_dimension(axis - 1) + + def _broadcast_uniform_partitioned_dimension(self, axis, lengths): + """Broadcasts the partitioned dimension `axis` to match `lengths`.""" + axis_dim_size = self.dimension_size(axis) + partitioned_sizes = list(self._partitioned_dim_sizes[:axis]) + + if lengths.shape.ndims == 0: + lengths = array_ops.where( + math_ops.equal(axis_dim_size, 1), lengths, axis_dim_size) + repeats = array_ops.where(math_ops.equal(axis_dim_size, 1), lengths, 1) + splits = array_ops_stack.stack([0, self.num_slices_in_dimension(axis)]) + else: + splits = math_ops.range( + array_ops.size(lengths, out_type=self.dim_size_dtype) + 1) + repeats = lengths + + partitioned_sizes.append(lengths) + + for dim_size in self._partitioned_dim_sizes[axis + 1:]: + if dim_size.shape.ndims == 0: + partitioned_sizes.append(dim_size) + splits *= dim_size + else: + partitioned_sizes.append( + ragged_util.repeat_ranges(dim_size, splits, repeats)) + splits = array_ops.gather( + ragged_util.lengths_to_splits(dim_size), splits) + inner_sizes = self._inner_dim_sizes + return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes, + self.dim_size_dtype) + + def _broadcast_inner_dimension_to_uniform(self, axis, length): + """Broadcasts the inner dimension `axis` to match `lengths`.""" + dim_size = self.dimension_size(axis) + axis_in_inner_dims = axis - self.num_partitioned_dimensions + partitioned_sizes = self._partitioned_dim_sizes + inner_sizes = array_ops.concat([ + self._inner_dim_sizes[:axis_in_inner_dims], + [array_ops.where(math_ops.equal(dim_size, 1), length, dim_size)], + self._inner_dim_sizes[axis_in_inner_dims + 1:] + ], + axis=0) + return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes, + self.dim_size_dtype) + + def _broadcast_inner_dimension_to_ragged(self, axis, lengths): + axis_in_inner_dims = axis - self.num_partitioned_dimensions + partitioned_sizes = ( + self._partitioned_dim_sizes + tuple([ + self._inner_dim_sizes[i] for i in range(axis_in_inner_dims) + ]) + (lengths,)) + inner_sizes = self._inner_dim_sizes[axis_in_inner_dims + 1:] + return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes) + + def with_dim_size_dtype(self, dtype): + if dtype not in (dtypes.int32, dtypes.int64): + raise ValueError('dtype must be int32 or int64') + if self.dim_size_dtype == dtype: + return self + return RaggedTensorDynamicShape( + [math_ops.cast(p, dtype) for p in self._partitioned_dim_sizes], + math_ops.cast(self._inner_dim_sizes, dtype)) + + +def broadcast_dynamic_shape(shape_x, shape_y): + """Returns the shape formed by broadcasting two shapes to be compatible. 
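+
+ For example (illustrative): broadcasting the ragged shape `[2, (D1)]`
+ against `[1]` yields `[2, (D1)]`, just as broadcasting `[2, 3]` against
+ `[1]` yields `[2, 3]` for uniform shapes.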
+ + Args: + shape_x: A `RaggedTensorDynamicShape` + shape_y: A `RaggedTensorDynamicShape` + + Returns: + A `RaggedTensorDynamicShape`. + Raises: + ValueError: If `shape_x` and `shape_y` are not broadcast-compatible. + """ + if not isinstance(shape_x, RaggedTensorDynamicShape): + raise TypeError('shape_x must be a RaggedTensorDynamicShape') + if not isinstance(shape_y, RaggedTensorDynamicShape): + raise TypeError('shape_y must be a RaggedTensorDynamicShape') + + # Broadcast both shapes to have the same rank. + if shape_x.rank is None or shape_y.rank is None: + raise ValueError('Unable to broadcast: unknown rank') + broadcast_rank = max(shape_x.rank, shape_y.rank) + shape_x = shape_x.broadcast_to_rank(broadcast_rank) + shape_y = shape_y.broadcast_to_rank(broadcast_rank) + + # Broadcast dimensions one at a time, starting from the outermost dimension. + for axis in range(broadcast_rank): + shape_x = shape_x.broadcast_dimension(axis, shape_y.dimension_size(axis)) + shape_y = shape_y.broadcast_dimension(axis, shape_x.dimension_size(axis)) + + return shape_x + + +def broadcast_to(rt_input, shape, broadcast_inner_dimensions=True): + """Broadcasts a potentially ragged tensor to a ragged shape. + + Tiles `rt_input` as necessary to match the given shape. + + Behavior is undefined if `rt_input` is not broadcast-compatible with `shape`. + + Args: + rt_input: The potentially ragged tensor to broadcast. + shape: A `RaggedTensorDynamicShape` + broadcast_inner_dimensions: If false, then inner dimensions will not be + tiled. + + Returns: + A potentially ragged tensor whose values are taken from + `rt_input`, and whose shape matches `shape`. + """ + if not isinstance(shape, RaggedTensorDynamicShape): + raise TypeError('shape must be a RaggedTensorDynamicShape') + rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input) + + # Broadcasting to a uniform shape. + if shape.num_partitioned_dimensions == 0: + return _broadcast_to_uniform_shape(rt_input, shape, + broadcast_inner_dimensions) + else: + return _broadcast_to_ragged_shape(rt_input, shape, + broadcast_inner_dimensions) + + +def _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions): + """Broadcasts rt_input to the uniform shape `shape`.""" + if isinstance(rt_input, ragged_tensor.RaggedTensor): + raise ValueError('Incompatible with shape: ragged rank mismatch') + if broadcast_inner_dimensions: + return array_ops.broadcast_to(rt_input, shape.inner_dim_sizes) + else: + return rt_input + + +def _broadcast_to_ragged_shape(rt_input, dst_shape, broadcast_inner_dimensions): + """Broadcasts rt_input to the ragged shape `dst_shape`.""" + # Check that rt_input and dst_shape have the same row_splits dtype. 
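+ # (Mixed int32/int64 partition dtypes are only tolerated when
+ # ragged_config.auto_cast_partition_dtype() is enabled; in that case both
+ # sides are upcast to int64 below.)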
+ if (isinstance(rt_input, ragged_tensor.RaggedTensor) and + rt_input.row_splits.dtype != dst_shape.dim_size_dtype): + if not ragged_config.auto_cast_partition_dtype(): + raise ValueError('rt_input and dst_shape have different row_split ' + 'dtypes; use RaggedTensor.with_row_splits_dtype() or ' + 'RaggedTensorDynamicShape.with_dim_size_dtype() to ' + 'convert to a compatible dtype.') + rt_input = rt_input.with_row_splits_dtype(dtypes.int64) + dst_shape = dst_shape.with_dim_size_dtype(dtypes.int64) + + # dst_shape's rank and ragged_rank must be greater than or equal to rt_input's + if rt_input.shape.ndims is None or dst_shape.rank is None: + raise ValueError('Unable to broadcast: unknown rank') + if rt_input.shape.ndims > dst_shape.rank: + raise ValueError('Incompatible with shape: rank mismatch') + if (isinstance(rt_input, ragged_tensor.RaggedTensor) and + rt_input.ragged_rank >= dst_shape.num_partitioned_dimensions): + raise ValueError('Incompatible with shape: ragged rank mismatch') + + src_shape = RaggedTensorDynamicShape.from_tensor(rt_input) + src_shape = src_shape.broadcast_to_rank(dst_shape.rank) + + # Add dimensions to rt_input so its rank and ragged_rank matches dst_shape. + if dst_shape.rank > rt_input.shape.ndims: + if rt_input.shape.ndims < dst_shape.num_inner_dimensions + 1: + rt_input = array_ops.reshape( + rt_input, array_ops.concat([[-1], dst_shape.inner_dim_sizes], axis=0)) + for _ in range(dst_shape.rank - rt_input.shape.ndims): + if ragged_tensor.is_ragged(rt_input): + nrows = rt_input.nrows() + else: + nrows = array_ops.shape(rt_input, + out_type=dst_shape.dim_size_dtype)[0] + rt_input = ragged_tensor.RaggedTensor.from_row_lengths(rt_input, [nrows], + validate=False) + + # Add ragged dimensions to match dst_shape. + if ragged_tensor.is_ragged(rt_input): + inner_rank_diff = ( + rt_input.flat_values.shape.ndims - 1 - dst_shape.num_inner_dimensions) + if inner_rank_diff > 0: + rt_input = rt_input.with_flat_values( + ragged_tensor.RaggedTensor.from_tensor( + rt_input.flat_values, ragged_rank=inner_rank_diff, + row_splits_dtype=dst_shape.dim_size_dtype)) + else: + rt_input = ragged_tensor.RaggedTensor.from_tensor( + rt_input, ragged_rank=dst_shape.num_partitioned_dimensions - 1, + row_splits_dtype=dst_shape.dim_size_dtype) + + # Do broadcasting for any dimensions that will remain uniform. We can do + # these all at once, since they're independent of one another. + multiples = [1] * dst_shape.rank + for axis in range(dst_shape.num_partitioned_dimensions): + if not src_shape.is_ragged(axis) and not dst_shape.is_ragged(axis): + src_size = src_shape.dimension_size(axis) + dst_size = dst_shape.dimension_size(axis) + if ((tensor_util.constant_value(src_size) in (1, None)) and + (tensor_util.constant_value(dst_size) != 1)): + multiples[axis] = array_ops.where( + math_ops.equal(src_size, 1), dst_size, 1) + if not all(isinstance(v, int) and v == 1 for v in multiples): + multiples = array_ops_stack.stack(multiples, axis=0) + rt_input = ragged_array_ops.tile(rt_input, multiples) + + if broadcast_inner_dimensions: + new_shape = array_ops.broadcast_dynamic_shape( + array_ops.shape( + rt_input.flat_values, out_type=dst_shape.dim_size_dtype), + array_ops.concat([[1], dst_shape.inner_dim_sizes], axis=0)) + rt_input = rt_input.with_flat_values( + array_ops.broadcast_to(rt_input.flat_values, new_shape)) + + # Do broadcasting for dimensions that become ragged. We must do these from + # outermost to innermost. 
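+ # (Tiling an outer dimension changes how many rows each inner dimension
+ # sees, so the repeats for an inner ragged dimension can only be computed
+ # once the outer partitions are final.)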
+ for axis in range(dst_shape.num_partitioned_dimensions): + if not src_shape.is_ragged(axis) and dst_shape.is_ragged(axis): + dst_size = dst_shape.dimension_size(axis) + rt_input = _ragged_tile_axis(rt_input, axis, dst_size, + dst_shape.dim_size_dtype) + + return rt_input + + +def _ragged_tile_axis(rt_input, axis, repeats, row_splits_dtype): + """Tile a dimension of a RaggedTensor to match a ragged shape.""" + assert axis > 0 # Outermost dimension may not be ragged. + + if not ragged_tensor.is_ragged(rt_input): + rt_input = ragged_tensor.RaggedTensor.from_tensor( + rt_input, ragged_rank=1, row_splits_dtype=row_splits_dtype) + + if axis > 1: + return rt_input.with_values( + _ragged_tile_axis(rt_input.values, axis - 1, repeats, + row_splits_dtype)) + else: + src_row_splits = rt_input.nested_row_splits + src_row_lengths = rt_input.nested_row_lengths() + splits = src_row_splits[0] + + dst_row_lengths = [repeats] + for i in range(1, len(src_row_lengths)): + dst_row_lengths.append( + ragged_util.repeat_ranges(src_row_lengths[i], splits, repeats)) + splits = array_ops.gather(src_row_splits[i], splits) + dst_values = ragged_util.repeat_ranges(rt_input.flat_values, splits, + repeats) + return ragged_tensor.RaggedTensor.from_nested_row_lengths( + dst_values, dst_row_lengths, validate=False) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_test_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_test_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..05c978e1deccd2fdfaad9c371bb3a6a3eca75229 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_test_ops.py @@ -0,0 +1,185 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Lists of RaggedTensor ops, for use in tests.""" + +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import gen_bitwise_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn_impl +from tensorflow.python.ops import nn_ops +from tensorflow.python.ops import parsing_ops +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops import string_ops + + +# Constants listing various op types to test. Each operation +# should be included in at least one list below, or tested separately if +# necessary (e.g., because it expects additional arguments).
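+# A typical (hypothetical) consumer is a parameterized test, e.g.:
+#
+#   @parameterized.parameters(UNARY_FLOAT_OPS)
+#   def testUnaryElementwiseOp(self, op):
+#     rt = ragged_factory_ops.constant([[1.0, 2.0], [3.0]])
+#     self.assertAllEqual(op(rt).row_splits, rt.row_splits)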
+UNARY_FLOAT_OPS = [ + math_ops.abs, + math_ops.acos, + math_ops.acosh, + math_ops.angle, + math_ops.asin, + math_ops.asinh, + math_ops.atan, + math_ops.atanh, + math_ops.ceil, + math_ops.conj, + math_ops.cos, + math_ops.cosh, + math_ops.digamma, + math_ops.erf, + math_ops.erfc, + math_ops.erfcinv, + math_ops.erfinv, + math_ops.exp, + math_ops.expm1, + math_ops.floor, + math_ops.imag, + math_ops.is_finite, + math_ops.is_inf, + math_ops.is_nan, + math_ops.lgamma, + math_ops.log, + math_ops.log1p, + math_ops.log_sigmoid, + math_ops.ndtri, + math_ops.negative, + math_ops.real, + math_ops.reciprocal, + math_ops.reciprocal_no_nan, + math_ops.rint, + math_ops.round, + math_ops.rsqrt, + math_ops.sign, + math_ops.sigmoid, + math_ops.sin, + math_ops.sinh, + math_ops.softplus, + math_ops.sqrt, + math_ops.square, + math_ops.tan, + math_ops.tanh, + nn_ops.elu, + nn_ops.gelu, + nn_ops.leaky_relu, + nn_ops.log_softmax, + nn_ops.relu, + nn_ops.relu6, + nn_ops.selu, + nn_ops.softsign, + nn_impl.swish, + array_ops.ones_like, + array_ops.ones_like_v2, + array_ops.zeros_like, + array_ops.zeros_like_v2, + special_math_ops.bessel_i0, + special_math_ops.bessel_i0e, + special_math_ops.bessel_i1, + special_math_ops.bessel_j0, + special_math_ops.bessel_j1, + special_math_ops.bessel_i1e, + special_math_ops.bessel_k0, + special_math_ops.bessel_k0e, + special_math_ops.bessel_k1, + special_math_ops.bessel_k1e, + special_math_ops.bessel_y0, + special_math_ops.bessel_y1, + special_math_ops.dawsn, + special_math_ops.expint, + special_math_ops.fresnel_cos, + special_math_ops.fresnel_sin, + special_math_ops.spence, + string_ops.as_string, +] +UNARY_BOOL_OPS = [ + math_ops.logical_not, +] +UNARY_STRING_OPS = [ + string_ops.decode_base64, + string_ops.encode_base64, + string_ops.string_strip, + string_ops.string_lower, + string_ops.string_upper, + string_ops.string_length, + string_ops.string_length_v2, + parsing_ops.decode_compressed, +] +BINARY_FLOAT_OPS = [ + math_ops.add, + math_ops.atan2, + math_ops.complex, + math_ops.div, + math_ops.div_no_nan, + math_ops.divide, + math_ops.equal, + math_ops.floor_div, + math_ops.floordiv, + math_ops.floormod, + math_ops.greater, + math_ops.greater_equal, + math_ops.less, + math_ops.less_equal, + math_ops.maximum, + math_ops.minimum, + math_ops.multiply, + math_ops.multiply_no_nan, + math_ops.not_equal, + math_ops.pow, + math_ops.realdiv, + math_ops.squared_difference, + math_ops.subtract, + math_ops.truediv, + math_ops.xdivy, + math_ops.xlog1py, + math_ops.xlogy, + math_ops.zeta, +] +BINARY_BOOL_OPS = [ + math_ops.logical_and, + math_ops.logical_or, + math_ops.logical_xor, +] +UNARY_INT_OPS = [ + gen_bitwise_ops.invert, + string_ops.unicode_script, +] +BINARY_INT_OPS = [ + gen_bitwise_ops.bitwise_and, + gen_bitwise_ops.bitwise_or, + gen_bitwise_ops.bitwise_xor, + gen_bitwise_ops.left_shift, + gen_bitwise_ops.right_shift, + math_ops.truncatediv, + math_ops.truncatemod, +] +BINARY_ASSERT_OPS = [ + check_ops.assert_equal, + check_ops.assert_equal_v2, + check_ops.assert_near, + check_ops.assert_near_v2, + check_ops.assert_none_equal, + check_ops.assert_none_equal_v2, + check_ops.assert_greater, + check_ops.assert_greater_v2, + check_ops.assert_greater_equal, + check_ops.assert_greater_equal_v2, + check_ops.assert_less, + check_ops.assert_less_v2, + check_ops.assert_less_equal, + check_ops.assert_less_equal_v2, +] diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_value.py 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_value.py new file mode 100644 index 0000000000000000000000000000000000000000..638dc5207ee632a47ea264252a954c7413f4ae21 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_value.py @@ -0,0 +1,114 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Value for RaggedTensor.""" + +import numpy as np + +from tensorflow.python.ops.ragged.row_partition import RowPartition +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["ragged.RaggedTensorValue"]) +@dispatch.register_dispatchable_type +class RaggedTensorValue: + """Represents the value of a `RaggedTensor`. + + Warning: `RaggedTensorValue` should only be used in graph mode; in + eager mode, the `tf.RaggedTensor` class contains its value directly. + + See `tf.RaggedTensor` for a description of ragged tensors. + """ + + def __init__(self, values, row_splits): + """Creates a `RaggedTensorValue`. + + Args: + values: A numpy array of any type and shape; or a RaggedTensorValue. + row_splits: A 1-D int32 or int64 numpy array. 
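+
+    #### Example:
+
+    >>> # A minimal sketch: four values split into two rows of two.
+    >>> import numpy as np
+    >>> rtv = RaggedTensorValue(np.array([1, 2, 3, 4]),
+    ...                         np.array([0, 2, 4], dtype=np.int64))
+    >>> rtv.to_list()
+    [[1, 2], [3, 4]]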
+ """ + if not (isinstance(row_splits, (np.ndarray, np.generic)) and + row_splits.dtype in (np.int64, np.int32) and row_splits.ndim == 1): + raise TypeError("row_splits must be a 1D int32 or int64 numpy array") + if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)): + raise TypeError("values must be a numpy array or a RaggedTensorValue") + if (isinstance(values, RaggedTensorValue) and + row_splits.dtype != values.row_splits.dtype): + raise ValueError("row_splits and values.row_splits must have " + "the same dtype") + self._values = values + self._row_splits = row_splits + + row_splits = property( + lambda self: self._row_splits, + doc="""The split indices for the ragged tensor value.""") + values = property( + lambda self: self._values, + doc="""The concatenated values for all rows in this tensor.""") + dtype = property( + lambda self: self._values.dtype, + doc="""The numpy dtype of values in this tensor.""") + + @property + def flat_values(self): + """The innermost `values` array for this ragged tensor value.""" + rt_values = self.values + while isinstance(rt_values, RaggedTensorValue): + rt_values = rt_values.values + return rt_values + + @property + def nested_row_splits(self): + """The row_splits for all ragged dimensions in this ragged tensor value.""" + rt_nested_splits = [self.row_splits] + rt_values = self.values + while isinstance(rt_values, RaggedTensorValue): + rt_nested_splits.append(rt_values.row_splits) + rt_values = rt_values.values + return tuple(rt_nested_splits) + + @property + def ragged_rank(self): + """The number of ragged dimensions in this ragged tensor value.""" + values_is_ragged = isinstance(self._values, RaggedTensorValue) + return self._values.ragged_rank + 1 if values_is_ragged else 1 + + @property + def shape(self): + """A tuple indicating the shape of this RaggedTensorValue.""" + return (self._row_splits.shape[0] - 1,) + (None,) + self._values.shape[1:] + + @property + def _nested_row_partitions(self): + """The row_partitions representing this shape.""" + return [RowPartition.from_row_splits(rs) for rs in self.nested_row_splits] + + def __str__(self): + return "" % self.to_list() + + def __repr__(self): + return "tf.RaggedTensorValue(values=%r, row_splits=%r)" % (self._values, + self._row_splits) + + def to_list(self): + """Returns this ragged tensor value as a nested Python list.""" + if isinstance(self._values, RaggedTensorValue): + values_as_list = self._values.to_list() + else: + values_as_list = self._values.tolist() + return [ + values_as_list[self._row_splits[i]:self._row_splits[i + 1]] + for i in range(len(self._row_splits) - 1) + ] diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_util.py new file mode 100644 index 0000000000000000000000000000000000000000..81b91d0214ec2c07d2b66ac03e4d38bf6e65ecdc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_util.py @@ -0,0 +1,138 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Private convenience functions for RaggedTensors. + +None of these methods are exposed in the main "ragged" package. +""" + +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import gen_ragged_math_ops +from tensorflow.python.ops import math_ops + + +def assert_splits_match(nested_splits_lists): + """Checks that the given splits lists are identical. + + Performs static tests to ensure that the given splits lists are identical, + and returns a list of control dependency op tensors that check that they are + fully identical. + + Args: + nested_splits_lists: A list of nested_splits_lists, where each split_list is + a list of `splits` tensors from a `RaggedTensor`, ordered from outermost + ragged dimension to innermost ragged dimension. + + Returns: + A list of control dependency op tensors. + Raises: + ValueError: If the splits are not identical. + """ + error_msg = "Inputs must have identical ragged splits" + for splits_list in nested_splits_lists: + if len(splits_list) != len(nested_splits_lists[0]): + raise ValueError(error_msg) + return [ + check_ops.assert_equal(s1, s2, message=error_msg) + for splits_list in nested_splits_lists[1:] + for (s1, s2) in zip(nested_splits_lists[0], splits_list) + ] + + +# Note: imported here to avoid circular dependency of array_ops. +get_positive_axis = array_ops.get_positive_axis +convert_to_int_tensor = array_ops.convert_to_int_tensor +repeat = array_ops.repeat_with_axis + + +def lengths_to_splits(lengths): + """Returns splits corresponding to the given lengths.""" + return array_ops.concat([[0], math_ops.cumsum(lengths)], axis=-1) + + +def repeat_ranges(params, splits, repeats): + """Repeats each range of `params` (as specified by `splits`) `repeats` times. + + Let the `i`th range of `params` be defined as + `params[splits[i]:splits[i + 1]]`. Then this function returns a tensor + containing range 0 repeated `repeats[0]` times, followed by range 1 repeated + `repeats[1]`, ..., followed by the last range repeated `repeats[-1]` times. + + Args: + params: The `Tensor` whose values should be repeated. + splits: A splits tensor indicating the ranges of `params` that should be + repeated. Elements should be non-negative integers. + repeats: The number of times each range should be repeated. Supports + broadcasting from a scalar value. Elements should be non-negative + integers. + + Returns: + A `Tensor` with the same rank and type as `params`. + + #### Example: + + >>> print(repeat_ranges( + ... params=tf.constant(['a', 'b', 'c']), + ... splits=tf.constant([0, 2, 3]), + ... 
repeats=tf.constant(3))) + tf.Tensor([b'a' b'b' b'a' b'b' b'a' b'b' b'c' b'c' b'c'], + shape=(9,), dtype=string) + """ + # Check if the input is valid + splits_checks = [ + check_ops.assert_non_negative( + splits, message="Input argument 'splits' must be non-negative" + ), + check_ops.assert_integer( + splits, + message=( + "Input argument 'splits' must be integer, but got" + f" {splits.dtype} instead" + ), + ), + ] + repeats_checks = [ + check_ops.assert_non_negative( + repeats, message="Input argument 'repeats' must be non-negative" + ), + check_ops.assert_integer( + repeats, + message=( + "Input argument 'repeats' must be integer, but got" + f" {repeats.dtype} instead" + ), + ), + ] + splits = control_flow_ops.with_dependencies(splits_checks, splits) + repeats = control_flow_ops.with_dependencies(repeats_checks, repeats) + + # Divide `splits` into starts and limits, and repeat them `repeats` times. + if repeats.shape.ndims != 0: + repeated_starts = repeat(splits[:-1], repeats, axis=0) + repeated_limits = repeat(splits[1:], repeats, axis=0) + else: + # Optimization: we can just call repeat once, and then slice the result. + repeated_splits = repeat(splits, repeats, axis=0) + n_splits = array_ops.shape(repeated_splits, out_type=repeats.dtype)[0] + repeated_starts = repeated_splits[:n_splits - repeats] + repeated_limits = repeated_splits[repeats:] + + # Get indices for each range from starts to limits, and use those to gather + # the values in the desired repetition pattern. + one = array_ops.ones((), repeated_starts.dtype) + offsets = gen_ragged_math_ops.ragged_range( + repeated_starts, repeated_limits, one) + return array_ops.gather(params, offsets.rt_dense_values) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_where_op.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_where_op.py new file mode 100644 index 0000000000000000000000000000000000000000..f638d51ea0b3a42d48bfd7d3a348632811853fd5 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_where_op.py @@ -0,0 +1,259 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""where operation for RaggedTensors."""
+
+import typing
+
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops.ragged import ragged_concat_ops
+from tensorflow.python.ops.ragged import ragged_functional_ops
+from tensorflow.python.ops.ragged import ragged_gather_ops
+from tensorflow.python.ops.ragged import ragged_tensor
+from tensorflow.python.ops.ragged import ragged_tensor_shape
+from tensorflow.python.util import dispatch
+
+
+@dispatch.dispatch_for_api(array_ops.where_v2)
+def where_v2(condition: ragged_tensor.RaggedOrDense,
+             x: typing.Optional[ragged_tensor.RaggedOrDense] = None,
+             y: typing.Optional[ragged_tensor.RaggedOrDense] = None,
+             name=None):
+  """Return the elements where `condition` is `True`.
+
+  : If both `x` and `y` are None: Retrieve indices of true elements.
+
+    Returns the coordinates of true elements of `condition`. The coordinates
+    are returned in a 2-D tensor with shape
+    `[num_true_values, dim_size(condition)]`, where `result[i]` is the
+    coordinates of the `i`th true value (in row-major order).
+
+  : If both `x` and `y` are non-`None`: Multiplex between `x` and `y`.
+
+    Choose an output shape from the shapes of `condition`, `x`, and `y` that
+    all three shapes are broadcastable to; and then use the broadcasted
+    `condition` tensor as a mask that chooses whether the corresponding element
+    in the output should be taken from `x` (if `condition` is true) or `y` (if
+    `condition` is false).
+
+  >>> # Example: retrieve indices of true elements
+  >>> tf.where(tf.ragged.constant([[True, False], [True]]))
+  <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
+  array([[0, 0],
+         [1, 0]])>
+
+  >>> # Example: multiplex between `x` and `y`
+  >>> tf.where(tf.ragged.constant([[True, False], [True, False, True]]),
+  ...          tf.ragged.constant([['A', 'B'], ['C', 'D', 'E']]),
+  ...          tf.ragged.constant([['a', 'b'], ['c', 'd', 'e']]))
+  <tf.RaggedTensor [[b'A', b'b'], [b'C', b'd', b'E']]>
+
+  Args:
+    condition: A potentially ragged tensor of type `bool`
+    x: A potentially ragged tensor (optional).
+    y: A potentially ragged tensor (optional). Must be specified if `x` is
+      specified. Must have the same rank and type as `x`.
+    name: A name of the operation (optional).
+
+  Returns:
+    : If both `x` and `y` are `None`:
+      A `Tensor` with shape `(num_true, rank(condition))`.
+    : Otherwise:
+      A potentially ragged tensor with the same type as `x` and `y`, and whose
+      shape is broadcast-compatible with `x`, `y`, and `condition`.
+
+  Raises:
+    ValueError: When exactly one of `x` or `y` is non-`None`; or when
+      `condition`, `x`, and `y` have incompatible shapes.
+ """ + if (x is None) != (y is None): + raise ValueError('x and y must be either both None or both non-None') + + with ops.name_scope('RaggedWhere', name, [condition, x, y]): + condition = ragged_tensor.convert_to_tensor_or_ragged_tensor( + condition, name='condition') + if x is None: + return _coordinate_where(condition) + else: + x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x') + y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y') + condition, x, y = ragged_tensor.match_row_splits_dtypes(condition, x, y) + return _elementwise_where_v2(condition, x, y) + + +@dispatch.dispatch_for_api(array_ops.where) +def where(condition: ragged_tensor.RaggedOrDense, + x: typing.Optional[ragged_tensor.RaggedOrDense] = None, + y: typing.Optional[ragged_tensor.RaggedOrDense] = None, + name=None): + """Return the elements, either from `x` or `y`, depending on the `condition`. + + : If both `x` and `y` are `None`: + Returns the coordinates of true elements of `condition`. The coordinates + are returned in a 2-D tensor with shape + `[num_true_values, dim_size(condition)]`, where `result[i]` is the + coordinates of the `i`th true value (in row-major order). + + : If both `x` and `y` are non-`None`: + Returns a tensor formed by selecting values from `x` where condition is + true, and from `y` when condition is false. In particular: + + : If `condition`, `x`, and `y` all have the same shape: + + * `result[i1...iN] = x[i1...iN]` if `condition[i1...iN]` is true. + * `result[i1...iN] = y[i1...iN]` if `condition[i1...iN]` is false. + + : Otherwise: + + * `condition` must be a vector. + * `x` and `y` must have the same number of dimensions. + * The outermost dimensions of `condition`, `x`, and `y` must all have the + same size. + * `result[i] = x[i]` if `condition[i]` is true. + * `result[i] = y[i]` if `condition[i]` is false. + + Args: + condition: A potentially ragged tensor of type `bool` + x: A potentially ragged tensor (optional). + y: A potentially ragged tensor (optional). Must be specified if `x` is + specified. Must have the same rank and type as `x`. + name: A name of the operation (optional) + + Returns: + : If both `x` and `y` are `None`: + A `Tensor` with shape `(num_true, dim_size(condition))`. + : Otherwise: + A potentially ragged tensor with the same type, rank, and outermost + dimension size as `x` and `y`. + `result.ragged_rank = max(x.ragged_rank, y.ragged_rank)`. + + Raises: + ValueError: When exactly one of `x` or `y` is non-`None`; or when + `condition`, `x`, and `y` have incompatible shapes. + + #### Examples: + + >>> # Coordinates where condition is true. + >>> condition = tf.ragged.constant([[True, False, True], [False, True]]) + >>> print(where(condition)) + tf.Tensor( [[0 0] [0 2] [1 1]], shape=(3, 2), dtype=int64) + + >>> # Elementwise selection between x and y, based on condition. + >>> condition = tf.ragged.constant([[True, False, True], [False, True]]) + >>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']]) + >>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']]) + >>> print(where(condition, x, y)) + + + >>> # Row selection between x and y, based on condition. 
+  >>> condition = [True, False]
+  >>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']])
+  >>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']])
+  >>> print(where(condition, x, y))
+  <tf.RaggedTensor [[b'A', b'B', b'C'], [b'd', b'e']]>
+  """
+  if (x is None) != (y is None):
+    raise ValueError('x and y must be either both None or both non-None')
+  with ops.name_scope('RaggedWhere', name, [condition, x, y]):
+    condition = ragged_tensor.convert_to_tensor_or_ragged_tensor(
+        condition, name='condition')
+    if x is None:
+      return _coordinate_where(condition)
+    else:
+      x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')
+      y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y')
+      condition, x, y = ragged_tensor.match_row_splits_dtypes(condition, x, y)
+      return _elementwise_where(condition, x, y)
+
+
+def _elementwise_where(condition, x, y):
+  """Ragged version of tf.where(condition, x, y)."""
+  condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)
+  x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)
+  y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)
+
+  if not (condition_is_ragged or x_is_ragged or y_is_ragged):
+    return array_ops.where(condition, x, y)
+
+  elif condition_is_ragged and x_is_ragged and y_is_ragged:
+    return ragged_functional_ops.map_flat_values(array_ops.where, condition, x,
+                                                 y)
+  elif not condition_is_ragged:
+    # Concatenate x and y, and then use `gather` to assemble the selected rows.
+    condition.shape.assert_has_rank(1)
+    x_and_y = ragged_concat_ops.concat([x, y], axis=0)
+    x_nrows = _nrows(x, out_type=x_and_y.row_splits.dtype)
+    y_nrows = _nrows(y, out_type=x_and_y.row_splits.dtype)
+    indices = array_ops.where(condition, math_ops.range(x_nrows),
+                              x_nrows + math_ops.range(y_nrows))
+    return ragged_gather_ops.gather(x_and_y, indices)
+
+  else:
+    raise ValueError('Input shapes do not match.')
+
+
+def _elementwise_where_v2(condition, x, y):
+  """Ragged version of tf.where_v2(condition, x, y)."""
+  # Broadcast x, y, and condition to have the same shape.
+  if not (condition.shape.is_fully_defined() and x.shape.is_fully_defined() and
+          y.shape.is_fully_defined() and x.shape == y.shape and
+          condition.shape == x.shape):
+    shape_c = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(
+        condition)
+    shape_x = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(x)
+    shape_y = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y)
+    shape = ragged_tensor_shape.broadcast_dynamic_shape(
+        shape_c, ragged_tensor_shape.broadcast_dynamic_shape(shape_x, shape_y))
+    condition = ragged_tensor_shape.broadcast_to(condition, shape)
+    x = ragged_tensor_shape.broadcast_to(x, shape)
+    y = ragged_tensor_shape.broadcast_to(y, shape)
+
+  condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)
+  x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)
+  y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)
+  if not (condition_is_ragged or x_is_ragged or y_is_ragged):
+    return array_ops.where_v2(condition, x, y)
+
+  return ragged_functional_ops.map_flat_values(array_ops.where_v2, condition,
+                                               x, y)
+
+
+def _coordinate_where(condition):
+  """Ragged version of tf.where(condition)."""
+  if not isinstance(condition, ragged_tensor.RaggedTensor):
+    return array_ops.where(condition)
+
+  # The coordinate for each `true` value in condition.values.
+  selected_coords = _coordinate_where(condition.values)
+
+  # Convert the first index in each coordinate to a row index and column index.
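+  # value_rowids() maps each flat value index to its row, and subtracting the
+  # row's start offset converts the flat index into a column index within
+  # that row.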
+  condition = condition.with_row_splits_dtype(selected_coords.dtype)
+  first_index = selected_coords[:, 0]
+  selected_rows = array_ops.gather(condition.value_rowids(), first_index)
+  selected_row_starts = array_ops.gather(condition.row_splits, selected_rows)
+  selected_cols = first_index - selected_row_starts
+
+  # Assemble the row & column index with the indices for inner dimensions.
+  return array_ops.concat([
+      array_ops.expand_dims(selected_rows, 1),
+      array_ops.expand_dims(selected_cols, 1), selected_coords[:, 1:]
+  ],
+                          axis=1)
+
+
+def _nrows(rt_input, out_type):
+  if isinstance(rt_input, ragged_tensor.RaggedTensor):
+    return rt_input.nrows(out_type=out_type)
+  else:
+    return array_ops.shape(rt_input, out_type=out_type)[0]
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/row_partition.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/row_partition.py
new file mode 100644
index 0000000000000000000000000000000000000000..43d739cc0cc4198fe2cc8effc8445263212086fc
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/row_partition.py
@@ -0,0 +1,1495 @@
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A class used to partition a sequence into contiguous subsequences ("rows").
+"""
+
+
+# TODO(edloper): Make into an ExtensionType (if possible)
+
+
+import numpy as np
+
+from tensorflow.core.protobuf import struct_pb2
+from tensorflow.python.framework import composite_tensor
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor as tensor_lib
+from tensorflow.python.framework import tensor_conversion
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import tensor_util
+from tensorflow.python.framework import type_spec
+from tensorflow.python.framework import type_spec_registry
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import gen_ragged_math_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops.ragged import segment_id_ops
+from tensorflow.python.saved_model import nested_structure_coder
+from tensorflow.python.util.tf_export import tf_export
+
+# ===============================================================================
+# RowPartition
+# ===============================================================================
+# TODO(edloper): Consider removing row_starts and row_limits factory methods
+# and accessors from RowPartition. In particular, these two encodings are
+# "second-class citizens": we never cache them, and if you do construct a
+# RowPartition from them then it may be more expensive than you might expect
+# (because we append a value to the beginning/end to transform them into
+# splits). If we do remove them from RowPartition, then we would still keep
+# the from_row_starts and from_row_limits factory methods in RaggedTensor.
+
+
+@tf_export("experimental.RowPartition")
+class RowPartition(composite_tensor.CompositeTensor):
+  """Partitioning of a sequence of values into contiguous subsequences ("rows").
+
+  A `RowPartition` describes how a sequence with `nvals` items should be
+  divided into `nrows` contiguous subsequences ("rows"). For example, a
+  `RowPartition` could be used to partition the vector `[1, 2, 3, 4, 5]` into
+  subsequences `[[1, 2], [3], [], [4, 5]]`. Note that `RowPartition` stores
+  information about how values are partitioned, but does not include the
+  partitioned values themselves. `tf.RaggedTensor` is used to pair a `values`
+  tensor with one or more `RowPartition`s, providing a complete encoding for a
+  ragged tensor (i.e. a tensor with variable-length dimensions).
+
+  `RowPartition`s may be defined using several different schemes:
+
+  * `row_lengths`: an integer vector with shape `[nrows]`, which specifies
+    the length of each row.
+
+  * `row_splits`: an integer vector with shape `[nrows+1]`, specifying the
+    "split points" between each row.
+
+  * `row_starts`: an integer vector with shape `[nrows]`, which specifies
+    the start offset for each row. Equivalent to `row_splits[:-1]`.
+
+  * `row_limits`: an integer vector with shape `[nrows]`, which specifies
+    the stop offset for each row. Equivalent to `row_splits[1:]`.
+
+  * `value_rowids` is an integer vector with shape `[nvals]`, corresponding
+    one-to-one with sequence values, which specifies the row that each value
+    belongs to. If the partition has empty trailing rows, then `nrows`
+    must also be specified.
+
+  * `uniform_row_length` is an integer scalar, specifying the length of every
+    row. This scheme may only be used if all rows have the same length.
+
+  For example, the following `RowPartition`s all represent the partitioning of
+  8 values into 5 sublists as follows: `[[*, *, *, *], [], [*, *, *], [*], []]`.
+
+  >>> p1 = RowPartition.from_row_lengths([4, 0, 3, 1, 0])
+  >>> p2 = RowPartition.from_row_splits([0, 4, 4, 7, 8, 8])
+  >>> p3 = RowPartition.from_row_starts([0, 4, 4, 7, 8], nvals=8)
+  >>> p4 = RowPartition.from_row_limits([4, 4, 7, 8, 8])
+  >>> p5 = RowPartition.from_value_rowids([0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
+
+  For more information about each scheme, see the documentation for its
+  factory method. For additional examples, see the documentation on
+  `tf.RaggedTensor`.
+
+  ### Precomputed Encodings
+
+  `RowPartition` always stores at least one encoding of the partitioning, but
+  it can be configured to cache additional encodings as well. This can
+  avoid unnecessary recomputation in eager mode. (In graph mode, optimizations
+  such as common subexpression elimination will typically prevent these
+  unnecessary recomputations.) To check which encodings are precomputed, use
+  `RowPartition.has_precomputed_<encoding>`. To cache an additional
+  encoding, use `RowPartition.with_precomputed_<encoding>`.
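+
+  For example (an illustrative sketch; the concrete methods in this file use
+  the private spellings `_has_precomputed_*` / `_with_precomputed_*`):
+
+  >>> p = RowPartition.from_row_lengths([4, 0, 3])
+  >>> p._has_precomputed_row_lengths()
+  True
+  >>> p._has_precomputed_value_rowids()
+  False
+  >>> p._with_precomputed_value_rowids()._has_precomputed_value_rowids()
+  True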
+ """ + + # ============================================================================= + # Constructor (private) + # ============================================================================= + def __init__(self, + row_splits, + row_lengths=None, + value_rowids=None, + nrows=None, + uniform_row_length=None, + nvals=None, + internal=False): + """Creates a `RowPartition` from the specified encoding tensor(s). + + This constructor is private -- please use one of the following ops to + build `RowPartition`s: + + * `RowPartition.from_row_lengths` + * `RowPartition.from_value_rowids` + * `RowPartition.from_row_splits` + * `RowPartition.from_row_starts` + * `RowPartition.from_row_limits` + * `RowPartition.from_uniform_row_length` + + If row_splits is has a constant value, then all other arguments should + have a constant value. + + Args: + row_splits: A 1-D integer tensor with shape `[nrows+1]`. + row_lengths: A 1-D integer tensor with shape `[nrows]` + value_rowids: A 1-D integer tensor with shape `[nvals]`. + nrows: A 1-D integer scalar tensor. + uniform_row_length: A scalar tensor. + nvals: A scalar tensor. + internal: Private key value, required to ensure that this private + constructor is *only* called from the factory methods. + + Raises: + TypeError: If a row partitioning tensor has an inappropriate dtype. + TypeError: If exactly one row partitioning argument was not specified. + ValueError: If a row partitioning tensor has an inappropriate shape. + ValueError: If multiple partitioning arguments are specified. + ValueError: If nrows is specified but value_rowids is not None. + """ + if internal is not _row_partition_factory_key: + raise ValueError("RowPartition constructor is private; please use one " + "of the factory methods instead (e.g., " + "RowPartition.from_row_lengths())") + + # Validate the arguments. + if not isinstance(row_splits, tensor_lib.Tensor): + raise TypeError("Row-partitioning argument must be a Tensor, got %r" % + row_splits) + if row_splits.dtype not in (dtypes.int32, dtypes.int64): + raise ValueError("Row-partitioning argument must be int32 or int64") + + # Validate shapes & dtypes. + row_splits.shape.assert_has_rank(1) + row_splits.set_shape([None]) + self._row_splits = row_splits + + # Store any cached tensors. These are used to avoid unnecessary + # round-trip conversions when a RowPartition is constructed from + # lengths or rowids, and we later want those lengths/rowids back. + for tensor in [row_lengths, value_rowids, nrows, uniform_row_length, nvals]: + if tensor is not None: + if not isinstance(tensor, tensor_lib.Tensor): + raise TypeError("Cached value must be a Tensor or None.") + elif tensor.dtype != row_splits.dtype: + raise ValueError(f"Inconsistent dtype for encoding tensors: " + f"{tensor} vs {row_splits}") + self._row_lengths = row_lengths + self._value_rowids = value_rowids + self._nrows = nrows + self._uniform_row_length = uniform_row_length + self._nvals = nvals + + # ============================================================================= + # Factory Methods + # ============================================================================= + + @classmethod + def from_value_rowids(cls, + value_rowids, + nrows=None, + validate=True, + dtype=None, + dtype_hint=None): + """Creates a `RowPartition` with rows partitioned by `value_rowids`. 
+
+    This `RowPartition` divides a sequence `values` into rows by specifying
+    which row each value should be added to:
+
+    ```python
+    partitioned_rows = [[] for _ in range(nrows)]
+    for (value, rowid) in zip(values, value_rowids):
+      partitioned_rows[rowid].append(value)
+    ```
+
+    Args:
+      value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds
+        one-to-one with `values`, and specifies each value's row index. Must be
+        nonnegative, and must be sorted in ascending order.
+      nrows: An integer scalar specifying the number of rows. This should be
+        specified if the `RowPartition` may contain empty trailing rows. Must
+        be greater than `value_rowids[-1]` (or greater than or equal to zero if
+        `value_rowids` is empty). Defaults to `value_rowids[-1] + 1` (or zero if
+        `value_rowids` is empty).
+      validate: If true, then use assertions to check that the arguments form a
+        valid `RowPartition`.
+      dtype: Optional dtype for the RowPartition. If missing, the type
+        is inferred from the type of `value_rowids`, dtype_hint, or tf.int64.
+      dtype_hint: Optional dtype for the RowPartition, used when dtype
+        is None. In some cases, a caller may not have a dtype in mind when
+        converting to a tensor, so dtype_hint can be used as a soft preference.
+        If the conversion to `dtype_hint` is not possible, this argument has no
+        effect.
+
+    Returns:
+      A `RowPartition`.
+
+    Raises:
+      ValueError: If `nrows` is incompatible with `value_rowids`.
+
+    #### Example:
+
+    >>> print(RowPartition.from_value_rowids(
+    ...     value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
+    ...     nrows=4))
+    tf.RowPartition(row_splits=[0 4 4 7 8])
+    """
+    # Local import bincount_ops to avoid import-cycle since bincount_ops
+    # imports ragged_tensor.
+    from tensorflow.python.ops import bincount_ops  # pylint: disable=g-import-not-at-top
+    if not isinstance(validate, bool):
+      raise TypeError("validate must have type bool")
+    with ops.name_scope(None, "RowPartitionFromValueRowIds",
+                        [value_rowids, nrows]):
+      value_rowids = cls._convert_row_partition(
+          value_rowids, "value_rowids", dtype_hint=dtype_hint, dtype=dtype)
+      if nrows is None:
+        const_rowids = tensor_util.constant_value(value_rowids)
+        if const_rowids is None:
+          nrows = array_ops.concat([value_rowids[-1:], [-1]], axis=0)[0] + 1
+          const_nrows = None
+        else:
+          const_nrows = const_rowids[-1] + 1 if const_rowids.size > 0 else 0
+          nrows = ops.convert_to_tensor(
+              const_nrows, value_rowids.dtype, name="nrows")
+      else:
+        nrows = ops.convert_to_tensor(nrows, value_rowids.dtype, "nrows")
+        const_nrows = tensor_util.constant_value(nrows)
+        if const_nrows is not None:
+          if const_nrows < 0:
+            raise ValueError("Expected nrows >= 0; got %d" % const_nrows)
+          const_rowids = tensor_util.constant_value(value_rowids)
+          if const_rowids is not None and const_rowids.size > 0:
+            if not const_nrows >= const_rowids[-1] + 1:
+              raise ValueError(
+                  "Expected nrows >= value_rowids[-1] + 1; got nrows=%d, "
+                  "value_rowids[-1]=%d" % (const_nrows, const_rowids[-1]))
+
+      value_rowids.shape.assert_has_rank(1)
+      nrows.shape.assert_has_rank(0)
+
+      if validate:
+        msg = ("Arguments to from_value_rowids do not form a valid "
+               "RowPartition")
+        checks = [
+            check_ops.assert_rank(value_rowids, 1, message=msg),
+            check_ops.assert_rank(nrows, 0, message=msg),
+            check_ops.assert_non_negative(value_rowids[:1], message=msg),
+            _assert_monotonic_increasing(value_rowids, message=msg),
+            check_ops.assert_less(value_rowids[-1:], nrows, message=msg),
+        ]
+        value_rowids = control_flow_ops.with_dependencies(checks, value_rowids)
+
+      # Convert value_rowids & nrows to row_splits.
+      # Note: we don't use segment_ids_to_row_splits() here because we want
+      # to save the intermediate value `row_lengths`, so we can cache it.
+      # TODO(b/116708836) Upgrade bincount to accept int64 so we can skip the
+      # cast.
+      value_rowids_int32 = math_ops.cast(value_rowids, dtypes.int32)
+      nrows_int32 = math_ops.cast(nrows, dtypes.int32)
+      row_lengths = bincount_ops.bincount(
+          value_rowids_int32,
+          minlength=nrows_int32,
+          maxlength=nrows_int32,
+          dtype=value_rowids.dtype)
+      row_splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)
+      if const_nrows is not None:
+        row_lengths.set_shape([const_nrows])
+        row_splits.set_shape([const_nrows + 1])
+
+      return cls(
+          row_splits=row_splits,
+          row_lengths=row_lengths,
+          value_rowids=value_rowids,
+          nrows=nrows,
+          internal=_row_partition_factory_key)
+
+  @classmethod
+  def from_row_splits(cls,
+                      row_splits,
+                      validate=True,
+                      dtype=None,
+                      dtype_hint=None):
+    """Creates a `RowPartition` with rows partitioned by `row_splits`.
+
+    This `RowPartition` divides a sequence `values` into rows by indicating
+    where each row begins and ends:
+
+    ```python
+    partitioned_rows = []
+    for i in range(len(row_splits) - 1):
+      row_start = row_splits[i]
+      row_end = row_splits[i + 1]
+      partitioned_rows.append(values[row_start:row_end])
+    ```
+
+    Args:
+      row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be
+        empty, and must be sorted in ascending order. `row_splits[0]` must be
+        zero.
+      validate: If true, then use assertions to check that the arguments form a
+        valid `RowPartition`.
+      dtype: Optional dtype for the RowPartition. If missing, the type
+        is inferred from the type of `row_splits`, dtype_hint, or tf.int64.
+      dtype_hint: Optional dtype for the RowPartition, used when dtype
+        is None. In some cases, a caller may not have a dtype in mind when
+        converting to a tensor, so dtype_hint can be used as a soft preference.
+        If the conversion to `dtype_hint` is not possible, this argument has no
+        effect.
+
+    Returns:
+      A `RowPartition`.
+
+    Raises:
+      ValueError: If `row_splits` is an empty list.
+    """
+    if not isinstance(validate, bool):
+      raise TypeError("validate must have type bool")
+    if isinstance(row_splits, (list, tuple)) and not row_splits:
+      raise ValueError("row_splits tensor may not be empty.")
+    if isinstance(row_splits, tensor_lib.TensorSpec):
+      return cls(row_splits=row_splits, internal=_row_partition_factory_key)
+
+    with ops.name_scope(None, "RowPartitionFromRowSplits", [row_splits]):
+      row_splits = cls._convert_row_partition(
+          row_splits, "row_splits", dtype_hint=dtype_hint, dtype=dtype)
+      row_splits.shape.assert_has_rank(1)
+
+      if validate:
+        msg = "Arguments to from_row_splits do not form a valid RaggedTensor:"
+        checks = [
+            check_ops.assert_rank(row_splits, 1, message=(msg + "rank")),
+            _assert_zero(row_splits[0], message=(msg + "zero")),
+            _assert_monotonic_increasing(
+                row_splits, message=(msg + "monotonic")),
+        ]
+        row_splits = control_flow_ops.with_dependencies(checks, row_splits)
+
+      return cls(row_splits=row_splits, internal=_row_partition_factory_key)
+
+  @classmethod
+  def from_row_lengths(cls,
+                       row_lengths,
+                       validate=True,
+                       dtype=None,
+                       dtype_hint=None):
+    """Creates a `RowPartition` with rows partitioned by `row_lengths`.
+ + This `RowPartition` divides a sequence `values` into rows by indicating + the length of each row: + + ```python + partitioned_rows = [[values.pop(0) for _ in range(length)] + for length in row_lengths] + ``` + + Args: + row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be + nonnegative. + validate: If true, then use assertions to check that the arguments form a + valid `RowPartition`. + + dtype: Optional dtype for the RowPartition. If missing, the type + is inferred from the type of `row_lengths`, dtype_hint, or tf.int64. + dtype_hint: Optional dtype for the RowPartition, used when dtype + is None. In some cases, a caller may not have a dtype in mind when + converting to a tensor, so dtype_hint can be used as a soft preference. + If the conversion to `dtype_hint` is not possible, this argument has no + effect. + + Returns: + A `RowPartition`. + """ + if not isinstance(validate, bool): + raise TypeError("validate must have type bool") + with ops.name_scope(None, "RowPartitionFromRowLengths", [row_lengths]): + row_lengths = cls._convert_row_partition( + row_lengths, "row_lengths", dtype_hint=dtype_hint, dtype=dtype) + row_lengths.shape.assert_has_rank(1) + + if validate: + msg = "Arguments to from_row_lengths do not form a valid RowPartition" + checks = [ + check_ops.assert_rank(row_lengths, 1, message=msg), + check_ops.assert_non_negative(row_lengths, message=msg), + ] + row_lengths = control_flow_ops.with_dependencies(checks, row_lengths) + + row_limits = math_ops.cumsum(row_lengths) + row_splits = array_ops.concat([[0], row_limits], axis=0) + return cls( + row_splits=row_splits, + row_lengths=row_lengths, + internal=_row_partition_factory_key) + + @classmethod + def from_row_starts(cls, + row_starts, + nvals, + validate=True, + dtype=None, + dtype_hint=None): + """Creates a `RowPartition` with rows partitioned by `row_starts`. + + Equivalent to: `from_row_splits(concat([row_starts, nvals], axis=0))`. + + Args: + row_starts: A 1-D integer tensor with shape `[nrows]`. Must be + nonnegative and sorted in ascending order. If `nrows>0`, then + `row_starts[0]` must be zero. + nvals: A scalar tensor indicating the number of values. + validate: If true, then use assertions to check that the arguments form a + valid `RowPartition`. + dtype: Optional dtype for the RowPartition. If missing, the type + is inferred from the type of `row_starts`, dtype_hint, or tf.int64. + dtype_hint: Optional dtype for the RowPartition, used when dtype + is None. In some cases, a caller may not have a dtype in mind when + converting to a tensor, so dtype_hint can be used as a soft preference. + If the conversion to `dtype_hint` is not possible, this argument has no + effect. + + Returns: + A `RowPartition`. + """ + if not isinstance(validate, bool): + raise TypeError("validate must have type bool") + with ops.name_scope(None, "RowPartitionFromRowStarts", [row_starts]): + row_starts = cls._convert_row_partition( + row_starts, "row_starts", dtype_hint=dtype_hint, dtype=dtype) + row_starts.shape.assert_has_rank(1) + # TODO(martinz): nvals and row_starts could be inconsistent at call time, + # even though they eventually end up the same type. 
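+      # Casting nvals to row_starts.dtype silently coerces a mismatched dtype
+      # rather than raising, which is the inconsistency the TODO above
+      # refers to.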
+      nvals = math_ops.cast(nvals, row_starts.dtype)
+      if validate:
+        msg = "Arguments to from_row_starts do not form a valid RaggedTensor"
+        checks = [
+            check_ops.assert_rank(row_starts, 1, message=msg),
+            _assert_zero(row_starts[:1], message=msg),
+            _assert_monotonic_increasing(row_starts, message=msg),
+            check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg),
+        ]
+        row_starts = control_flow_ops.with_dependencies(checks, row_starts)
+
+      row_splits = array_ops.concat([row_starts, [nvals]], axis=0)
+      return cls(row_splits=row_splits, nvals=nvals,
+                 internal=_row_partition_factory_key)
+
+  @classmethod
+  def from_row_limits(cls,
+                      row_limits,
+                      validate=True,
+                      dtype=None,
+                      dtype_hint=None):
+    """Creates a `RowPartition` with rows partitioned by `row_limits`.
+
+    Equivalent to: `from_row_splits(concat([0, row_limits], axis=0))`.
+
+    Args:
+      row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in
+        ascending order.
+      validate: If true, then use assertions to check that the arguments form a
+        valid `RowPartition`.
+      dtype: Optional dtype for the RowPartition. If missing, the type
+        is inferred from the type of `row_limits`, dtype_hint, or tf.int64.
+      dtype_hint: Optional dtype for the RowPartition, used when dtype
+        is None. In some cases, a caller may not have a dtype in mind when
+        converting to a tensor, so dtype_hint can be used as a soft preference.
+        If the conversion to `dtype_hint` is not possible, this argument has no
+        effect.
+
+    Returns:
+      A `RowPartition`.
+    """
+    if not isinstance(validate, bool):
+      raise TypeError("validate must have type bool")
+    with ops.name_scope(None, "RowPartitionFromRowLimits", [row_limits]):
+      row_limits = cls._convert_row_partition(
+          row_limits, "row_limits", dtype_hint=dtype_hint, dtype=dtype)
+      row_limits.shape.assert_has_rank(1)
+
+      if validate:
+        msg = "Arguments to from_row_limits do not form a valid RaggedTensor"
+        checks = [
+            check_ops.assert_rank(row_limits, 1, message=msg),
+            check_ops.assert_non_negative(row_limits[:1], message=msg),
+            _assert_monotonic_increasing(row_limits, message=msg),
+        ]
+        row_limits = control_flow_ops.with_dependencies(checks, row_limits)
+
+      zero = array_ops.zeros([1], row_limits.dtype)
+      row_splits = array_ops.concat([zero, row_limits], axis=0)
+      return cls(row_splits=row_splits, internal=_row_partition_factory_key)
+
+  @classmethod
+  def from_uniform_row_length(cls,
+                              uniform_row_length,
+                              nvals=None,
+                              nrows=None,
+                              validate=True,
+                              dtype=None,
+                              dtype_hint=None):
+    """Creates a `RowPartition` with rows partitioned by `uniform_row_length`.
+
+    This `RowPartition` divides a sequence `values` into rows that all have
+    the same length:
+
+    ```python
+    partitioned_rows = [[values.pop(0) for _ in range(uniform_row_length)]
+                        for _ in range(nrows)]
+    ```
+
+    Note that either or both of nvals and nrows must be specified.
+
+    Args:
+      uniform_row_length: A scalar integer tensor. Must be nonnegative. The
+        size of the outer axis of `values` must be evenly divisible by
+        `uniform_row_length`.
+      nvals: a non-negative scalar integer tensor for the number of values.
+        Must be specified if nrows is not specified. If not specified,
+        defaults to uniform_row_length*nrows.
+      nrows: The number of rows in the constructed RowPartition. If not
+        specified, then it defaults to `nvals/uniform_row_length` (or `0` if
+        `uniform_row_length==0`). `nrows` only needs to be specified if
+        `uniform_row_length` might be zero. `uniform_row_length*nrows` must be
+        `nvals`.
+ validate: If true, then use assertions to check that the arguments form a + valid `RowPartition`. + dtype: Optional dtype for the RowPartition. If missing, the type + is inferred from the type of `uniform_row_length`, dtype_hint, + or tf.int64. + dtype_hint: Optional dtype for the RowPartition, used when dtype + is None. In some cases, a caller may not have a dtype in mind when + converting to a tensor, so dtype_hint can be used as a soft preference. + If the conversion to `dtype_hint` is not possible, this argument has no + effect. + + Returns: + A `RowPartition`. + """ + if not isinstance(validate, bool): + raise TypeError("validate must have type bool") + if nrows is None and nvals is None: + raise ValueError("Either (or both) of nvals and nrows must be specified") + with ops.name_scope(None, "RowPartitionFromUniformRowLength", + [uniform_row_length, nrows]): + [uniform_row_length, nvals, nrows + ] = _convert_all_to_tensors([(uniform_row_length, "uniform_row_length"), + (nvals, "nvals"), (nrows, "nrows")], + dtype=dtype, + dtype_hint=dtype_hint) + + uniform_row_length.shape.assert_has_rank(0) + + # Find nrows. + const_row_length = tensor_util.constant_value(uniform_row_length) + if nrows is None: + if const_row_length is None: + # Avoid division by zero if uniform_row_length==0 (and nvals==0). + rowlen_or_1 = math_ops.maximum( + uniform_row_length, + constant_op.constant(1, uniform_row_length.dtype)) + nrows = nvals // rowlen_or_1 + elif const_row_length == 0: + nrows = constant_op.constant(0, dtype=uniform_row_length.dtype) + else: + nrows = nvals // const_row_length + const_nrows = None if nrows is None else tensor_util.constant_value(nrows) + const_nvals = None if nvals is None else tensor_util.constant_value(nvals) + const_uniform_row_length = tensor_util.constant_value(uniform_row_length) + + checks = [] + + if const_nvals is None and const_nrows is not None and const_uniform_row_length is not None: + const_nvals = const_nrows * const_uniform_row_length + if nvals is not None and validate: + checks.append(check_ops.assert_equal(nvals, const_nvals)) + nvals = constant_op.constant(const_nvals, uniform_row_length.dtype) + + if nvals is None: + nvals = nrows * uniform_row_length + + # Find row_splits. 
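+      # When nrows and the row length are both statically known, the splits
+      # [0, L, 2L, ..., nrows*L] can be built as a Python constant; otherwise
+      # they are computed as range(nrows + 1) * uniform_row_length.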
+ if const_nrows is not None and const_row_length is not None: + row_splits = [v * const_row_length for v in range(const_nrows + 1)] + row_splits = constant_op.constant(row_splits, uniform_row_length.dtype) + else: + row_splits = math_ops.range( + nrows + 1, dtype=uniform_row_length.dtype) * uniform_row_length + + if validate: + + if (const_nrows is None or const_row_length is None or + const_nvals is None): + checks.append( + check_ops.assert_equal( + nrows * uniform_row_length, nvals, + ("uniform_row_length", uniform_row_length, "times nrows", + nrows, "must equal nvals", nvals))) + else: + if const_nrows * const_row_length != const_nvals: + raise ValueError( + "uniform_row_length=%d times nrows=%d must equal nvals=%d" % + (const_row_length, const_nrows, const_nvals)) + + if uniform_row_length.shape.rank is None: + checks.append( + check_ops.assert_rank( + uniform_row_length, + 0, + message="uniform_row_length must be a scalar.")) + + const_row_length = tensor_util.constant_value(uniform_row_length) + if const_row_length is None: + checks.append( + check_ops.assert_greater_equal( + uniform_row_length, + constant_op.constant(0, uniform_row_length.dtype), + message="uniform_row_length must be >= 0.")) + else: + if const_row_length < 0: + raise ValueError("uniform_row_length must be >= 0.") + + row_splits = control_flow_ops.with_dependencies(checks, row_splits) + + return cls( + row_splits=row_splits, + uniform_row_length=uniform_row_length, + nrows=nrows, + nvals=nvals, + internal=_row_partition_factory_key) + + @classmethod + def _convert_row_partition(cls, partition, name, dtype=None, dtype_hint=None): + """Converts `partition` to Tensors. + + Args: + partition: A row-partitioning tensor for the `RowPartition` being + constructed. I.e., one of: row_splits, row_lengths, row_starts, + row_limits, value_rowids, uniform_row_length. + name: The name of the row-partitioning tensor. + dtype: Optional dtype for the RowPartition. If missing, the type + is inferred from the type of `uniform_row_length`, dtype_hint, + or tf.int64. + dtype_hint: Optional dtype for the RowPartition, used when dtype + is None. In some cases, a caller may not have a dtype in mind when + converting to a tensor, so dtype_hint can be used as a soft preference. + If the conversion to `dtype_hint` is not possible, this argument has no + effect. + + Returns: + A tensor equivalent to partition. + + Raises: + ValueError: if dtype is not int32 or int64. + """ + if dtype_hint is None: + dtype_hint = dtypes.int64 + if (isinstance(partition, np.ndarray) and + partition.dtype == np.int32 and dtype is None): + partition = ops.convert_to_tensor(partition, name=name) + else: + partition = tensor_conversion.convert_to_tensor_v2( + partition, dtype_hint=dtype_hint, dtype=dtype, name=name + ) + if partition.dtype not in (dtypes.int32, dtypes.int64): + raise ValueError("%s must have dtype int32 or int64" % name) + + return partition + + def _with_dependencies(self, dependencies): + """Returns a new RowPartition equal to self with control dependencies. + + Specifically, self._row_splits is gated by the given control dependencies. + Used to add sanity checks to the constructors. + + Args: + dependencies: a list of tensors to use as dependencies. + + Returns: + A new RowPartition object. 
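+
+    Example (illustrative): gate the splits on a runtime check:
+
+    ```python
+    checked = partition._with_dependencies(
+        [check_ops.assert_non_negative(partition.row_splits())])
+    ```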
+ """ + new_row_splits = control_flow_ops.with_dependencies(dependencies, + self._row_splits) + return RowPartition( + row_splits=new_row_splits, + row_lengths=self._row_lengths, + value_rowids=self._value_rowids, + nrows=self._nrows, + uniform_row_length=self._uniform_row_length, + internal=_row_partition_factory_key) + + # ============================================================================= + # Accessors + # ============================================================================= + + @property + def dtype(self): + """The `DType` used to encode the row partition (either int32 or int64).""" + return self._row_splits.dtype + + def row_splits(self): + """Returns the row-split indices for this row partition. + + `row_splits` specifies where the values for each row begin and end. + In particular, the values for row `i` are stored in the slice + `values[row_splits[i]:row_splits[i+1]]`. + + Returns: + A 1-D integer `Tensor` with shape `[self.nrows+1]`. + The returned tensor is non-empty, and is sorted in ascending order. + `self.row_splits()[0] == 0`. + `self.row_splits()[-1] == self.nvals()`. + """ + return self._row_splits + + def value_rowids(self): + """Returns the row indices for this row partition. + + `value_rowids` specifies the row index fo reach value. In particular, + `value_rowids[i]` is the row index for `values[i]`. + + Returns: + A 1-D integer `Tensor` with shape `[self.nvals()]`. + The returned tensor is nonnegative, and is sorted in ascending order. + """ + if self._value_rowids is not None: + return self._value_rowids + return segment_id_ops.row_splits_to_segment_ids(self._row_splits) + + def nvals(self): + """Returns the number of values partitioned by this `RowPartition`. + + If the sequence partitioned by this `RowPartition` is a tensor, then + `nvals` is the size of that tensor's outermost dimension -- i.e., + `nvals == values.shape[0]`. + + Returns: + scalar integer Tensor + """ + # TODO(martinz): Uncomment these lines. + # if self._nvals is not None: + # return self._nvals + return self._row_splits[-1] + + def nrows(self): + """Returns the number of rows created by this `RowPartition`. + + Returns: + scalar integer Tensor + """ + if self._nrows is not None: + return self._nrows + nsplits = tensor_shape.dimension_at_index(self._row_splits.shape, 0) + if nsplits.value is None: + return array_ops.shape(self._row_splits, out_type=self.dtype)[0] - 1 + else: + return constant_op.constant(nsplits.value - 1, dtype=self.dtype) + + def uniform_row_length(self): + """Returns the length of each row in this partition, if rows are uniform. + + If all rows in this `RowPartition` have the same length, then this returns + that length as a scalar integer `Tensor`. Otherwise, it returns `None`. + + Returns: + scalar Tensor with `type=self.dtype`, or `None`. + """ + return self._uniform_row_length + + def row_starts(self): + """Returns the start indices for rows in this row partition. + + These indices specify where the values for each row begin. + `partition.row_starts()` is equal to `partition.row_splits()[:-1]`. + + Returns: + A 1-D integer Tensor with shape `[self.nrows()]`. + The returned tensor is nonnegative, and is sorted in ascending order. + `self.row_starts()[0] == 0`. + `self.row_starts()[-1] <= self.nvals()`. + """ + return self._row_splits[:-1] + + def row_limits(self): + """Returns the limit indices for rows in this row partition. + + These indices specify where the values for each row end. + `partition.row_limits()` is equal to `partition.row_splits()[:-1]`. 
+
+    Returns:
+      A 1-D integer Tensor with shape `[self.nrows]`.
+      The returned tensor is nonnegative, and is sorted in ascending order.
+      `self.row_limits()[-1] == self.nvals()`.
+    """
+    return self._row_splits[1:]
+
+  def row_lengths(self):
+    """Returns the lengths of rows in this `RowPartition`.
+
+    Returns:
+      A 1-D integer Tensor with shape `[self.nrows]`.
+      The returned tensor is nonnegative.
+      `tf.reduce_sum(self.row_lengths) == self.nvals()`.
+    """
+    if self._row_lengths is not None:
+      return self._row_lengths
+    splits = self._row_splits
+    return splits[1:] - splits[:-1]
+
+  @property
+  def static_nrows(self):
+    """The number of rows in this partition, if statically known.
+
+    ```python
+    self.row_lengths().shape == [self.static_nrows]
+    self.row_starts().shape == [self.static_nrows]
+    self.row_limits().shape == [self.static_nrows]
+    self.row_splits().shape == [self.static_nrows + 1]
+    ```
+
+    Returns:
+      The number of rows in this partition as an `int` (if statically known);
+      or `None` (otherwise).
+    """
+    if self._row_splits is not None:
+      nrows_plus_one = tensor_shape.dimension_value(self._row_splits.shape[0])
+      if nrows_plus_one is not None:
+        return nrows_plus_one - 1
+    if self._row_lengths is not None:
+      nrows = tensor_shape.dimension_value(self._row_lengths.shape[0])
+      if nrows is not None:
+        return nrows
+    if self._nrows is not None:
+      return tensor_util.constant_value(self._nrows)
+    return None
+
+  @property
+  def static_nvals(self):
+    """The number of values in this partition, if statically known.
+
+    ```python
+    self.value_rowids().shape == [self.static_nvals]
+    ```
+
+    Returns:
+      The number of values in this partition as an `int` (if statically known);
+      or `None` (otherwise).
+    """
+    if self._nvals is not None:
+      nvals = tensor_util.constant_value(self._nvals)
+      if nvals is not None:
+        return nvals
+    if self._value_rowids is not None:
+      nvals = tensor_shape.dimension_at_index(self._value_rowids.shape, 0)
+      if nvals.value is not None:
+        return nvals.value
+    return None
+
+  @property
+  def static_uniform_row_length(self):
+    """The number of values in each row of this partition, if statically known.
+
+    Returns:
+      The number of values in each row of this partition as an `int` (if
+      statically known); or `None` (otherwise).
+    """
+    if self._uniform_row_length is not None:
+      return tensor_util.constant_value(self._uniform_row_length)
+    return None
+
+  def offsets_in_rows(self):
+    """Return the offset of each value.
+
+    RowPartition takes an array x and converts it into sublists.
+    offsets[i] is the index of x[i] in its sublist.
+    Given a shape, such as:
+    [*,*,*],[*,*],[],[*,*]
+    This returns:
+    0,1,2,0,1,0,1
+
+    Returns:
+      an offset for every value.
+    """
+    return gen_ragged_math_ops.ragged_range(
+        starts=constant_op.constant(0, self.dtype),
+        limits=self.row_lengths(),
+        deltas=constant_op.constant(1, self.dtype)).rt_dense_values
+
+  def is_uniform(self):
+    """Returns true if the partition is known to be uniform statically.
+
+    This is based upon the existence of self._uniform_row_length. For example:
+    RowPartition.from_row_lengths([3,3,3]).is_uniform()==false
+    RowPartition.from_uniform_row_length(5, nvals=20).is_uniform()==true
+    RowPartition.from_row_lengths([2,0,2]).is_uniform()==false
+
+    Returns:
+      Whether a RowPartition is known to be uniform statically.
+    """
+    return self._uniform_row_length is not None
+
+  def _static_check(self):
+    """Checks if the object is internally consistent.
+
+    Raises:
+      ValueError if inconsistent.
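+
+    Example (illustrative): partitions built through a single factory method
+    always share one dtype across their cached encodings, so this passes:
+
+    ```python
+    rp = RowPartition.from_row_splits([0, 2, 4])  # all encodings are int64
+    rp._static_check()  # no error
+    ```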
+ """ + my_dtype = self.dtype + if self._uniform_row_length is not None: + if self._uniform_row_length.dtype != my_dtype: + raise ValueError("_uniform_row_length.dtype=" + + str(self._uniform_row_length.dtype) + ", not " + + str(my_dtype)) + + if self._row_lengths is not None and self._row_lengths.dtype != my_dtype: + raise ValueError("_row_lengths.dtype=" + str(self._row_lengths.dtype) + + ", not " + str(my_dtype)) + + if self._value_rowids is not None and self._value_rowids.dtype != my_dtype: + raise ValueError("_value_rowids.dtype=" + str(self._value_rowids.dtype) + + ", not " + str(my_dtype)) + + if self._nrows is not None and self._nrows.dtype != my_dtype: + raise ValueError("_nrows.dtype=" + str(self._nrows.dtype) + ", not " + + str(my_dtype)) + + # ============================================================================= + # Transformation + # ============================================================================= + + def with_dtype(self, dtype): + """Returns a copy of this RowPartition with the given encoding dtype. + + Args: + dtype: The dtype for encoding tensors, such as `row_splits` and `nrows`. + One of `tf.int32` or `tf.int64`. + + Returns: + A copy of this RowPartition, with the encoding tensors cast to the given + type. + """ + dtype = dtypes.as_dtype(dtype) + if dtype not in (dtypes.int32, dtypes.int64): + raise ValueError("dtype must be int32 or int64") + if self.dtype == dtype: + return self + + return RowPartition( + row_splits=_cast_if_not_none(self._row_splits, dtype), + row_lengths=_cast_if_not_none(self._row_lengths, dtype), + value_rowids=_cast_if_not_none(self._value_rowids, dtype), + nrows=_cast_if_not_none(self._nrows, dtype), + uniform_row_length=_cast_if_not_none(self._uniform_row_length, dtype), + internal=_row_partition_factory_key) + + # ============================================================================= + # String Encoding + # ============================================================================= + + def __repr__(self): + if self._uniform_row_length is not None: + return (f"tf.RowPartition(nrows={self._nrows}, " + f"uniform_row_length={self._uniform_row_length})") + else: + return f"tf.RowPartition(row_splits={self._row_splits})" + + # ============================================================================= + # Precomputed Encodings + # ============================================================================= + + def _has_precomputed_row_splits(self): + """Returns true if `row_splits` has already been computed. + + If true, then `self.row_splits()` will return its value without calling + any TensorFlow ops. + """ + return self._row_splits is not None + + def _has_precomputed_row_lengths(self): + """Returns true if `row_lengths` has already been computed. + + If true, then `self.row_lengths()` will return its value without calling + any TensorFlow ops. + """ + return self._row_lengths is not None + + def _has_precomputed_value_rowids(self): + """Returns true if `value_rowids` has already been computed. + + If true, then `self.value_rowids()` will return its value without calling + any TensorFlow ops. + """ + return self._value_rowids is not None + + def _has_precomputed_nrows(self): + """Returns true if `nrows` has already been computed. + + If true, then `self.nrows()` will return its value without calling + any TensorFlow ops. + """ + return self._nrows is not None + + def _has_precomputed_nvals(self): + """Returns true if `nvals` has already been computed. 
+ + If true, then `self.nvals()` will return its value without calling + any TensorFlow ops. + """ + return self._nvals is not None + + def _with_precomputed_row_splits(self): + """Returns a copy of `self` with `row_splits` precomputed.""" + return RowPartition( + row_splits=self.row_splits(), + row_lengths=self._row_lengths, + value_rowids=self._value_rowids, + nrows=self._nrows, + uniform_row_length=self._uniform_row_length, + nvals=self._nvals, + internal=_row_partition_factory_key) + + def _with_precomputed_row_lengths(self): + """Returns a copy of `self` with `row_lengths` precomputed.""" + return RowPartition( + row_splits=self._row_splits, + row_lengths=self.row_lengths(), + value_rowids=self._value_rowids, + nrows=self._nrows, + nvals=self._nvals, + uniform_row_length=self._uniform_row_length, + internal=_row_partition_factory_key) + + def _with_precomputed_value_rowids(self): + """Returns a copy of `self` with `value_rowids` precomputed.""" + return RowPartition( + row_splits=self._row_splits, + row_lengths=self._row_lengths, + value_rowids=self.value_rowids(), + nrows=self._nrows, + nvals=self._nvals, + uniform_row_length=self._uniform_row_length, + internal=_row_partition_factory_key) + + def _with_precomputed_nrows(self): + """Returns a copy of `self` with `nrows` precomputed.""" + return RowPartition( + row_splits=self._row_splits, + row_lengths=self._row_lengths, + value_rowids=self._value_rowids, + nrows=self.nrows(), + nvals=self._nvals, + uniform_row_length=self._uniform_row_length, + internal=_row_partition_factory_key) + + def _with_precomputed_nvals(self): + """Returns a copy of `self` with `nvals` precomputed.""" + return RowPartition( + row_splits=self.row_splits(), + row_lengths=self._row_lengths, + value_rowids=self._value_rowids, + nrows=self._nrows, + nvals=self.nvals(), + uniform_row_length=self._uniform_row_length, + internal=_row_partition_factory_key) + + def _merge_with_spec(self, b): + """Merge with a TypeSpec to create a new RowPartition.""" + a_spec = self._type_spec + if not a_spec.is_compatible_with(b): + # TODO(martinz): Should a dynamic check be used here? + raise ValueError("RowPartition and RowPartitionSpec are not compatible") + nrows = constant_op.constant( + b.nrows, self.dtype) if b.nrows is not None else self._nrows + nvals = constant_op.constant( + b.nvals, self.dtype) if b.nvals is not None else self._nvals + uniform_row_length = constant_op.constant( + b.uniform_row_length, self.dtype + ) if b.uniform_row_length is not None else self._uniform_row_length + return RowPartition( + row_splits=self._row_splits, + row_lengths=self._row_lengths, + value_rowids=self._value_rowids, + nvals=nvals, + uniform_row_length=uniform_row_length, + nrows=nrows, + internal=_row_partition_factory_key) + + def _merge_precomputed_encodings(self, other, validate=True): + """Returns a RowPartition that merges encodings from `self` and `other`. + + Requires that `self` and `other` describe the same partition. + + Args: + other: A `RowPartition` that encodes the same partition as `self`. + validate: If true, then add runtime checks to verify that `self` and + `other` encode the same row partition. + + Returns: + A `RowPartition`. + """ + # pylint: disable=protected-access + if (self is other or # Fast path if row partitions are equal.
+ (self._row_splits is other._row_splits and + self._row_lengths is other._row_lengths and + self._value_rowids is other._value_rowids and + self._nrows is other._nrows and + self._nvals is other._nvals and + self._uniform_row_length is other._uniform_row_length)): + return self + + # Merge the component tensors. We only need to validate one encoding. + # We merge less-expensive encodings first (to avoid expensive validation). + nrows, nrows_validated = _merge_tensors(self._nrows, other._nrows, "nrows", + validate) + nvals, _ = _merge_tensors(self._nvals, other._nvals, "nvals", validate) + uniform_row_length, uniform_row_length_validated = _merge_tensors( + self._uniform_row_length, other._uniform_row_length, + "uniform_row_length", validate) + if uniform_row_length_validated and nrows_validated: + validate = False # Validation complete. + row_splits, row_splits_validated = _merge_tensors(self._row_splits, + other._row_splits, + "row_splits", validate) + if row_splits_validated: + validate = False # Validation complete. + row_lengths, row_lengths_validated = _merge_tensors(self._row_lengths, + other._row_lengths, + "row_lengths", validate) + if row_lengths_validated: + validate = False # Validation complete. + value_rowids, value_rowids_validated = _merge_tensors( + self._value_rowids, other._value_rowids, "value_rowids", validate) + if value_rowids_validated and nrows_validated: + validate = False # Validation complete. + # TODO(edloper): If we make the row_splits encoding optional, then there + # will be cases where we need to do validation at this point -- e.g. if + # self has only row_splits and other has only value_rowids. But for + # now, we are guaranteed to have done validation by this point. + + # Avoid creating new RowPartition objects if we don't need to. + if (row_splits is self._row_splits and row_lengths is self._row_lengths and + value_rowids is self._value_rowids and nrows is self._nrows and + uniform_row_length is self._uniform_row_length): + return self + if (row_splits is other._row_splits and + row_lengths is other._row_lengths and + value_rowids is other._value_rowids and nrows is other._nrows and + uniform_row_length is other._uniform_row_length): + return other + + return RowPartition( + row_splits=row_splits, + row_lengths=row_lengths, + value_rowids=value_rowids, + nrows=nrows, + uniform_row_length=uniform_row_length, + nvals=nvals, + internal=_row_partition_factory_key) + + # ============================================================================= + # Composite Tensor + # ============================================================================= + + @property + def _type_spec(self): + return RowPartitionSpec.from_value(self) + + +# =============================================================================== +# RowPartitionSpec +# =============================================================================== +# TODO(edloper): Consider refactoring RowPartitionSpec to allow any combination +# of precomputed row-partition encodings (rather than always using row_splits). + + +@type_spec_registry.register("tf.RowPartitionSpec") +class RowPartitionSpec(type_spec.TypeSpec): + """Type specification for a `tf.RowPartition`.""" + + __slots__ = ["_nrows", "_nvals", "_uniform_row_length", "_dtype"] + + value_type = property(lambda self: RowPartition) + + def __init__(self, + nrows=None, + nvals=None, + uniform_row_length=None, + dtype=dtypes.int64): + """Constructs a new RowPartitionSpec. 
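+
+ A minimal usage sketch (illustrative values; `from_value` and
+ `from_row_lengths` are defined elsewhere in this module):
+
+ >>> spec = RowPartitionSpec(nrows=4, nvals=10)
+ >>> spec.is_compatible_with(
+ ...     RowPartitionSpec.from_value(
+ ...         RowPartition.from_row_lengths([3, 2, 4, 1])))
+ True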
+ + Args: + nrows: The number of rows in the RowPartition, or `None` if unspecified. + nvals: The number of values partitioned by the RowPartition, or `None` if + unspecified. + uniform_row_length: The number of values in each row for this + RowPartition, or `None` if rows are ragged or row length is unspecified. + dtype: The data type used to encode the partition. One of `tf.int64` or + `tf.int32`. + """ + # Wrap dimension sizes in 1D TensorShapes so the default implementations + # of TypeSpec methods such as `is_compatible_with` will work. + nrows = tensor_shape.TensorShape([nrows]) + nvals = tensor_shape.TensorShape([nvals]) + if not isinstance(uniform_row_length, tensor_shape.TensorShape): + uniform_row_length = tensor_shape.TensorShape([uniform_row_length]) + else: + uniform_row_length = uniform_row_length.with_rank(1) + + self._nrows = nrows + self._nvals = nvals + self._uniform_row_length = uniform_row_length + self._dtype = dtypes.as_dtype(dtype) + if self._dtype not in (dtypes.int32, dtypes.int64): + raise ValueError("dtype must be tf.int32 or tf.int64") + + # Check dimension consistency, and infer dimensions when possible. + nrows = tensor_shape.dimension_value(nrows[0]) + nvals = tensor_shape.dimension_value(nvals[0]) + ncols = tensor_shape.dimension_value(uniform_row_length[0]) + if nrows == 0: # no rows -> no values. + if nvals is None: + self._nvals = tensor_shape.TensorShape([0]) + elif nvals != 0: + raise ValueError("nvals=%s is not compatible with nrows=%s" % + (nvals, nrows)) + if ncols == 0: # there are no values in each row -> no values. + if nvals is None: + self._nvals = tensor_shape.TensorShape([0]) + elif nvals != 0: + raise ValueError("nvals=%s is not compatible with uniform_row_length" + "=%s" % (nvals, uniform_row_length)) + if ncols is not None and nvals is not None: + if ncols != 0 and nvals % ncols != 0: + raise ValueError("nvals=%s is not compatible with uniform_row_length" + "=%s (doesn't divide evenly)" % (nvals, ncols)) + if nrows is not None and nvals != ncols * nrows: + raise ValueError("nvals=%s is not compatible with nrows=%s and " + "uniform_row_length=%s" % (nvals, nrows, ncols)) + if nrows is None and ncols != 0: + self._nrows = tensor_shape.TensorShape([nvals // ncols]) + if ncols is not None and nrows is not None and nvals is None: + self._nvals = tensor_shape.TensorShape([ncols * nrows]) + + def is_compatible_with(self, other): + if not super(RowPartitionSpec, self).is_compatible_with(other): + return False + nrows = self._nrows.merge_with(other.nrows) + nvals = self._nvals.merge_with(other.nvals) + ncols = self._uniform_row_length.merge_with(other.uniform_row_length) + return self._dimensions_compatible(nrows, nvals, ncols) + + def _serialize(self): + return (self._nrows, self._nvals, self._uniform_row_length, self._dtype) + + @classmethod + def _deserialize(cls, serialization): + # Remove TensorShape wrappers from serialization.
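+ # For instance, a serialization might look like (illustrative values):
+ #   (TensorShape([4]), TensorShape([10]), TensorShape([None]), tf.int64)
+ # The first two entries are unwrapped to plain ints (or None) here; the
+ # constructor accepts `uniform_row_length` as a TensorShape directly.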
+ (nrows, nvals, uniform_row_length, dtype) = serialization + nrows = tensor_shape.dimension_value(nrows[0]) + nvals = tensor_shape.dimension_value(nvals[0]) + return cls(nrows, nvals, uniform_row_length, dtype) + + @property + def nrows(self): + return tensor_shape.dimension_value(self._nrows[0]) + + @property + def nvals(self): + return tensor_shape.dimension_value(self._nvals[0]) + + @property + def uniform_row_length(self): + return tensor_shape.dimension_value(self._uniform_row_length[0]) + + @property + def dtype(self): + return self._dtype + + @property + def _component_specs(self): + row_splits_shape = tensor_shape.TensorShape( + [tensor_shape.dimension_at_index(self._nrows, 0) + 1]) + return tensor_lib.TensorSpec(row_splits_shape, self._dtype) + + def _to_components(self, value): + return value.row_splits() + + def _from_components(self, tensor): + return RowPartition.from_row_splits(tensor, validate=False) + + @classmethod + def from_value(cls, value): + if not isinstance(value, RowPartition): + raise TypeError("Expected `value` to be a `RowPartition`") + return cls(value.static_nrows, value.static_nvals, + value.static_uniform_row_length, value.dtype) + + def __repr__(self): + return ("RowPartitionSpec(nrows=%s, nvals=%s, uniform_row_length=%s, " + "dtype=%r)" % (self.nrows, self.nvals, self.uniform_row_length, + self.dtype)) + + @staticmethod + def _dimensions_compatible(nrows, nvals, uniform_row_length): + """Returns true if the given dimensions are compatible.""" + nrows = tensor_shape.dimension_value(nrows[0]) + nvals = tensor_shape.dimension_value(nvals[0]) + ncols = tensor_shape.dimension_value(uniform_row_length[0]) + if nrows == 0 and nvals not in (0, None): + return False # can't have values if we have no rows. + if ncols == 0 and nvals not in (0, None): + return False # can't have values if we have no values in each row. + if ncols is not None and nvals is not None: + if ncols != 0 and nvals % ncols != 0: + return False # rows aren't uniform. + if nrows is not None and nvals != ncols * nrows: + return False # inconsistent number of values. + return True + + def _merge_with(self, other): + """Merge two RowPartitionSpecs.""" + nrows = self._nrows.merge_with(other.nrows) + nvals = self._nvals.merge_with(other.nvals) + ncols = self._uniform_row_length.merge_with(other.uniform_row_length) + + if not RowPartitionSpec._dimensions_compatible(nrows, nvals, ncols): + raise ValueError("Merging incompatible RowPartitionSpecs") + + # NOTE: if the dtypes are unequal, behavior is unspecified. 
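+ # In this implementation, a dtype mismatch is rejected with an error
+ # rather than silently promoting one side to the other.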
+ if self.dtype != other.dtype: + raise ValueError("Merging RowPartitionSpecs with incompatible dtypes") + + return RowPartitionSpec(nrows=nrows[0], + nvals=nvals[0], + uniform_row_length=ncols[0], + dtype=self.dtype) + + def with_dtype(self, dtype): + nrows = tensor_shape.dimension_value(self._nrows[0]) + nvals = tensor_shape.dimension_value(self._nvals[0]) + return RowPartitionSpec(nrows, nvals, self._uniform_row_length, dtype) + + def __deepcopy__(self, memo): + del memo + dtype = self.dtype + nrows = tensor_shape.dimension_value(self._nrows[0]) + nvals = tensor_shape.dimension_value(self._nvals[0]) + uniform_row_length = (None if self._uniform_row_length is None else + tensor_shape.dimension_value( + self._uniform_row_length[0])) + return RowPartitionSpec(nrows, nvals, uniform_row_length, dtype) + + +nested_structure_coder.register_codec( + nested_structure_coder.BuiltInTypeSpecCodec( + RowPartitionSpec, struct_pb2.TypeSpecProto.ROW_PARTITION_SPEC + ) +) + + +# =============================================================================== +# Helper Functions +# =============================================================================== + + +def _assert_monotonic_increasing(tensor, message=None): + return check_ops.assert_non_negative( + tensor[1:] - tensor[:-1], message=message) + + +def _assert_zero(tensor, message=None): + return check_ops.assert_equal( + tensor, constant_op.constant(0, dtype=tensor.dtype), message=message) + + +def _cast_if_not_none(tensor, dtype): + return None if tensor is None else math_ops.cast(tensor, dtype) + + +def _merge_tensors(t1, t2, name, validate): + """Merge two optional Tensors with equal values into a single Tensor. + + Args: + t1: tf.Tensor or None + t2: tf.Tensor or None + name: A name for the tensors (for error messages) + validate: If true, then check that `t1` is compatible with `t2` (if both are + non-None). + + Returns: + A pair `(merged_value, validated)`: + * `merged_value` is `t1` if it is not None; or `t2` otherwise. + * `validated` is true if we validated that t1 and t2 are equal (either + by adding a check, or because t1 is t2). + """ + if t1 is None: + return t2, False + elif t2 is None: + return t1, False + elif t1 is t2: + return t1, True + else: + err_msg = ("RowPartition._merge_precomputed_encodings: partitions " + "have incompatible %s" % name) + if not t1.shape.is_compatible_with(t2.shape): + raise ValueError(err_msg) + if validate: + checks = [check_ops.assert_equal(t1, t2, message=err_msg)] + return control_flow_ops.with_dependencies(checks, t1), True + else: + return t1, False + +_row_partition_factory_key = object() # unique private object + + +def _get_dtype_or_none(value): + if isinstance(value, tensor_lib.Tensor): + return value.dtype + return None + + +def _get_target_dtype(values, dtype=None, dtype_hint=None): + """Gets the target dtype of a family of values.""" + if dtype is not None: + return dtype + + for value in values: + if isinstance(value, tensor_lib.Tensor): + return value.dtype + + for value in values: + if isinstance(value, np.ndarray): + return dtypes.as_dtype(value.dtype) + + if dtype_hint is not None: + return dtype_hint + + return dtypes.int64 + + +def _convert_all_to_tensors(values, dtype=None, dtype_hint=None): + """Convert a list of objects to tensors of the same dtype.""" + target_dtype = _get_target_dtype([x for (x, _) in values], dtype, dtype_hint) + + # If dtype is None, we use convert behavior. + # If dtype is not None, we use cast behavior. 
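+ # "Convert" (ops.convert_to_tensor) raises if an input is already a tensor
+ # with a conflicting dtype, while "cast" (math_ops.cast) coerces every
+ # input to `target_dtype` unconditionally.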
+ convert_behavior = dtype is None + + if convert_behavior: + return [ + None if x is None else ops.convert_to_tensor( + x, dtype=target_dtype, name=name) for (x, name) in values + ] + else: + return [ + None if x is None else math_ops.cast(x, dtype=target_dtype, name=name) + for (x, name) in values + ] diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/segment_id_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/segment_id_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..52489678f54c42d6a560c5339494aa6b732568ed --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/segment_id_ops.py @@ -0,0 +1,134 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Ops for converting between row_splits and segment_ids.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_util +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +# For background on "segments" and "segment ids", see: +# https://www.tensorflow.org/api_docs/python/tf/math#Segmentation +@tf_export("ragged.row_splits_to_segment_ids") +@dispatch.add_dispatch_support +def row_splits_to_segment_ids(splits, name=None, out_type=None): + """Generates the segmentation corresponding to a RaggedTensor `row_splits`. + + Returns an integer vector `segment_ids`, where `segment_ids[i] == j` if + `splits[j] <= i < splits[j+1]`. Example: + + >>> print(tf.ragged.row_splits_to_segment_ids([0, 3, 3, 5, 6, 9])) + tf.Tensor([0 0 0 2 2 3 4 4 4], shape=(9,), dtype=int64) + + Args: + splits: A sorted 1-D integer Tensor. `splits[0]` must be zero. + name: A name prefix for the returned tensor (optional). + out_type: The dtype for the return value. Defaults to `splits.dtype`, + or `tf.int64` if `splits` does not have a dtype. + + Returns: + A sorted 1-D integer Tensor, with `shape=[splits[-1]]` + + Raises: + ValueError: If `splits` is invalid. 
+ """ + with ops.name_scope(name, "RaggedSplitsToSegmentIds", [splits]) as name: + splits = ops.convert_to_tensor( + splits, name="splits", + preferred_dtype=dtypes.int64) + if splits.dtype not in (dtypes.int32, dtypes.int64): + raise ValueError("splits must have dtype int32 or int64") + splits.shape.assert_has_rank(1) + if tensor_shape.dimension_value(splits.shape[0]) == 0: + raise ValueError("Invalid row_splits: []") + if out_type is None: + out_type = splits.dtype + else: + out_type = dtypes.as_dtype(out_type) + row_lengths = splits[1:] - splits[:-1] + nrows = array_ops.shape(splits, out_type=out_type)[-1] - 1 + indices = math_ops.range(nrows) + return ragged_util.repeat(indices, repeats=row_lengths, axis=0) + + +# For background on "segments" and "segment ids", see: +# https://www.tensorflow.org/api_docs/python/tf/math#Segmentation +@tf_export("ragged.segment_ids_to_row_splits") +@dispatch.add_dispatch_support +def segment_ids_to_row_splits(segment_ids, num_segments=None, + out_type=None, name=None): + """Generates the RaggedTensor `row_splits` corresponding to a segmentation. + + Returns an integer vector `splits`, where `splits[0] = 0` and + `splits[i] = splits[i-1] + count(segment_ids==i)`. Example: + + >>> print(tf.ragged.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4])) + tf.Tensor([0 3 3 5 6 9], shape=(6,), dtype=int64) + + Args: + segment_ids: A 1-D integer Tensor. + num_segments: A scalar integer indicating the number of segments. Defaults + to `max(segment_ids) + 1` (or zero if `segment_ids` is empty). + out_type: The dtype for the return value. Defaults to `segment_ids.dtype`, + or `tf.int64` if `segment_ids` does not have a dtype. + name: A name prefix for the returned tensor (optional). + + Returns: + A sorted 1-D integer Tensor, with `shape=[num_segments + 1]`. + """ + # Local import bincount_ops to avoid import-cycle. + from tensorflow.python.ops import bincount_ops # pylint: disable=g-import-not-at-top + if out_type is None: + if isinstance(segment_ids, tensor.Tensor): + out_type = segment_ids.dtype + elif isinstance(num_segments, tensor.Tensor): + out_type = num_segments.dtype + else: + out_type = dtypes.int64 + else: + out_type = dtypes.as_dtype(out_type) + with ops.name_scope(name, "SegmentIdsToRaggedSplits", [segment_ids]) as name: + # Note: we cast int64 tensors to int32, since bincount currently only + # supports int32 inputs. + segment_ids = ragged_util.convert_to_int_tensor(segment_ids, "segment_ids", + dtype=dtypes.int32) + segment_ids.shape.assert_has_rank(1) + if num_segments is not None: + num_segments = ragged_util.convert_to_int_tensor(num_segments, + "num_segments", + dtype=dtypes.int32) + num_segments.shape.assert_has_rank(0) + + row_lengths = bincount_ops.bincount( + segment_ids, + minlength=num_segments, + maxlength=num_segments, + dtype=out_type) + splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0) + + # Update shape information, if possible. 
+ if num_segments is not None: + const_num_segments = tensor_util.constant_value(num_segments) + if const_num_segments is not None: + splits.set_shape(tensor_shape.TensorShape([const_num_segments + 1])) + + return splits diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.so b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.so new file mode 100644 index 0000000000000000000000000000000000000000..1f2df06ed202cc7f7263e03f328d7c5ef2630abd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f712cbaf9c3e05878f8ef1efd3dbf50a2f42acf09eac9dec8d53dd838204998d +size 324872 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.so b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.so new file mode 100644 index 0000000000000000000000000000000000000000..0e73a26eb0a6eeff3fc5951a0f4db9aa16341d59 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40009b895747648e93b5a8eff29f3c3aa86fdfc230c1f979559c53abd9c13a34 +size 203768 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_util_port.so b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_util_port.so new file mode 100644 index 0000000000000000000000000000000000000000..cda36ca3cbdda60eed4194ea8b30075f309debe3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_util_port.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db4befc28ecb3d24887a0aa3afa96491a3ddeac7a865d11ce5e493cf04677143 +size 274504