Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101 +3 -0
- parrot/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/lib/python3.10/site-packages/scipy/ndimage/_measurements.py +1680 -0
- parrot/lib/python3.10/site-packages/scipy/ndimage/_morphology.py +2537 -0
- parrot/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py +119 -0
- parrot/lib/python3.10/site-packages/scipy/ndimage/filters.py +27 -0
- parrot/lib/python3.10/site-packages/scipy/ndimage/measurements.py +24 -0
- parrot/lib/python3.10/site-packages/scipy/ndimage/morphology.py +27 -0
- parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py +1327 -0
- parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py +1419 -0
- parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc +0 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h +29 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h +98 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp +383 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp +12 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp +14 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp +61 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp +139 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp +64 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp +918 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp +113 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp +353 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp +140 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp +73 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp +161 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStoreBackend.hpp +77 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h +543 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp +180 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp +58 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp +187 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp +161 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h +13 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp +140 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp +52 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h +33 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp +104 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h +51 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h +34 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp +589 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp +81 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp +65 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h +93 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h +84 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_call.h +32 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_remote_call.h +49 -0
.gitattributes
CHANGED
|
@@ -1695,3 +1695,4 @@ vllm/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophant
|
|
| 1695 |
parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1696 |
vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1697 |
vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 1695 |
parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1696 |
vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1697 |
vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1698 |
+
parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101 filter=lfs diff=lfs merge=lfs -text
|
parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:79b37a526b50d6ebcd2255983198276718c29c0942d1fde96306e413041e01cb
|
| 3 |
+
size 3075448
|
parrot/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (91 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/ndimage/_measurements.py
ADDED
|
@@ -0,0 +1,1680 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2003-2005 Peter J. Verveer
|
| 2 |
+
#
|
| 3 |
+
# Redistribution and use in source and binary forms, with or without
|
| 4 |
+
# modification, are permitted provided that the following conditions
|
| 5 |
+
# are met:
|
| 6 |
+
#
|
| 7 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 8 |
+
# notice, this list of conditions and the following disclaimer.
|
| 9 |
+
#
|
| 10 |
+
# 2. Redistributions in binary form must reproduce the above
|
| 11 |
+
# copyright notice, this list of conditions and the following
|
| 12 |
+
# disclaimer in the documentation and/or other materials provided
|
| 13 |
+
# with the distribution.
|
| 14 |
+
#
|
| 15 |
+
# 3. The name of the author may not be used to endorse or promote
|
| 16 |
+
# products derived from this software without specific prior
|
| 17 |
+
# written permission.
|
| 18 |
+
#
|
| 19 |
+
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
| 20 |
+
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 21 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
| 23 |
+
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
| 25 |
+
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 27 |
+
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
| 31 |
+
import numpy as np
|
| 32 |
+
from . import _ni_support
|
| 33 |
+
from . import _ni_label
|
| 34 |
+
from . import _nd_image
|
| 35 |
+
from . import _morphology
|
| 36 |
+
|
| 37 |
+
__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',
|
| 38 |
+
'variance', 'standard_deviation', 'minimum', 'maximum', 'median',
|
| 39 |
+
'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',
|
| 40 |
+
'histogram', 'watershed_ift', 'sum_labels', 'value_indices']
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def label(input, structure=None, output=None):
|
| 44 |
+
"""
|
| 45 |
+
Label features in an array.
|
| 46 |
+
|
| 47 |
+
Parameters
|
| 48 |
+
----------
|
| 49 |
+
input : array_like
|
| 50 |
+
An array-like object to be labeled. Any non-zero values in `input` are
|
| 51 |
+
counted as features and zero values are considered the background.
|
| 52 |
+
structure : array_like, optional
|
| 53 |
+
A structuring element that defines feature connections.
|
| 54 |
+
`structure` must be centrosymmetric
|
| 55 |
+
(see Notes).
|
| 56 |
+
If no structuring element is provided,
|
| 57 |
+
one is automatically generated with a squared connectivity equal to
|
| 58 |
+
one. That is, for a 2-D `input` array, the default structuring element
|
| 59 |
+
is::
|
| 60 |
+
|
| 61 |
+
[[0,1,0],
|
| 62 |
+
[1,1,1],
|
| 63 |
+
[0,1,0]]
|
| 64 |
+
|
| 65 |
+
output : (None, data-type, array_like), optional
|
| 66 |
+
If `output` is a data type, it specifies the type of the resulting
|
| 67 |
+
labeled feature array.
|
| 68 |
+
If `output` is an array-like object, then `output` will be updated
|
| 69 |
+
with the labeled features from this function. This function can
|
| 70 |
+
operate in-place, by passing output=input.
|
| 71 |
+
Note that the output must be able to store the largest label, or this
|
| 72 |
+
function will raise an Exception.
|
| 73 |
+
|
| 74 |
+
Returns
|
| 75 |
+
-------
|
| 76 |
+
label : ndarray or int
|
| 77 |
+
An integer ndarray where each unique feature in `input` has a unique
|
| 78 |
+
label in the returned array.
|
| 79 |
+
num_features : int
|
| 80 |
+
How many objects were found.
|
| 81 |
+
|
| 82 |
+
If `output` is None, this function returns a tuple of
|
| 83 |
+
(`labeled_array`, `num_features`).
|
| 84 |
+
|
| 85 |
+
If `output` is a ndarray, then it will be updated with values in
|
| 86 |
+
`labeled_array` and only `num_features` will be returned by this
|
| 87 |
+
function.
|
| 88 |
+
|
| 89 |
+
See Also
|
| 90 |
+
--------
|
| 91 |
+
find_objects : generate a list of slices for the labeled features (or
|
| 92 |
+
objects); useful for finding features' position or
|
| 93 |
+
dimensions
|
| 94 |
+
|
| 95 |
+
Notes
|
| 96 |
+
-----
|
| 97 |
+
A centrosymmetric matrix is a matrix that is symmetric about the center.
|
| 98 |
+
See [1]_ for more information.
|
| 99 |
+
|
| 100 |
+
The `structure` matrix must be centrosymmetric to ensure
|
| 101 |
+
two-way connections.
|
| 102 |
+
For instance, if the `structure` matrix is not centrosymmetric
|
| 103 |
+
and is defined as::
|
| 104 |
+
|
| 105 |
+
[[0,1,0],
|
| 106 |
+
[1,1,0],
|
| 107 |
+
[0,0,0]]
|
| 108 |
+
|
| 109 |
+
and the `input` is::
|
| 110 |
+
|
| 111 |
+
[[1,2],
|
| 112 |
+
[0,3]]
|
| 113 |
+
|
| 114 |
+
then the structure matrix would indicate the
|
| 115 |
+
entry 2 in the input is connected to 1,
|
| 116 |
+
but 1 is not connected to 2.
|
| 117 |
+
|
| 118 |
+
References
|
| 119 |
+
----------
|
| 120 |
+
.. [1] James R. Weaver, "Centrosymmetric (cross-symmetric)
|
| 121 |
+
matrices, their basic properties, eigenvalues, and
|
| 122 |
+
eigenvectors." The American Mathematical Monthly 92.10
|
| 123 |
+
(1985): 711-717.
|
| 124 |
+
|
| 125 |
+
Examples
|
| 126 |
+
--------
|
| 127 |
+
Create an image with some features, then label it using the default
|
| 128 |
+
(cross-shaped) structuring element:
|
| 129 |
+
|
| 130 |
+
>>> from scipy.ndimage import label, generate_binary_structure
|
| 131 |
+
>>> import numpy as np
|
| 132 |
+
>>> a = np.array([[0,0,1,1,0,0],
|
| 133 |
+
... [0,0,0,1,0,0],
|
| 134 |
+
... [1,1,0,0,1,0],
|
| 135 |
+
... [0,0,0,1,0,0]])
|
| 136 |
+
>>> labeled_array, num_features = label(a)
|
| 137 |
+
|
| 138 |
+
Each of the 4 features are labeled with a different integer:
|
| 139 |
+
|
| 140 |
+
>>> num_features
|
| 141 |
+
4
|
| 142 |
+
>>> labeled_array
|
| 143 |
+
array([[0, 0, 1, 1, 0, 0],
|
| 144 |
+
[0, 0, 0, 1, 0, 0],
|
| 145 |
+
[2, 2, 0, 0, 3, 0],
|
| 146 |
+
[0, 0, 0, 4, 0, 0]])
|
| 147 |
+
|
| 148 |
+
Generate a structuring element that will consider features connected even
|
| 149 |
+
if they touch diagonally:
|
| 150 |
+
|
| 151 |
+
>>> s = generate_binary_structure(2,2)
|
| 152 |
+
|
| 153 |
+
or,
|
| 154 |
+
|
| 155 |
+
>>> s = [[1,1,1],
|
| 156 |
+
... [1,1,1],
|
| 157 |
+
... [1,1,1]]
|
| 158 |
+
|
| 159 |
+
Label the image using the new structuring element:
|
| 160 |
+
|
| 161 |
+
>>> labeled_array, num_features = label(a, structure=s)
|
| 162 |
+
|
| 163 |
+
Show the 2 labeled features (note that features 1, 3, and 4 from above are
|
| 164 |
+
now considered a single feature):
|
| 165 |
+
|
| 166 |
+
>>> num_features
|
| 167 |
+
2
|
| 168 |
+
>>> labeled_array
|
| 169 |
+
array([[0, 0, 1, 1, 0, 0],
|
| 170 |
+
[0, 0, 0, 1, 0, 0],
|
| 171 |
+
[2, 2, 0, 0, 1, 0],
|
| 172 |
+
[0, 0, 0, 1, 0, 0]])
|
| 173 |
+
|
| 174 |
+
"""
|
| 175 |
+
input = np.asarray(input)
|
| 176 |
+
if np.iscomplexobj(input):
|
| 177 |
+
raise TypeError('Complex type not supported')
|
| 178 |
+
if structure is None:
|
| 179 |
+
structure = _morphology.generate_binary_structure(input.ndim, 1)
|
| 180 |
+
structure = np.asarray(structure, dtype=bool)
|
| 181 |
+
if structure.ndim != input.ndim:
|
| 182 |
+
raise RuntimeError('structure and input must have equal rank')
|
| 183 |
+
for ii in structure.shape:
|
| 184 |
+
if ii != 3:
|
| 185 |
+
raise ValueError('structure dimensions must be equal to 3')
|
| 186 |
+
|
| 187 |
+
# Use 32 bits if it's large enough for this image.
|
| 188 |
+
# _ni_label.label() needs two entries for background and
|
| 189 |
+
# foreground tracking
|
| 190 |
+
need_64bits = input.size >= (2**31 - 2)
|
| 191 |
+
|
| 192 |
+
if isinstance(output, np.ndarray):
|
| 193 |
+
if output.shape != input.shape:
|
| 194 |
+
raise ValueError("output shape not correct")
|
| 195 |
+
caller_provided_output = True
|
| 196 |
+
else:
|
| 197 |
+
caller_provided_output = False
|
| 198 |
+
if output is None:
|
| 199 |
+
output = np.empty(input.shape, np.intp if need_64bits else np.int32)
|
| 200 |
+
else:
|
| 201 |
+
output = np.empty(input.shape, output)
|
| 202 |
+
|
| 203 |
+
# handle scalars, 0-D arrays
|
| 204 |
+
if input.ndim == 0 or input.size == 0:
|
| 205 |
+
if input.ndim == 0:
|
| 206 |
+
# scalar
|
| 207 |
+
maxlabel = 1 if (input != 0) else 0
|
| 208 |
+
output[...] = maxlabel
|
| 209 |
+
else:
|
| 210 |
+
# 0-D
|
| 211 |
+
maxlabel = 0
|
| 212 |
+
if caller_provided_output:
|
| 213 |
+
return maxlabel
|
| 214 |
+
else:
|
| 215 |
+
return output, maxlabel
|
| 216 |
+
|
| 217 |
+
try:
|
| 218 |
+
max_label = _ni_label._label(input, structure, output)
|
| 219 |
+
except _ni_label.NeedMoreBits as e:
|
| 220 |
+
# Make another attempt with enough bits, then try to cast to the
|
| 221 |
+
# new type.
|
| 222 |
+
tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32)
|
| 223 |
+
max_label = _ni_label._label(input, structure, tmp_output)
|
| 224 |
+
output[...] = tmp_output[...]
|
| 225 |
+
if not np.all(output == tmp_output):
|
| 226 |
+
# refuse to return bad results
|
| 227 |
+
raise RuntimeError(
|
| 228 |
+
"insufficient bit-depth in requested output type"
|
| 229 |
+
) from e
|
| 230 |
+
|
| 231 |
+
if caller_provided_output:
|
| 232 |
+
# result was written in-place
|
| 233 |
+
return max_label
|
| 234 |
+
else:
|
| 235 |
+
return output, max_label
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def find_objects(input, max_label=0):
|
| 239 |
+
"""
|
| 240 |
+
Find objects in a labeled array.
|
| 241 |
+
|
| 242 |
+
Parameters
|
| 243 |
+
----------
|
| 244 |
+
input : ndarray of ints
|
| 245 |
+
Array containing objects defined by different labels. Labels with
|
| 246 |
+
value 0 are ignored.
|
| 247 |
+
max_label : int, optional
|
| 248 |
+
Maximum label to be searched for in `input`. If max_label is not
|
| 249 |
+
given, the positions of all objects are returned.
|
| 250 |
+
|
| 251 |
+
Returns
|
| 252 |
+
-------
|
| 253 |
+
object_slices : list of tuples
|
| 254 |
+
A list of tuples, with each tuple containing N slices (with N the
|
| 255 |
+
dimension of the input array). Slices correspond to the minimal
|
| 256 |
+
parallelepiped that contains the object. If a number is missing,
|
| 257 |
+
None is returned instead of a slice. The label ``l`` corresponds to
|
| 258 |
+
the index ``l-1`` in the returned list.
|
| 259 |
+
|
| 260 |
+
See Also
|
| 261 |
+
--------
|
| 262 |
+
label, center_of_mass
|
| 263 |
+
|
| 264 |
+
Notes
|
| 265 |
+
-----
|
| 266 |
+
This function is very useful for isolating a volume of interest inside
|
| 267 |
+
a 3-D array, that cannot be "seen through".
|
| 268 |
+
|
| 269 |
+
Examples
|
| 270 |
+
--------
|
| 271 |
+
>>> from scipy import ndimage
|
| 272 |
+
>>> import numpy as np
|
| 273 |
+
>>> a = np.zeros((6,6), dtype=int)
|
| 274 |
+
>>> a[2:4, 2:4] = 1
|
| 275 |
+
>>> a[4, 4] = 1
|
| 276 |
+
>>> a[:2, :3] = 2
|
| 277 |
+
>>> a[0, 5] = 3
|
| 278 |
+
>>> a
|
| 279 |
+
array([[2, 2, 2, 0, 0, 3],
|
| 280 |
+
[2, 2, 2, 0, 0, 0],
|
| 281 |
+
[0, 0, 1, 1, 0, 0],
|
| 282 |
+
[0, 0, 1, 1, 0, 0],
|
| 283 |
+
[0, 0, 0, 0, 1, 0],
|
| 284 |
+
[0, 0, 0, 0, 0, 0]])
|
| 285 |
+
>>> ndimage.find_objects(a)
|
| 286 |
+
[(slice(2, 5, None), slice(2, 5, None)),
|
| 287 |
+
(slice(0, 2, None), slice(0, 3, None)),
|
| 288 |
+
(slice(0, 1, None), slice(5, 6, None))]
|
| 289 |
+
>>> ndimage.find_objects(a, max_label=2)
|
| 290 |
+
[(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))]
|
| 291 |
+
>>> ndimage.find_objects(a == 1, max_label=2)
|
| 292 |
+
[(slice(2, 5, None), slice(2, 5, None)), None]
|
| 293 |
+
|
| 294 |
+
>>> loc = ndimage.find_objects(a)[0]
|
| 295 |
+
>>> a[loc]
|
| 296 |
+
array([[1, 1, 0],
|
| 297 |
+
[1, 1, 0],
|
| 298 |
+
[0, 0, 1]])
|
| 299 |
+
|
| 300 |
+
"""
|
| 301 |
+
input = np.asarray(input)
|
| 302 |
+
if np.iscomplexobj(input):
|
| 303 |
+
raise TypeError('Complex type not supported')
|
| 304 |
+
|
| 305 |
+
if max_label < 1:
|
| 306 |
+
max_label = input.max()
|
| 307 |
+
|
| 308 |
+
return _nd_image.find_objects(input, max_label)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def value_indices(arr, *, ignore_value=None):
|
| 312 |
+
"""
|
| 313 |
+
Find indices of each distinct value in given array.
|
| 314 |
+
|
| 315 |
+
Parameters
|
| 316 |
+
----------
|
| 317 |
+
arr : ndarray of ints
|
| 318 |
+
Array containing integer values.
|
| 319 |
+
ignore_value : int, optional
|
| 320 |
+
This value will be ignored in searching the `arr` array. If not
|
| 321 |
+
given, all values found will be included in output. Default
|
| 322 |
+
is None.
|
| 323 |
+
|
| 324 |
+
Returns
|
| 325 |
+
-------
|
| 326 |
+
indices : dictionary
|
| 327 |
+
A Python dictionary of array indices for each distinct value. The
|
| 328 |
+
dictionary is keyed by the distinct values, the entries are array
|
| 329 |
+
index tuples covering all occurrences of the value within the
|
| 330 |
+
array.
|
| 331 |
+
|
| 332 |
+
This dictionary can occupy significant memory, usually several times
|
| 333 |
+
the size of the input array.
|
| 334 |
+
|
| 335 |
+
See Also
|
| 336 |
+
--------
|
| 337 |
+
label, maximum, median, minimum_position, extrema, sum, mean, variance,
|
| 338 |
+
standard_deviation, numpy.where, numpy.unique
|
| 339 |
+
|
| 340 |
+
Notes
|
| 341 |
+
-----
|
| 342 |
+
For a small array with few distinct values, one might use
|
| 343 |
+
`numpy.unique()` to find all possible values, and ``(arr == val)`` to
|
| 344 |
+
locate each value within that array. However, for large arrays,
|
| 345 |
+
with many distinct values, this can become extremely inefficient,
|
| 346 |
+
as locating each value would require a new search through the entire
|
| 347 |
+
array. Using this function, there is essentially one search, with
|
| 348 |
+
the indices saved for all distinct values.
|
| 349 |
+
|
| 350 |
+
This is useful when matching a categorical image (e.g. a segmentation
|
| 351 |
+
or classification) to an associated image of other data, allowing
|
| 352 |
+
any per-class statistic(s) to then be calculated. Provides a
|
| 353 |
+
more flexible alternative to functions like ``scipy.ndimage.mean()``
|
| 354 |
+
and ``scipy.ndimage.variance()``.
|
| 355 |
+
|
| 356 |
+
Some other closely related functionality, with different strengths and
|
| 357 |
+
weaknesses, can also be found in ``scipy.stats.binned_statistic()`` and
|
| 358 |
+
the `scikit-image <https://scikit-image.org/>`_ function
|
| 359 |
+
``skimage.measure.regionprops()``.
|
| 360 |
+
|
| 361 |
+
Note for IDL users: this provides functionality equivalent to IDL's
|
| 362 |
+
REVERSE_INDICES option (as per the IDL documentation for the
|
| 363 |
+
`HISTOGRAM <https://www.l3harrisgeospatial.com/docs/histogram.html>`_
|
| 364 |
+
function).
|
| 365 |
+
|
| 366 |
+
.. versionadded:: 1.10.0
|
| 367 |
+
|
| 368 |
+
Examples
|
| 369 |
+
--------
|
| 370 |
+
>>> import numpy as np
|
| 371 |
+
>>> from scipy import ndimage
|
| 372 |
+
>>> a = np.zeros((6, 6), dtype=int)
|
| 373 |
+
>>> a[2:4, 2:4] = 1
|
| 374 |
+
>>> a[4, 4] = 1
|
| 375 |
+
>>> a[:2, :3] = 2
|
| 376 |
+
>>> a[0, 5] = 3
|
| 377 |
+
>>> a
|
| 378 |
+
array([[2, 2, 2, 0, 0, 3],
|
| 379 |
+
[2, 2, 2, 0, 0, 0],
|
| 380 |
+
[0, 0, 1, 1, 0, 0],
|
| 381 |
+
[0, 0, 1, 1, 0, 0],
|
| 382 |
+
[0, 0, 0, 0, 1, 0],
|
| 383 |
+
[0, 0, 0, 0, 0, 0]])
|
| 384 |
+
>>> val_indices = ndimage.value_indices(a)
|
| 385 |
+
|
| 386 |
+
The dictionary `val_indices` will have an entry for each distinct
|
| 387 |
+
value in the input array.
|
| 388 |
+
|
| 389 |
+
>>> val_indices.keys()
|
| 390 |
+
dict_keys([np.int64(0), np.int64(1), np.int64(2), np.int64(3)])
|
| 391 |
+
|
| 392 |
+
The entry for each value is an index tuple, locating the elements
|
| 393 |
+
with that value.
|
| 394 |
+
|
| 395 |
+
>>> ndx1 = val_indices[1]
|
| 396 |
+
>>> ndx1
|
| 397 |
+
(array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4]))
|
| 398 |
+
|
| 399 |
+
This can be used to index into the original array, or any other
|
| 400 |
+
array with the same shape.
|
| 401 |
+
|
| 402 |
+
>>> a[ndx1]
|
| 403 |
+
array([1, 1, 1, 1, 1])
|
| 404 |
+
|
| 405 |
+
If the zeros were to be ignored, then the resulting dictionary
|
| 406 |
+
would no longer have an entry for zero.
|
| 407 |
+
|
| 408 |
+
>>> val_indices = ndimage.value_indices(a, ignore_value=0)
|
| 409 |
+
>>> val_indices.keys()
|
| 410 |
+
dict_keys([np.int64(1), np.int64(2), np.int64(3)])
|
| 411 |
+
|
| 412 |
+
"""
|
| 413 |
+
# Cope with ignore_value being None, without too much extra complexity
|
| 414 |
+
# in the C code. If not None, the value is passed in as a numpy array
|
| 415 |
+
# with the same dtype as arr.
|
| 416 |
+
ignore_value_arr = np.zeros((1,), dtype=arr.dtype)
|
| 417 |
+
ignoreIsNone = (ignore_value is None)
|
| 418 |
+
if not ignoreIsNone:
|
| 419 |
+
ignore_value_arr[0] = ignore_value_arr.dtype.type(ignore_value)
|
| 420 |
+
|
| 421 |
+
val_indices = _nd_image.value_indices(arr, ignoreIsNone, ignore_value_arr)
|
| 422 |
+
return val_indices
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
def labeled_comprehension(input, labels, index, func, out_dtype, default,
                          pass_positions=False):
    """
    Roughly equivalent to [func(input[labels == i]) for i in index].

    Sequentially applies an arbitrary function (that works on array_like input)
    to subsets of an N-D image array specified by `labels` and `index`.
    The option exists to provide the function with positional parameters as the
    second argument.

    Parameters
    ----------
    input : array_like
        Data from which to select `labels` to process.
    labels : array_like or None
        Labels to objects in `input`.
        If not None, array must be same shape as `input`.
        If None, `func` is applied to raveled `input`.
    index : int, sequence of ints or None
        Subset of `labels` to which to apply `func`.
        If a scalar, a single value is returned.
        If None, `func` is applied to all non-zero values of `labels`.
    func : callable
        Python function to apply to `labels` from `input`.
    out_dtype : dtype
        Dtype to use for `result`.
    default : int, float or None
        Default return value when a element of `index` does not exist
        in `labels`.
    pass_positions : bool, optional
        If True, pass linear indices to `func` as a second argument.
        Default is False.

    Returns
    -------
    result : ndarray
        Result of applying `func` to each of `labels` to `input` in `index`.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> lbl, nlbl = ndimage.label(a)
    >>> lbls = np.arange(1, nlbl+1)
    >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0)
    array([ 2.75, 5.5 , 6. ])

    Falling back to `default`:

    >>> lbls = np.arange(1, nlbl+2)
    >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1)
    array([ 2.75, 5.5 , 6. , -1. ])

    Passing positions:

    >>> def fn(val, pos):
    ...     print("fn says: %s : %s" % (val, pos))
    ...     return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum())
    ...
    >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True)
    fn says: [1 2 5 3] : [0 1 4 5]
    fn says: [4 7] : [ 7 11]
    fn says: [9 3] : [12 13]
    array([ 11., 11., -12., 0.])

    """

    # Remember whether a scalar (single value) rather than an array was asked for.
    as_scalar = np.isscalar(index)
    input = np.asarray(input)

    if pass_positions:
        # Linear (raveled) index of every element, shaped like `input`, so it
        # can be masked/sorted alongside the data below.
        positions = np.arange(input.size).reshape(input.shape)

    if labels is None:
        # No labels: the whole (raveled) array is the single group.
        if index is not None:
            raise ValueError("index without defined labels")
        if not pass_positions:
            return func(input.ravel())
        else:
            return func(input.ravel(), positions.ravel())

    try:
        input, labels = np.broadcast_arrays(input, labels)
    except ValueError as e:
        raise ValueError("input and labels must have the same shape "
                            "(excepting dimensions with width 1)") from e

    if index is None:
        # No index: all positively-labeled elements form one group.
        if not pass_positions:
            return func(input[labels > 0])
        else:
            return func(input[labels > 0], positions[labels > 0])

    index = np.atleast_1d(index)
    # Round-trip cast check: refuse index values that can't be represented
    # exactly in the labels' dtype (the comparison below would silently fail).
    if np.any(index.astype(labels.dtype).astype(index.dtype) != index):
        raise ValueError(f"Cannot convert index values from <{index.dtype}> to "
                         f"<{labels.dtype}> (labels' type) without loss of precision")

    index = index.astype(labels.dtype)

    # optimization: find min/max in index,
    # and select those parts of labels, input, and positions
    lo = index.min()
    hi = index.max()
    mask = (labels >= lo) & (labels <= hi)

    # this also ravels the arrays
    labels = labels[mask]
    input = input[mask]
    if pass_positions:
        positions = positions[mask]

    # sort everything by labels
    label_order = labels.argsort()
    labels = labels[label_order]
    input = input[label_order]
    if pass_positions:
        positions = positions[label_order]

    index_order = index.argsort()
    sorted_index = index[index_order]

    def do_map(inputs, output):
        """labels must be sorted"""
        nidx = sorted_index.size

        # Find boundaries for each stretch of constant labels
        # This could be faster, but we already paid N log N to sort labels.
        lo = np.searchsorted(labels, sorted_index, side='left')
        hi = np.searchsorted(labels, sorted_index, side='right')

        for i, l, h in zip(range(nidx), lo, hi):
            if l == h:
                # Label absent from `labels`: leave the `default` in place.
                continue
            output[i] = func(*[inp[l:h] for inp in inputs])

    # Results are computed in sorted-index order into `temp`, prefilled with
    # `default` for labels that never occur.
    temp = np.empty(index.shape, out_dtype)
    temp[:] = default
    if not pass_positions:
        do_map([input], temp)
    else:
        do_map([input, positions], temp)

    # Scatter the sorted results back to the caller's original index order.
    output = np.zeros(index.shape, out_dtype)
    output[index_order] = temp
    if as_scalar:
        output = output[0]

    return output
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
def _safely_castable_to_int(dt):
|
| 581 |
+
"""Test whether the NumPy data type `dt` can be safely cast to an int."""
|
| 582 |
+
int_size = np.dtype(int).itemsize
|
| 583 |
+
safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or
|
| 584 |
+
(np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size))
|
| 585 |
+
return safe
|
| 586 |
+
|
| 587 |
+
|
| 588 |
+
def _stats(input, labels=None, index=None, centered=False):
    """Count, sum, and optionally compute (sum - centre)^2 of input by label

    Parameters
    ----------
    input : array_like, N-D
        The input data to be analyzed.
    labels : array_like (N-D), optional
        The labels of the data in `input`. This array must be broadcast
        compatible with `input`; typically, it is the same shape as `input`.
        If `labels` is None, all nonzero values in `input` are treated as
        the single labeled group.
    index : label or sequence of labels, optional
        These are the labels of the groups for which the stats are computed.
        If `index` is None, the stats are computed for the single group where
        `labels` is greater than 0.
    centered : bool, optional
        If True, the centered sum of squares for each labeled group is
        also returned. Default is False.

    Returns
    -------
    counts : int or ndarray of ints
        The number of elements in each labeled group.
    sums : scalar or ndarray of scalars
        The sums of the values in each labeled group.
    sums_c : scalar or ndarray of scalars, optional
        The sums of mean-centered squares of the values in each labeled group.
        This is only returned if `centered` is True.

    """
    def single_group(vals):
        # Stats for one flat group of values. The conjugate product keeps the
        # centered sum of squares real for complex input.
        if centered:
            vals_c = vals - vals.mean()
            return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum()
        else:
            return vals.size, vals.sum()

    if labels is None:
        return single_group(input)

    # ensure input and labels match sizes
    input, labels = np.broadcast_arrays(input, labels)

    if index is None:
        return single_group(input[labels > 0])

    if np.isscalar(index):
        return single_group(input[labels == index])

    def _sum_centered(labels):
        # `labels` is expected to be an ndarray with the same shape as `input`.
        # It must contain the label indices (which are not necessarily the labels
        # themselves).
        # Closes over `sums` and `counts` computed below before this is called.
        means = sums / counts
        centered_input = input - means[labels]
        # bincount expects 1-D inputs, so we ravel the arguments.
        bc = np.bincount(labels.ravel(),
                         weights=(centered_input *
                                  centered_input.conjugate()).ravel())
        return bc

    # Remap labels to unique integers if necessary, or if the largest
    # label is larger than the number of values.

    if (not _safely_castable_to_int(labels.dtype) or
            labels.min() < 0 or labels.max() > labels.size):
        # Use np.unique to generate the label indices. `new_labels` will
        # be 1-D, but it should be interpreted as the flattened N-D array of
        # label indices.
        unique_labels, new_labels = np.unique(labels, return_inverse=True)
        new_labels = np.reshape(new_labels, (-1,))  # flatten, since it may be >1-D
        counts = np.bincount(new_labels)
        sums = np.bincount(new_labels, weights=input.ravel())
        if centered:
            # Compute the sum of the mean-centered squares.
            # We must reshape new_labels to the N-D shape of `input` before
            # passing it _sum_centered.
            sums_c = _sum_centered(new_labels.reshape(labels.shape))
        # Map each requested index to its slot among the unique labels.
        idxs = np.searchsorted(unique_labels, index)
        # make all of idxs valid
        idxs[idxs >= unique_labels.size] = 0
        found = (unique_labels[idxs] == index)
    else:
        # labels are an integer type allowed by bincount, and there aren't too
        # many, so call bincount directly.
        counts = np.bincount(labels.ravel())
        sums = np.bincount(labels.ravel(), weights=input.ravel())
        if centered:
            sums_c = _sum_centered(labels)
        # make sure all index values are valid
        idxs = np.asanyarray(index, np.int_).copy()
        found = (idxs >= 0) & (idxs < counts.size)
        idxs[~found] = 0

    # Gather per-index results; indices not present in `labels` get 0.
    counts = counts[idxs]
    counts[~found] = 0
    sums = sums[idxs]
    sums[~found] = 0

    if not centered:
        return (counts, sums)
    else:
        sums_c = sums_c[idxs]
        sums_c[~found] = 0
        return (counts, sums, sums_c)
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
def sum(input, labels=None, index=None):
    """
    Calculate the sum of the values of the array.

    Notes
    -----
    This is an alias for `ndimage.sum_labels` kept for backwards compatibility
    reasons, for new code please prefer `sum_labels`. See the `sum_labels`
    docstring for more details.

    """
    # Pure delegation; kept only so existing callers of `ndimage.sum` work.
    return sum_labels(input, labels=labels, index=index)
|
| 708 |
+
|
| 709 |
+
|
| 710 |
+
def sum_labels(input, labels=None, index=None):
    """
    Calculate the sum of the values of the array.

    Parameters
    ----------
    input : array_like
        Values of `input` inside the regions defined by `labels`
        are summed together.
    labels : array_like of ints, optional
        Assign labels to the values of the array. Has to have the same shape as
        `input`.
    index : array_like, optional
        A single label number or a sequence of label numbers of
        the objects to be measured.

    Returns
    -------
    sum : ndarray or scalar
        An array of the sums of values of `input` inside the regions defined
        by `labels` with the same shape as `index`. If 'index' is None or scalar,
        a scalar is returned.

    See Also
    --------
    mean, median

    Examples
    --------
    >>> from scipy import ndimage
    >>> input = [0,1,2,3]
    >>> labels = [1,1,2,2]
    >>> ndimage.sum_labels(input, labels, index=[1,2])
    [1.0, 5.0]
    >>> ndimage.sum_labels(input, labels, index=1)
    1
    >>> ndimage.sum_labels(input, labels)
    6


    """
    # Renamed the local from `sum` to `total` so the builtin `sum` is not
    # shadowed inside this function. The count is not needed here.
    _count, total = _stats(input, labels, index)
    return total
|
| 753 |
+
|
| 754 |
+
|
| 755 |
+
def mean(input, labels=None, index=None):
    """
    Calculate the mean of the values of an array at labels.

    Parameters
    ----------
    input : array_like
        Array on which to compute the mean of elements over distinct
        regions.
    labels : array_like, optional
        Array of labels of same shape, or broadcastable to the same shape as
        `input`. All elements sharing the same label form one region over
        which the mean of the elements is computed.
    index : int or sequence of ints, optional
        Labels of the objects over which the mean is to be computed.
        Default is None, in which case the mean for all values where label is
        greater than 0 is calculated.

    Returns
    -------
    out : list
        Sequence of same length as `index`, with the mean of the different
        regions labeled by the labels in `index`.

    See Also
    --------
    variance, standard_deviation, minimum, maximum, sum, label

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.arange(25).reshape((5,5))
    >>> labels = np.zeros_like(a)
    >>> labels[3:5,3:5] = 1
    >>> index = np.unique(labels)
    >>> labels
    array([[0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1],
           [0, 0, 0, 1, 1]])
    >>> index
    array([0, 1])
    >>> ndimage.mean(a, labels=labels, index=index)
    [10.285714285714286, 21.0]

    """

    # Renamed the local from `sum` to `total` so the builtin `sum` is not
    # shadowed inside this function.
    count, total = _stats(input, labels, index)
    # Cast counts to float64 so integer inputs produce a floating-point mean.
    return total / np.asanyarray(count).astype(np.float64)
|
| 806 |
+
|
| 807 |
+
|
| 808 |
+
def variance(input, labels=None, index=None):
    """
    Calculate the variance of the values of an N-D image array, optionally at
    specified sub-regions.

    Parameters
    ----------
    input : array_like
        Nd-image data to process.
    labels : array_like, optional
        Labels defining sub-regions in `input`.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        `labels` to include in output. If None (default), all values where
        `labels` is non-zero are used.

    Returns
    -------
    variance : float or ndarray
        Values of variance, for each sub-region if `labels` and `index` are
        specified.

    See Also
    --------
    label, standard_deviation, maximum, minimum, extrema

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> ndimage.variance(a)
    7.609375

    Features to process can be specified using `labels` and `index`:

    >>> lbl, nlbl = ndimage.label(a)
    >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1))
    array([ 2.1875, 2.25 , 9. ])

    If no index is given, all non-zero `labels` are processed:

    >>> ndimage.variance(a, lbl)
    6.1875

    """
    # The per-label sum is not used here; the underscore name also avoids
    # shadowing the builtin `sum` as the previous local name did.
    count, _total, sum_c_sq = _stats(input, labels, index, centered=True)
    # Population variance: centered sum of squares divided by the count.
    return sum_c_sq / np.asanyarray(count).astype(float)
|
| 859 |
+
|
| 860 |
+
|
| 861 |
+
def standard_deviation(input, labels=None, index=None):
    """
    Calculate the standard deviation of the values of an N-D image array,
    optionally at specified sub-regions.

    Parameters
    ----------
    input : array_like
        N-D image data to process.
    labels : array_like, optional
        Labels to identify sub-regions in `input`.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        `labels` to include in output. If None (default), all values where
        `labels` is non-zero are used.

    Returns
    -------
    standard_deviation : float or ndarray
        Values of standard deviation, for each sub-region if `labels` and
        `index` are specified.

    See Also
    --------
    label, variance, maximum, minimum, extrema

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> ndimage.standard_deviation(a)
    2.7585095613392387

    Features to process can be specified using `labels` and `index`:

    >>> lbl, nlbl = ndimage.label(a)
    >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1))
    array([ 1.479, 1.5 , 3. ])

    If no index is given, non-zero `labels` are processed:

    >>> ndimage.standard_deviation(a, lbl)
    2.4874685927665499

    """
    # Standard deviation is just the square root of the per-region variance.
    var = variance(input, labels, index)
    return np.sqrt(var)
|
| 911 |
+
|
| 912 |
+
|
| 913 |
+
def _select(input, labels=None, index=None, find_min=False, find_max=False,
            find_min_positions=False, find_max_positions=False,
            find_median=False):
    """Returns min, max, or both, plus their positions (if requested), and
    median."""

    input = np.asanyarray(input)

    find_positions = find_min_positions or find_max_positions
    positions = None
    if find_positions:
        # Linear (raveled) index of each element, shaped like `input`.
        positions = np.arange(input.size).reshape(input.shape)

    def single_group(vals, positions):
        # Compute the requested statistics for one flat group of values.
        # The order of the appends fixes the order of the returned list.
        result = []
        if find_min:
            result += [vals.min()]
        if find_min_positions:
            result += [positions[vals == vals.min()][0]]
        if find_max:
            result += [vals.max()]
        if find_max_positions:
            result += [positions[vals == vals.max()][0]]
        if find_median:
            result += [np.median(vals)]
        return result

    if labels is None:
        return single_group(input, positions)

    # ensure input and labels match sizes
    input, labels = np.broadcast_arrays(input, labels)

    if index is None:
        mask = (labels > 0)
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    if np.isscalar(index):
        mask = (labels == index)
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    # remap labels to unique integers if necessary, or if the largest
    # label is larger than the number of values.
    if (not _safely_castable_to_int(labels.dtype) or
            labels.min() < 0 or labels.max() > labels.size):
        # remap labels, and indexes
        unique_labels, labels = np.unique(labels, return_inverse=True)
        idxs = np.searchsorted(unique_labels, index)

        # make all of idxs valid
        idxs[idxs >= unique_labels.size] = 0
        found = (unique_labels[idxs] == index)
    else:
        # labels are an integer type, and there aren't too many
        idxs = np.asanyarray(index, np.int_).copy()
        found = (idxs >= 0) & (idxs <= labels.max())

    # Route not-found indices to an extra slot past the last real label, so
    # the gathers below stay in bounds and the junk slot absorbs them.
    idxs[~ found] = labels.max() + 1

    if find_median:
        # Sort primarily by label, secondarily by value, so each label's
        # values form a sorted contiguous run (needed for the median logic).
        order = np.lexsort((input.ravel(), labels.ravel()))
    else:
        order = input.ravel().argsort()
    input = input.ravel()[order]
    labels = labels.ravel()[order]
    if find_positions:
        positions = positions.ravel()[order]

    result = []
    # The scatter assignments below rely on `input` being sorted ascending:
    # assigning in forward order leaves the LAST (largest) value per label,
    # assigning in reverse order leaves the FIRST (smallest) value per label.
    if find_min:
        mins = np.zeros(labels.max() + 2, input.dtype)
        mins[labels[::-1]] = input[::-1]
        result += [mins[idxs]]
    if find_min_positions:
        minpos = np.zeros(labels.max() + 2, int)
        minpos[labels[::-1]] = positions[::-1]
        result += [minpos[idxs]]
    if find_max:
        maxs = np.zeros(labels.max() + 2, input.dtype)
        maxs[labels] = input
        result += [maxs[idxs]]
    if find_max_positions:
        maxpos = np.zeros(labels.max() + 2, int)
        maxpos[labels] = positions
        result += [maxpos[idxs]]
    if find_median:
        locs = np.arange(len(labels))
        lo = np.zeros(labels.max() + 2, np.int_)
        lo[labels[::-1]] = locs[::-1]
        hi = np.zeros(labels.max() + 2, np.int_)
        hi[labels] = locs
        lo = lo[idxs]
        hi = hi[idxs]
        # lo is an index to the lowest value in input for each label,
        # hi is an index to the largest value.
        # move them to be either the same ((hi - lo) % 2 == 0) or next
        # to each other ((hi - lo) % 2 == 1), then average.
        step = (hi - lo) // 2
        lo += step
        hi -= step
        if (np.issubdtype(input.dtype, np.integer)
                or np.issubdtype(input.dtype, np.bool_)):
            # avoid integer overflow or boolean addition (gh-12836)
            result += [(input[lo].astype('d') + input[hi].astype('d')) / 2.0]
        else:
            result += [(input[lo] + input[hi]) / 2.0]

    return result
|
| 1027 |
+
|
| 1028 |
+
|
| 1029 |
+
def minimum(input, labels=None, index=None):
    """
    Calculate the minimum of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array_like of values. For each region specified by `labels`, the
        minimal values of `input` over the region is computed.
    labels : array_like, optional
        An array_like of integers marking different regions over which the
        minimum value of `input` is to be computed. `labels` must have the
        same shape as `input`. If `labels` is not specified, the minimum
        over the whole array is returned.
    index : array_like, optional
        A list of region labels that are taken into account for computing the
        minima. If index is None, the minimum over all elements where `labels`
        is non-zero is returned.

    Returns
    -------
    minimum : float or list of floats
        List of minima of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the minimal value of `input` if `labels` is None,
        and the minimal value of elements where `labels` is greater than zero
        if `index` is None.

    See Also
    --------
    label, maximum, median, minimum_position, extrema, sum, mean, variance,
    standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(a)
    >>> labels
    array([[1, 1, 0, 0],
           [1, 1, 0, 2],
           [0, 0, 0, 2],
           [3, 3, 0, 0]])
    >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1))
    [1.0, 4.0, 3.0]
    >>> ndimage.minimum(a)
    0.0
    >>> ndimage.minimum(a, labels=labels)
    1.0

    """
    # _select returns a list of requested statistics; only the minima were
    # asked for, so the single entry is unwrapped before returning.
    selected = _select(input, labels, index, find_min=True)
    return selected[0]
|
| 1090 |
+
|
| 1091 |
+
|
| 1092 |
+
def maximum(input, labels=None, index=None):
    """
    Calculate the maximum of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array_like of values. For each region specified by `labels`, the
        maximal values of `input` over the region is computed.
    labels : array_like, optional
        An array of integers marking different regions over which the
        maximum value of `input` is to be computed. `labels` must have the
        same shape as `input`. If `labels` is not specified, the maximum
        over the whole array is returned.
    index : array_like, optional
        A list of region labels that are taken into account for computing the
        maxima. If index is None, the maximum over all elements where `labels`
        is non-zero is returned.

    Returns
    -------
    output : float or list of floats
        List of maxima of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the maximal value of `input` if `labels` is None,
        and the maximal value of elements where `labels` is greater than zero
        if `index` is None.

    See Also
    --------
    label, minimum, median, maximum_position, extrema, sum, mean, variance,
    standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(16).reshape((4,4))
    >>> a
    array([[ 0, 1, 2, 3],
           [ 4, 5, 6, 7],
           [ 8, 9, 10, 11],
           [12, 13, 14, 15]])
    >>> labels = np.zeros_like(a)
    >>> labels[:2,:2] = 1
    >>> labels[2:, 1:3] = 2
    >>> labels
    array([[1, 1, 0, 0],
           [1, 1, 0, 0],
           [0, 2, 2, 0],
           [0, 2, 2, 0]])
    >>> from scipy import ndimage
    >>> ndimage.maximum(a)
    15.0
    >>> ndimage.maximum(a, labels=labels, index=[1,2])
    [5.0, 14.0]
    >>> ndimage.maximum(a, labels=labels)
    14.0

    >>> b = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(b)
    >>> labels
    array([[1, 1, 0, 0],
           [1, 1, 0, 2],
           [0, 0, 0, 2],
           [3, 3, 0, 0]])
    >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1))
    [5.0, 7.0, 9.0]

    """
    # _select returns a list of requested statistics; only the maxima were
    # asked for, so the single entry is unwrapped before returning.
    selected = _select(input, labels, index, find_max=True)
    return selected[0]
|
| 1170 |
+
|
| 1171 |
+
|
| 1172 |
+
def median(input, labels=None, index=None):
    """
    Calculate the median of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array_like of values. For each region specified by `labels`, the
        median value of `input` over the region is computed.
    labels : array_like, optional
        An array_like of integers marking different regions over which the
        median value of `input` is to be computed. `labels` must have the
        same shape as `input`. If `labels` is not specified, the median
        over the whole array is returned.
    index : array_like, optional
        A list of region labels that are taken into account for computing the
        medians. If index is None, the median over all elements where `labels`
        is non-zero is returned.

    Returns
    -------
    median : float or list of floats
        List of medians of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the median value of `input` if `labels` is None,
        and the median value of elements where `labels` is greater than zero
        if `index` is None.

    See Also
    --------
    label, minimum, maximum, extrema, sum, mean, variance, standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 1],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(a)
    >>> labels
    array([[1, 1, 0, 2],
           [1, 1, 0, 2],
           [0, 0, 0, 2],
           [3, 3, 0, 0]])
    >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1))
    [2.5, 4.0, 6.0]
    >>> ndimage.median(a)
    1.0
    >>> ndimage.median(a, labels=labels)
    3.0

    """
    # _select returns a list of requested statistics; only the medians were
    # asked for, so the single entry is unwrapped before returning.
    selected = _select(input, labels, index, find_median=True)
    return selected[0]
|
| 1232 |
+
|
| 1233 |
+
|
| 1234 |
+
def minimum_position(input, labels=None, index=None):
|
| 1235 |
+
"""
|
| 1236 |
+
Find the positions of the minimums of the values of an array at labels.
|
| 1237 |
+
|
| 1238 |
+
Parameters
|
| 1239 |
+
----------
|
| 1240 |
+
input : array_like
|
| 1241 |
+
Array_like of values.
|
| 1242 |
+
labels : array_like, optional
|
| 1243 |
+
An array of integers marking different regions over which the
|
| 1244 |
+
position of the minimum value of `input` is to be computed.
|
| 1245 |
+
`labels` must have the same shape as `input`. If `labels` is not
|
| 1246 |
+
specified, the location of the first minimum over the whole
|
| 1247 |
+
array is returned.
|
| 1248 |
+
|
| 1249 |
+
The `labels` argument only works when `index` is specified.
|
| 1250 |
+
index : array_like, optional
|
| 1251 |
+
A list of region labels that are taken into account for finding the
|
| 1252 |
+
location of the minima. If `index` is None, the ``first`` minimum
|
| 1253 |
+
over all elements where `labels` is non-zero is returned.
|
| 1254 |
+
|
| 1255 |
+
The `index` argument only works when `labels` is specified.
|
| 1256 |
+
|
| 1257 |
+
Returns
|
| 1258 |
+
-------
|
| 1259 |
+
output : list of tuples of ints
|
| 1260 |
+
Tuple of ints or list of tuples of ints that specify the location
|
| 1261 |
+
of minima of `input` over the regions determined by `labels` and
|
| 1262 |
+
whose index is in `index`.
|
| 1263 |
+
|
| 1264 |
+
If `index` or `labels` are not specified, a tuple of ints is
|
| 1265 |
+
returned specifying the location of the first minimal value of `input`.
|
| 1266 |
+
|
| 1267 |
+
See Also
|
| 1268 |
+
--------
|
| 1269 |
+
label, minimum, median, maximum_position, extrema, sum, mean, variance,
|
| 1270 |
+
standard_deviation
|
| 1271 |
+
|
| 1272 |
+
Examples
|
| 1273 |
+
--------
|
| 1274 |
+
>>> import numpy as np
|
| 1275 |
+
>>> a = np.array([[10, 20, 30],
|
| 1276 |
+
... [40, 80, 100],
|
| 1277 |
+
... [1, 100, 200]])
|
| 1278 |
+
>>> b = np.array([[1, 2, 0, 1],
|
| 1279 |
+
... [5, 3, 0, 4],
|
| 1280 |
+
... [0, 0, 0, 7],
|
| 1281 |
+
... [9, 3, 0, 0]])
|
| 1282 |
+
|
| 1283 |
+
>>> from scipy import ndimage
|
| 1284 |
+
|
| 1285 |
+
>>> ndimage.minimum_position(a)
|
| 1286 |
+
(2, 0)
|
| 1287 |
+
>>> ndimage.minimum_position(b)
|
| 1288 |
+
(0, 2)
|
| 1289 |
+
|
| 1290 |
+
Features to process can be specified using `labels` and `index`:
|
| 1291 |
+
|
| 1292 |
+
>>> label, pos = ndimage.label(a)
|
| 1293 |
+
>>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1))
|
| 1294 |
+
[(2, 0)]
|
| 1295 |
+
|
| 1296 |
+
>>> label, pos = ndimage.label(b)
|
| 1297 |
+
>>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1))
|
| 1298 |
+
[(0, 0), (0, 3), (3, 1)]
|
| 1299 |
+
|
| 1300 |
+
"""
|
| 1301 |
+
dims = np.array(np.asarray(input).shape)
|
| 1302 |
+
# see np.unravel_index to understand this line.
|
| 1303 |
+
dim_prod = np.cumprod([1] + list(dims[:0:-1]))[::-1]
|
| 1304 |
+
|
| 1305 |
+
result = _select(input, labels, index, find_min_positions=True)[0]
|
| 1306 |
+
|
| 1307 |
+
if np.isscalar(result):
|
| 1308 |
+
return tuple((result // dim_prod) % dims)
|
| 1309 |
+
|
| 1310 |
+
return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
|
| 1311 |
+
|
| 1312 |
+
|
| 1313 |
+
def maximum_position(input, labels=None, index=None):
|
| 1314 |
+
"""
|
| 1315 |
+
Find the positions of the maximums of the values of an array at labels.
|
| 1316 |
+
|
| 1317 |
+
For each region specified by `labels`, the position of the maximum
|
| 1318 |
+
value of `input` within the region is returned.
|
| 1319 |
+
|
| 1320 |
+
Parameters
|
| 1321 |
+
----------
|
| 1322 |
+
input : array_like
|
| 1323 |
+
Array_like of values.
|
| 1324 |
+
labels : array_like, optional
|
| 1325 |
+
An array of integers marking different regions over which the
|
| 1326 |
+
position of the maximum value of `input` is to be computed.
|
| 1327 |
+
`labels` must have the same shape as `input`. If `labels` is not
|
| 1328 |
+
specified, the location of the first maximum over the whole
|
| 1329 |
+
array is returned.
|
| 1330 |
+
|
| 1331 |
+
The `labels` argument only works when `index` is specified.
|
| 1332 |
+
index : array_like, optional
|
| 1333 |
+
A list of region labels that are taken into account for finding the
|
| 1334 |
+
location of the maxima. If `index` is None, the first maximum
|
| 1335 |
+
over all elements where `labels` is non-zero is returned.
|
| 1336 |
+
|
| 1337 |
+
The `index` argument only works when `labels` is specified.
|
| 1338 |
+
|
| 1339 |
+
Returns
|
| 1340 |
+
-------
|
| 1341 |
+
output : list of tuples of ints
|
| 1342 |
+
List of tuples of ints that specify the location of maxima of
|
| 1343 |
+
`input` over the regions determined by `labels` and whose index
|
| 1344 |
+
is in `index`.
|
| 1345 |
+
|
| 1346 |
+
If `index` or `labels` are not specified, a tuple of ints is
|
| 1347 |
+
returned specifying the location of the ``first`` maximal value
|
| 1348 |
+
of `input`.
|
| 1349 |
+
|
| 1350 |
+
See Also
|
| 1351 |
+
--------
|
| 1352 |
+
label, minimum, median, maximum_position, extrema, sum, mean, variance,
|
| 1353 |
+
standard_deviation
|
| 1354 |
+
|
| 1355 |
+
Examples
|
| 1356 |
+
--------
|
| 1357 |
+
>>> from scipy import ndimage
|
| 1358 |
+
>>> import numpy as np
|
| 1359 |
+
>>> a = np.array([[1, 2, 0, 0],
|
| 1360 |
+
... [5, 3, 0, 4],
|
| 1361 |
+
... [0, 0, 0, 7],
|
| 1362 |
+
... [9, 3, 0, 0]])
|
| 1363 |
+
>>> ndimage.maximum_position(a)
|
| 1364 |
+
(3, 0)
|
| 1365 |
+
|
| 1366 |
+
Features to process can be specified using `labels` and `index`:
|
| 1367 |
+
|
| 1368 |
+
>>> lbl = np.array([[0, 1, 2, 3],
|
| 1369 |
+
... [0, 1, 2, 3],
|
| 1370 |
+
... [0, 1, 2, 3],
|
| 1371 |
+
... [0, 1, 2, 3]])
|
| 1372 |
+
>>> ndimage.maximum_position(a, lbl, 1)
|
| 1373 |
+
(1, 1)
|
| 1374 |
+
|
| 1375 |
+
If no index is given, non-zero `labels` are processed:
|
| 1376 |
+
|
| 1377 |
+
>>> ndimage.maximum_position(a, lbl)
|
| 1378 |
+
(2, 3)
|
| 1379 |
+
|
| 1380 |
+
If there are no maxima, the position of the first element is returned:
|
| 1381 |
+
|
| 1382 |
+
>>> ndimage.maximum_position(a, lbl, 2)
|
| 1383 |
+
(0, 2)
|
| 1384 |
+
|
| 1385 |
+
"""
|
| 1386 |
+
dims = np.array(np.asarray(input).shape)
|
| 1387 |
+
# see np.unravel_index to understand this line.
|
| 1388 |
+
dim_prod = np.cumprod([1] + list(dims[:0:-1]))[::-1]
|
| 1389 |
+
|
| 1390 |
+
result = _select(input, labels, index, find_max_positions=True)[0]
|
| 1391 |
+
|
| 1392 |
+
if np.isscalar(result):
|
| 1393 |
+
return tuple((result // dim_prod) % dims)
|
| 1394 |
+
|
| 1395 |
+
return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
|
| 1396 |
+
|
| 1397 |
+
|
| 1398 |
+
def extrema(input, labels=None, index=None):
|
| 1399 |
+
"""
|
| 1400 |
+
Calculate the minimums and maximums of the values of an array
|
| 1401 |
+
at labels, along with their positions.
|
| 1402 |
+
|
| 1403 |
+
Parameters
|
| 1404 |
+
----------
|
| 1405 |
+
input : ndarray
|
| 1406 |
+
N-D image data to process.
|
| 1407 |
+
labels : ndarray, optional
|
| 1408 |
+
Labels of features in input.
|
| 1409 |
+
If not None, must be same shape as `input`.
|
| 1410 |
+
index : int or sequence of ints, optional
|
| 1411 |
+
Labels to include in output. If None (default), all values where
|
| 1412 |
+
non-zero `labels` are used.
|
| 1413 |
+
|
| 1414 |
+
Returns
|
| 1415 |
+
-------
|
| 1416 |
+
minimums, maximums : int or ndarray
|
| 1417 |
+
Values of minimums and maximums in each feature.
|
| 1418 |
+
min_positions, max_positions : tuple or list of tuples
|
| 1419 |
+
Each tuple gives the N-D coordinates of the corresponding minimum
|
| 1420 |
+
or maximum.
|
| 1421 |
+
|
| 1422 |
+
See Also
|
| 1423 |
+
--------
|
| 1424 |
+
maximum, minimum, maximum_position, minimum_position, center_of_mass
|
| 1425 |
+
|
| 1426 |
+
Examples
|
| 1427 |
+
--------
|
| 1428 |
+
>>> import numpy as np
|
| 1429 |
+
>>> a = np.array([[1, 2, 0, 0],
|
| 1430 |
+
... [5, 3, 0, 4],
|
| 1431 |
+
... [0, 0, 0, 7],
|
| 1432 |
+
... [9, 3, 0, 0]])
|
| 1433 |
+
>>> from scipy import ndimage
|
| 1434 |
+
>>> ndimage.extrema(a)
|
| 1435 |
+
(0, 9, (0, 2), (3, 0))
|
| 1436 |
+
|
| 1437 |
+
Features to process can be specified using `labels` and `index`:
|
| 1438 |
+
|
| 1439 |
+
>>> lbl, nlbl = ndimage.label(a)
|
| 1440 |
+
>>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1))
|
| 1441 |
+
(array([1, 4, 3]),
|
| 1442 |
+
array([5, 7, 9]),
|
| 1443 |
+
[(0, 0), (1, 3), (3, 1)],
|
| 1444 |
+
[(1, 0), (2, 3), (3, 0)])
|
| 1445 |
+
|
| 1446 |
+
If no index is given, non-zero `labels` are processed:
|
| 1447 |
+
|
| 1448 |
+
>>> ndimage.extrema(a, lbl)
|
| 1449 |
+
(1, 9, (0, 0), (3, 0))
|
| 1450 |
+
|
| 1451 |
+
"""
|
| 1452 |
+
dims = np.array(np.asarray(input).shape)
|
| 1453 |
+
# see np.unravel_index to understand this line.
|
| 1454 |
+
dim_prod = np.cumprod([1] + list(dims[:0:-1]))[::-1]
|
| 1455 |
+
|
| 1456 |
+
minimums, min_positions, maximums, max_positions = _select(input, labels,
|
| 1457 |
+
index,
|
| 1458 |
+
find_min=True,
|
| 1459 |
+
find_max=True,
|
| 1460 |
+
find_min_positions=True,
|
| 1461 |
+
find_max_positions=True)
|
| 1462 |
+
|
| 1463 |
+
if np.isscalar(minimums):
|
| 1464 |
+
return (minimums, maximums, tuple((min_positions // dim_prod) % dims),
|
| 1465 |
+
tuple((max_positions // dim_prod) % dims))
|
| 1466 |
+
|
| 1467 |
+
min_positions = [
|
| 1468 |
+
tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims
|
| 1469 |
+
]
|
| 1470 |
+
max_positions = [
|
| 1471 |
+
tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims
|
| 1472 |
+
]
|
| 1473 |
+
|
| 1474 |
+
return minimums, maximums, min_positions, max_positions
|
| 1475 |
+
|
| 1476 |
+
|
| 1477 |
+
def center_of_mass(input, labels=None, index=None):
|
| 1478 |
+
"""
|
| 1479 |
+
Calculate the center of mass of the values of an array at labels.
|
| 1480 |
+
|
| 1481 |
+
Parameters
|
| 1482 |
+
----------
|
| 1483 |
+
input : ndarray
|
| 1484 |
+
Data from which to calculate center-of-mass. The masses can either
|
| 1485 |
+
be positive or negative.
|
| 1486 |
+
labels : ndarray, optional
|
| 1487 |
+
Labels for objects in `input`, as generated by `ndimage.label`.
|
| 1488 |
+
Only used with `index`. Dimensions must be the same as `input`.
|
| 1489 |
+
index : int or sequence of ints, optional
|
| 1490 |
+
Labels for which to calculate centers-of-mass. If not specified,
|
| 1491 |
+
the combined center of mass of all labels greater than zero
|
| 1492 |
+
will be calculated. Only used with `labels`.
|
| 1493 |
+
|
| 1494 |
+
Returns
|
| 1495 |
+
-------
|
| 1496 |
+
center_of_mass : tuple, or list of tuples
|
| 1497 |
+
Coordinates of centers-of-mass.
|
| 1498 |
+
|
| 1499 |
+
Examples
|
| 1500 |
+
--------
|
| 1501 |
+
>>> import numpy as np
|
| 1502 |
+
>>> a = np.array(([0,0,0,0],
|
| 1503 |
+
... [0,1,1,0],
|
| 1504 |
+
... [0,1,1,0],
|
| 1505 |
+
... [0,1,1,0]))
|
| 1506 |
+
>>> from scipy import ndimage
|
| 1507 |
+
>>> ndimage.center_of_mass(a)
|
| 1508 |
+
(2.0, 1.5)
|
| 1509 |
+
|
| 1510 |
+
Calculation of multiple objects in an image
|
| 1511 |
+
|
| 1512 |
+
>>> b = np.array(([0,1,1,0],
|
| 1513 |
+
... [0,1,0,0],
|
| 1514 |
+
... [0,0,0,0],
|
| 1515 |
+
... [0,0,1,1],
|
| 1516 |
+
... [0,0,1,1]))
|
| 1517 |
+
>>> lbl = ndimage.label(b)[0]
|
| 1518 |
+
>>> ndimage.center_of_mass(b, lbl, [1,2])
|
| 1519 |
+
[(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)]
|
| 1520 |
+
|
| 1521 |
+
Negative masses are also accepted, which can occur for example when
|
| 1522 |
+
bias is removed from measured data due to random noise.
|
| 1523 |
+
|
| 1524 |
+
>>> c = np.array(([-1,0,0,0],
|
| 1525 |
+
... [0,-1,-1,0],
|
| 1526 |
+
... [0,1,-1,0],
|
| 1527 |
+
... [0,1,1,0]))
|
| 1528 |
+
>>> ndimage.center_of_mass(c)
|
| 1529 |
+
(-4.0, 1.0)
|
| 1530 |
+
|
| 1531 |
+
If there are division by zero issues, the function does not raise an
|
| 1532 |
+
error but rather issues a RuntimeWarning before returning inf and/or NaN.
|
| 1533 |
+
|
| 1534 |
+
>>> d = np.array([-1, 1])
|
| 1535 |
+
>>> ndimage.center_of_mass(d)
|
| 1536 |
+
(inf,)
|
| 1537 |
+
"""
|
| 1538 |
+
normalizer = sum(input, labels, index)
|
| 1539 |
+
grids = np.ogrid[[slice(0, i) for i in input.shape]]
|
| 1540 |
+
|
| 1541 |
+
results = [sum(input * grids[dir].astype(float), labels, index) / normalizer
|
| 1542 |
+
for dir in range(input.ndim)]
|
| 1543 |
+
|
| 1544 |
+
if np.isscalar(results[0]):
|
| 1545 |
+
return tuple(results)
|
| 1546 |
+
|
| 1547 |
+
return [tuple(v) for v in np.array(results).T]
|
| 1548 |
+
|
| 1549 |
+
|
| 1550 |
+
def histogram(input, min, max, bins, labels=None, index=None):
|
| 1551 |
+
"""
|
| 1552 |
+
Calculate the histogram of the values of an array, optionally at labels.
|
| 1553 |
+
|
| 1554 |
+
Histogram calculates the frequency of values in an array within bins
|
| 1555 |
+
determined by `min`, `max`, and `bins`. The `labels` and `index`
|
| 1556 |
+
keywords can limit the scope of the histogram to specified sub-regions
|
| 1557 |
+
within the array.
|
| 1558 |
+
|
| 1559 |
+
Parameters
|
| 1560 |
+
----------
|
| 1561 |
+
input : array_like
|
| 1562 |
+
Data for which to calculate histogram.
|
| 1563 |
+
min, max : int
|
| 1564 |
+
Minimum and maximum values of range of histogram bins.
|
| 1565 |
+
bins : int
|
| 1566 |
+
Number of bins.
|
| 1567 |
+
labels : array_like, optional
|
| 1568 |
+
Labels for objects in `input`.
|
| 1569 |
+
If not None, must be same shape as `input`.
|
| 1570 |
+
index : int or sequence of ints, optional
|
| 1571 |
+
Label or labels for which to calculate histogram. If None, all values
|
| 1572 |
+
where label is greater than zero are used
|
| 1573 |
+
|
| 1574 |
+
Returns
|
| 1575 |
+
-------
|
| 1576 |
+
hist : ndarray
|
| 1577 |
+
Histogram counts.
|
| 1578 |
+
|
| 1579 |
+
Examples
|
| 1580 |
+
--------
|
| 1581 |
+
>>> import numpy as np
|
| 1582 |
+
>>> a = np.array([[ 0. , 0.2146, 0.5962, 0. ],
|
| 1583 |
+
... [ 0. , 0.7778, 0. , 0. ],
|
| 1584 |
+
... [ 0. , 0. , 0. , 0. ],
|
| 1585 |
+
... [ 0. , 0. , 0.7181, 0.2787],
|
| 1586 |
+
... [ 0. , 0. , 0.6573, 0.3094]])
|
| 1587 |
+
>>> from scipy import ndimage
|
| 1588 |
+
>>> ndimage.histogram(a, 0, 1, 10)
|
| 1589 |
+
array([13, 0, 2, 1, 0, 1, 1, 2, 0, 0])
|
| 1590 |
+
|
| 1591 |
+
With labels and no indices, non-zero elements are counted:
|
| 1592 |
+
|
| 1593 |
+
>>> lbl, nlbl = ndimage.label(a)
|
| 1594 |
+
>>> ndimage.histogram(a, 0, 1, 10, lbl)
|
| 1595 |
+
array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0])
|
| 1596 |
+
|
| 1597 |
+
Indices can be used to count only certain objects:
|
| 1598 |
+
|
| 1599 |
+
>>> ndimage.histogram(a, 0, 1, 10, lbl, 2)
|
| 1600 |
+
array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])
|
| 1601 |
+
|
| 1602 |
+
"""
|
| 1603 |
+
_bins = np.linspace(min, max, bins + 1)
|
| 1604 |
+
|
| 1605 |
+
def _hist(vals):
|
| 1606 |
+
return np.histogram(vals, _bins)[0]
|
| 1607 |
+
|
| 1608 |
+
return labeled_comprehension(input, labels, index, _hist, object, None,
|
| 1609 |
+
pass_positions=False)
|
| 1610 |
+
|
| 1611 |
+
|
| 1612 |
+
def watershed_ift(input, markers, structure=None, output=None):
|
| 1613 |
+
"""
|
| 1614 |
+
Apply watershed from markers using image foresting transform algorithm.
|
| 1615 |
+
|
| 1616 |
+
Parameters
|
| 1617 |
+
----------
|
| 1618 |
+
input : array_like
|
| 1619 |
+
Input.
|
| 1620 |
+
markers : array_like
|
| 1621 |
+
Markers are points within each watershed that form the beginning
|
| 1622 |
+
of the process. Negative markers are considered background markers
|
| 1623 |
+
which are processed after the other markers.
|
| 1624 |
+
structure : structure element, optional
|
| 1625 |
+
A structuring element defining the connectivity of the object can be
|
| 1626 |
+
provided. If None, an element is generated with a squared
|
| 1627 |
+
connectivity equal to one.
|
| 1628 |
+
output : ndarray, optional
|
| 1629 |
+
An output array can optionally be provided. The same shape as input.
|
| 1630 |
+
|
| 1631 |
+
Returns
|
| 1632 |
+
-------
|
| 1633 |
+
watershed_ift : ndarray
|
| 1634 |
+
Output. Same shape as `input`.
|
| 1635 |
+
|
| 1636 |
+
References
|
| 1637 |
+
----------
|
| 1638 |
+
.. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image
|
| 1639 |
+
foresting transform: theory, algorithms, and applications",
|
| 1640 |
+
Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004.
|
| 1641 |
+
|
| 1642 |
+
"""
|
| 1643 |
+
input = np.asarray(input)
|
| 1644 |
+
if input.dtype.type not in [np.uint8, np.uint16]:
|
| 1645 |
+
raise TypeError('only 8 and 16 unsigned inputs are supported')
|
| 1646 |
+
|
| 1647 |
+
if structure is None:
|
| 1648 |
+
structure = _morphology.generate_binary_structure(input.ndim, 1)
|
| 1649 |
+
structure = np.asarray(structure, dtype=bool)
|
| 1650 |
+
if structure.ndim != input.ndim:
|
| 1651 |
+
raise RuntimeError('structure and input must have equal rank')
|
| 1652 |
+
for ii in structure.shape:
|
| 1653 |
+
if ii != 3:
|
| 1654 |
+
raise RuntimeError('structure dimensions must be equal to 3')
|
| 1655 |
+
|
| 1656 |
+
if not structure.flags.contiguous:
|
| 1657 |
+
structure = structure.copy()
|
| 1658 |
+
markers = np.asarray(markers)
|
| 1659 |
+
if input.shape != markers.shape:
|
| 1660 |
+
raise RuntimeError('input and markers must have equal shape')
|
| 1661 |
+
|
| 1662 |
+
integral_types = [np.int8,
|
| 1663 |
+
np.int16,
|
| 1664 |
+
np.int32,
|
| 1665 |
+
np.int64,
|
| 1666 |
+
np.intc,
|
| 1667 |
+
np.intp]
|
| 1668 |
+
|
| 1669 |
+
if markers.dtype.type not in integral_types:
|
| 1670 |
+
raise RuntimeError('marker should be of integer type')
|
| 1671 |
+
|
| 1672 |
+
if isinstance(output, np.ndarray):
|
| 1673 |
+
if output.dtype.type not in integral_types:
|
| 1674 |
+
raise RuntimeError('output should be of integer type')
|
| 1675 |
+
else:
|
| 1676 |
+
output = markers.dtype
|
| 1677 |
+
|
| 1678 |
+
output = _ni_support._get_output(output, input)
|
| 1679 |
+
_nd_image.watershed_ift(input, markers, structure, output)
|
| 1680 |
+
return output
|
parrot/lib/python3.10/site-packages/scipy/ndimage/_morphology.py
ADDED
|
@@ -0,0 +1,2537 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2003-2005 Peter J. Verveer
|
| 2 |
+
#
|
| 3 |
+
# Redistribution and use in source and binary forms, with or without
|
| 4 |
+
# modification, are permitted provided that the following conditions
|
| 5 |
+
# are met:
|
| 6 |
+
#
|
| 7 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 8 |
+
# notice, this list of conditions and the following disclaimer.
|
| 9 |
+
#
|
| 10 |
+
# 2. Redistributions in binary form must reproduce the above
|
| 11 |
+
# copyright notice, this list of conditions and the following
|
| 12 |
+
# disclaimer in the documentation and/or other materials provided
|
| 13 |
+
# with the distribution.
|
| 14 |
+
#
|
| 15 |
+
# 3. The name of the author may not be used to endorse or promote
|
| 16 |
+
# products derived from this software without specific prior
|
| 17 |
+
# written permission.
|
| 18 |
+
#
|
| 19 |
+
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
| 20 |
+
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 21 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
| 23 |
+
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
| 25 |
+
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 27 |
+
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
| 31 |
+
import warnings
|
| 32 |
+
import operator
|
| 33 |
+
|
| 34 |
+
import numpy as np
|
| 35 |
+
from . import _ni_support
|
| 36 |
+
from . import _nd_image
|
| 37 |
+
from . import _filters
|
| 38 |
+
|
| 39 |
+
__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion',
|
| 40 |
+
'binary_dilation', 'binary_opening', 'binary_closing',
|
| 41 |
+
'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes',
|
| 42 |
+
'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing',
|
| 43 |
+
'morphological_gradient', 'morphological_laplace', 'white_tophat',
|
| 44 |
+
'black_tophat', 'distance_transform_bf', 'distance_transform_cdt',
|
| 45 |
+
'distance_transform_edt']
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _center_is_true(structure, origin):
|
| 49 |
+
structure = np.asarray(structure)
|
| 50 |
+
coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape,
|
| 51 |
+
origin)])
|
| 52 |
+
return bool(structure[coor])
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def iterate_structure(structure, iterations, origin=None):
    """
    Iterate a structure by dilating it with itself.

    Parameters
    ----------
    structure : array_like
        Structuring element (for example an array of bools) that is to
        be dilated with itself.
    iterations : int
        Number of dilations performed on the structure with itself.
    origin : optional
        When None (the default), only the iterated structure is
        returned. Otherwise, a tuple of the iterated structure and the
        modified origin is returned.

    Returns
    -------
    iterate_structure : ndarray of bools
        A new structuring element obtained by dilating `structure`
        (`iterations` - 1) times with itself.

    See Also
    --------
    generate_binary_structure

    Examples
    --------
    >>> from scipy import ndimage
    >>> struct = ndimage.generate_binary_structure(2, 1)
    >>> struct.astype(int)
    array([[0, 1, 0],
           [1, 1, 1],
           [0, 1, 0]])
    >>> ndimage.iterate_structure(struct, 2).astype(int)
    array([[0, 0, 1, 0, 0],
           [0, 1, 1, 1, 0],
           [1, 1, 1, 1, 1],
           [0, 1, 1, 1, 0],
           [0, 0, 1, 0, 0]])
    >>> ndimage.iterate_structure(struct, 3).astype(int)
    array([[0, 0, 0, 1, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 1, 1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1, 1, 1],
           [0, 1, 1, 1, 1, 1, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 1, 0, 0, 0]])

    """
    structure = np.asarray(structure)
    if iterations < 2:
        # Nothing to iterate; hand back an independent copy.
        return structure.copy()
    n_dilations = iterations - 1
    # Each dilation can grow the structure by (extent - 1) per axis.
    out_shape = [extent + n_dilations * (extent - 1)
                 for extent in structure.shape]
    offsets = [n_dilations * (extent // 2) for extent in structure.shape]
    # Place the seed structure at the center of the enlarged array.
    window = tuple(slice(off, off + extent)
                   for off, extent in zip(offsets, structure.shape))
    out = np.zeros(out_shape, bool)
    out[window] = structure != 0
    out = binary_dilation(out, structure, iterations=n_dilations)
    if origin is None:
        return out
    origin = _ni_support._normalize_sequence(origin, structure.ndim)
    origin = [iterations * o for o in origin]
    return out, origin
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def generate_binary_structure(rank, connectivity):
    """
    Generate a binary structure for binary morphological operations.

    Parameters
    ----------
    rank : int
        Number of dimensions of the array to which the structuring
        element will be applied, as returned by `np.ndim`.
    connectivity : int
        `connectivity` selects which elements of the output array belong
        to the structure, i.e., are treated as neighbors of the central
        element. Elements up to a squared distance of `connectivity`
        from the center count as neighbors. `connectivity` may range
        from 1 (no diagonal elements are neighbors) to `rank` (all
        elements are neighbors).

    Returns
    -------
    output : ndarray of bools
        Structuring element which may be used for binary morphological
        operations, with `rank` dimensions and all dimensions equal to 3.

    See Also
    --------
    iterate_structure, binary_dilation, binary_erosion

    Notes
    -----
    `generate_binary_structure` can only create structuring elements of
    minimal size, i.e., with all dimensions equal to 3. For larger
    structuring elements — useful e.g. for eroding large objects — either
    use `iterate_structure`, or build a custom array directly with numpy
    functions such as `numpy.ones`.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> struct = ndimage.generate_binary_structure(2, 1)
    >>> struct
    array([[False, True, False],
           [ True, True, True],
           [False, True, False]], dtype=bool)
    >>> a = np.zeros((5,5))
    >>> a[2, 2] = 1
    >>> a
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype)
    >>> b
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype)
    array([[ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 1.,  1.,  1.,  1.,  1.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  0.,  0.]])
    >>> struct = ndimage.generate_binary_structure(2, 2)
    >>> struct
    array([[ True, True, True],
           [ True, True, True],
           [ True, True, True]], dtype=bool)
    >>> struct = ndimage.generate_binary_structure(3, 1)
    >>> struct # no diagonal elements
    array([[[False, False, False],
            [False, True, False],
            [False, False, False]],
           [[False, True, False],
            [ True, True, True],
            [False, True, False]],
           [[False, False, False],
            [False, True, False],
            [False, False, False]]], dtype=bool)

    """
    # Connectivity below one makes no sense; clamp it.
    connectivity = max(connectivity, 1)
    if rank < 1:
        # Zero-dimensional input: the structure is a single True scalar.
        return np.array(True, dtype=bool)
    # Manhattan distance of every cell of a 3**rank cube from its center.
    dist = np.abs(np.indices((3,) * rank) - 1).sum(axis=0)
    return dist <= connectivity
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def _binary_erosion(input, structure, iterations, mask, output,
                    border_value, origin, invert, brute_force):
    # Shared backend for binary_erosion (invert=0) and binary_dilation
    # (invert=1); `invert` is forwarded to the _nd_image C routines.
    try:
        # Reject non-integral iteration counts (floats, etc.) up front.
        iterations = operator.index(iterations)
    except TypeError as e:
        raise TypeError('iterations parameter should be an integer') from e

    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    if structure is None:
        # Default: connectivity-1 structuring element of matching rank.
        structure = generate_binary_structure(input.ndim, 1)
    else:
        structure = np.asarray(structure, dtype=bool)
    if structure.ndim != input.ndim:
        raise RuntimeError('structure and input must have same dimensionality')
    if not structure.flags.contiguous:
        # The C code requires a contiguous structure array.
        structure = structure.copy()
    if structure.size < 1:
        raise RuntimeError('structure must not be empty')
    if mask is not None:
        mask = np.asarray(mask)
        if mask.shape != input.shape:
            raise RuntimeError('mask and input must have equal sizes')
    origin = _ni_support._normalize_sequence(origin, input.ndim)
    # Whether the (origin-shifted) center of the structure is True; this
    # selects the incremental multi-iteration path below.
    cit = _center_is_true(structure, origin)
    if isinstance(output, np.ndarray):
        if np.iscomplexobj(output):
            raise TypeError('Complex output type not supported')
    else:
        # No output array given: results are produced as booleans.
        output = bool
    output = _ni_support._get_output(output, input)
    temp_needed = np.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    if iterations == 1:
        # Single pass: no iteration bookkeeping needed.
        _nd_image.binary_erosion(input, structure, mask, output,
                                 border_value, origin, invert, cit, 0)
    elif cit and not brute_force:
        # Incremental path: the first pass also returns the coordinates
        # of the pixels it changed, so later passes only revisit those.
        changed, coordinate_list = _nd_image.binary_erosion(
            input, structure, mask, output,
            border_value, origin, invert, cit, 1)
        # binary_erosion2 expects the mirrored structure with a negated
        # origin; even-sized axes need an extra shift of one.
        structure = structure[tuple([slice(None, None, -1)] *
                                    structure.ndim)]
        for ii in range(len(origin)):
            origin[ii] = -origin[ii]
            if not structure.shape[ii] & 1:
                origin[ii] -= 1
        if mask is not None:
            mask = np.asarray(mask, dtype=np.int8)
        if not structure.flags.contiguous:
            structure = structure.copy()
        _nd_image.binary_erosion2(output, structure, mask, iterations - 1,
                                  origin, invert, coordinate_list)
    else:
        # Brute-force path: ping-pong between two buffers, one full pass
        # per iteration.
        tmp_in = np.empty_like(input, dtype=bool)
        tmp_out = output
        if iterations >= 1 and not iterations & 1:
            # Pick the starting buffer by parity so that the final pass
            # writes into `output`.
            tmp_in, tmp_out = tmp_out, tmp_in
        changed = _nd_image.binary_erosion(
            input, structure, mask, tmp_out,
            border_value, origin, invert, cit, 0)
        ii = 1
        # Run the remaining passes; for iterations < 1 keep going until
        # a pass no longer changes anything.
        while ii < iterations or (iterations < 1 and changed):
            tmp_in, tmp_out = tmp_out, tmp_in
            changed = _nd_image.binary_erosion(
                tmp_in, structure, mask, tmp_out,
                border_value, origin, invert, cit, 0)
            ii += 1
    if temp_needed:
        # Copy the result back into the caller-supplied (shared) array.
        temp[...] = output
        output = temp
    return output
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def binary_erosion(input, structure=None, iterations=1, mask=None, output=None,
                   border_value=0, origin=0, brute_force=False):
    """
    Erode a multidimensional binary image with a given structuring element.

    Binary erosion is a mathematical-morphology operation used for image
    processing.

    Parameters
    ----------
    input : array_like
        Binary image to be eroded. Non-zero (True) elements form the
        subset to be eroded.
    structure : array_like, optional
        Structuring element used for the erosion. Non-zero elements are
        treated as True. When no structuring element is provided, one is
        generated with a square connectivity equal to one.
    iterations : int, optional
        Number of times the erosion is repeated (one, by default). When
        `iterations` is less than 1, the erosion is repeated until the
        result no longer changes.
    mask : array_like, optional
        When a mask is given, only elements whose corresponding mask
        entry is True are modified at each iteration.
    output : ndarray, optional
        Array of the same shape as input, into which the output is
        placed. By default, a new array is created.
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    brute_force : boolean, optional
        Memory condition: when False, only pixels whose value changed in
        the last iteration are tracked as candidates to be updated
        (eroded) in the current iteration; when True, every pixel is
        considered a candidate for erosion, regardless of what happened
        in the previous iteration. False by default.

    Returns
    -------
    binary_erosion : ndarray of bools
        Erosion of the input by the structuring element.

    See Also
    --------
    grey_erosion, binary_dilation, binary_closing, binary_opening,
    generate_binary_structure

    Notes
    -----
    Erosion [1]_ is a mathematical morphology operation [2]_ that uses a
    structuring element to shrink the shapes in an image. The binary
    erosion of an image by a structuring element is the locus of the
    points where a superimposition of the structuring element centered
    on the point is entirely contained in the set of non-zero elements
    of the image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((7,7), dtype=int)
    >>> a[1:6, 2:5] = 1
    >>> a
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> ndimage.binary_erosion(a).astype(a.dtype)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> #Erosion removes objects smaller than the structure
    >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])

    """
    # Erosion is the non-inverted (invert=0) case of the shared helper.
    return _binary_erosion(input, structure, iterations, mask,
                           output, border_value, origin, 0, brute_force)
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def binary_dilation(input, structure=None, iterations=1, mask=None,
                    output=None, border_value=0, origin=0,
                    brute_force=False):
    """
    Dilate a multidimensional binary image with the given structuring
    element.

    Parameters
    ----------
    input : array_like
        Binary array_like to be dilated. Non-zero (True) elements form
        the subset to be dilated.
    structure : array_like, optional
        Structuring element used for the dilation. Non-zero elements are
        treated as True. When no structuring element is provided, one is
        generated with a square connectivity equal to one.
    iterations : int, optional
        Number of times the dilation is repeated (one, by default).
        When `iterations` is less than 1, the dilation is repeated until
        the result no longer changes. Only an integer of iterations is
        accepted.
    mask : array_like, optional
        When a mask is given, only elements whose corresponding mask
        entry is True are modified at each iteration.
    output : ndarray, optional
        Array of the same shape as input, into which the output is
        placed. By default, a new array is created.
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    brute_force : boolean, optional
        Memory condition: when False, only pixels whose value changed in
        the last iteration are tracked as candidates to be updated
        (dilated) in the current iteration; when True, every pixel is
        considered a candidate for dilation, regardless of what happened
        in the previous iteration. False by default.

    Returns
    -------
    binary_dilation : ndarray of bools
        Dilation of the input by the structuring element.

    See Also
    --------
    grey_dilation, binary_erosion, binary_closing, binary_opening,
    generate_binary_structure

    Notes
    -----
    Dilation [1]_ is a mathematical morphology operation [2]_ that uses
    a structuring element to expand the shapes in an image. The binary
    dilation of an image by a structuring element is the locus of the
    points covered by the structuring element, when its center lies
    within the non-zero points of the image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((5, 5))
    >>> a[2, 2] = 1
    >>> a
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> ndimage.binary_dilation(a)
    array([[False, False, False, False, False],
           [False, False, True, False, False],
           [False, True, True, True, False],
           [False, False, True, False, False],
           [False, False, False, False, False]], dtype=bool)
    >>> ndimage.binary_dilation(a).astype(a.dtype)
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> # 3x3 structuring element with connectivity 1, used by default
    >>> struct1 = ndimage.generate_binary_structure(2, 1)
    >>> struct1
    array([[False, True, False],
           [ True, True, True],
           [False, True, False]], dtype=bool)
    >>> # 3x3 structuring element with connectivity 2
    >>> struct2 = ndimage.generate_binary_structure(2, 2)
    >>> struct2
    array([[ True, True, True],
           [ True, True, True],
           [ True, True, True]], dtype=bool)
    >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype)
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype)
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> ndimage.binary_dilation(a, structure=struct1,\\
    ... iterations=2).astype(a.dtype)
    array([[ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 1.,  1.,  1.,  1.,  1.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  0.,  0.]])

    """
    input = np.asarray(input)
    if structure is None:
        structure = generate_binary_structure(input.ndim, 1)
    origin = _ni_support._normalize_sequence(origin, input.ndim)
    # Dilation is computed as an inverted erosion with the mirrored
    # structuring element and a negated origin.
    structure = np.asarray(structure)
    mirror = (slice(None, None, -1),) * structure.ndim
    structure = structure[mirror]
    for axis, off in enumerate(origin):
        origin[axis] = -off
        if structure.shape[axis] % 2 == 0:
            # Even-sized axes need an extra shift after mirroring.
            origin[axis] -= 1

    return _binary_erosion(input, structure, iterations, mask,
                           output, border_value, origin, 1, brute_force)
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
def binary_opening(input, structure=None, iterations=1, output=None,
                   origin=0, mask=None, border_value=0, brute_force=False):
    """
    Open a multidimensional binary image with the given structuring
    element.

    The *opening* of an input image by a structuring element is the
    *dilation* of the *erosion* of the image by the structuring element.

    Parameters
    ----------
    input : array_like
        Binary array_like to be opened. Non-zero (True) elements form
        the subset to be opened.
    structure : array_like, optional
        Structuring element used for the opening. Non-zero elements are
        treated as True. When no structuring element is provided, one is
        generated with a square connectivity equal to one (i.e., only
        nearest neighbors are connected to the center,
        diagonally-connected elements are not considered neighbors).
    iterations : int, optional
        The erosion step of the opening, then the dilation step, are
        each repeated `iterations` times (one, by default). When
        `iterations` is less than 1, each operation is repeated until
        the result no longer changes. Only an integer of iterations is
        accepted.
    output : ndarray, optional
        Array of the same shape as input, into which the output is
        placed. By default, a new array is created.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    mask : array_like, optional
        When a mask is given, only elements whose corresponding mask
        entry is True are modified at each iteration.

        .. versionadded:: 1.1.0
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.

        .. versionadded:: 1.1.0
    brute_force : boolean, optional
        Memory condition: when False, only pixels whose value changed in
        the last iteration are tracked as candidates to be updated in
        the current iteration; when True, all pixels are considered as
        candidates for update, regardless of what happened in the
        previous iteration. False by default.

        .. versionadded:: 1.1.0

    Returns
    -------
    binary_opening : ndarray of bools
        Opening of the input by the structuring element.

    See Also
    --------
    grey_opening, binary_closing, binary_erosion, binary_dilation,
    generate_binary_structure

    Notes
    -----
    *Opening* [1]_ is a mathematical morphology operation [2]_ that
    consists of an erosion followed by a dilation of the input with the
    same structuring element. Opening therefore removes objects smaller
    than the structuring element.

    Together with *closing* (`binary_closing`), opening can be used for
    noise removal.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Opening_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((5,5), dtype=int)
    >>> a[1:4, 1:4] = 1; a[4, 4] = 1
    >>> a
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 1]])
    >>> # Opening removes small objects
    >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])
    >>> # Opening can also smooth corners
    >>> ndimage.binary_opening(a).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0]])
    >>> # Opening is the dilation of the erosion of the input
    >>> ndimage.binary_erosion(a).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0]])
    >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0]])

    """
    input = np.asarray(input)
    if structure is None:
        structure = generate_binary_structure(input.ndim, 1)

    # Opening = dilation of the erosion with the same structuring element.
    eroded = binary_erosion(input, structure, iterations, mask, None,
                            border_value, origin, brute_force)
    return binary_dilation(eroded, structure, iterations, mask, output,
                           border_value, origin, brute_force)
|
| 646 |
+
|
| 647 |
+
|
| 648 |
+
def binary_closing(input, structure=None, iterations=1, output=None,
                   origin=0, mask=None, border_value=0, brute_force=False):
    """
    Close a multidimensional binary image with the given structuring
    element.

    The *closing* of an input image by a structuring element is the
    *erosion* of the *dilation* of the image by the structuring element.

    Parameters
    ----------
    input : array_like
        Binary array_like to be closed. Non-zero (True) elements form
        the subset to be closed.
    structure : array_like, optional
        Structuring element used for the closing. Non-zero elements are
        treated as True. When no structuring element is provided, one is
        generated with a square connectivity equal to one (i.e., only
        nearest neighbors are connected to the center,
        diagonally-connected elements are not considered neighbors).
    iterations : int, optional
        The dilation step of the closing, then the erosion step, are
        each repeated `iterations` times (one, by default). When
        `iterations` is less than 1, each operation is repeated until
        the result no longer changes. Only an integer of iterations is
        accepted.
    output : ndarray, optional
        Array of the same shape as input, into which the output is
        placed. By default, a new array is created.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    mask : array_like, optional
        When a mask is given, only elements whose corresponding mask
        entry is True are modified at each iteration.

        .. versionadded:: 1.1.0
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.

        .. versionadded:: 1.1.0
    brute_force : boolean, optional
        Memory condition: when False, only pixels whose value changed in
        the last iteration are tracked as candidates to be updated in
        the current iteration; when True, all pixels are considered as
        candidates for update, regardless of what happened in the
        previous iteration. False by default.

        .. versionadded:: 1.1.0

    Returns
    -------
    binary_closing : ndarray of bools
        Closing of the input by the structuring element.

    See Also
    --------
    grey_closing, binary_opening, binary_dilation, binary_erosion,
    generate_binary_structure

    Notes
    -----
    *Closing* [1]_ is a mathematical morphology operation [2]_ that
    consists of a dilation followed by an erosion of the input with the
    same structuring element. Closing therefore fills holes smaller
    than the structuring element.

    Together with *opening* (`binary_opening`), closing can be used for
    noise removal.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Closing_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((5,5), dtype=int)
    >>> a[1:-1, 1:-1] = 1; a[2,2] = 0
    >>> a
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 0, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])
    >>> # Closing removes small holes
    >>> ndimage.binary_closing(a).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])
    >>> # Closing is the erosion of the dilation of the input
    >>> ndimage.binary_dilation(a).astype(int)
    array([[0, 1, 1, 1, 0],
           [1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1],
           [0, 1, 1, 1, 0]])
    >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])


    >>> a = np.zeros((7,7), dtype=int)
    >>> a[1:6, 2:5] = 1; a[1:3,3] = 0
    >>> a
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 1, 0, 0],
           [0, 0, 1, 0, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> # In addition to removing holes, closing can also
    >>> # coarsen boundaries with fine hollows.
    >>> ndimage.binary_closing(a).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])

    """
    input = np.asarray(input)
    if structure is None:
        structure = generate_binary_structure(input.ndim, 1)

    # Closing = erosion of the dilation with the same structuring element.
    dilated = binary_dilation(input, structure, iterations, mask, None,
                              border_value, origin, brute_force)
    return binary_erosion(dilated, structure, iterations, mask, output,
                          border_value, origin, brute_force)
|
| 793 |
+
|
| 794 |
+
|
| 795 |
+
def binary_hit_or_miss(input, structure1=None, structure2=None,
|
| 796 |
+
output=None, origin1=0, origin2=None):
|
| 797 |
+
"""
|
| 798 |
+
Multidimensional binary hit-or-miss transform.
|
| 799 |
+
|
| 800 |
+
The hit-or-miss transform finds the locations of a given pattern
|
| 801 |
+
inside the input image.
|
| 802 |
+
|
| 803 |
+
Parameters
|
| 804 |
+
----------
|
| 805 |
+
input : array_like (cast to booleans)
|
| 806 |
+
Binary image where a pattern is to be detected.
|
| 807 |
+
structure1 : array_like (cast to booleans), optional
|
| 808 |
+
Part of the structuring element to be fitted to the foreground
|
| 809 |
+
(non-zero elements) of `input`. If no value is provided, a
|
| 810 |
+
structure of square connectivity 1 is chosen.
|
| 811 |
+
structure2 : array_like (cast to booleans), optional
|
| 812 |
+
Second part of the structuring element that has to miss completely
|
| 813 |
+
the foreground. If no value is provided, the complementary of
|
| 814 |
+
`structure1` is taken.
|
| 815 |
+
output : ndarray, optional
|
| 816 |
+
Array of the same shape as input, into which the output is placed.
|
| 817 |
+
By default, a new array is created.
|
| 818 |
+
origin1 : int or tuple of ints, optional
|
| 819 |
+
Placement of the first part of the structuring element `structure1`,
|
| 820 |
+
by default 0 for a centered structure.
|
| 821 |
+
origin2 : int or tuple of ints, optional
|
| 822 |
+
Placement of the second part of the structuring element `structure2`,
|
| 823 |
+
by default 0 for a centered structure. If a value is provided for
|
| 824 |
+
`origin1` and not for `origin2`, then `origin2` is set to `origin1`.
|
| 825 |
+
|
| 826 |
+
Returns
|
| 827 |
+
-------
|
| 828 |
+
binary_hit_or_miss : ndarray
|
| 829 |
+
Hit-or-miss transform of `input` with the given structuring
|
| 830 |
+
element (`structure1`, `structure2`).
|
| 831 |
+
|
| 832 |
+
See Also
|
| 833 |
+
--------
|
| 834 |
+
binary_erosion
|
| 835 |
+
|
| 836 |
+
References
|
| 837 |
+
----------
|
| 838 |
+
.. [1] https://en.wikipedia.org/wiki/Hit-or-miss_transform
|
| 839 |
+
|
| 840 |
+
Examples
|
| 841 |
+
--------
|
| 842 |
+
>>> from scipy import ndimage
|
| 843 |
+
>>> import numpy as np
|
| 844 |
+
>>> a = np.zeros((7,7), dtype=int)
|
| 845 |
+
>>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1
|
| 846 |
+
>>> a
|
| 847 |
+
array([[0, 0, 0, 0, 0, 0, 0],
|
| 848 |
+
[0, 1, 0, 0, 0, 0, 0],
|
| 849 |
+
[0, 0, 1, 1, 0, 0, 0],
|
| 850 |
+
[0, 0, 1, 1, 0, 0, 0],
|
| 851 |
+
[0, 0, 0, 0, 1, 1, 0],
|
| 852 |
+
[0, 0, 0, 0, 1, 1, 0],
|
| 853 |
+
[0, 0, 0, 0, 0, 0, 0]])
|
| 854 |
+
>>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
|
| 855 |
+
>>> structure1
|
| 856 |
+
array([[1, 0, 0],
|
| 857 |
+
[0, 1, 1],
|
| 858 |
+
[0, 1, 1]])
|
| 859 |
+
>>> # Find the matches of structure1 in the array a
|
| 860 |
+
>>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int)
|
| 861 |
+
array([[0, 0, 0, 0, 0, 0, 0],
|
| 862 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 863 |
+
[0, 0, 1, 0, 0, 0, 0],
|
| 864 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 865 |
+
[0, 0, 0, 0, 1, 0, 0],
|
| 866 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 867 |
+
[0, 0, 0, 0, 0, 0, 0]])
|
| 868 |
+
>>> # Change the origin of the filter
|
| 869 |
+
>>> # origin1=1 is equivalent to origin1=(1,1) here
|
| 870 |
+
>>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\
|
| 871 |
+
... origin1=1).astype(int)
|
| 872 |
+
array([[0, 0, 0, 0, 0, 0, 0],
|
| 873 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 874 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 875 |
+
[0, 0, 0, 1, 0, 0, 0],
|
| 876 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 877 |
+
[0, 0, 0, 0, 0, 1, 0],
|
| 878 |
+
[0, 0, 0, 0, 0, 0, 0]])
|
| 879 |
+
|
| 880 |
+
"""
|
| 881 |
+
input = np.asarray(input)
|
| 882 |
+
if structure1 is None:
|
| 883 |
+
structure1 = generate_binary_structure(input.ndim, 1)
|
| 884 |
+
if structure2 is None:
|
| 885 |
+
structure2 = np.logical_not(structure1)
|
| 886 |
+
origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
|
| 887 |
+
if origin2 is None:
|
| 888 |
+
origin2 = origin1
|
| 889 |
+
else:
|
| 890 |
+
origin2 = _ni_support._normalize_sequence(origin2, input.ndim)
|
| 891 |
+
|
| 892 |
+
tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
|
| 893 |
+
0, False)
|
| 894 |
+
inplace = isinstance(output, np.ndarray)
|
| 895 |
+
result = _binary_erosion(input, structure2, 1, None, output, 0,
|
| 896 |
+
origin2, 1, False)
|
| 897 |
+
if inplace:
|
| 898 |
+
np.logical_not(output, output)
|
| 899 |
+
np.logical_and(tmp1, output, output)
|
| 900 |
+
else:
|
| 901 |
+
np.logical_not(result, result)
|
| 902 |
+
return np.logical_and(tmp1, result)
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
def binary_propagation(input, structure=None, mask=None,
|
| 906 |
+
output=None, border_value=0, origin=0):
|
| 907 |
+
"""
|
| 908 |
+
Multidimensional binary propagation with the given structuring element.
|
| 909 |
+
|
| 910 |
+
Parameters
|
| 911 |
+
----------
|
| 912 |
+
input : array_like
|
| 913 |
+
Binary image to be propagated inside `mask`.
|
| 914 |
+
structure : array_like, optional
|
| 915 |
+
Structuring element used in the successive dilations. The output
|
| 916 |
+
may depend on the structuring element, especially if `mask` has
|
| 917 |
+
several connex components. If no structuring element is
|
| 918 |
+
provided, an element is generated with a squared connectivity equal
|
| 919 |
+
to one.
|
| 920 |
+
mask : array_like, optional
|
| 921 |
+
Binary mask defining the region into which `input` is allowed to
|
| 922 |
+
propagate.
|
| 923 |
+
output : ndarray, optional
|
| 924 |
+
Array of the same shape as input, into which the output is placed.
|
| 925 |
+
By default, a new array is created.
|
| 926 |
+
border_value : int (cast to 0 or 1), optional
|
| 927 |
+
Value at the border in the output array.
|
| 928 |
+
origin : int or tuple of ints, optional
|
| 929 |
+
Placement of the filter, by default 0.
|
| 930 |
+
|
| 931 |
+
Returns
|
| 932 |
+
-------
|
| 933 |
+
binary_propagation : ndarray
|
| 934 |
+
Binary propagation of `input` inside `mask`.
|
| 935 |
+
|
| 936 |
+
Notes
|
| 937 |
+
-----
|
| 938 |
+
This function is functionally equivalent to calling binary_dilation
|
| 939 |
+
with the number of iterations less than one: iterative dilation until
|
| 940 |
+
the result does not change anymore.
|
| 941 |
+
|
| 942 |
+
The succession of an erosion and propagation inside the original image
|
| 943 |
+
can be used instead of an *opening* for deleting small objects while
|
| 944 |
+
keeping the contours of larger objects untouched.
|
| 945 |
+
|
| 946 |
+
References
|
| 947 |
+
----------
|
| 948 |
+
.. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
|
| 949 |
+
.. [2] I.T. Young, J.J. Gerbrands, and L.J. van Vliet, "Fundamentals of
|
| 950 |
+
image processing", 1998
|
| 951 |
+
ftp://qiftp.tudelft.nl/DIPimage/docs/FIP2.3.pdf
|
| 952 |
+
|
| 953 |
+
Examples
|
| 954 |
+
--------
|
| 955 |
+
>>> from scipy import ndimage
|
| 956 |
+
>>> import numpy as np
|
| 957 |
+
>>> input = np.zeros((8, 8), dtype=int)
|
| 958 |
+
>>> input[2, 2] = 1
|
| 959 |
+
>>> mask = np.zeros((8, 8), dtype=int)
|
| 960 |
+
>>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1
|
| 961 |
+
>>> input
|
| 962 |
+
array([[0, 0, 0, 0, 0, 0, 0, 0],
|
| 963 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 964 |
+
[0, 0, 1, 0, 0, 0, 0, 0],
|
| 965 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 966 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 967 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 968 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 969 |
+
[0, 0, 0, 0, 0, 0, 0, 0]])
|
| 970 |
+
>>> mask
|
| 971 |
+
array([[0, 0, 0, 0, 0, 0, 0, 0],
|
| 972 |
+
[0, 1, 1, 1, 0, 0, 0, 0],
|
| 973 |
+
[0, 1, 1, 1, 0, 0, 0, 0],
|
| 974 |
+
[0, 1, 1, 1, 0, 0, 0, 0],
|
| 975 |
+
[0, 0, 0, 0, 1, 0, 0, 0],
|
| 976 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 977 |
+
[0, 0, 0, 0, 0, 0, 1, 1],
|
| 978 |
+
[0, 0, 0, 0, 0, 0, 1, 1]])
|
| 979 |
+
>>> ndimage.binary_propagation(input, mask=mask).astype(int)
|
| 980 |
+
array([[0, 0, 0, 0, 0, 0, 0, 0],
|
| 981 |
+
[0, 1, 1, 1, 0, 0, 0, 0],
|
| 982 |
+
[0, 1, 1, 1, 0, 0, 0, 0],
|
| 983 |
+
[0, 1, 1, 1, 0, 0, 0, 0],
|
| 984 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 985 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 986 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 987 |
+
[0, 0, 0, 0, 0, 0, 0, 0]])
|
| 988 |
+
>>> ndimage.binary_propagation(input, mask=mask,\\
|
| 989 |
+
... structure=np.ones((3,3))).astype(int)
|
| 990 |
+
array([[0, 0, 0, 0, 0, 0, 0, 0],
|
| 991 |
+
[0, 1, 1, 1, 0, 0, 0, 0],
|
| 992 |
+
[0, 1, 1, 1, 0, 0, 0, 0],
|
| 993 |
+
[0, 1, 1, 1, 0, 0, 0, 0],
|
| 994 |
+
[0, 0, 0, 0, 1, 0, 0, 0],
|
| 995 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 996 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
| 997 |
+
[0, 0, 0, 0, 0, 0, 0, 0]])
|
| 998 |
+
|
| 999 |
+
>>> # Comparison between opening and erosion+propagation
|
| 1000 |
+
>>> a = np.zeros((6,6), dtype=int)
|
| 1001 |
+
>>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1
|
| 1002 |
+
>>> a
|
| 1003 |
+
array([[1, 0, 0, 0, 0, 0],
|
| 1004 |
+
[0, 0, 0, 0, 0, 0],
|
| 1005 |
+
[0, 0, 1, 1, 1, 0],
|
| 1006 |
+
[0, 0, 1, 1, 1, 0],
|
| 1007 |
+
[0, 0, 1, 1, 1, 0],
|
| 1008 |
+
[0, 0, 0, 0, 0, 1]])
|
| 1009 |
+
>>> ndimage.binary_opening(a).astype(int)
|
| 1010 |
+
array([[0, 0, 0, 0, 0, 0],
|
| 1011 |
+
[0, 0, 0, 0, 0, 0],
|
| 1012 |
+
[0, 0, 0, 1, 0, 0],
|
| 1013 |
+
[0, 0, 1, 1, 1, 0],
|
| 1014 |
+
[0, 0, 0, 1, 0, 0],
|
| 1015 |
+
[0, 0, 0, 0, 0, 0]])
|
| 1016 |
+
>>> b = ndimage.binary_erosion(a)
|
| 1017 |
+
>>> b.astype(int)
|
| 1018 |
+
array([[0, 0, 0, 0, 0, 0],
|
| 1019 |
+
[0, 0, 0, 0, 0, 0],
|
| 1020 |
+
[0, 0, 0, 0, 0, 0],
|
| 1021 |
+
[0, 0, 0, 1, 0, 0],
|
| 1022 |
+
[0, 0, 0, 0, 0, 0],
|
| 1023 |
+
[0, 0, 0, 0, 0, 0]])
|
| 1024 |
+
>>> ndimage.binary_propagation(b, mask=a).astype(int)
|
| 1025 |
+
array([[0, 0, 0, 0, 0, 0],
|
| 1026 |
+
[0, 0, 0, 0, 0, 0],
|
| 1027 |
+
[0, 0, 1, 1, 1, 0],
|
| 1028 |
+
[0, 0, 1, 1, 1, 0],
|
| 1029 |
+
[0, 0, 1, 1, 1, 0],
|
| 1030 |
+
[0, 0, 0, 0, 0, 0]])
|
| 1031 |
+
|
| 1032 |
+
"""
|
| 1033 |
+
return binary_dilation(input, structure, -1, mask, output,
|
| 1034 |
+
border_value, origin)
|
| 1035 |
+
|
| 1036 |
+
|
| 1037 |
+
def binary_fill_holes(input, structure=None, output=None, origin=0):
|
| 1038 |
+
"""
|
| 1039 |
+
Fill the holes in binary objects.
|
| 1040 |
+
|
| 1041 |
+
|
| 1042 |
+
Parameters
|
| 1043 |
+
----------
|
| 1044 |
+
input : array_like
|
| 1045 |
+
N-D binary array with holes to be filled
|
| 1046 |
+
structure : array_like, optional
|
| 1047 |
+
Structuring element used in the computation; large-size elements
|
| 1048 |
+
make computations faster but may miss holes separated from the
|
| 1049 |
+
background by thin regions. The default element (with a square
|
| 1050 |
+
connectivity equal to one) yields the intuitive result where all
|
| 1051 |
+
holes in the input have been filled.
|
| 1052 |
+
output : ndarray, optional
|
| 1053 |
+
Array of the same shape as input, into which the output is placed.
|
| 1054 |
+
By default, a new array is created.
|
| 1055 |
+
origin : int, tuple of ints, optional
|
| 1056 |
+
Position of the structuring element.
|
| 1057 |
+
|
| 1058 |
+
Returns
|
| 1059 |
+
-------
|
| 1060 |
+
out : ndarray
|
| 1061 |
+
Transformation of the initial image `input` where holes have been
|
| 1062 |
+
filled.
|
| 1063 |
+
|
| 1064 |
+
See Also
|
| 1065 |
+
--------
|
| 1066 |
+
binary_dilation, binary_propagation, label
|
| 1067 |
+
|
| 1068 |
+
Notes
|
| 1069 |
+
-----
|
| 1070 |
+
The algorithm used in this function consists in invading the complementary
|
| 1071 |
+
of the shapes in `input` from the outer boundary of the image,
|
| 1072 |
+
using binary dilations. Holes are not connected to the boundary and are
|
| 1073 |
+
therefore not invaded. The result is the complementary subset of the
|
| 1074 |
+
invaded region.
|
| 1075 |
+
|
| 1076 |
+
References
|
| 1077 |
+
----------
|
| 1078 |
+
.. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
|
| 1079 |
+
|
| 1080 |
+
|
| 1081 |
+
Examples
|
| 1082 |
+
--------
|
| 1083 |
+
>>> from scipy import ndimage
|
| 1084 |
+
>>> import numpy as np
|
| 1085 |
+
>>> a = np.zeros((5, 5), dtype=int)
|
| 1086 |
+
>>> a[1:4, 1:4] = 1
|
| 1087 |
+
>>> a[2,2] = 0
|
| 1088 |
+
>>> a
|
| 1089 |
+
array([[0, 0, 0, 0, 0],
|
| 1090 |
+
[0, 1, 1, 1, 0],
|
| 1091 |
+
[0, 1, 0, 1, 0],
|
| 1092 |
+
[0, 1, 1, 1, 0],
|
| 1093 |
+
[0, 0, 0, 0, 0]])
|
| 1094 |
+
>>> ndimage.binary_fill_holes(a).astype(int)
|
| 1095 |
+
array([[0, 0, 0, 0, 0],
|
| 1096 |
+
[0, 1, 1, 1, 0],
|
| 1097 |
+
[0, 1, 1, 1, 0],
|
| 1098 |
+
[0, 1, 1, 1, 0],
|
| 1099 |
+
[0, 0, 0, 0, 0]])
|
| 1100 |
+
>>> # Too big structuring element
|
| 1101 |
+
>>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int)
|
| 1102 |
+
array([[0, 0, 0, 0, 0],
|
| 1103 |
+
[0, 1, 1, 1, 0],
|
| 1104 |
+
[0, 1, 0, 1, 0],
|
| 1105 |
+
[0, 1, 1, 1, 0],
|
| 1106 |
+
[0, 0, 0, 0, 0]])
|
| 1107 |
+
|
| 1108 |
+
"""
|
| 1109 |
+
mask = np.logical_not(input)
|
| 1110 |
+
tmp = np.zeros(mask.shape, bool)
|
| 1111 |
+
inplace = isinstance(output, np.ndarray)
|
| 1112 |
+
if inplace:
|
| 1113 |
+
binary_dilation(tmp, structure, -1, mask, output, 1, origin)
|
| 1114 |
+
np.logical_not(output, output)
|
| 1115 |
+
else:
|
| 1116 |
+
output = binary_dilation(tmp, structure, -1, mask, None, 1,
|
| 1117 |
+
origin)
|
| 1118 |
+
np.logical_not(output, output)
|
| 1119 |
+
return output
|
| 1120 |
+
|
| 1121 |
+
|
| 1122 |
+
def grey_erosion(input, size=None, footprint=None, structure=None,
|
| 1123 |
+
output=None, mode="reflect", cval=0.0, origin=0):
|
| 1124 |
+
"""
|
| 1125 |
+
Calculate a greyscale erosion, using either a structuring element,
|
| 1126 |
+
or a footprint corresponding to a flat structuring element.
|
| 1127 |
+
|
| 1128 |
+
Grayscale erosion is a mathematical morphology operation. For the
|
| 1129 |
+
simple case of a full and flat structuring element, it can be viewed
|
| 1130 |
+
as a minimum filter over a sliding window.
|
| 1131 |
+
|
| 1132 |
+
Parameters
|
| 1133 |
+
----------
|
| 1134 |
+
input : array_like
|
| 1135 |
+
Array over which the grayscale erosion is to be computed.
|
| 1136 |
+
size : tuple of ints
|
| 1137 |
+
Shape of a flat and full structuring element used for the grayscale
|
| 1138 |
+
erosion. Optional if `footprint` or `structure` is provided.
|
| 1139 |
+
footprint : array of ints, optional
|
| 1140 |
+
Positions of non-infinite elements of a flat structuring element
|
| 1141 |
+
used for the grayscale erosion. Non-zero values give the set of
|
| 1142 |
+
neighbors of the center over which the minimum is chosen.
|
| 1143 |
+
structure : array of ints, optional
|
| 1144 |
+
Structuring element used for the grayscale erosion. `structure`
|
| 1145 |
+
may be a non-flat structuring element. The `structure` array applies a
|
| 1146 |
+
subtractive offset for each pixel in the neighborhood.
|
| 1147 |
+
output : array, optional
|
| 1148 |
+
An array used for storing the output of the erosion may be provided.
|
| 1149 |
+
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
|
| 1150 |
+
The `mode` parameter determines how the array borders are
|
| 1151 |
+
handled, where `cval` is the value when mode is equal to
|
| 1152 |
+
'constant'. Default is 'reflect'
|
| 1153 |
+
cval : scalar, optional
|
| 1154 |
+
Value to fill past edges of input if `mode` is 'constant'. Default
|
| 1155 |
+
is 0.0.
|
| 1156 |
+
origin : scalar, optional
|
| 1157 |
+
The `origin` parameter controls the placement of the filter.
|
| 1158 |
+
Default 0
|
| 1159 |
+
|
| 1160 |
+
Returns
|
| 1161 |
+
-------
|
| 1162 |
+
output : ndarray
|
| 1163 |
+
Grayscale erosion of `input`.
|
| 1164 |
+
|
| 1165 |
+
See Also
|
| 1166 |
+
--------
|
| 1167 |
+
binary_erosion, grey_dilation, grey_opening, grey_closing
|
| 1168 |
+
generate_binary_structure, minimum_filter
|
| 1169 |
+
|
| 1170 |
+
Notes
|
| 1171 |
+
-----
|
| 1172 |
+
The grayscale erosion of an image input by a structuring element s defined
|
| 1173 |
+
over a domain E is given by:
|
| 1174 |
+
|
| 1175 |
+
(input+s)(x) = min {input(y) - s(x-y), for y in E}
|
| 1176 |
+
|
| 1177 |
+
In particular, for structuring elements defined as
|
| 1178 |
+
s(y) = 0 for y in E, the grayscale erosion computes the minimum of the
|
| 1179 |
+
input image inside a sliding window defined by E.
|
| 1180 |
+
|
| 1181 |
+
Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_.
|
| 1182 |
+
|
| 1183 |
+
References
|
| 1184 |
+
----------
|
| 1185 |
+
.. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
|
| 1186 |
+
.. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
|
| 1187 |
+
|
| 1188 |
+
Examples
|
| 1189 |
+
--------
|
| 1190 |
+
>>> from scipy import ndimage
|
| 1191 |
+
>>> import numpy as np
|
| 1192 |
+
>>> a = np.zeros((7,7), dtype=int)
|
| 1193 |
+
>>> a[1:6, 1:6] = 3
|
| 1194 |
+
>>> a[4,4] = 2; a[2,3] = 1
|
| 1195 |
+
>>> a
|
| 1196 |
+
array([[0, 0, 0, 0, 0, 0, 0],
|
| 1197 |
+
[0, 3, 3, 3, 3, 3, 0],
|
| 1198 |
+
[0, 3, 3, 1, 3, 3, 0],
|
| 1199 |
+
[0, 3, 3, 3, 3, 3, 0],
|
| 1200 |
+
[0, 3, 3, 3, 2, 3, 0],
|
| 1201 |
+
[0, 3, 3, 3, 3, 3, 0],
|
| 1202 |
+
[0, 0, 0, 0, 0, 0, 0]])
|
| 1203 |
+
>>> ndimage.grey_erosion(a, size=(3,3))
|
| 1204 |
+
array([[0, 0, 0, 0, 0, 0, 0],
|
| 1205 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1206 |
+
[0, 0, 1, 1, 1, 0, 0],
|
| 1207 |
+
[0, 0, 1, 1, 1, 0, 0],
|
| 1208 |
+
[0, 0, 3, 2, 2, 0, 0],
|
| 1209 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1210 |
+
[0, 0, 0, 0, 0, 0, 0]])
|
| 1211 |
+
>>> footprint = ndimage.generate_binary_structure(2, 1)
|
| 1212 |
+
>>> footprint
|
| 1213 |
+
array([[False, True, False],
|
| 1214 |
+
[ True, True, True],
|
| 1215 |
+
[False, True, False]], dtype=bool)
|
| 1216 |
+
>>> # Diagonally-connected elements are not considered neighbors
|
| 1217 |
+
>>> ndimage.grey_erosion(a, footprint=footprint)
|
| 1218 |
+
array([[0, 0, 0, 0, 0, 0, 0],
|
| 1219 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1220 |
+
[0, 0, 1, 1, 1, 0, 0],
|
| 1221 |
+
[0, 0, 3, 1, 2, 0, 0],
|
| 1222 |
+
[0, 0, 3, 2, 2, 0, 0],
|
| 1223 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1224 |
+
[0, 0, 0, 0, 0, 0, 0]])
|
| 1225 |
+
|
| 1226 |
+
"""
|
| 1227 |
+
if size is None and footprint is None and structure is None:
|
| 1228 |
+
raise ValueError("size, footprint, or structure must be specified")
|
| 1229 |
+
|
| 1230 |
+
return _filters._min_or_max_filter(input, size, footprint, structure,
|
| 1231 |
+
output, mode, cval, origin, 1)
|
| 1232 |
+
|
| 1233 |
+
|
| 1234 |
+
def grey_dilation(input, size=None, footprint=None, structure=None,
|
| 1235 |
+
output=None, mode="reflect", cval=0.0, origin=0):
|
| 1236 |
+
"""
|
| 1237 |
+
Calculate a greyscale dilation, using either a structuring element,
|
| 1238 |
+
or a footprint corresponding to a flat structuring element.
|
| 1239 |
+
|
| 1240 |
+
Grayscale dilation is a mathematical morphology operation. For the
|
| 1241 |
+
simple case of a full and flat structuring element, it can be viewed
|
| 1242 |
+
as a maximum filter over a sliding window.
|
| 1243 |
+
|
| 1244 |
+
Parameters
|
| 1245 |
+
----------
|
| 1246 |
+
input : array_like
|
| 1247 |
+
Array over which the grayscale dilation is to be computed.
|
| 1248 |
+
size : tuple of ints
|
| 1249 |
+
Shape of a flat and full structuring element used for the grayscale
|
| 1250 |
+
dilation. Optional if `footprint` or `structure` is provided.
|
| 1251 |
+
footprint : array of ints, optional
|
| 1252 |
+
Positions of non-infinite elements of a flat structuring element
|
| 1253 |
+
used for the grayscale dilation. Non-zero values give the set of
|
| 1254 |
+
neighbors of the center over which the maximum is chosen.
|
| 1255 |
+
structure : array of ints, optional
|
| 1256 |
+
Structuring element used for the grayscale dilation. `structure`
|
| 1257 |
+
may be a non-flat structuring element. The `structure` array applies an
|
| 1258 |
+
additive offset for each pixel in the neighborhood.
|
| 1259 |
+
output : array, optional
|
| 1260 |
+
An array used for storing the output of the dilation may be provided.
|
| 1261 |
+
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
|
| 1262 |
+
The `mode` parameter determines how the array borders are
|
| 1263 |
+
handled, where `cval` is the value when mode is equal to
|
| 1264 |
+
'constant'. Default is 'reflect'
|
| 1265 |
+
cval : scalar, optional
|
| 1266 |
+
Value to fill past edges of input if `mode` is 'constant'. Default
|
| 1267 |
+
is 0.0.
|
| 1268 |
+
origin : scalar, optional
|
| 1269 |
+
The `origin` parameter controls the placement of the filter.
|
| 1270 |
+
Default 0
|
| 1271 |
+
|
| 1272 |
+
Returns
|
| 1273 |
+
-------
|
| 1274 |
+
grey_dilation : ndarray
|
| 1275 |
+
Grayscale dilation of `input`.
|
| 1276 |
+
|
| 1277 |
+
See Also
|
| 1278 |
+
--------
|
| 1279 |
+
binary_dilation, grey_erosion, grey_closing, grey_opening
|
| 1280 |
+
generate_binary_structure, maximum_filter
|
| 1281 |
+
|
| 1282 |
+
Notes
|
| 1283 |
+
-----
|
| 1284 |
+
The grayscale dilation of an image input by a structuring element s defined
|
| 1285 |
+
over a domain E is given by:
|
| 1286 |
+
|
| 1287 |
+
(input+s)(x) = max {input(y) + s(x-y), for y in E}
|
| 1288 |
+
|
| 1289 |
+
In particular, for structuring elements defined as
|
| 1290 |
+
s(y) = 0 for y in E, the grayscale dilation computes the maximum of the
|
| 1291 |
+
input image inside a sliding window defined by E.
|
| 1292 |
+
|
| 1293 |
+
Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_.
|
| 1294 |
+
|
| 1295 |
+
References
|
| 1296 |
+
----------
|
| 1297 |
+
.. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
|
| 1298 |
+
.. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
|
| 1299 |
+
|
| 1300 |
+
Examples
|
| 1301 |
+
--------
|
| 1302 |
+
>>> from scipy import ndimage
|
| 1303 |
+
>>> import numpy as np
|
| 1304 |
+
>>> a = np.zeros((7,7), dtype=int)
|
| 1305 |
+
>>> a[2:5, 2:5] = 1
|
| 1306 |
+
>>> a[4,4] = 2; a[2,3] = 3
|
| 1307 |
+
>>> a
|
| 1308 |
+
array([[0, 0, 0, 0, 0, 0, 0],
|
| 1309 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1310 |
+
[0, 0, 1, 3, 1, 0, 0],
|
| 1311 |
+
[0, 0, 1, 1, 1, 0, 0],
|
| 1312 |
+
[0, 0, 1, 1, 2, 0, 0],
|
| 1313 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1314 |
+
[0, 0, 0, 0, 0, 0, 0]])
|
| 1315 |
+
>>> ndimage.grey_dilation(a, size=(3,3))
|
| 1316 |
+
array([[0, 0, 0, 0, 0, 0, 0],
|
| 1317 |
+
[0, 1, 3, 3, 3, 1, 0],
|
| 1318 |
+
[0, 1, 3, 3, 3, 1, 0],
|
| 1319 |
+
[0, 1, 3, 3, 3, 2, 0],
|
| 1320 |
+
[0, 1, 1, 2, 2, 2, 0],
|
| 1321 |
+
[0, 1, 1, 2, 2, 2, 0],
|
| 1322 |
+
[0, 0, 0, 0, 0, 0, 0]])
|
| 1323 |
+
>>> ndimage.grey_dilation(a, footprint=np.ones((3,3)))
|
| 1324 |
+
array([[0, 0, 0, 0, 0, 0, 0],
|
| 1325 |
+
[0, 1, 3, 3, 3, 1, 0],
|
| 1326 |
+
[0, 1, 3, 3, 3, 1, 0],
|
| 1327 |
+
[0, 1, 3, 3, 3, 2, 0],
|
| 1328 |
+
[0, 1, 1, 2, 2, 2, 0],
|
| 1329 |
+
[0, 1, 1, 2, 2, 2, 0],
|
| 1330 |
+
[0, 0, 0, 0, 0, 0, 0]])
|
| 1331 |
+
>>> s = ndimage.generate_binary_structure(2,1)
|
| 1332 |
+
>>> s
|
| 1333 |
+
array([[False, True, False],
|
| 1334 |
+
[ True, True, True],
|
| 1335 |
+
[False, True, False]], dtype=bool)
|
| 1336 |
+
>>> ndimage.grey_dilation(a, footprint=s)
|
| 1337 |
+
array([[0, 0, 0, 0, 0, 0, 0],
|
| 1338 |
+
[0, 0, 1, 3, 1, 0, 0],
|
| 1339 |
+
[0, 1, 3, 3, 3, 1, 0],
|
| 1340 |
+
[0, 1, 1, 3, 2, 1, 0],
|
| 1341 |
+
[0, 1, 1, 2, 2, 2, 0],
|
| 1342 |
+
[0, 0, 1, 1, 2, 0, 0],
|
| 1343 |
+
[0, 0, 0, 0, 0, 0, 0]])
|
| 1344 |
+
>>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3)))
|
| 1345 |
+
array([[1, 1, 1, 1, 1, 1, 1],
|
| 1346 |
+
[1, 2, 4, 4, 4, 2, 1],
|
| 1347 |
+
[1, 2, 4, 4, 4, 2, 1],
|
| 1348 |
+
[1, 2, 4, 4, 4, 3, 1],
|
| 1349 |
+
[1, 2, 2, 3, 3, 3, 1],
|
| 1350 |
+
[1, 2, 2, 3, 3, 3, 1],
|
| 1351 |
+
[1, 1, 1, 1, 1, 1, 1]])
|
| 1352 |
+
|
| 1353 |
+
"""
|
| 1354 |
+
if size is None and footprint is None and structure is None:
|
| 1355 |
+
raise ValueError("size, footprint, or structure must be specified")
|
| 1356 |
+
if structure is not None:
|
| 1357 |
+
structure = np.asarray(structure)
|
| 1358 |
+
structure = structure[tuple([slice(None, None, -1)] *
|
| 1359 |
+
structure.ndim)]
|
| 1360 |
+
if footprint is not None:
|
| 1361 |
+
footprint = np.asarray(footprint)
|
| 1362 |
+
footprint = footprint[tuple([slice(None, None, -1)] *
|
| 1363 |
+
footprint.ndim)]
|
| 1364 |
+
|
| 1365 |
+
input = np.asarray(input)
|
| 1366 |
+
origin = _ni_support._normalize_sequence(origin, input.ndim)
|
| 1367 |
+
for ii in range(len(origin)):
|
| 1368 |
+
origin[ii] = -origin[ii]
|
| 1369 |
+
if footprint is not None:
|
| 1370 |
+
sz = footprint.shape[ii]
|
| 1371 |
+
elif structure is not None:
|
| 1372 |
+
sz = structure.shape[ii]
|
| 1373 |
+
elif np.isscalar(size):
|
| 1374 |
+
sz = size
|
| 1375 |
+
else:
|
| 1376 |
+
sz = size[ii]
|
| 1377 |
+
if not sz & 1:
|
| 1378 |
+
origin[ii] -= 1
|
| 1379 |
+
|
| 1380 |
+
return _filters._min_or_max_filter(input, size, footprint, structure,
|
| 1381 |
+
output, mode, cval, origin, 0)
|
| 1382 |
+
|
| 1383 |
+
|
| 1384 |
+
def grey_opening(input, size=None, footprint=None, structure=None,
|
| 1385 |
+
output=None, mode="reflect", cval=0.0, origin=0):
|
| 1386 |
+
"""
|
| 1387 |
+
Multidimensional grayscale opening.
|
| 1388 |
+
|
| 1389 |
+
A grayscale opening consists in the succession of a grayscale erosion,
|
| 1390 |
+
and a grayscale dilation.
|
| 1391 |
+
|
| 1392 |
+
Parameters
|
| 1393 |
+
----------
|
| 1394 |
+
input : array_like
|
| 1395 |
+
Array over which the grayscale opening is to be computed.
|
| 1396 |
+
size : tuple of ints
|
| 1397 |
+
Shape of a flat and full structuring element used for the grayscale
|
| 1398 |
+
opening. Optional if `footprint` or `structure` is provided.
|
| 1399 |
+
footprint : array of ints, optional
|
| 1400 |
+
Positions of non-infinite elements of a flat structuring element
|
| 1401 |
+
used for the grayscale opening.
|
| 1402 |
+
structure : array of ints, optional
|
| 1403 |
+
Structuring element used for the grayscale opening. `structure`
|
| 1404 |
+
may be a non-flat structuring element. The `structure` array applies
|
| 1405 |
+
offsets to the pixels in a neighborhood (the offset is additive during
|
| 1406 |
+
dilation and subtractive during erosion).
|
| 1407 |
+
output : array, optional
|
| 1408 |
+
An array used for storing the output of the opening may be provided.
|
| 1409 |
+
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
|
| 1410 |
+
The `mode` parameter determines how the array borders are
|
| 1411 |
+
handled, where `cval` is the value when mode is equal to
|
| 1412 |
+
'constant'. Default is 'reflect'
|
| 1413 |
+
cval : scalar, optional
|
| 1414 |
+
Value to fill past edges of input if `mode` is 'constant'. Default
|
| 1415 |
+
is 0.0.
|
| 1416 |
+
origin : scalar, optional
|
| 1417 |
+
The `origin` parameter controls the placement of the filter.
|
| 1418 |
+
Default 0
|
| 1419 |
+
|
| 1420 |
+
Returns
|
| 1421 |
+
-------
|
| 1422 |
+
grey_opening : ndarray
|
| 1423 |
+
Result of the grayscale opening of `input` with `structure`.
|
| 1424 |
+
|
| 1425 |
+
See Also
|
| 1426 |
+
--------
|
| 1427 |
+
binary_opening, grey_dilation, grey_erosion, grey_closing
|
| 1428 |
+
generate_binary_structure
|
| 1429 |
+
|
| 1430 |
+
Notes
|
| 1431 |
+
-----
|
| 1432 |
+
The action of a grayscale opening with a flat structuring element amounts
|
| 1433 |
+
to smoothen high local maxima, whereas binary opening erases small objects.
|
| 1434 |
+
|
| 1435 |
+
References
|
| 1436 |
+
----------
|
| 1437 |
+
.. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
|
| 1438 |
+
|
| 1439 |
+
Examples
|
| 1440 |
+
--------
|
| 1441 |
+
>>> from scipy import ndimage
|
| 1442 |
+
>>> import numpy as np
|
| 1443 |
+
>>> a = np.arange(36).reshape((6,6))
|
| 1444 |
+
>>> a[3, 3] = 50
|
| 1445 |
+
>>> a
|
| 1446 |
+
array([[ 0, 1, 2, 3, 4, 5],
|
| 1447 |
+
[ 6, 7, 8, 9, 10, 11],
|
| 1448 |
+
[12, 13, 14, 15, 16, 17],
|
| 1449 |
+
[18, 19, 20, 50, 22, 23],
|
| 1450 |
+
[24, 25, 26, 27, 28, 29],
|
| 1451 |
+
[30, 31, 32, 33, 34, 35]])
|
| 1452 |
+
>>> ndimage.grey_opening(a, size=(3,3))
|
| 1453 |
+
array([[ 0, 1, 2, 3, 4, 4],
|
| 1454 |
+
[ 6, 7, 8, 9, 10, 10],
|
| 1455 |
+
[12, 13, 14, 15, 16, 16],
|
| 1456 |
+
[18, 19, 20, 22, 22, 22],
|
| 1457 |
+
[24, 25, 26, 27, 28, 28],
|
| 1458 |
+
[24, 25, 26, 27, 28, 28]])
|
| 1459 |
+
>>> # Note that the local maximum a[3,3] has disappeared
|
| 1460 |
+
|
| 1461 |
+
"""
|
| 1462 |
+
if (size is not None) and (footprint is not None):
|
| 1463 |
+
warnings.warn("ignoring size because footprint is set",
|
| 1464 |
+
UserWarning, stacklevel=2)
|
| 1465 |
+
tmp = grey_erosion(input, size, footprint, structure, None, mode,
|
| 1466 |
+
cval, origin)
|
| 1467 |
+
return grey_dilation(tmp, size, footprint, structure, output, mode,
|
| 1468 |
+
cval, origin)
|
| 1469 |
+
|
| 1470 |
+
|
| 1471 |
+
def grey_closing(input, size=None, footprint=None, structure=None,
|
| 1472 |
+
output=None, mode="reflect", cval=0.0, origin=0):
|
| 1473 |
+
"""
|
| 1474 |
+
Multidimensional grayscale closing.
|
| 1475 |
+
|
| 1476 |
+
A grayscale closing consists in the succession of a grayscale dilation,
|
| 1477 |
+
and a grayscale erosion.
|
| 1478 |
+
|
| 1479 |
+
Parameters
|
| 1480 |
+
----------
|
| 1481 |
+
input : array_like
|
| 1482 |
+
Array over which the grayscale closing is to be computed.
|
| 1483 |
+
size : tuple of ints
|
| 1484 |
+
Shape of a flat and full structuring element used for the grayscale
|
| 1485 |
+
closing. Optional if `footprint` or `structure` is provided.
|
| 1486 |
+
footprint : array of ints, optional
|
| 1487 |
+
Positions of non-infinite elements of a flat structuring element
|
| 1488 |
+
used for the grayscale closing.
|
| 1489 |
+
structure : array of ints, optional
|
| 1490 |
+
Structuring element used for the grayscale closing. `structure`
|
| 1491 |
+
may be a non-flat structuring element. The `structure` array applies
|
| 1492 |
+
offsets to the pixels in a neighborhood (the offset is additive during
|
| 1493 |
+
dilation and subtractive during erosion)
|
| 1494 |
+
output : array, optional
|
| 1495 |
+
An array used for storing the output of the closing may be provided.
|
| 1496 |
+
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
|
| 1497 |
+
The `mode` parameter determines how the array borders are
|
| 1498 |
+
handled, where `cval` is the value when mode is equal to
|
| 1499 |
+
'constant'. Default is 'reflect'
|
| 1500 |
+
cval : scalar, optional
|
| 1501 |
+
Value to fill past edges of input if `mode` is 'constant'. Default
|
| 1502 |
+
is 0.0.
|
| 1503 |
+
origin : scalar, optional
|
| 1504 |
+
The `origin` parameter controls the placement of the filter.
|
| 1505 |
+
Default 0
|
| 1506 |
+
|
| 1507 |
+
Returns
|
| 1508 |
+
-------
|
| 1509 |
+
grey_closing : ndarray
|
| 1510 |
+
Result of the grayscale closing of `input` with `structure`.
|
| 1511 |
+
|
| 1512 |
+
See Also
|
| 1513 |
+
--------
|
| 1514 |
+
binary_closing, grey_dilation, grey_erosion, grey_opening,
|
| 1515 |
+
generate_binary_structure
|
| 1516 |
+
|
| 1517 |
+
Notes
|
| 1518 |
+
-----
|
| 1519 |
+
The action of a grayscale closing with a flat structuring element amounts
|
| 1520 |
+
to smoothen deep local minima, whereas binary closing fills small holes.
|
| 1521 |
+
|
| 1522 |
+
References
|
| 1523 |
+
----------
|
| 1524 |
+
.. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
|
| 1525 |
+
|
| 1526 |
+
Examples
|
| 1527 |
+
--------
|
| 1528 |
+
>>> from scipy import ndimage
|
| 1529 |
+
>>> import numpy as np
|
| 1530 |
+
>>> a = np.arange(36).reshape((6,6))
|
| 1531 |
+
>>> a[3,3] = 0
|
| 1532 |
+
>>> a
|
| 1533 |
+
array([[ 0, 1, 2, 3, 4, 5],
|
| 1534 |
+
[ 6, 7, 8, 9, 10, 11],
|
| 1535 |
+
[12, 13, 14, 15, 16, 17],
|
| 1536 |
+
[18, 19, 20, 0, 22, 23],
|
| 1537 |
+
[24, 25, 26, 27, 28, 29],
|
| 1538 |
+
[30, 31, 32, 33, 34, 35]])
|
| 1539 |
+
>>> ndimage.grey_closing(a, size=(3,3))
|
| 1540 |
+
array([[ 7, 7, 8, 9, 10, 11],
|
| 1541 |
+
[ 7, 7, 8, 9, 10, 11],
|
| 1542 |
+
[13, 13, 14, 15, 16, 17],
|
| 1543 |
+
[19, 19, 20, 20, 22, 23],
|
| 1544 |
+
[25, 25, 26, 27, 28, 29],
|
| 1545 |
+
[31, 31, 32, 33, 34, 35]])
|
| 1546 |
+
>>> # Note that the local minimum a[3,3] has disappeared
|
| 1547 |
+
|
| 1548 |
+
"""
|
| 1549 |
+
if (size is not None) and (footprint is not None):
|
| 1550 |
+
warnings.warn("ignoring size because footprint is set",
|
| 1551 |
+
UserWarning, stacklevel=2)
|
| 1552 |
+
tmp = grey_dilation(input, size, footprint, structure, None, mode,
|
| 1553 |
+
cval, origin)
|
| 1554 |
+
return grey_erosion(tmp, size, footprint, structure, output, mode,
|
| 1555 |
+
cval, origin)
|
| 1556 |
+
|
| 1557 |
+
|
| 1558 |
+
def morphological_gradient(input, size=None, footprint=None, structure=None,
                           output=None, mode="reflect", cval=0.0, origin=0):
    """
    Multidimensional morphological gradient.

    The morphological gradient is calculated as the difference between a
    dilation and an erosion of the input with a given structuring element.

    Parameters
    ----------
    input : array_like
        Array over which to compute the morphological gradient.
    size : tuple of ints
        Shape of a flat and full structuring element used for the mathematical
        morphology operations. Optional if `footprint` or `structure` is
        provided. A larger `size` yields a more blurred gradient.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the morphology operations. Larger footprints
        give a more blurred morphological gradient.
    structure : array of ints, optional
        Structuring element used for the morphology operations. `structure` may
        be a non-flat structuring element. The `structure` array applies
        offsets to the pixels in a neighborhood (the offset is additive during
        dilation and subtractive during erosion)
    output : array, optional
        An array used for storing the output of the morphological gradient
        may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The `mode` parameter determines how the array borders are
        handled, where `cval` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0.
    origin : scalar, optional
        The `origin` parameter controls the placement of the filter.
        Default 0

    Returns
    -------
    morphological_gradient : ndarray
        Morphological gradient of `input`.

    See Also
    --------
    grey_dilation, grey_erosion, gaussian_gradient_magnitude

    Notes
    -----
    For a flat structuring element, the morphological gradient
    computed at a given point corresponds to the maximal difference
    between elements of the input among the elements covered by the
    structuring element centered on the point.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((7,7), dtype=int)
    >>> a[2:5, 2:5] = 1
    >>> ndimage.morphological_gradient(a, size=(3,3))
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 1, 1, 1, 1, 1, 0],
           [0, 1, 1, 1, 1, 1, 0],
           [0, 1, 1, 0, 1, 1, 0],
           [0, 1, 1, 1, 1, 1, 0],
           [0, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> # The morphological gradient is computed as the difference
    >>> # between a dilation and an erosion
    >>> ndimage.grey_dilation(a, size=(3,3)) -\\
    ... ndimage.grey_erosion(a, size=(3,3))
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 1, 1, 1, 1, 1, 0],
           [0, 1, 1, 1, 1, 1, 0],
           [0, 1, 1, 0, 1, 1, 0],
           [0, 1, 1, 1, 1, 1, 0],
           [0, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> a = np.zeros((7,7), dtype=int)
    >>> a[2:5, 2:5] = 1
    >>> a[4,4] = 2; a[2,3] = 3
    >>> a
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 3, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 2, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> ndimage.morphological_gradient(a, size=(3,3))
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 1, 3, 3, 3, 1, 0],
           [0, 1, 3, 3, 3, 1, 0],
           [0, 1, 3, 2, 3, 2, 0],
           [0, 1, 1, 2, 2, 2, 0],
           [0, 1, 1, 2, 2, 2, 0],
           [0, 0, 0, 0, 0, 0, 0]])

    """
    # Warn on conflicting structuring-element specifications, for consistency
    # with grey_closing/white_tophat/black_tophat in this module (footprint
    # takes precedence in the underlying grey_dilation/grey_erosion calls).
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    tmp = grey_dilation(input, size, footprint, structure, None, mode,
                        cval, origin)
    if isinstance(output, np.ndarray):
        # Compute the erosion directly into the caller-supplied buffer,
        # then form (dilation - erosion) in place to avoid a temporary.
        grey_erosion(input, size, footprint, structure, output, mode,
                     cval, origin)
        return np.subtract(tmp, output, output)
    else:
        return (tmp - grey_erosion(input, size, footprint, structure,
                                   None, mode, cval, origin))
|
| 1672 |
+
|
| 1673 |
+
|
| 1674 |
+
def morphological_laplace(input, size=None, footprint=None,
                          structure=None, output=None,
                          mode="reflect", cval=0.0, origin=0):
    """
    Multidimensional morphological laplace.

    The morphological laplace is computed as
    ``grey_dilation(input) + grey_erosion(input) - 2 * input``.

    Parameters
    ----------
    input : array_like
        Input.
    size : tuple of ints
        Shape of a flat and full structuring element used for the mathematical
        morphology operations. Optional if `footprint` or `structure` is
        provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the morphology operations.
    structure : array of ints, optional
        Structuring element used for the morphology operations. `structure` may
        be a non-flat structuring element. The `structure` array applies
        offsets to the pixels in a neighborhood (the offset is additive during
        dilation and subtractive during erosion)
    output : ndarray, optional
        An output array can optionally be provided.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled.
        For 'constant' mode, values beyond borders are set to be `cval`.
        Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if mode is 'constant'.
        Default is 0.0
    origin : scalar, optional
        The origin parameter controls the placement of the filter.
        Default is 0.

    Returns
    -------
    morphological_laplace : ndarray
        Output

    """
    # Warn on conflicting structuring-element specifications, for consistency
    # with grey_closing/white_tophat/black_tophat in this module.
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    tmp1 = grey_dilation(input, size, footprint, structure, None, mode,
                         cval, origin)
    if isinstance(output, np.ndarray):
        # In-place path: output <- dilation + erosion - 2 * input,
        # reusing the caller-supplied buffer for every step.
        grey_erosion(input, size, footprint, structure, output, mode,
                     cval, origin)
        np.add(tmp1, output, output)
        np.subtract(output, input, output)
        return np.subtract(output, input, output)
    else:
        tmp2 = grey_erosion(input, size, footprint, structure, None, mode,
                            cval, origin)
        # tmp2 <- dilation + erosion - 2 * input (input subtracted twice).
        np.add(tmp1, tmp2, tmp2)
        np.subtract(tmp2, input, tmp2)
        np.subtract(tmp2, input, tmp2)
        return tmp2
|
| 1729 |
+
|
| 1730 |
+
|
| 1731 |
+
def white_tophat(input, size=None, footprint=None, structure=None,
                 output=None, mode="reflect", cval=0.0, origin=0):
    """
    Multidimensional white tophat filter.

    Computes ``input - grey_opening(input)``, which extracts small bright
    features (peaks) from the background.

    Parameters
    ----------
    input : array_like
        Input.
    size : tuple of ints
        Shape of a flat and full structuring element used for the filter.
        Optional if `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of elements of a flat structuring element
        used for the white tophat filter.
    structure : array of ints, optional
        Structuring element used for the filter. `structure` may be a non-flat
        structuring element. The `structure` array applies offsets to the
        pixels in a neighborhood (the offset is additive during dilation and
        subtractive during erosion)
    output : array, optional
        An array used for storing the output of the filter may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The `mode` parameter determines how the array borders are
        handled, where `cval` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'.
        Default is 0.0.
    origin : scalar, optional
        The `origin` parameter controls the placement of the filter.
        Default is 0.

    Returns
    -------
    output : ndarray
        Result of the filter of `input` with `structure`.

    See Also
    --------
    black_tophat

    Examples
    --------
    Subtract gray background from a bright peak.

    >>> from scipy.ndimage import generate_binary_structure, white_tophat
    >>> import numpy as np
    >>> square = generate_binary_structure(rank=2, connectivity=3)
    >>> bright_on_gray = np.array([[2, 3, 3, 3, 2],
    ...                            [3, 4, 5, 4, 3],
    ...                            [3, 5, 9, 5, 3],
    ...                            [3, 4, 5, 4, 3],
    ...                            [2, 3, 3, 3, 2]])
    >>> white_tophat(input=bright_on_gray, structure=square)
    array([[0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 1, 5, 1, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0]])

    """
    if size is not None and footprint is not None:
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    # Grayscale opening: an erosion followed by a dilation.
    opened = grey_erosion(input, size, footprint, structure, None, mode,
                          cval, origin)
    opened = grey_dilation(opened, size, footprint, structure, output, mode,
                           cval, origin)
    if opened is None:
        # Older in-place code path: result was written into `output`.
        opened = output

    # For boolean images, subtraction is expressed as XOR; otherwise it is
    # an ordinary element-wise subtraction, done in place.
    if input.dtype == np.bool_ and opened.dtype == np.bool_:
        np.bitwise_xor(input, opened, out=opened)
    else:
        np.subtract(input, opened, out=opened)
    return opened
|
| 1808 |
+
|
| 1809 |
+
|
| 1810 |
+
def black_tophat(input, size=None, footprint=None,
                 structure=None, output=None, mode="reflect",
                 cval=0.0, origin=0):
    """
    Multidimensional black tophat filter.

    Computes ``grey_closing(input) - input``, which extracts small dark
    features (valleys) from the background.

    Parameters
    ----------
    input : array_like
        Input.
    size : tuple of ints, optional
        Shape of a flat and full structuring element used for the filter.
        Optional if `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the black tophat filter.
    structure : array of ints, optional
        Structuring element used for the filter. `structure` may be a non-flat
        structuring element. The `structure` array applies offsets to the
        pixels in a neighborhood (the offset is additive during dilation and
        subtractive during erosion)
    output : array, optional
        An array used for storing the output of the filter may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The `mode` parameter determines how the array borders are
        handled, where `cval` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0.
    origin : scalar, optional
        The `origin` parameter controls the placement of the filter.
        Default 0

    Returns
    -------
    black_tophat : ndarray
        Result of the filter of `input` with `structure`.

    See Also
    --------
    white_tophat, grey_opening, grey_closing

    Examples
    --------
    Change dark peak to bright peak and subtract background.

    >>> from scipy.ndimage import generate_binary_structure, black_tophat
    >>> import numpy as np
    >>> square = generate_binary_structure(rank=2, connectivity=3)
    >>> dark_on_gray = np.array([[7, 6, 6, 6, 7],
    ...                          [6, 5, 4, 5, 6],
    ...                          [6, 4, 0, 4, 6],
    ...                          [6, 5, 4, 5, 6],
    ...                          [7, 6, 6, 6, 7]])
    >>> black_tophat(input=dark_on_gray, structure=square)
    array([[0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 1, 5, 1, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0]])

    """
    if size is not None and footprint is not None:
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    # Grayscale closing: a dilation followed by an erosion.
    closed = grey_dilation(input, size, footprint, structure, None, mode,
                           cval, origin)
    closed = grey_erosion(closed, size, footprint, structure, output, mode,
                          cval, origin)
    if closed is None:
        # Older in-place code path: result was written into `output`.
        closed = output

    # For boolean images, subtraction is expressed as XOR; otherwise it is
    # an ordinary element-wise subtraction, done in place.
    if input.dtype == np.bool_ and closed.dtype == np.bool_:
        np.bitwise_xor(closed, input, out=closed)
    else:
        np.subtract(closed, input, out=closed)
    return closed
|
| 1888 |
+
|
| 1889 |
+
|
| 1890 |
+
def distance_transform_bf(input, metric="euclidean", sampling=None,
                          return_distances=True, return_indices=False,
                          distances=None, indices=None):
    """
    Distance transform function by a brute force algorithm.

    This function calculates the distance transform of the `input`, by
    replacing each foreground (non-zero) element, with its
    shortest distance to the background (any zero-valued element).

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element to each foreground element is returned in a separate array.

    Parameters
    ----------
    input : array_like
        Input
    metric : {'euclidean', 'taxicab', 'chessboard'}, optional
        'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
        The default is 'euclidean'.
    sampling : float, or sequence of float, optional
        This parameter is only used when `metric` is 'euclidean'.
        Spacing of elements along each dimension. If a sequence, must be of
        length equal to the input rank; if a single number, this is used for
        all axes. If not specified, a grid spacing of unity is implied.
    return_distances : bool, optional
        Whether to calculate the distance transform.
        Default is True.
    return_indices : bool, optional
        Whether to calculate the feature transform.
        Default is False.
    distances : ndarray, optional
        An output array to store the calculated distance transform, instead of
        returning it.
        `return_distances` must be True.
        It must be the same shape as `input`, and of type float64 if `metric`
        is 'euclidean', uint32 otherwise.
    indices : int32 ndarray, optional
        An output array to store the calculated feature transform, instead of
        returning it.
        `return_indices` must be True.
        Its shape must be `(input.ndim,) + input.shape`.

    Returns
    -------
    distances : ndarray, optional
        The calculated distance transform. Returned only when
        `return_distances` is True and `distances` is not supplied.
        It will have the same shape as the input array.
    indices : int32 ndarray, optional
        The calculated feature transform. It has an input-shaped array for each
        dimension of the input. See distance_transform_edt documentation for an
        example.
        Returned only when `return_indices` is True and `indices` is not
        supplied.

    See Also
    --------
    distance_transform_cdt : Faster distance transform for taxicab and
                             chessboard metrics
    distance_transform_edt : Faster distance transform for euclidean metric

    Notes
    -----
    This function employs a slow brute force algorithm. See also the
    function `distance_transform_cdt` for more efficient taxicab [1]_ and
    chessboard algorithms [2]_.

    References
    ----------
    .. [1] Taxicab distance. Wikipedia, 2023.
           https://en.wikipedia.org/wiki/Taxicab_geometry
    .. [2] Chessboard distance. Wikipedia, 2023.
           https://en.wikipedia.org/wiki/Chebyshev_distance

    Examples
    --------
    Import the necessary modules.

    >>> import numpy as np
    >>> from scipy.ndimage import distance_transform_bf
    >>> import matplotlib.pyplot as plt
    >>> from mpl_toolkits.axes_grid1 import ImageGrid

    First, we create a toy binary image.

    >>> def add_circle(center_x, center_y, radius, image, fillvalue=1):
    ...     # fill circular area with 1
    ...     xx, yy = np.mgrid[:image.shape[0], :image.shape[1]]
    ...     circle = (xx - center_x) ** 2 + (yy - center_y) ** 2
    ...     circle_shape = np.sqrt(circle) < radius
    ...     image[circle_shape] = fillvalue
    ...     return image
    >>> image = np.zeros((100, 100), dtype=np.uint8)
    >>> image[35:65, 20:80] = 1
    >>> image = add_circle(28, 65, 10, image)
    >>> image = add_circle(37, 30, 10, image)
    >>> image = add_circle(70, 45, 20, image)
    >>> image = add_circle(45, 80, 10, image)

    Next, we set up the figure.

    >>> fig = plt.figure(figsize=(8, 8))  # set up the figure structure
    >>> grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=(0.4, 0.3),
    ...                  label_mode="1", share_all=True,
    ...                  cbar_location="right", cbar_mode="each",
    ...                  cbar_size="7%", cbar_pad="2%")
    >>> for ax in grid:
    ...     ax.axis('off')  # remove axes from images

    The top left image is the original binary image.

    >>> binary_image = grid[0].imshow(image, cmap='gray')
    >>> cbar_binary_image = grid.cbar_axes[0].colorbar(binary_image)
    >>> cbar_binary_image.set_ticks([0, 1])
    >>> grid[0].set_title("Binary image: foreground in white")

    The distance transform calculates the distance between foreground pixels
    and the image background according to a distance metric. Available metrics
    in `distance_transform_bf` are: ``euclidean`` (default), ``taxicab``
    and ``chessboard``. The top right image contains the distance transform
    based on the ``euclidean`` metric.

    >>> distance_transform_euclidean = distance_transform_bf(image)
    >>> euclidean_transform = grid[1].imshow(distance_transform_euclidean,
    ...                                      cmap='gray')
    >>> cbar_euclidean = grid.cbar_axes[1].colorbar(euclidean_transform)
    >>> colorbar_ticks = [0, 10, 20]
    >>> cbar_euclidean.set_ticks(colorbar_ticks)
    >>> grid[1].set_title("Euclidean distance")

    The lower left image contains the distance transform using the ``taxicab``
    metric.

    >>> distance_transform_taxicab = distance_transform_bf(image,
    ...                                                    metric='taxicab')
    >>> taxicab_transformation = grid[2].imshow(distance_transform_taxicab,
    ...                                         cmap='gray')
    >>> cbar_taxicab = grid.cbar_axes[2].colorbar(taxicab_transformation)
    >>> cbar_taxicab.set_ticks(colorbar_ticks)
    >>> grid[2].set_title("Taxicab distance")

    Finally, the lower right image contains the distance transform using the
    ``chessboard`` metric.

    >>> distance_transform_cb = distance_transform_bf(image,
    ...                                               metric='chessboard')
    >>> chessboard_transformation = grid[3].imshow(distance_transform_cb,
    ...                                            cmap='gray')
    >>> cbar_taxicab = grid.cbar_axes[3].colorbar(chessboard_transformation)
    >>> cbar_taxicab.set_ticks(colorbar_ticks)
    >>> grid[3].set_title("Chessboard distance")
    >>> plt.show()

    """
    # Validate the output-array / return-flag combinations up front.
    ft_inplace = isinstance(indices, np.ndarray)
    dt_inplace = isinstance(distances, np.ndarray)
    _distance_tranform_arg_check(
        dt_inplace, ft_inplace, return_distances, return_indices
    )

    # Build a ternary marker array for the C routine:
    #   1 = foreground, 0 = background border (background pixels adjacent
    #   to foreground, found via dilation XOR), -1 would not occur here;
    # the subtraction marks border-background pixels as -1 in int8.
    tmp1 = np.asarray(input) != 0
    struct = generate_binary_structure(tmp1.ndim, tmp1.ndim)
    tmp2 = binary_dilation(tmp1, struct)
    tmp2 = np.logical_xor(tmp1, tmp2)
    tmp1 = tmp1.astype(np.int8) - tmp2.astype(np.int8)
    # Map the metric name onto the integer codes the C extension expects.
    metric = metric.lower()
    if metric == 'euclidean':
        metric = 1
    elif metric in ['taxicab', 'cityblock', 'manhattan']:
        metric = 2
    elif metric == 'chessboard':
        metric = 3
    else:
        raise RuntimeError('distance metric not supported')
    if sampling is not None:
        # Broadcast a scalar spacing to all axes; the C code needs a
        # contiguous float64 buffer.
        sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim)
        sampling = np.asarray(sampling, dtype=np.float64)
        if not sampling.flags.contiguous:
            sampling = sampling.copy()
    if return_indices:
        # Flat-index feature transform buffer, filled in by the C routine.
        ft = np.zeros(tmp1.shape, dtype=np.int32)
    else:
        ft = None
    if return_distances:
        if distances is None:
            # Euclidean distances are real-valued; the chamfer metrics
            # produce integer distances.
            if metric == 1:
                dt = np.zeros(tmp1.shape, dtype=np.float64)
            else:
                dt = np.zeros(tmp1.shape, dtype=np.uint32)
        else:
            # Caller-supplied buffer: enforce shape and the dtype the
            # C routine writes for the chosen metric.
            if distances.shape != tmp1.shape:
                raise RuntimeError('distances array has wrong shape')
            if metric == 1:
                if distances.dtype.type != np.float64:
                    raise RuntimeError('distances array must be float64')
            else:
                if distances.dtype.type != np.uint32:
                    raise RuntimeError('distances array must be uint32')
            dt = distances
    else:
        dt = None

    # The C extension fills dt and/or ft in place.
    _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft)
    if return_indices:
        if isinstance(indices, np.ndarray):
            if indices.dtype.type != np.int32:
                raise RuntimeError('indices array must be int32')
            if indices.shape != (tmp1.ndim,) + tmp1.shape:
                raise RuntimeError('indices array has wrong shape')
            tmp2 = indices
        else:
            tmp2 = np.indices(tmp1.shape, dtype=np.int32)
        # Convert the flat-index feature transform into per-axis coordinate
        # arrays by gathering each coordinate grid at the flat indices.
        ft = np.ravel(ft)
        for ii in range(tmp2.shape[0]):
            rtmp = np.ravel(tmp2[ii, ...])[ft]
            rtmp.shape = tmp1.shape
            tmp2[ii, ...] = rtmp
        ft = tmp2

    # construct and return the result
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)

    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
|
| 2124 |
+
|
| 2125 |
+
|
| 2126 |
+
def distance_transform_cdt(input, metric='chessboard', return_distances=True,
                           return_indices=False, distances=None, indices=None):
    """
    Distance transform for chamfer type of transforms.

    This function calculates the distance transform of the `input`, by
    replacing each foreground (non-zero) element, with its
    shortest distance to the background (any zero-valued element).

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element to each foreground element is returned in a separate array.

    Parameters
    ----------
    input : array_like
        Input. Values of 0 are treated as background.
    metric : {'chessboard', 'taxicab'} or array_like, optional
        The `metric` determines the type of chamfering that is done. If the
        `metric` is equal to 'taxicab' a structure is generated using
        `generate_binary_structure` with a squared distance equal to 1. If
        the `metric` is equal to 'chessboard', a `metric` is generated
        using `generate_binary_structure` with a squared distance equal to
        the dimensionality of the array. These choices correspond to the
        common interpretations of the 'taxicab' and the 'chessboard'
        distance metrics in two dimensions.
        A custom metric may be provided, in the form of a matrix where
        each dimension has a length of three.
        'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
        The default is 'chessboard'.
    return_distances : bool, optional
        Whether to calculate the distance transform.
        Default is True.
    return_indices : bool, optional
        Whether to calculate the feature transform.
        Default is False.
    distances : int32 ndarray, optional
        An output array to store the calculated distance transform, instead of
        returning it.
        `return_distances` must be True.
        It must be the same shape as `input`.
    indices : int32 ndarray, optional
        An output array to store the calculated feature transform, instead of
        returning it.
        `return_indices` must be True.
        Its shape must be `(input.ndim,) + input.shape`.

    Returns
    -------
    distances : int32 ndarray, optional
        The calculated distance transform. Returned only when
        `return_distances` is True, and `distances` is not supplied.
        It will have the same shape as the input array.
    indices : int32 ndarray, optional
        The calculated feature transform. It has an input-shaped array for each
        dimension of the input. See distance_transform_edt documentation for an
        example.
        Returned only when `return_indices` is True, and `indices` is not
        supplied.

    See Also
    --------
    distance_transform_edt : Fast distance transform for euclidean metric
    distance_transform_bf : Distance transform for different metrics using
                            a slower brute force algorithm

    Examples
    --------
    Import the necessary modules.

    >>> import numpy as np
    >>> from scipy.ndimage import distance_transform_cdt
    >>> import matplotlib.pyplot as plt
    >>> from mpl_toolkits.axes_grid1 import ImageGrid

    First, we create a toy binary image.

    >>> def add_circle(center_x, center_y, radius, image, fillvalue=1):
    ...     # fill circular area with 1
    ...     xx, yy = np.mgrid[:image.shape[0], :image.shape[1]]
    ...     circle = (xx - center_x) ** 2 + (yy - center_y) ** 2
    ...     circle_shape = np.sqrt(circle) < radius
    ...     image[circle_shape] = fillvalue
    ...     return image
    >>> image = np.zeros((100, 100), dtype=np.uint8)
    >>> image[35:65, 20:80] = 1
    >>> image = add_circle(28, 65, 10, image)
    >>> image = add_circle(37, 30, 10, image)
    >>> image = add_circle(70, 45, 20, image)
    >>> image = add_circle(45, 80, 10, image)

    Next, we set up the figure.

    >>> fig = plt.figure(figsize=(5, 15))
    >>> grid = ImageGrid(fig, 111, nrows_ncols=(3, 1), axes_pad=(0.5, 0.3),
    ...                  label_mode="1", share_all=True,
    ...                  cbar_location="right", cbar_mode="each",
    ...                  cbar_size="7%", cbar_pad="2%")
    >>> for ax in grid:
    ...     ax.axis('off')
    >>> top, middle, bottom = grid
    >>> colorbar_ticks = [0, 10, 20]

    The top image contains the original binary image.

    >>> binary_image = top.imshow(image, cmap='gray')
    >>> cbar_binary_image = top.cax.colorbar(binary_image)
    >>> cbar_binary_image.set_ticks([0, 1])
    >>> top.set_title("Binary image: foreground in white")

    The middle image contains the distance transform using the ``taxicab``
    metric.

    >>> distance_taxicab = distance_transform_cdt(image, metric="taxicab")
    >>> taxicab_transform = middle.imshow(distance_taxicab, cmap='gray')
    >>> cbar_taxicab = middle.cax.colorbar(taxicab_transform)
    >>> cbar_taxicab.set_ticks(colorbar_ticks)
    >>> middle.set_title("Taxicab metric")

    The bottom image contains the distance transform using the ``chessboard``
    metric.

    >>> distance_chessboard = distance_transform_cdt(image,
    ...                                              metric="chessboard")
    >>> chessboard_transform = bottom.imshow(distance_chessboard, cmap='gray')
    >>> cbar_chessboard = bottom.cax.colorbar(chessboard_transform)
    >>> cbar_chessboard.set_ticks(colorbar_ticks)
    >>> bottom.set_title("Chessboard metric")
    >>> plt.tight_layout()
    >>> plt.show()

    """
    # Validate the output-array / return-flag combinations up front.
    ft_inplace = isinstance(indices, np.ndarray)
    dt_inplace = isinstance(distances, np.ndarray)
    _distance_tranform_arg_check(
        dt_inplace, ft_inplace, return_distances, return_indices
    )
    input = np.asarray(input)
    if isinstance(metric, str):
        # Named metrics are translated into binary structuring elements
        # that drive the chamfer propagation.
        if metric in ['taxicab', 'cityblock', 'manhattan']:
            rank = input.ndim
            metric = generate_binary_structure(rank, 1)
        elif metric == 'chessboard':
            rank = input.ndim
            metric = generate_binary_structure(rank, rank)
        else:
            raise ValueError('invalid metric provided')
    else:
        # Custom chamfer mask: must be a 3x3x...x3 array.
        try:
            metric = np.asarray(metric)
        except Exception as e:
            raise ValueError('invalid metric provided') from e
        for s in metric.shape:
            if s != 3:
                raise ValueError('metric sizes must be equal to 3')

    if not metric.flags.contiguous:
        metric = metric.copy()
    if dt_inplace:
        if distances.dtype.type != np.int32:
            raise ValueError('distances must be of int32 type')
        if distances.shape != input.shape:
            raise ValueError('distances has wrong shape')
        dt = distances
        # -1 marks foreground (distance still unknown); 0 is background.
        dt[...] = np.where(input, -1, 0).astype(np.int32)
    else:
        dt = np.where(input, -1, 0).astype(np.int32)

    rank = dt.ndim
    if return_indices:
        # Initialize the feature transform with each element's own flat index.
        ft = np.arange(dt.size, dtype=np.int32)
        ft.shape = dt.shape
    else:
        ft = None

    # Two-pass chamfer algorithm: one forward sweep, then a second sweep
    # over views reversed along every axis (equivalent to a backward sweep),
    # then reverse again to restore the original orientation.
    _nd_image.distance_transform_op(metric, dt, ft)
    dt = dt[tuple([slice(None, None, -1)] * rank)]
    if return_indices:
        ft = ft[tuple([slice(None, None, -1)] * rank)]
    _nd_image.distance_transform_op(metric, dt, ft)
    dt = dt[tuple([slice(None, None, -1)] * rank)]
    if return_indices:
        ft = ft[tuple([slice(None, None, -1)] * rank)]
        ft = np.ravel(ft)
        if ft_inplace:
            if indices.dtype.type != np.int32:
                raise ValueError('indices array must be int32')
            if indices.shape != (dt.ndim,) + dt.shape:
                raise ValueError('indices array has wrong shape')
            tmp = indices
        else:
            tmp = np.indices(dt.shape, dtype=np.int32)
        # Convert flat indices into per-axis coordinate arrays.
        for ii in range(tmp.shape[0]):
            rtmp = np.ravel(tmp[ii, ...])[ft]
            rtmp.shape = dt.shape
            tmp[ii, ...] = rtmp
        ft = tmp

    # construct and return the result
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)

    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
|
| 2337 |
+
|
| 2338 |
+
|
| 2339 |
+
def distance_transform_edt(input, sampling=None, return_distances=True,
                           return_indices=False, distances=None, indices=None):
    """
    Exact Euclidean distance transform.

    This function calculates the distance transform of the `input`, by
    replacing each foreground (non-zero) element, with its
    shortest distance to the background (any zero-valued element).

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element to each foreground element is returned in a separate array.

    Parameters
    ----------
    input : array_like
        Input data to transform. Can be any type but will be converted
        into binary: 1 wherever input equates to True, 0 elsewhere.
    sampling : float, or sequence of float, optional
        Spacing of elements along each dimension. If a sequence, must be of
        length equal to the input rank; if a single number, this is used for
        all axes. If not specified, a grid spacing of unity is implied.
    return_distances : bool, optional
        Whether to calculate the distance transform.
        Default is True.
    return_indices : bool, optional
        Whether to calculate the feature transform.
        Default is False.
    distances : float64 ndarray, optional
        An output array to store the calculated distance transform, instead of
        returning it.
        `return_distances` must be True.
        It must be the same shape as `input`.
    indices : int32 ndarray, optional
        An output array to store the calculated feature transform, instead of
        returning it.
        `return_indices` must be True.
        Its shape must be `(input.ndim,) + input.shape`.

    Returns
    -------
    distances : float64 ndarray, optional
        The calculated distance transform. Returned only when
        `return_distances` is True and `distances` is not supplied.
        It will have the same shape as the input array.
    indices : int32 ndarray, optional
        The calculated feature transform. It has an input-shaped array for each
        dimension of the input. See example below.
        Returned only when `return_indices` is True and `indices` is not
        supplied.

    Notes
    -----
    The Euclidean distance transform gives values of the Euclidean
    distance::

                    n
      y_i = sqrt(sum (x[i]-b[i])**2)
                    i

    where b[i] is the background point (value 0) with the smallest
    Euclidean distance to input points x[i], and n is the
    number of dimensions.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.array(([0,1,1,1,1],
    ...               [0,0,1,1,1],
    ...               [0,1,1,1,1],
    ...               [0,1,1,1,0],
    ...               [0,1,1,0,0]))
    >>> ndimage.distance_transform_edt(a)
    array([[ 0.    ,  1.    ,  1.4142,  2.2361,  3.    ],
           [ 0.    ,  0.    ,  1.    ,  2.    ,  2.    ],
           [ 0.    ,  1.    ,  1.4142,  1.4142,  1.    ],
           [ 0.    ,  1.    ,  1.4142,  1.    ,  0.    ],
           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])

    With a sampling of 2 units along x, 1 along y:

    >>> ndimage.distance_transform_edt(a, sampling=[2,1])
    array([[ 0.    ,  1.    ,  2.    ,  2.8284,  3.6056],
           [ 0.    ,  0.    ,  1.    ,  2.    ,  3.    ],
           [ 0.    ,  1.    ,  2.    ,  2.2361,  2.    ],
           [ 0.    ,  1.    ,  2.    ,  1.    ,  0.    ],
           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])

    Asking for indices as well:

    >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True)
    >>> inds
    array([[[0, 0, 1, 1, 3],
            [1, 1, 1, 1, 3],
            [2, 2, 1, 3, 3],
            [3, 3, 4, 4, 3],
            [4, 4, 4, 4, 4]],
           [[0, 0, 1, 1, 4],
            [0, 1, 1, 1, 4],
            [0, 0, 1, 4, 4],
            [0, 0, 3, 3, 4],
            [0, 0, 3, 3, 4]]])

    With arrays provided for inplace outputs:

    >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32)
    >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices)
    array([[ 0.    ,  1.    ,  1.4142,  2.2361,  3.    ],
           [ 0.    ,  0.    ,  1.    ,  2.    ,  2.    ],
           [ 0.    ,  1.    ,  1.4142,  1.4142,  1.    ],
           [ 0.    ,  1.    ,  1.4142,  1.    ,  0.    ],
           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])
    >>> indices
    array([[[0, 0, 1, 1, 3],
            [1, 1, 1, 1, 3],
            [2, 2, 1, 3, 3],
            [3, 3, 4, 4, 3],
            [4, 4, 4, 4, 4]],
           [[0, 0, 1, 1, 4],
            [0, 1, 1, 1, 4],
            [0, 0, 1, 4, 4],
            [0, 0, 3, 3, 4],
            [0, 0, 3, 3, 4]]])

    """
    # "inplace" means the caller supplied a pre-allocated output array.
    ft_inplace = isinstance(indices, np.ndarray)
    dt_inplace = isinstance(distances, np.ndarray)
    # Reject inconsistent flag/output-array combinations up front.
    _distance_tranform_arg_check(
        dt_inplace, ft_inplace, return_distances, return_indices
    )

    # calculate the feature transform
    # Binarize to int8 (the dtype the C routine expects) and guarantee
    # at least one dimension so 0-d inputs don't break the C code.
    input = np.atleast_1d(np.where(input, 1, 0).astype(np.int8))
    if sampling is not None:
        # Broadcast a scalar spacing to one value per axis, as float64.
        sampling = _ni_support._normalize_sequence(sampling, input.ndim)
        sampling = np.asarray(sampling, dtype=np.float64)
        if not sampling.flags.contiguous:
            # The C routine requires a contiguous buffer.
            sampling = sampling.copy()

    if ft_inplace:
        ft = indices
        # The feature transform stores one index array per input axis.
        if ft.shape != (input.ndim,) + input.shape:
            raise RuntimeError('indices array has wrong shape')
        if ft.dtype.type != np.int32:
            raise RuntimeError('indices array must be int32')
    else:
        ft = np.zeros((input.ndim,) + input.shape, dtype=np.int32)

    # Fills `ft` in place with, for each element, the coordinates of the
    # nearest background element (C extension; sampling may be None).
    _nd_image.euclidean_feature_transform(input, sampling, ft)
    # if requested, calculate the distance transform
    if return_distances:
        # Per-axis coordinate differences between each element and its
        # nearest background element.
        dt = ft - np.indices(input.shape, dtype=ft.dtype)
        dt = dt.astype(np.float64)
        if sampling is not None:
            # Scale each axis's offsets by the grid spacing.
            for ii in range(len(sampling)):
                dt[ii, ...] *= sampling[ii]
        # Square in place, then sum over the axis dimension and take the
        # square root to obtain the Euclidean distance.
        np.multiply(dt, dt, dt)
        if dt_inplace:
            dt = np.add.reduce(dt, axis=0)
            if distances.shape != dt.shape:
                raise RuntimeError('distances array has wrong shape')
            if distances.dtype.type != np.float64:
                raise RuntimeError('distances array must be float64')
            # Write the final result directly into the caller's array.
            np.sqrt(dt, distances)
        else:
            dt = np.add.reduce(dt, axis=0)
            dt = np.sqrt(dt)

    # construct and return the result
    # In-place outputs are not returned; only freshly allocated arrays are.
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)

    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
|
| 2523 |
+
def _distance_tranform_arg_check(distances_out, indices_out,
|
| 2524 |
+
return_distances, return_indices):
|
| 2525 |
+
"""Raise a RuntimeError if the arguments are invalid"""
|
| 2526 |
+
error_msgs = []
|
| 2527 |
+
if (not return_distances) and (not return_indices):
|
| 2528 |
+
error_msgs.append(
|
| 2529 |
+
'at least one of return_distances/return_indices must be True')
|
| 2530 |
+
if distances_out and not return_distances:
|
| 2531 |
+
error_msgs.append(
|
| 2532 |
+
'return_distances must be True if distances is supplied'
|
| 2533 |
+
)
|
| 2534 |
+
if indices_out and not return_indices:
|
| 2535 |
+
error_msgs.append('return_indices must be True if indices is supplied')
|
| 2536 |
+
if error_msgs:
|
| 2537 |
+
raise RuntimeError(', '.join(error_msgs))
|
parrot/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2003-2005 Peter J. Verveer
|
| 2 |
+
#
|
| 3 |
+
# Redistribution and use in source and binary forms, with or without
|
| 4 |
+
# modification, are permitted provided that the following conditions
|
| 5 |
+
# are met:
|
| 6 |
+
#
|
| 7 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 8 |
+
# notice, this list of conditions and the following disclaimer.
|
| 9 |
+
#
|
| 10 |
+
# 2. Redistributions in binary form must reproduce the above
|
| 11 |
+
# copyright notice, this list of conditions and the following
|
| 12 |
+
# disclaimer in the documentation and/or other materials provided
|
| 13 |
+
# with the distribution.
|
| 14 |
+
#
|
| 15 |
+
# 3. The name of the author may not be used to endorse or promote
|
| 16 |
+
# products derived from this software without specific prior
|
| 17 |
+
# written permission.
|
| 18 |
+
#
|
| 19 |
+
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
| 20 |
+
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 21 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
| 23 |
+
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
| 25 |
+
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 27 |
+
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
| 31 |
+
from collections.abc import Iterable
|
| 32 |
+
import operator
|
| 33 |
+
import warnings
|
| 34 |
+
import numpy as np
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _extend_mode_to_code(mode):
|
| 38 |
+
"""Convert an extension mode to the corresponding integer code.
|
| 39 |
+
"""
|
| 40 |
+
if mode == 'nearest':
|
| 41 |
+
return 0
|
| 42 |
+
elif mode == 'wrap':
|
| 43 |
+
return 1
|
| 44 |
+
elif mode in ['reflect', 'grid-mirror']:
|
| 45 |
+
return 2
|
| 46 |
+
elif mode == 'mirror':
|
| 47 |
+
return 3
|
| 48 |
+
elif mode == 'constant':
|
| 49 |
+
return 4
|
| 50 |
+
elif mode == 'grid-wrap':
|
| 51 |
+
return 5
|
| 52 |
+
elif mode == 'grid-constant':
|
| 53 |
+
return 6
|
| 54 |
+
else:
|
| 55 |
+
raise RuntimeError('boundary mode not supported')
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _normalize_sequence(input, rank):
|
| 59 |
+
"""If input is a scalar, create a sequence of length equal to the
|
| 60 |
+
rank by duplicating the input. If input is a sequence,
|
| 61 |
+
check if its length is equal to the length of array.
|
| 62 |
+
"""
|
| 63 |
+
is_str = isinstance(input, str)
|
| 64 |
+
if not is_str and isinstance(input, Iterable):
|
| 65 |
+
normalized = list(input)
|
| 66 |
+
if len(normalized) != rank:
|
| 67 |
+
err = "sequence argument must have length equal to input rank"
|
| 68 |
+
raise RuntimeError(err)
|
| 69 |
+
else:
|
| 70 |
+
normalized = [input] * rank
|
| 71 |
+
return normalized
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _get_output(output, input, shape=None, complex_output=False):
|
| 75 |
+
if shape is None:
|
| 76 |
+
shape = input.shape
|
| 77 |
+
if output is None:
|
| 78 |
+
if not complex_output:
|
| 79 |
+
output = np.zeros(shape, dtype=input.dtype.name)
|
| 80 |
+
else:
|
| 81 |
+
complex_type = np.promote_types(input.dtype, np.complex64)
|
| 82 |
+
output = np.zeros(shape, dtype=complex_type)
|
| 83 |
+
elif isinstance(output, (type, np.dtype)):
|
| 84 |
+
# Classes (like `np.float32`) and dtypes are interpreted as dtype
|
| 85 |
+
if complex_output and np.dtype(output).kind != 'c':
|
| 86 |
+
warnings.warn("promoting specified output dtype to complex", stacklevel=3)
|
| 87 |
+
output = np.promote_types(output, np.complex64)
|
| 88 |
+
output = np.zeros(shape, dtype=output)
|
| 89 |
+
elif isinstance(output, str):
|
| 90 |
+
output = np.dtype(output)
|
| 91 |
+
if complex_output and output.kind != 'c':
|
| 92 |
+
raise RuntimeError("output must have complex dtype")
|
| 93 |
+
elif not issubclass(output.type, np.number):
|
| 94 |
+
raise RuntimeError("output must have numeric dtype")
|
| 95 |
+
output = np.zeros(shape, dtype=output)
|
| 96 |
+
elif output.shape != shape:
|
| 97 |
+
raise RuntimeError("output shape not correct")
|
| 98 |
+
elif complex_output and output.dtype.kind != 'c':
|
| 99 |
+
raise RuntimeError("output must have complex dtype")
|
| 100 |
+
return output
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _check_axes(axes, ndim):
|
| 104 |
+
if axes is None:
|
| 105 |
+
return tuple(range(ndim))
|
| 106 |
+
elif np.isscalar(axes):
|
| 107 |
+
axes = (operator.index(axes),)
|
| 108 |
+
elif isinstance(axes, Iterable):
|
| 109 |
+
for ax in axes:
|
| 110 |
+
axes = tuple(operator.index(ax) for ax in axes)
|
| 111 |
+
if ax < -ndim or ax > ndim - 1:
|
| 112 |
+
raise ValueError(f"specified axis: {ax} is out of range")
|
| 113 |
+
axes = tuple(ax % ndim if ax < 0 else ax for ax in axes)
|
| 114 |
+
else:
|
| 115 |
+
message = "axes must be an integer, iterable of integers, or None"
|
| 116 |
+
raise ValueError(message)
|
| 117 |
+
if len(tuple(set(axes))) != len(axes):
|
| 118 |
+
raise ValueError("axes must be unique")
|
| 119 |
+
return axes
|
parrot/lib/python3.10/site-packages/scipy/ndimage/filters.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.ndimage` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Names historically exposed by `scipy.ndimage.filters`; kept only so the
# deprecation machinery can forward them.
__all__ = [  # noqa: F822
    'correlate1d', 'convolve1d', 'gaussian_filter1d',
    'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace',
    'laplace', 'gaussian_laplace', 'generic_gradient_magnitude',
    'gaussian_gradient_magnitude', 'correlate', 'convolve',
    'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
    'maximum_filter1d', 'minimum_filter', 'maximum_filter',
    'rank_filter', 'median_filter', 'percentile_filter',
    'generic_filter1d', 'generic_filter'
]


def __dir__():
    """Advertise only the deprecated public names (PEP 562 module __dir__)."""
    return __all__


def __getattr__(name):
    """Forward attribute access to ``scipy.ndimage._filters`` with a
    deprecation warning (PEP 562 module __getattr__)."""
    return _sub_module_deprecation(sub_package='ndimage', module='filters',
                                   private_modules=['_filters'], all=__all__,
                                   attribute=name)
|
parrot/lib/python3.10/site-packages/scipy/ndimage/measurements.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.ndimage` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Names historically exposed by `scipy.ndimage.measurements`; kept only so
# the deprecation machinery can forward them.
__all__ = [  # noqa: F822
    'label', 'find_objects', 'labeled_comprehension',
    'sum', 'mean', 'variance', 'standard_deviation',
    'minimum', 'maximum', 'median', 'minimum_position',
    'maximum_position', 'extrema', 'center_of_mass',
    'histogram', 'watershed_ift', 'sum_labels'
]


def __dir__():
    """Advertise only the deprecated public names (PEP 562 module __dir__)."""
    return __all__


def __getattr__(name):
    """Forward attribute access to ``scipy.ndimage._measurements`` with a
    deprecation warning (PEP 562 module __getattr__)."""
    return _sub_module_deprecation(sub_package='ndimage', module='measurements',
                                   private_modules=['_measurements'], all=__all__,
                                   attribute=name)
|
parrot/lib/python3.10/site-packages/scipy/ndimage/morphology.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.ndimage` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Names historically exposed by `scipy.ndimage.morphology`; kept only so
# the deprecation machinery can forward them.
__all__ = [  # noqa: F822
    'iterate_structure', 'generate_binary_structure',
    'binary_erosion', 'binary_dilation', 'binary_opening',
    'binary_closing', 'binary_hit_or_miss', 'binary_propagation',
    'binary_fill_holes', 'grey_erosion', 'grey_dilation',
    'grey_opening', 'grey_closing', 'morphological_gradient',
    'morphological_laplace', 'white_tophat', 'black_tophat',
    'distance_transform_bf', 'distance_transform_cdt',
    'distance_transform_edt'
]


def __dir__():
    """Advertise only the deprecated public names (PEP 562 module __dir__)."""
    return __all__


def __getattr__(name):
    """Forward attribute access to ``scipy.ndimage._morphology`` with a
    deprecation warning (PEP 562 module __getattr__)."""
    return _sub_module_deprecation(sub_package='ndimage', module='morphology',
                                   private_modules=['_morphology'], all=__all__,
                                   attribute=name)
|
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py
ADDED
|
@@ -0,0 +1,1327 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import (assert_, assert_equal, assert_array_equal,
|
| 5 |
+
assert_array_almost_equal, assert_allclose,
|
| 6 |
+
suppress_warnings)
|
| 7 |
+
import pytest
|
| 8 |
+
from pytest import raises as assert_raises
|
| 9 |
+
import scipy.ndimage as ndimage
|
| 10 |
+
|
| 11 |
+
from . import types
|
| 12 |
+
|
| 13 |
+
# Absolute tolerance used by the interpolation accuracy checks below.
eps = 1e-12

# Maps scipy.ndimage boundary-mode names to the np.pad mode that produces
# the equivalent padding, used to build reference results in the tests.
# ('wrap'/'constant'/'mirror' in ndimage do not all share numpy's names.)
ndimage_to_numpy_mode = {
    'mirror': 'reflect',
    'reflect': 'symmetric',
    'grid-mirror': 'symmetric',
    'grid-wrap': 'wrap',
    'nearest': 'edge',
    'grid-constant': 'constant',
}
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class TestNdimageInterpolation:
|
| 26 |
+
|
| 27 |
+
@pytest.mark.parametrize(
|
| 28 |
+
'mode, expected_value',
|
| 29 |
+
[('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]),
|
| 30 |
+
('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]),
|
| 31 |
+
('grid-wrap', [1.5, 2.5, 3.5, 2.5, 1.5, 2.5, 3.5]),
|
| 32 |
+
('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]),
|
| 33 |
+
('reflect', [1.5, 2.5, 3.5, 4, 3.5, 2.5, 1.5]),
|
| 34 |
+
('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1]),
|
| 35 |
+
('grid-constant', [1.5, 2.5, 3.5, 1.5, -1, -1, -1])]
|
| 36 |
+
)
|
| 37 |
+
def test_boundaries(self, mode, expected_value):
|
| 38 |
+
def shift(x):
|
| 39 |
+
return (x[0] + 0.5,)
|
| 40 |
+
|
| 41 |
+
data = np.array([1, 2, 3, 4.])
|
| 42 |
+
assert_array_equal(
|
| 43 |
+
expected_value,
|
| 44 |
+
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
|
| 45 |
+
output_shape=(7,), order=1))
|
| 46 |
+
|
| 47 |
+
@pytest.mark.parametrize(
|
| 48 |
+
'mode, expected_value',
|
| 49 |
+
[('nearest', [1, 1, 2, 3]),
|
| 50 |
+
('wrap', [3, 1, 2, 3]),
|
| 51 |
+
('grid-wrap', [4, 1, 2, 3]),
|
| 52 |
+
('mirror', [2, 1, 2, 3]),
|
| 53 |
+
('reflect', [1, 1, 2, 3]),
|
| 54 |
+
('constant', [-1, 1, 2, 3]),
|
| 55 |
+
('grid-constant', [-1, 1, 2, 3])]
|
| 56 |
+
)
|
| 57 |
+
def test_boundaries2(self, mode, expected_value):
|
| 58 |
+
def shift(x):
|
| 59 |
+
return (x[0] - 0.9,)
|
| 60 |
+
|
| 61 |
+
data = np.array([1, 2, 3, 4])
|
| 62 |
+
assert_array_equal(
|
| 63 |
+
expected_value,
|
| 64 |
+
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
|
| 65 |
+
output_shape=(4,)))
|
| 66 |
+
|
| 67 |
+
@pytest.mark.parametrize('mode', ['mirror', 'reflect', 'grid-mirror',
|
| 68 |
+
'grid-wrap', 'grid-constant',
|
| 69 |
+
'nearest'])
|
| 70 |
+
@pytest.mark.parametrize('order', range(6))
|
| 71 |
+
def test_boundary_spline_accuracy(self, mode, order):
|
| 72 |
+
"""Tests based on examples from gh-2640"""
|
| 73 |
+
data = np.arange(-6, 7, dtype=float)
|
| 74 |
+
x = np.linspace(-8, 15, num=1000)
|
| 75 |
+
y = ndimage.map_coordinates(data, [x], order=order, mode=mode)
|
| 76 |
+
|
| 77 |
+
# compute expected value using explicit padding via np.pad
|
| 78 |
+
npad = 32
|
| 79 |
+
pad_mode = ndimage_to_numpy_mode.get(mode)
|
| 80 |
+
padded = np.pad(data, npad, mode=pad_mode)
|
| 81 |
+
expected = ndimage.map_coordinates(padded, [npad + x], order=order,
|
| 82 |
+
mode=mode)
|
| 83 |
+
|
| 84 |
+
atol = 1e-5 if mode == 'grid-constant' else 1e-12
|
| 85 |
+
assert_allclose(y, expected, rtol=1e-7, atol=atol)
|
| 86 |
+
|
| 87 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
| 88 |
+
@pytest.mark.parametrize('dtype', types)
|
| 89 |
+
def test_spline01(self, dtype, order):
|
| 90 |
+
data = np.ones([], dtype)
|
| 91 |
+
out = ndimage.spline_filter(data, order=order)
|
| 92 |
+
assert_array_almost_equal(out, 1)
|
| 93 |
+
|
| 94 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
| 95 |
+
@pytest.mark.parametrize('dtype', types)
|
| 96 |
+
def test_spline02(self, dtype, order):
|
| 97 |
+
data = np.array([1], dtype)
|
| 98 |
+
out = ndimage.spline_filter(data, order=order)
|
| 99 |
+
assert_array_almost_equal(out, [1])
|
| 100 |
+
|
| 101 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
| 102 |
+
@pytest.mark.parametrize('dtype', types)
|
| 103 |
+
def test_spline03(self, dtype, order):
|
| 104 |
+
data = np.ones([], dtype)
|
| 105 |
+
out = ndimage.spline_filter(data, order, output=dtype)
|
| 106 |
+
assert_array_almost_equal(out, 1)
|
| 107 |
+
|
| 108 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
| 109 |
+
@pytest.mark.parametrize('dtype', types)
|
| 110 |
+
def test_spline04(self, dtype, order):
|
| 111 |
+
data = np.ones([4], dtype)
|
| 112 |
+
out = ndimage.spline_filter(data, order)
|
| 113 |
+
assert_array_almost_equal(out, [1, 1, 1, 1])
|
| 114 |
+
|
| 115 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
| 116 |
+
@pytest.mark.parametrize('dtype', types)
|
| 117 |
+
def test_spline05(self, dtype, order):
|
| 118 |
+
data = np.ones([4, 4], dtype)
|
| 119 |
+
out = ndimage.spline_filter(data, order=order)
|
| 120 |
+
assert_array_almost_equal(out, [[1, 1, 1, 1],
|
| 121 |
+
[1, 1, 1, 1],
|
| 122 |
+
[1, 1, 1, 1],
|
| 123 |
+
[1, 1, 1, 1]])
|
| 124 |
+
|
| 125 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 126 |
+
def test_geometric_transform01(self, order):
|
| 127 |
+
data = np.array([1])
|
| 128 |
+
|
| 129 |
+
def mapping(x):
|
| 130 |
+
return x
|
| 131 |
+
|
| 132 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 133 |
+
order=order)
|
| 134 |
+
assert_array_almost_equal(out, [1])
|
| 135 |
+
|
| 136 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 137 |
+
def test_geometric_transform02(self, order):
|
| 138 |
+
data = np.ones([4])
|
| 139 |
+
|
| 140 |
+
def mapping(x):
|
| 141 |
+
return x
|
| 142 |
+
|
| 143 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 144 |
+
order=order)
|
| 145 |
+
assert_array_almost_equal(out, [1, 1, 1, 1])
|
| 146 |
+
|
| 147 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 148 |
+
def test_geometric_transform03(self, order):
|
| 149 |
+
data = np.ones([4])
|
| 150 |
+
|
| 151 |
+
def mapping(x):
|
| 152 |
+
return (x[0] - 1,)
|
| 153 |
+
|
| 154 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 155 |
+
order=order)
|
| 156 |
+
assert_array_almost_equal(out, [0, 1, 1, 1])
|
| 157 |
+
|
| 158 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 159 |
+
def test_geometric_transform04(self, order):
|
| 160 |
+
data = np.array([4, 1, 3, 2])
|
| 161 |
+
|
| 162 |
+
def mapping(x):
|
| 163 |
+
return (x[0] - 1,)
|
| 164 |
+
|
| 165 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 166 |
+
order=order)
|
| 167 |
+
assert_array_almost_equal(out, [0, 4, 1, 3])
|
| 168 |
+
|
| 169 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 170 |
+
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
|
| 171 |
+
def test_geometric_transform05(self, order, dtype):
|
| 172 |
+
data = np.array([[1, 1, 1, 1],
|
| 173 |
+
[1, 1, 1, 1],
|
| 174 |
+
[1, 1, 1, 1]], dtype=dtype)
|
| 175 |
+
expected = np.array([[0, 1, 1, 1],
|
| 176 |
+
[0, 1, 1, 1],
|
| 177 |
+
[0, 1, 1, 1]], dtype=dtype)
|
| 178 |
+
if data.dtype.kind == 'c':
|
| 179 |
+
data -= 1j * data
|
| 180 |
+
expected -= 1j * expected
|
| 181 |
+
|
| 182 |
+
def mapping(x):
|
| 183 |
+
return (x[0], x[1] - 1)
|
| 184 |
+
|
| 185 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 186 |
+
order=order)
|
| 187 |
+
assert_array_almost_equal(out, expected)
|
| 188 |
+
|
| 189 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 190 |
+
def test_geometric_transform06(self, order):
|
| 191 |
+
data = np.array([[4, 1, 3, 2],
|
| 192 |
+
[7, 6, 8, 5],
|
| 193 |
+
[3, 5, 3, 6]])
|
| 194 |
+
|
| 195 |
+
def mapping(x):
|
| 196 |
+
return (x[0], x[1] - 1)
|
| 197 |
+
|
| 198 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 199 |
+
order=order)
|
| 200 |
+
assert_array_almost_equal(out, [[0, 4, 1, 3],
|
| 201 |
+
[0, 7, 6, 8],
|
| 202 |
+
[0, 3, 5, 3]])
|
| 203 |
+
|
| 204 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 205 |
+
def test_geometric_transform07(self, order):
|
| 206 |
+
data = np.array([[4, 1, 3, 2],
|
| 207 |
+
[7, 6, 8, 5],
|
| 208 |
+
[3, 5, 3, 6]])
|
| 209 |
+
|
| 210 |
+
def mapping(x):
|
| 211 |
+
return (x[0] - 1, x[1])
|
| 212 |
+
|
| 213 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 214 |
+
order=order)
|
| 215 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 216 |
+
[4, 1, 3, 2],
|
| 217 |
+
[7, 6, 8, 5]])
|
| 218 |
+
|
| 219 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 220 |
+
def test_geometric_transform08(self, order):
|
| 221 |
+
data = np.array([[4, 1, 3, 2],
|
| 222 |
+
[7, 6, 8, 5],
|
| 223 |
+
[3, 5, 3, 6]])
|
| 224 |
+
|
| 225 |
+
def mapping(x):
|
| 226 |
+
return (x[0] - 1, x[1] - 1)
|
| 227 |
+
|
| 228 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 229 |
+
order=order)
|
| 230 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 231 |
+
[0, 4, 1, 3],
|
| 232 |
+
[0, 7, 6, 8]])
|
| 233 |
+
|
| 234 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 235 |
+
def test_geometric_transform10(self, order):
|
| 236 |
+
data = np.array([[4, 1, 3, 2],
|
| 237 |
+
[7, 6, 8, 5],
|
| 238 |
+
[3, 5, 3, 6]])
|
| 239 |
+
|
| 240 |
+
def mapping(x):
|
| 241 |
+
return (x[0] - 1, x[1] - 1)
|
| 242 |
+
|
| 243 |
+
if (order > 1):
|
| 244 |
+
filtered = ndimage.spline_filter(data, order=order)
|
| 245 |
+
else:
|
| 246 |
+
filtered = data
|
| 247 |
+
out = ndimage.geometric_transform(filtered, mapping, data.shape,
|
| 248 |
+
order=order, prefilter=False)
|
| 249 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 250 |
+
[0, 4, 1, 3],
|
| 251 |
+
[0, 7, 6, 8]])
|
| 252 |
+
|
| 253 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 254 |
+
def test_geometric_transform13(self, order):
|
| 255 |
+
data = np.ones([2], np.float64)
|
| 256 |
+
|
| 257 |
+
def mapping(x):
|
| 258 |
+
return (x[0] // 2,)
|
| 259 |
+
|
| 260 |
+
out = ndimage.geometric_transform(data, mapping, [4], order=order)
|
| 261 |
+
assert_array_almost_equal(out, [1, 1, 1, 1])
|
| 262 |
+
|
| 263 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 264 |
+
def test_geometric_transform14(self, order):
|
| 265 |
+
data = [1, 5, 2, 6, 3, 7, 4, 4]
|
| 266 |
+
|
| 267 |
+
def mapping(x):
|
| 268 |
+
return (2 * x[0],)
|
| 269 |
+
|
| 270 |
+
out = ndimage.geometric_transform(data, mapping, [4], order=order)
|
| 271 |
+
assert_array_almost_equal(out, [1, 2, 3, 4])
|
| 272 |
+
|
| 273 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 274 |
+
def test_geometric_transform15(self, order):
|
| 275 |
+
data = [1, 2, 3, 4]
|
| 276 |
+
|
| 277 |
+
def mapping(x):
|
| 278 |
+
return (x[0] / 2,)
|
| 279 |
+
|
| 280 |
+
out = ndimage.geometric_transform(data, mapping, [8], order=order)
|
| 281 |
+
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
|
| 282 |
+
|
| 283 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 284 |
+
def test_geometric_transform16(self, order):
|
| 285 |
+
data = [[1, 2, 3, 4],
|
| 286 |
+
[5, 6, 7, 8],
|
| 287 |
+
[9.0, 10, 11, 12]]
|
| 288 |
+
|
| 289 |
+
def mapping(x):
|
| 290 |
+
return (x[0], x[1] * 2)
|
| 291 |
+
|
| 292 |
+
out = ndimage.geometric_transform(data, mapping, (3, 2),
|
| 293 |
+
order=order)
|
| 294 |
+
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
|
| 295 |
+
|
| 296 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 297 |
+
def test_geometric_transform17(self, order):
|
| 298 |
+
data = [[1, 2, 3, 4],
|
| 299 |
+
[5, 6, 7, 8],
|
| 300 |
+
[9, 10, 11, 12]]
|
| 301 |
+
|
| 302 |
+
def mapping(x):
|
| 303 |
+
return (x[0] * 2, x[1])
|
| 304 |
+
|
| 305 |
+
out = ndimage.geometric_transform(data, mapping, (1, 4),
|
| 306 |
+
order=order)
|
| 307 |
+
assert_array_almost_equal(out, [[1, 2, 3, 4]])
|
| 308 |
+
|
| 309 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 310 |
+
def test_geometric_transform18(self, order):
|
| 311 |
+
data = [[1, 2, 3, 4],
|
| 312 |
+
[5, 6, 7, 8],
|
| 313 |
+
[9, 10, 11, 12]]
|
| 314 |
+
|
| 315 |
+
def mapping(x):
|
| 316 |
+
return (x[0] * 2, x[1] * 2)
|
| 317 |
+
|
| 318 |
+
out = ndimage.geometric_transform(data, mapping, (1, 2),
|
| 319 |
+
order=order)
|
| 320 |
+
assert_array_almost_equal(out, [[1, 3]])
|
| 321 |
+
|
| 322 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 323 |
+
def test_geometric_transform19(self, order):
|
| 324 |
+
data = [[1, 2, 3, 4],
|
| 325 |
+
[5, 6, 7, 8],
|
| 326 |
+
[9, 10, 11, 12]]
|
| 327 |
+
|
| 328 |
+
def mapping(x):
|
| 329 |
+
return (x[0], x[1] / 2)
|
| 330 |
+
|
| 331 |
+
out = ndimage.geometric_transform(data, mapping, (3, 8),
|
| 332 |
+
order=order)
|
| 333 |
+
assert_array_almost_equal(out[..., ::2], data)
|
| 334 |
+
|
| 335 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 336 |
+
def test_geometric_transform20(self, order):
|
| 337 |
+
data = [[1, 2, 3, 4],
|
| 338 |
+
[5, 6, 7, 8],
|
| 339 |
+
[9, 10, 11, 12]]
|
| 340 |
+
|
| 341 |
+
def mapping(x):
|
| 342 |
+
return (x[0] / 2, x[1])
|
| 343 |
+
|
| 344 |
+
out = ndimage.geometric_transform(data, mapping, (6, 4),
|
| 345 |
+
order=order)
|
| 346 |
+
assert_array_almost_equal(out[::2, ...], data)
|
| 347 |
+
|
| 348 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 349 |
+
def test_geometric_transform21(self, order):
|
| 350 |
+
data = [[1, 2, 3, 4],
|
| 351 |
+
[5, 6, 7, 8],
|
| 352 |
+
[9, 10, 11, 12]]
|
| 353 |
+
|
| 354 |
+
def mapping(x):
|
| 355 |
+
return (x[0] / 2, x[1] / 2)
|
| 356 |
+
|
| 357 |
+
out = ndimage.geometric_transform(data, mapping, (6, 8),
|
| 358 |
+
order=order)
|
| 359 |
+
assert_array_almost_equal(out[::2, ::2], data)
|
| 360 |
+
|
| 361 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 362 |
+
def test_geometric_transform22(self, order):
|
| 363 |
+
data = np.array([[1, 2, 3, 4],
|
| 364 |
+
[5, 6, 7, 8],
|
| 365 |
+
[9, 10, 11, 12]], np.float64)
|
| 366 |
+
|
| 367 |
+
def mapping1(x):
|
| 368 |
+
return (x[0] / 2, x[1] / 2)
|
| 369 |
+
|
| 370 |
+
def mapping2(x):
|
| 371 |
+
return (x[0] * 2, x[1] * 2)
|
| 372 |
+
|
| 373 |
+
out = ndimage.geometric_transform(data, mapping1,
|
| 374 |
+
(6, 8), order=order)
|
| 375 |
+
out = ndimage.geometric_transform(out, mapping2,
|
| 376 |
+
(3, 4), order=order)
|
| 377 |
+
assert_array_almost_equal(out, data)
|
| 378 |
+
|
| 379 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 380 |
+
def test_geometric_transform23(self, order):
|
| 381 |
+
data = [[1, 2, 3, 4],
|
| 382 |
+
[5, 6, 7, 8],
|
| 383 |
+
[9, 10, 11, 12]]
|
| 384 |
+
|
| 385 |
+
def mapping(x):
|
| 386 |
+
return (1, x[0] * 2)
|
| 387 |
+
|
| 388 |
+
out = ndimage.geometric_transform(data, mapping, (2,), order=order)
|
| 389 |
+
out = out.astype(np.int32)
|
| 390 |
+
assert_array_almost_equal(out, [5, 7])
|
| 391 |
+
|
| 392 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 393 |
+
def test_geometric_transform24(self, order):
|
| 394 |
+
data = [[1, 2, 3, 4],
|
| 395 |
+
[5, 6, 7, 8],
|
| 396 |
+
[9, 10, 11, 12]]
|
| 397 |
+
|
| 398 |
+
def mapping(x, a, b):
|
| 399 |
+
return (a, x[0] * b)
|
| 400 |
+
|
| 401 |
+
out = ndimage.geometric_transform(
|
| 402 |
+
data, mapping, (2,), order=order, extra_arguments=(1,),
|
| 403 |
+
extra_keywords={'b': 2})
|
| 404 |
+
assert_array_almost_equal(out, [5, 7])
|
| 405 |
+
|
| 406 |
+
def test_geometric_transform_grid_constant_order1(self):
|
| 407 |
+
# verify interpolation outside the original bounds
|
| 408 |
+
x = np.array([[1, 2, 3],
|
| 409 |
+
[4, 5, 6]], dtype=float)
|
| 410 |
+
|
| 411 |
+
def mapping(x):
|
| 412 |
+
return (x[0] - 0.5), (x[1] - 0.5)
|
| 413 |
+
|
| 414 |
+
expected_result = np.array([[0.25, 0.75, 1.25],
|
| 415 |
+
[1.25, 3.00, 4.00]])
|
| 416 |
+
assert_array_almost_equal(
|
| 417 |
+
ndimage.geometric_transform(x, mapping, mode='grid-constant',
|
| 418 |
+
order=1),
|
| 419 |
+
expected_result,
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
@pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
|
| 423 |
+
'mirror', 'reflect'])
|
| 424 |
+
@pytest.mark.parametrize('order', range(6))
|
| 425 |
+
def test_geometric_transform_vs_padded(self, order, mode):
|
| 426 |
+
x = np.arange(144, dtype=float).reshape(12, 12)
|
| 427 |
+
|
| 428 |
+
def mapping(x):
|
| 429 |
+
return (x[0] - 0.4), (x[1] + 2.3)
|
| 430 |
+
|
| 431 |
+
# Manually pad and then extract center after the transform to get the
|
| 432 |
+
# expected result.
|
| 433 |
+
npad = 24
|
| 434 |
+
pad_mode = ndimage_to_numpy_mode.get(mode)
|
| 435 |
+
xp = np.pad(x, npad, mode=pad_mode)
|
| 436 |
+
center_slice = tuple([slice(npad, -npad)] * x.ndim)
|
| 437 |
+
expected_result = ndimage.geometric_transform(
|
| 438 |
+
xp, mapping, mode=mode, order=order)[center_slice]
|
| 439 |
+
|
| 440 |
+
assert_allclose(
|
| 441 |
+
ndimage.geometric_transform(x, mapping, mode=mode,
|
| 442 |
+
order=order),
|
| 443 |
+
expected_result,
|
| 444 |
+
rtol=1e-7,
|
| 445 |
+
)
|
| 446 |
+
|
| 447 |
+
def test_geometric_transform_endianness_with_output_parameter(self):
|
| 448 |
+
# geometric transform given output ndarray or dtype with
|
| 449 |
+
# non-native endianness. see issue #4127
|
| 450 |
+
data = np.array([1])
|
| 451 |
+
|
| 452 |
+
def mapping(x):
|
| 453 |
+
return x
|
| 454 |
+
|
| 455 |
+
for out in [data.dtype, data.dtype.newbyteorder(),
|
| 456 |
+
np.empty_like(data),
|
| 457 |
+
np.empty_like(data).astype(data.dtype.newbyteorder())]:
|
| 458 |
+
returned = ndimage.geometric_transform(data, mapping, data.shape,
|
| 459 |
+
output=out)
|
| 460 |
+
result = out if returned is None else returned
|
| 461 |
+
assert_array_almost_equal(result, [1])
|
| 462 |
+
|
| 463 |
+
def test_geometric_transform_with_string_output(self):
|
| 464 |
+
data = np.array([1])
|
| 465 |
+
|
| 466 |
+
def mapping(x):
|
| 467 |
+
return x
|
| 468 |
+
|
| 469 |
+
out = ndimage.geometric_transform(data, mapping, output='f')
|
| 470 |
+
assert_(out.dtype is np.dtype('f'))
|
| 471 |
+
assert_array_almost_equal(out, [1])
|
| 472 |
+
|
| 473 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 474 |
+
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
|
| 475 |
+
def test_map_coordinates01(self, order, dtype):
|
| 476 |
+
data = np.array([[4, 1, 3, 2],
|
| 477 |
+
[7, 6, 8, 5],
|
| 478 |
+
[3, 5, 3, 6]])
|
| 479 |
+
expected = np.array([[0, 0, 0, 0],
|
| 480 |
+
[0, 4, 1, 3],
|
| 481 |
+
[0, 7, 6, 8]])
|
| 482 |
+
if data.dtype.kind == 'c':
|
| 483 |
+
data = data - 1j * data
|
| 484 |
+
expected = expected - 1j * expected
|
| 485 |
+
|
| 486 |
+
idx = np.indices(data.shape)
|
| 487 |
+
idx -= 1
|
| 488 |
+
|
| 489 |
+
out = ndimage.map_coordinates(data, idx, order=order)
|
| 490 |
+
assert_array_almost_equal(out, expected)
|
| 491 |
+
|
| 492 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 493 |
+
def test_map_coordinates02(self, order):
|
| 494 |
+
data = np.array([[4, 1, 3, 2],
|
| 495 |
+
[7, 6, 8, 5],
|
| 496 |
+
[3, 5, 3, 6]])
|
| 497 |
+
idx = np.indices(data.shape, np.float64)
|
| 498 |
+
idx -= 0.5
|
| 499 |
+
|
| 500 |
+
out1 = ndimage.shift(data, 0.5, order=order)
|
| 501 |
+
out2 = ndimage.map_coordinates(data, idx, order=order)
|
| 502 |
+
assert_array_almost_equal(out1, out2)
|
| 503 |
+
|
| 504 |
+
def test_map_coordinates03(self):
|
| 505 |
+
data = np.array([[4, 1, 3, 2],
|
| 506 |
+
[7, 6, 8, 5],
|
| 507 |
+
[3, 5, 3, 6]], order='F')
|
| 508 |
+
idx = np.indices(data.shape) - 1
|
| 509 |
+
out = ndimage.map_coordinates(data, idx)
|
| 510 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 511 |
+
[0, 4, 1, 3],
|
| 512 |
+
[0, 7, 6, 8]])
|
| 513 |
+
assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
|
| 514 |
+
idx = np.indices(data[::2].shape) - 1
|
| 515 |
+
out = ndimage.map_coordinates(data[::2], idx)
|
| 516 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 517 |
+
[0, 4, 1, 3]])
|
| 518 |
+
assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1)))
|
| 519 |
+
idx = np.indices(data[:, ::2].shape) - 1
|
| 520 |
+
out = ndimage.map_coordinates(data[:, ::2], idx)
|
| 521 |
+
assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]])
|
| 522 |
+
assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
|
| 523 |
+
|
| 524 |
+
def test_map_coordinates_endianness_with_output_parameter(self):
|
| 525 |
+
# output parameter given as array or dtype with either endianness
|
| 526 |
+
# see issue #4127
|
| 527 |
+
data = np.array([[1, 2], [7, 6]])
|
| 528 |
+
expected = np.array([[0, 0], [0, 1]])
|
| 529 |
+
idx = np.indices(data.shape)
|
| 530 |
+
idx -= 1
|
| 531 |
+
for out in [
|
| 532 |
+
data.dtype,
|
| 533 |
+
data.dtype.newbyteorder(),
|
| 534 |
+
np.empty_like(expected),
|
| 535 |
+
np.empty_like(expected).astype(expected.dtype.newbyteorder())
|
| 536 |
+
]:
|
| 537 |
+
returned = ndimage.map_coordinates(data, idx, output=out)
|
| 538 |
+
result = out if returned is None else returned
|
| 539 |
+
assert_array_almost_equal(result, expected)
|
| 540 |
+
|
| 541 |
+
def test_map_coordinates_with_string_output(self):
|
| 542 |
+
data = np.array([[1]])
|
| 543 |
+
idx = np.indices(data.shape)
|
| 544 |
+
out = ndimage.map_coordinates(data, idx, output='f')
|
| 545 |
+
assert_(out.dtype is np.dtype('f'))
|
| 546 |
+
assert_array_almost_equal(out, [[1]])
|
| 547 |
+
|
| 548 |
+
@pytest.mark.skipif('win32' in sys.platform or np.intp(0).itemsize < 8,
|
| 549 |
+
reason='do not run on 32 bit or windows '
|
| 550 |
+
'(no sparse memory)')
|
| 551 |
+
def test_map_coordinates_large_data(self):
|
| 552 |
+
# check crash on large data
|
| 553 |
+
try:
|
| 554 |
+
n = 30000
|
| 555 |
+
a = np.empty(n**2, dtype=np.float32).reshape(n, n)
|
| 556 |
+
# fill the part we might read
|
| 557 |
+
a[n - 3:, n - 3:] = 0
|
| 558 |
+
ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1)
|
| 559 |
+
except MemoryError as e:
|
| 560 |
+
raise pytest.skip('Not enough memory available') from e
|
| 561 |
+
|
| 562 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 563 |
+
def test_affine_transform01(self, order):
|
| 564 |
+
data = np.array([1])
|
| 565 |
+
out = ndimage.affine_transform(data, [[1]], order=order)
|
| 566 |
+
assert_array_almost_equal(out, [1])
|
| 567 |
+
|
| 568 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 569 |
+
def test_affine_transform02(self, order):
|
| 570 |
+
data = np.ones([4])
|
| 571 |
+
out = ndimage.affine_transform(data, [[1]], order=order)
|
| 572 |
+
assert_array_almost_equal(out, [1, 1, 1, 1])
|
| 573 |
+
|
| 574 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 575 |
+
def test_affine_transform03(self, order):
|
| 576 |
+
data = np.ones([4])
|
| 577 |
+
out = ndimage.affine_transform(data, [[1]], -1, order=order)
|
| 578 |
+
assert_array_almost_equal(out, [0, 1, 1, 1])
|
| 579 |
+
|
| 580 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 581 |
+
def test_affine_transform04(self, order):
|
| 582 |
+
data = np.array([4, 1, 3, 2])
|
| 583 |
+
out = ndimage.affine_transform(data, [[1]], -1, order=order)
|
| 584 |
+
assert_array_almost_equal(out, [0, 4, 1, 3])
|
| 585 |
+
|
| 586 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 587 |
+
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
|
| 588 |
+
def test_affine_transform05(self, order, dtype):
|
| 589 |
+
data = np.array([[1, 1, 1, 1],
|
| 590 |
+
[1, 1, 1, 1],
|
| 591 |
+
[1, 1, 1, 1]], dtype=dtype)
|
| 592 |
+
expected = np.array([[0, 1, 1, 1],
|
| 593 |
+
[0, 1, 1, 1],
|
| 594 |
+
[0, 1, 1, 1]], dtype=dtype)
|
| 595 |
+
if data.dtype.kind == 'c':
|
| 596 |
+
data -= 1j * data
|
| 597 |
+
expected -= 1j * expected
|
| 598 |
+
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
|
| 599 |
+
[0, -1], order=order)
|
| 600 |
+
assert_array_almost_equal(out, expected)
|
| 601 |
+
|
| 602 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 603 |
+
def test_affine_transform06(self, order):
|
| 604 |
+
data = np.array([[4, 1, 3, 2],
|
| 605 |
+
[7, 6, 8, 5],
|
| 606 |
+
[3, 5, 3, 6]])
|
| 607 |
+
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
|
| 608 |
+
[0, -1], order=order)
|
| 609 |
+
assert_array_almost_equal(out, [[0, 4, 1, 3],
|
| 610 |
+
[0, 7, 6, 8],
|
| 611 |
+
[0, 3, 5, 3]])
|
| 612 |
+
|
| 613 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 614 |
+
def test_affine_transform07(self, order):
|
| 615 |
+
data = np.array([[4, 1, 3, 2],
|
| 616 |
+
[7, 6, 8, 5],
|
| 617 |
+
[3, 5, 3, 6]])
|
| 618 |
+
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
|
| 619 |
+
[-1, 0], order=order)
|
| 620 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 621 |
+
[4, 1, 3, 2],
|
| 622 |
+
[7, 6, 8, 5]])
|
| 623 |
+
|
| 624 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 625 |
+
def test_affine_transform08(self, order):
|
| 626 |
+
data = np.array([[4, 1, 3, 2],
|
| 627 |
+
[7, 6, 8, 5],
|
| 628 |
+
[3, 5, 3, 6]])
|
| 629 |
+
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
|
| 630 |
+
[-1, -1], order=order)
|
| 631 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 632 |
+
[0, 4, 1, 3],
|
| 633 |
+
[0, 7, 6, 8]])
|
| 634 |
+
|
| 635 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 636 |
+
def test_affine_transform09(self, order):
|
| 637 |
+
data = np.array([[4, 1, 3, 2],
|
| 638 |
+
[7, 6, 8, 5],
|
| 639 |
+
[3, 5, 3, 6]])
|
| 640 |
+
if (order > 1):
|
| 641 |
+
filtered = ndimage.spline_filter(data, order=order)
|
| 642 |
+
else:
|
| 643 |
+
filtered = data
|
| 644 |
+
out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]],
|
| 645 |
+
[-1, -1], order=order,
|
| 646 |
+
prefilter=False)
|
| 647 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 648 |
+
[0, 4, 1, 3],
|
| 649 |
+
[0, 7, 6, 8]])
|
| 650 |
+
|
| 651 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 652 |
+
def test_affine_transform10(self, order):
|
| 653 |
+
data = np.ones([2], np.float64)
|
| 654 |
+
out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,),
|
| 655 |
+
order=order)
|
| 656 |
+
assert_array_almost_equal(out, [1, 1, 1, 0])
|
| 657 |
+
|
| 658 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 659 |
+
def test_affine_transform11(self, order):
|
| 660 |
+
data = [1, 5, 2, 6, 3, 7, 4, 4]
|
| 661 |
+
out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order)
|
| 662 |
+
assert_array_almost_equal(out, [1, 2, 3, 4])
|
| 663 |
+
|
| 664 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 665 |
+
def test_affine_transform12(self, order):
|
| 666 |
+
data = [1, 2, 3, 4]
|
| 667 |
+
out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order)
|
| 668 |
+
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
|
| 669 |
+
|
| 670 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 671 |
+
def test_affine_transform13(self, order):
|
| 672 |
+
data = [[1, 2, 3, 4],
|
| 673 |
+
[5, 6, 7, 8],
|
| 674 |
+
[9.0, 10, 11, 12]]
|
| 675 |
+
out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2),
|
| 676 |
+
order=order)
|
| 677 |
+
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
|
| 678 |
+
|
| 679 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 680 |
+
def test_affine_transform14(self, order):
|
| 681 |
+
data = [[1, 2, 3, 4],
|
| 682 |
+
[5, 6, 7, 8],
|
| 683 |
+
[9, 10, 11, 12]]
|
| 684 |
+
out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4),
|
| 685 |
+
order=order)
|
| 686 |
+
assert_array_almost_equal(out, [[1, 2, 3, 4]])
|
| 687 |
+
|
| 688 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 689 |
+
def test_affine_transform15(self, order):
|
| 690 |
+
data = [[1, 2, 3, 4],
|
| 691 |
+
[5, 6, 7, 8],
|
| 692 |
+
[9, 10, 11, 12]]
|
| 693 |
+
out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2),
|
| 694 |
+
order=order)
|
| 695 |
+
assert_array_almost_equal(out, [[1, 3]])
|
| 696 |
+
|
| 697 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 698 |
+
def test_affine_transform16(self, order):
|
| 699 |
+
data = [[1, 2, 3, 4],
|
| 700 |
+
[5, 6, 7, 8],
|
| 701 |
+
[9, 10, 11, 12]]
|
| 702 |
+
out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0,
|
| 703 |
+
(3, 8), order=order)
|
| 704 |
+
assert_array_almost_equal(out[..., ::2], data)
|
| 705 |
+
|
| 706 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 707 |
+
def test_affine_transform17(self, order):
|
| 708 |
+
data = [[1, 2, 3, 4],
|
| 709 |
+
[5, 6, 7, 8],
|
| 710 |
+
[9, 10, 11, 12]]
|
| 711 |
+
out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0,
|
| 712 |
+
(6, 4), order=order)
|
| 713 |
+
assert_array_almost_equal(out[::2, ...], data)
|
| 714 |
+
|
| 715 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 716 |
+
def test_affine_transform18(self, order):
|
| 717 |
+
data = [[1, 2, 3, 4],
|
| 718 |
+
[5, 6, 7, 8],
|
| 719 |
+
[9, 10, 11, 12]]
|
| 720 |
+
out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
|
| 721 |
+
(6, 8), order=order)
|
| 722 |
+
assert_array_almost_equal(out[::2, ::2], data)
|
| 723 |
+
|
| 724 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 725 |
+
def test_affine_transform19(self, order):
|
| 726 |
+
data = np.array([[1, 2, 3, 4],
|
| 727 |
+
[5, 6, 7, 8],
|
| 728 |
+
[9, 10, 11, 12]], np.float64)
|
| 729 |
+
out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
|
| 730 |
+
(6, 8), order=order)
|
| 731 |
+
out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0,
|
| 732 |
+
(3, 4), order=order)
|
| 733 |
+
assert_array_almost_equal(out, data)
|
| 734 |
+
|
| 735 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 736 |
+
def test_affine_transform20(self, order):
|
| 737 |
+
data = [[1, 2, 3, 4],
|
| 738 |
+
[5, 6, 7, 8],
|
| 739 |
+
[9, 10, 11, 12]]
|
| 740 |
+
out = ndimage.affine_transform(data, [[0], [2]], 0, (2,),
|
| 741 |
+
order=order)
|
| 742 |
+
assert_array_almost_equal(out, [1, 3])
|
| 743 |
+
|
| 744 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 745 |
+
def test_affine_transform21(self, order):
|
| 746 |
+
data = [[1, 2, 3, 4],
|
| 747 |
+
[5, 6, 7, 8],
|
| 748 |
+
[9, 10, 11, 12]]
|
| 749 |
+
out = ndimage.affine_transform(data, [[2], [0]], 0, (2,),
|
| 750 |
+
order=order)
|
| 751 |
+
assert_array_almost_equal(out, [1, 9])
|
| 752 |
+
|
| 753 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 754 |
+
def test_affine_transform22(self, order):
|
| 755 |
+
# shift and offset interaction; see issue #1547
|
| 756 |
+
data = np.array([4, 1, 3, 2])
|
| 757 |
+
out = ndimage.affine_transform(data, [[2]], [-1], (3,),
|
| 758 |
+
order=order)
|
| 759 |
+
assert_array_almost_equal(out, [0, 1, 2])
|
| 760 |
+
|
| 761 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 762 |
+
def test_affine_transform23(self, order):
|
| 763 |
+
# shift and offset interaction; see issue #1547
|
| 764 |
+
data = np.array([4, 1, 3, 2])
|
| 765 |
+
out = ndimage.affine_transform(data, [[0.5]], [-1], (8,),
|
| 766 |
+
order=order)
|
| 767 |
+
assert_array_almost_equal(out[::2], [0, 4, 1, 3])
|
| 768 |
+
|
| 769 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 770 |
+
def test_affine_transform24(self, order):
|
| 771 |
+
# consistency between diagonal and non-diagonal case; see issue #1547
|
| 772 |
+
data = np.array([4, 1, 3, 2])
|
| 773 |
+
with suppress_warnings() as sup:
|
| 774 |
+
sup.filter(UserWarning,
|
| 775 |
+
'The behavior of affine_transform with a 1-D array .* '
|
| 776 |
+
'has changed')
|
| 777 |
+
out1 = ndimage.affine_transform(data, [2], -1, order=order)
|
| 778 |
+
out2 = ndimage.affine_transform(data, [[2]], -1, order=order)
|
| 779 |
+
assert_array_almost_equal(out1, out2)
|
| 780 |
+
|
| 781 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 782 |
+
def test_affine_transform25(self, order):
|
| 783 |
+
# consistency between diagonal and non-diagonal case; see issue #1547
|
| 784 |
+
data = np.array([4, 1, 3, 2])
|
| 785 |
+
with suppress_warnings() as sup:
|
| 786 |
+
sup.filter(UserWarning,
|
| 787 |
+
'The behavior of affine_transform with a 1-D array .* '
|
| 788 |
+
'has changed')
|
| 789 |
+
out1 = ndimage.affine_transform(data, [0.5], -1, order=order)
|
| 790 |
+
out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order)
|
| 791 |
+
assert_array_almost_equal(out1, out2)
|
| 792 |
+
|
| 793 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 794 |
+
def test_affine_transform26(self, order):
|
| 795 |
+
# test homogeneous coordinates
|
| 796 |
+
data = np.array([[4, 1, 3, 2],
|
| 797 |
+
[7, 6, 8, 5],
|
| 798 |
+
[3, 5, 3, 6]])
|
| 799 |
+
if (order > 1):
|
| 800 |
+
filtered = ndimage.spline_filter(data, order=order)
|
| 801 |
+
else:
|
| 802 |
+
filtered = data
|
| 803 |
+
tform_original = np.eye(2)
|
| 804 |
+
offset_original = -np.ones((2, 1))
|
| 805 |
+
tform_h1 = np.hstack((tform_original, offset_original))
|
| 806 |
+
tform_h2 = np.vstack((tform_h1, [[0, 0, 1]]))
|
| 807 |
+
out1 = ndimage.affine_transform(filtered, tform_original,
|
| 808 |
+
offset_original.ravel(),
|
| 809 |
+
order=order, prefilter=False)
|
| 810 |
+
out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
|
| 811 |
+
prefilter=False)
|
| 812 |
+
out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
|
| 813 |
+
prefilter=False)
|
| 814 |
+
for out in [out1, out2, out3]:
|
| 815 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 816 |
+
[0, 4, 1, 3],
|
| 817 |
+
[0, 7, 6, 8]])
|
| 818 |
+
|
| 819 |
+
def test_affine_transform27(self):
|
| 820 |
+
# test valid homogeneous transformation matrix
|
| 821 |
+
data = np.array([[4, 1, 3, 2],
|
| 822 |
+
[7, 6, 8, 5],
|
| 823 |
+
[3, 5, 3, 6]])
|
| 824 |
+
tform_h1 = np.hstack((np.eye(2), -np.ones((2, 1))))
|
| 825 |
+
tform_h2 = np.vstack((tform_h1, [[5, 2, 1]]))
|
| 826 |
+
assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
|
| 827 |
+
|
| 828 |
+
def test_affine_transform_1d_endianness_with_output_parameter(self):
|
| 829 |
+
# 1d affine transform given output ndarray or dtype with
|
| 830 |
+
# either endianness. see issue #7388
|
| 831 |
+
data = np.ones((2, 2))
|
| 832 |
+
for out in [np.empty_like(data),
|
| 833 |
+
np.empty_like(data).astype(data.dtype.newbyteorder()),
|
| 834 |
+
data.dtype, data.dtype.newbyteorder()]:
|
| 835 |
+
with suppress_warnings() as sup:
|
| 836 |
+
sup.filter(UserWarning,
|
| 837 |
+
'The behavior of affine_transform with a 1-D array '
|
| 838 |
+
'.* has changed')
|
| 839 |
+
returned = ndimage.affine_transform(data, [1, 1], output=out)
|
| 840 |
+
result = out if returned is None else returned
|
| 841 |
+
assert_array_almost_equal(result, [[1, 1], [1, 1]])
|
| 842 |
+
|
| 843 |
+
def test_affine_transform_multi_d_endianness_with_output_parameter(self):
|
| 844 |
+
# affine transform given output ndarray or dtype with either endianness
|
| 845 |
+
# see issue #4127
|
| 846 |
+
data = np.array([1])
|
| 847 |
+
for out in [data.dtype, data.dtype.newbyteorder(),
|
| 848 |
+
np.empty_like(data),
|
| 849 |
+
np.empty_like(data).astype(data.dtype.newbyteorder())]:
|
| 850 |
+
returned = ndimage.affine_transform(data, [[1]], output=out)
|
| 851 |
+
result = out if returned is None else returned
|
| 852 |
+
assert_array_almost_equal(result, [1])
|
| 853 |
+
|
| 854 |
+
def test_affine_transform_output_shape(self):
|
| 855 |
+
# don't require output_shape when out of a different size is given
|
| 856 |
+
data = np.arange(8, dtype=np.float64)
|
| 857 |
+
out = np.ones((16,))
|
| 858 |
+
|
| 859 |
+
ndimage.affine_transform(data, [[1]], output=out)
|
| 860 |
+
assert_array_almost_equal(out[:8], data)
|
| 861 |
+
|
| 862 |
+
# mismatched output shape raises an error
|
| 863 |
+
with pytest.raises(RuntimeError):
|
| 864 |
+
ndimage.affine_transform(
|
| 865 |
+
data, [[1]], output=out, output_shape=(12,))
|
| 866 |
+
|
| 867 |
+
def test_affine_transform_with_string_output(self):
|
| 868 |
+
data = np.array([1])
|
| 869 |
+
out = ndimage.affine_transform(data, [[1]], output='f')
|
| 870 |
+
assert_(out.dtype is np.dtype('f'))
|
| 871 |
+
assert_array_almost_equal(out, [1])
|
| 872 |
+
|
| 873 |
+
@pytest.mark.parametrize('shift',
|
| 874 |
+
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
|
| 875 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 876 |
+
def test_affine_transform_shift_via_grid_wrap(self, shift, order):
|
| 877 |
+
# For mode 'grid-wrap', integer shifts should match np.roll
|
| 878 |
+
x = np.array([[0, 1],
|
| 879 |
+
[2, 3]])
|
| 880 |
+
affine = np.zeros((2, 3))
|
| 881 |
+
affine[:2, :2] = np.eye(2)
|
| 882 |
+
affine[:, 2] = shift
|
| 883 |
+
assert_array_almost_equal(
|
| 884 |
+
ndimage.affine_transform(x, affine, mode='grid-wrap', order=order),
|
| 885 |
+
np.roll(x, shift, axis=(0, 1)),
|
| 886 |
+
)
|
| 887 |
+
|
| 888 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 889 |
+
def test_affine_transform_shift_reflect(self, order):
|
| 890 |
+
# shift by x.shape results in reflection
|
| 891 |
+
x = np.array([[0, 1, 2],
|
| 892 |
+
[3, 4, 5]])
|
| 893 |
+
affine = np.zeros((2, 3))
|
| 894 |
+
affine[:2, :2] = np.eye(2)
|
| 895 |
+
affine[:, 2] = x.shape
|
| 896 |
+
assert_array_almost_equal(
|
| 897 |
+
ndimage.affine_transform(x, affine, mode='reflect', order=order),
|
| 898 |
+
x[::-1, ::-1],
|
| 899 |
+
)
|
| 900 |
+
|
| 901 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 902 |
+
def test_shift01(self, order):
|
| 903 |
+
data = np.array([1])
|
| 904 |
+
out = ndimage.shift(data, [1], order=order)
|
| 905 |
+
assert_array_almost_equal(out, [0])
|
| 906 |
+
|
| 907 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 908 |
+
def test_shift02(self, order):
|
| 909 |
+
data = np.ones([4])
|
| 910 |
+
out = ndimage.shift(data, [1], order=order)
|
| 911 |
+
assert_array_almost_equal(out, [0, 1, 1, 1])
|
| 912 |
+
|
| 913 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 914 |
+
def test_shift03(self, order):
|
| 915 |
+
data = np.ones([4])
|
| 916 |
+
out = ndimage.shift(data, -1, order=order)
|
| 917 |
+
assert_array_almost_equal(out, [1, 1, 1, 0])
|
| 918 |
+
|
| 919 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 920 |
+
def test_shift04(self, order):
|
| 921 |
+
data = np.array([4, 1, 3, 2])
|
| 922 |
+
out = ndimage.shift(data, 1, order=order)
|
| 923 |
+
assert_array_almost_equal(out, [0, 4, 1, 3])
|
| 924 |
+
|
| 925 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 926 |
+
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
|
| 927 |
+
def test_shift05(self, order, dtype):
|
| 928 |
+
data = np.array([[1, 1, 1, 1],
|
| 929 |
+
[1, 1, 1, 1],
|
| 930 |
+
[1, 1, 1, 1]], dtype=dtype)
|
| 931 |
+
expected = np.array([[0, 1, 1, 1],
|
| 932 |
+
[0, 1, 1, 1],
|
| 933 |
+
[0, 1, 1, 1]], dtype=dtype)
|
| 934 |
+
if data.dtype.kind == 'c':
|
| 935 |
+
data -= 1j * data
|
| 936 |
+
expected -= 1j * expected
|
| 937 |
+
out = ndimage.shift(data, [0, 1], order=order)
|
| 938 |
+
assert_array_almost_equal(out, expected)
|
| 939 |
+
|
| 940 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 941 |
+
@pytest.mark.parametrize('mode', ['constant', 'grid-constant'])
|
| 942 |
+
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
|
| 943 |
+
def test_shift_with_nonzero_cval(self, order, mode, dtype):
|
| 944 |
+
data = np.array([[1, 1, 1, 1],
|
| 945 |
+
[1, 1, 1, 1],
|
| 946 |
+
[1, 1, 1, 1]], dtype=dtype)
|
| 947 |
+
|
| 948 |
+
expected = np.array([[0, 1, 1, 1],
|
| 949 |
+
[0, 1, 1, 1],
|
| 950 |
+
[0, 1, 1, 1]], dtype=dtype)
|
| 951 |
+
|
| 952 |
+
if data.dtype.kind == 'c':
|
| 953 |
+
data -= 1j * data
|
| 954 |
+
expected -= 1j * expected
|
| 955 |
+
cval = 5.0
|
| 956 |
+
expected[:, 0] = cval # specific to shift of [0, 1] used below
|
| 957 |
+
out = ndimage.shift(data, [0, 1], order=order, mode=mode, cval=cval)
|
| 958 |
+
assert_array_almost_equal(out, expected)
|
| 959 |
+
|
| 960 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 961 |
+
def test_shift06(self, order):
|
| 962 |
+
data = np.array([[4, 1, 3, 2],
|
| 963 |
+
[7, 6, 8, 5],
|
| 964 |
+
[3, 5, 3, 6]])
|
| 965 |
+
out = ndimage.shift(data, [0, 1], order=order)
|
| 966 |
+
assert_array_almost_equal(out, [[0, 4, 1, 3],
|
| 967 |
+
[0, 7, 6, 8],
|
| 968 |
+
[0, 3, 5, 3]])
|
| 969 |
+
|
| 970 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 971 |
+
def test_shift07(self, order):
|
| 972 |
+
data = np.array([[4, 1, 3, 2],
|
| 973 |
+
[7, 6, 8, 5],
|
| 974 |
+
[3, 5, 3, 6]])
|
| 975 |
+
out = ndimage.shift(data, [1, 0], order=order)
|
| 976 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 977 |
+
[4, 1, 3, 2],
|
| 978 |
+
[7, 6, 8, 5]])
|
| 979 |
+
|
| 980 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 981 |
+
def test_shift08(self, order):
|
| 982 |
+
data = np.array([[4, 1, 3, 2],
|
| 983 |
+
[7, 6, 8, 5],
|
| 984 |
+
[3, 5, 3, 6]])
|
| 985 |
+
out = ndimage.shift(data, [1, 1], order=order)
|
| 986 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 987 |
+
[0, 4, 1, 3],
|
| 988 |
+
[0, 7, 6, 8]])
|
| 989 |
+
|
| 990 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 991 |
+
def test_shift09(self, order):
|
| 992 |
+
data = np.array([[4, 1, 3, 2],
|
| 993 |
+
[7, 6, 8, 5],
|
| 994 |
+
[3, 5, 3, 6]])
|
| 995 |
+
if (order > 1):
|
| 996 |
+
filtered = ndimage.spline_filter(data, order=order)
|
| 997 |
+
else:
|
| 998 |
+
filtered = data
|
| 999 |
+
out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False)
|
| 1000 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
| 1001 |
+
[0, 4, 1, 3],
|
| 1002 |
+
[0, 7, 6, 8]])
|
| 1003 |
+
|
| 1004 |
+
@pytest.mark.parametrize('shift',
|
| 1005 |
+
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
|
| 1006 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1007 |
+
def test_shift_grid_wrap(self, shift, order):
|
| 1008 |
+
# For mode 'grid-wrap', integer shifts should match np.roll
|
| 1009 |
+
x = np.array([[0, 1],
|
| 1010 |
+
[2, 3]])
|
| 1011 |
+
assert_array_almost_equal(
|
| 1012 |
+
ndimage.shift(x, shift, mode='grid-wrap', order=order),
|
| 1013 |
+
np.roll(x, shift, axis=(0, 1)),
|
| 1014 |
+
)
|
| 1015 |
+
|
| 1016 |
+
@pytest.mark.parametrize('shift',
|
| 1017 |
+
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
|
| 1018 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1019 |
+
def test_shift_grid_constant1(self, shift, order):
|
| 1020 |
+
# For integer shifts, 'constant' and 'grid-constant' should be equal
|
| 1021 |
+
x = np.arange(20).reshape((5, 4))
|
| 1022 |
+
assert_array_almost_equal(
|
| 1023 |
+
ndimage.shift(x, shift, mode='grid-constant', order=order),
|
| 1024 |
+
ndimage.shift(x, shift, mode='constant', order=order),
|
| 1025 |
+
)
|
| 1026 |
+
|
| 1027 |
+
def test_shift_grid_constant_order1(self):
|
| 1028 |
+
x = np.array([[1, 2, 3],
|
| 1029 |
+
[4, 5, 6]], dtype=float)
|
| 1030 |
+
expected_result = np.array([[0.25, 0.75, 1.25],
|
| 1031 |
+
[1.25, 3.00, 4.00]])
|
| 1032 |
+
assert_array_almost_equal(
|
| 1033 |
+
ndimage.shift(x, (0.5, 0.5), mode='grid-constant', order=1),
|
| 1034 |
+
expected_result,
|
| 1035 |
+
)
|
| 1036 |
+
|
| 1037 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1038 |
+
def test_shift_reflect(self, order):
|
| 1039 |
+
# shift by x.shape results in reflection
|
| 1040 |
+
x = np.array([[0, 1, 2],
|
| 1041 |
+
[3, 4, 5]])
|
| 1042 |
+
assert_array_almost_equal(
|
| 1043 |
+
ndimage.shift(x, x.shape, mode='reflect', order=order),
|
| 1044 |
+
x[::-1, ::-1],
|
| 1045 |
+
)
|
| 1046 |
+
|
| 1047 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1048 |
+
@pytest.mark.parametrize('prefilter', [False, True])
|
| 1049 |
+
def test_shift_nearest_boundary(self, order, prefilter):
|
| 1050 |
+
# verify that shifting at least order // 2 beyond the end of the array
|
| 1051 |
+
# gives a value equal to the edge value.
|
| 1052 |
+
x = np.arange(16)
|
| 1053 |
+
kwargs = dict(mode='nearest', order=order, prefilter=prefilter)
|
| 1054 |
+
assert_array_almost_equal(
|
| 1055 |
+
ndimage.shift(x, order // 2 + 1, **kwargs)[0], x[0],
|
| 1056 |
+
)
|
| 1057 |
+
assert_array_almost_equal(
|
| 1058 |
+
ndimage.shift(x, -order // 2 - 1, **kwargs)[-1], x[-1],
|
| 1059 |
+
)
|
| 1060 |
+
|
| 1061 |
+
@pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
|
| 1062 |
+
'mirror', 'reflect'])
|
| 1063 |
+
@pytest.mark.parametrize('order', range(6))
|
| 1064 |
+
def test_shift_vs_padded(self, order, mode):
|
| 1065 |
+
x = np.arange(144, dtype=float).reshape(12, 12)
|
| 1066 |
+
shift = (0.4, -2.3)
|
| 1067 |
+
|
| 1068 |
+
# manually pad and then extract center to get expected result
|
| 1069 |
+
npad = 32
|
| 1070 |
+
pad_mode = ndimage_to_numpy_mode.get(mode)
|
| 1071 |
+
xp = np.pad(x, npad, mode=pad_mode)
|
| 1072 |
+
center_slice = tuple([slice(npad, -npad)] * x.ndim)
|
| 1073 |
+
expected_result = ndimage.shift(
|
| 1074 |
+
xp, shift, mode=mode, order=order)[center_slice]
|
| 1075 |
+
|
| 1076 |
+
assert_allclose(
|
| 1077 |
+
ndimage.shift(x, shift, mode=mode, order=order),
|
| 1078 |
+
expected_result,
|
| 1079 |
+
rtol=1e-7,
|
| 1080 |
+
)
|
| 1081 |
+
|
| 1082 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1083 |
+
def test_zoom1(self, order):
|
| 1084 |
+
for z in [2, [2, 2]]:
|
| 1085 |
+
arr = np.array(list(range(25))).reshape((5, 5)).astype(float)
|
| 1086 |
+
arr = ndimage.zoom(arr, z, order=order)
|
| 1087 |
+
assert_equal(arr.shape, (10, 10))
|
| 1088 |
+
assert_(np.all(arr[-1, :] != 0))
|
| 1089 |
+
assert_(np.all(arr[-1, :] >= (20 - eps)))
|
| 1090 |
+
assert_(np.all(arr[0, :] <= (5 + eps)))
|
| 1091 |
+
assert_(np.all(arr >= (0 - eps)))
|
| 1092 |
+
assert_(np.all(arr <= (24 + eps)))
|
| 1093 |
+
|
| 1094 |
+
def test_zoom2(self):
|
| 1095 |
+
arr = np.arange(12).reshape((3, 4))
|
| 1096 |
+
out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
|
| 1097 |
+
assert_array_equal(out, arr)
|
| 1098 |
+
|
| 1099 |
+
def test_zoom3(self):
|
| 1100 |
+
arr = np.array([[1, 2]])
|
| 1101 |
+
out1 = ndimage.zoom(arr, (2, 1))
|
| 1102 |
+
out2 = ndimage.zoom(arr, (1, 2))
|
| 1103 |
+
|
| 1104 |
+
assert_array_almost_equal(out1, np.array([[1, 2], [1, 2]]))
|
| 1105 |
+
assert_array_almost_equal(out2, np.array([[1, 1, 2, 2]]))
|
| 1106 |
+
|
| 1107 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1108 |
+
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
|
| 1109 |
+
def test_zoom_affine01(self, order, dtype):
|
| 1110 |
+
data = np.asarray([[1, 2, 3, 4],
|
| 1111 |
+
[5, 6, 7, 8],
|
| 1112 |
+
[9, 10, 11, 12]], dtype=dtype)
|
| 1113 |
+
if data.dtype.kind == 'c':
|
| 1114 |
+
data -= 1j * data
|
| 1115 |
+
with suppress_warnings() as sup:
|
| 1116 |
+
sup.filter(UserWarning,
|
| 1117 |
+
'The behavior of affine_transform with a 1-D array .* '
|
| 1118 |
+
'has changed')
|
| 1119 |
+
out = ndimage.affine_transform(data, [0.5, 0.5], 0,
|
| 1120 |
+
(6, 8), order=order)
|
| 1121 |
+
assert_array_almost_equal(out[::2, ::2], data)
|
| 1122 |
+
|
| 1123 |
+
def test_zoom_infinity(self):
|
| 1124 |
+
# Ticket #1419 regression test
|
| 1125 |
+
dim = 8
|
| 1126 |
+
ndimage.zoom(np.zeros((dim, dim)), 1. / dim, mode='nearest')
|
| 1127 |
+
|
| 1128 |
+
def test_zoom_zoomfactor_one(self):
|
| 1129 |
+
# Ticket #1122 regression test
|
| 1130 |
+
arr = np.zeros((1, 5, 5))
|
| 1131 |
+
zoom = (1.0, 2.0, 2.0)
|
| 1132 |
+
|
| 1133 |
+
out = ndimage.zoom(arr, zoom, cval=7)
|
| 1134 |
+
ref = np.zeros((1, 10, 10))
|
| 1135 |
+
assert_array_almost_equal(out, ref)
|
| 1136 |
+
|
| 1137 |
+
def test_zoom_output_shape_roundoff(self):
|
| 1138 |
+
arr = np.zeros((3, 11, 25))
|
| 1139 |
+
zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
|
| 1140 |
+
out = ndimage.zoom(arr, zoom)
|
| 1141 |
+
assert_array_equal(out.shape, (4, 15, 29))
|
| 1142 |
+
|
| 1143 |
+
@pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
|
| 1144 |
+
@pytest.mark.parametrize('mode', ['nearest', 'constant', 'wrap', 'reflect',
|
| 1145 |
+
'mirror', 'grid-wrap', 'grid-mirror',
|
| 1146 |
+
'grid-constant'])
|
| 1147 |
+
def test_zoom_by_int_order0(self, zoom, mode):
|
| 1148 |
+
# order 0 zoom should be the same as replication via np.kron
|
| 1149 |
+
# Note: This is not True for general x shapes when grid_mode is False,
|
| 1150 |
+
# but works here for all modes because the size ratio happens to
|
| 1151 |
+
# always be an integer when x.shape = (2, 2).
|
| 1152 |
+
x = np.array([[0, 1],
|
| 1153 |
+
[2, 3]], dtype=float)
|
| 1154 |
+
# x = np.arange(16, dtype=float).reshape(4, 4)
|
| 1155 |
+
assert_array_almost_equal(
|
| 1156 |
+
ndimage.zoom(x, zoom, order=0, mode=mode),
|
| 1157 |
+
np.kron(x, np.ones(zoom))
|
| 1158 |
+
)
|
| 1159 |
+
|
| 1160 |
+
@pytest.mark.parametrize('shape', [(2, 3), (4, 4)])
|
| 1161 |
+
@pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
|
| 1162 |
+
@pytest.mark.parametrize('mode', ['nearest', 'reflect', 'mirror',
|
| 1163 |
+
'grid-wrap', 'grid-constant'])
|
| 1164 |
+
def test_zoom_grid_by_int_order0(self, shape, zoom, mode):
|
| 1165 |
+
# When grid_mode is True, order 0 zoom should be the same as
|
| 1166 |
+
# replication via np.kron. The only exceptions to this are the
|
| 1167 |
+
# non-grid modes 'constant' and 'wrap'.
|
| 1168 |
+
x = np.arange(np.prod(shape), dtype=float).reshape(shape)
|
| 1169 |
+
assert_array_almost_equal(
|
| 1170 |
+
ndimage.zoom(x, zoom, order=0, mode=mode, grid_mode=True),
|
| 1171 |
+
np.kron(x, np.ones(zoom))
|
| 1172 |
+
)
|
| 1173 |
+
|
| 1174 |
+
@pytest.mark.parametrize('mode', ['constant', 'wrap'])
|
| 1175 |
+
def test_zoom_grid_mode_warnings(self, mode):
|
| 1176 |
+
# Warn on use of non-grid modes when grid_mode is True
|
| 1177 |
+
x = np.arange(9, dtype=float).reshape((3, 3))
|
| 1178 |
+
with pytest.warns(UserWarning,
|
| 1179 |
+
match="It is recommended to use mode"):
|
| 1180 |
+
ndimage.zoom(x, 2, mode=mode, grid_mode=True),
|
| 1181 |
+
|
| 1182 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1183 |
+
def test_rotate01(self, order):
|
| 1184 |
+
data = np.array([[0, 0, 0, 0],
|
| 1185 |
+
[0, 1, 1, 0],
|
| 1186 |
+
[0, 0, 0, 0]], dtype=np.float64)
|
| 1187 |
+
out = ndimage.rotate(data, 0, order=order)
|
| 1188 |
+
assert_array_almost_equal(out, data)
|
| 1189 |
+
|
| 1190 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1191 |
+
def test_rotate02(self, order):
|
| 1192 |
+
data = np.array([[0, 0, 0, 0],
|
| 1193 |
+
[0, 1, 0, 0],
|
| 1194 |
+
[0, 0, 0, 0]], dtype=np.float64)
|
| 1195 |
+
expected = np.array([[0, 0, 0],
|
| 1196 |
+
[0, 0, 0],
|
| 1197 |
+
[0, 1, 0],
|
| 1198 |
+
[0, 0, 0]], dtype=np.float64)
|
| 1199 |
+
out = ndimage.rotate(data, 90, order=order)
|
| 1200 |
+
assert_array_almost_equal(out, expected)
|
| 1201 |
+
|
| 1202 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1203 |
+
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
|
| 1204 |
+
def test_rotate03(self, order, dtype):
|
| 1205 |
+
data = np.array([[0, 0, 0, 0, 0],
|
| 1206 |
+
[0, 1, 1, 0, 0],
|
| 1207 |
+
[0, 0, 0, 0, 0]], dtype=dtype)
|
| 1208 |
+
expected = np.array([[0, 0, 0],
|
| 1209 |
+
[0, 0, 0],
|
| 1210 |
+
[0, 1, 0],
|
| 1211 |
+
[0, 1, 0],
|
| 1212 |
+
[0, 0, 0]], dtype=dtype)
|
| 1213 |
+
if data.dtype.kind == 'c':
|
| 1214 |
+
data -= 1j * data
|
| 1215 |
+
expected -= 1j * expected
|
| 1216 |
+
out = ndimage.rotate(data, 90, order=order)
|
| 1217 |
+
assert_array_almost_equal(out, expected)
|
| 1218 |
+
|
| 1219 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1220 |
+
def test_rotate04(self, order):
|
| 1221 |
+
data = np.array([[0, 0, 0, 0, 0],
|
| 1222 |
+
[0, 1, 1, 0, 0],
|
| 1223 |
+
[0, 0, 0, 0, 0]], dtype=np.float64)
|
| 1224 |
+
expected = np.array([[0, 0, 0, 0, 0],
|
| 1225 |
+
[0, 0, 1, 0, 0],
|
| 1226 |
+
[0, 0, 1, 0, 0]], dtype=np.float64)
|
| 1227 |
+
out = ndimage.rotate(data, 90, reshape=False, order=order)
|
| 1228 |
+
assert_array_almost_equal(out, expected)
|
| 1229 |
+
|
| 1230 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1231 |
+
def test_rotate05(self, order):
|
| 1232 |
+
data = np.empty((4, 3, 3))
|
| 1233 |
+
for i in range(3):
|
| 1234 |
+
data[:, :, i] = np.array([[0, 0, 0],
|
| 1235 |
+
[0, 1, 0],
|
| 1236 |
+
[0, 1, 0],
|
| 1237 |
+
[0, 0, 0]], dtype=np.float64)
|
| 1238 |
+
expected = np.array([[0, 0, 0, 0],
|
| 1239 |
+
[0, 1, 1, 0],
|
| 1240 |
+
[0, 0, 0, 0]], dtype=np.float64)
|
| 1241 |
+
out = ndimage.rotate(data, 90, order=order)
|
| 1242 |
+
for i in range(3):
|
| 1243 |
+
assert_array_almost_equal(out[:, :, i], expected)
|
| 1244 |
+
|
| 1245 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1246 |
+
def test_rotate06(self, order):
|
| 1247 |
+
data = np.empty((3, 4, 3))
|
| 1248 |
+
for i in range(3):
|
| 1249 |
+
data[:, :, i] = np.array([[0, 0, 0, 0],
|
| 1250 |
+
[0, 1, 1, 0],
|
| 1251 |
+
[0, 0, 0, 0]], dtype=np.float64)
|
| 1252 |
+
expected = np.array([[0, 0, 0],
|
| 1253 |
+
[0, 1, 0],
|
| 1254 |
+
[0, 1, 0],
|
| 1255 |
+
[0, 0, 0]], dtype=np.float64)
|
| 1256 |
+
out = ndimage.rotate(data, 90, order=order)
|
| 1257 |
+
for i in range(3):
|
| 1258 |
+
assert_array_almost_equal(out[:, :, i], expected)
|
| 1259 |
+
|
| 1260 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1261 |
+
def test_rotate07(self, order):
|
| 1262 |
+
data = np.array([[[0, 0, 0, 0, 0],
|
| 1263 |
+
[0, 1, 1, 0, 0],
|
| 1264 |
+
[0, 0, 0, 0, 0]]] * 2, dtype=np.float64)
|
| 1265 |
+
data = data.transpose()
|
| 1266 |
+
expected = np.array([[[0, 0, 0],
|
| 1267 |
+
[0, 1, 0],
|
| 1268 |
+
[0, 1, 0],
|
| 1269 |
+
[0, 0, 0],
|
| 1270 |
+
[0, 0, 0]]] * 2, dtype=np.float64)
|
| 1271 |
+
expected = expected.transpose([2, 1, 0])
|
| 1272 |
+
out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
|
| 1273 |
+
assert_array_almost_equal(out, expected)
|
| 1274 |
+
|
| 1275 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1276 |
+
def test_rotate08(self, order):
|
| 1277 |
+
data = np.array([[[0, 0, 0, 0, 0],
|
| 1278 |
+
[0, 1, 1, 0, 0],
|
| 1279 |
+
[0, 0, 0, 0, 0]]] * 2, dtype=np.float64)
|
| 1280 |
+
data = data.transpose()
|
| 1281 |
+
expected = np.array([[[0, 0, 1, 0, 0],
|
| 1282 |
+
[0, 0, 1, 0, 0],
|
| 1283 |
+
[0, 0, 0, 0, 0]]] * 2, dtype=np.float64)
|
| 1284 |
+
expected = expected.transpose()
|
| 1285 |
+
out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order)
|
| 1286 |
+
assert_array_almost_equal(out, expected)
|
| 1287 |
+
|
| 1288 |
+
def test_rotate09(self):
|
| 1289 |
+
data = np.array([[0, 0, 0, 0, 0],
|
| 1290 |
+
[0, 1, 1, 0, 0],
|
| 1291 |
+
[0, 0, 0, 0, 0]] * 2, dtype=np.float64)
|
| 1292 |
+
with assert_raises(ValueError):
|
| 1293 |
+
ndimage.rotate(data, 90, axes=(0, data.ndim))
|
| 1294 |
+
|
| 1295 |
+
def test_rotate10(self):
|
| 1296 |
+
data = np.arange(45, dtype=np.float64).reshape((3, 5, 3))
|
| 1297 |
+
|
| 1298 |
+
# The output of ndimage.rotate before refactoring
|
| 1299 |
+
expected = np.array([[[0.0, 0.0, 0.0],
|
| 1300 |
+
[0.0, 0.0, 0.0],
|
| 1301 |
+
[6.54914793, 7.54914793, 8.54914793],
|
| 1302 |
+
[10.84520162, 11.84520162, 12.84520162],
|
| 1303 |
+
[0.0, 0.0, 0.0]],
|
| 1304 |
+
[[6.19286575, 7.19286575, 8.19286575],
|
| 1305 |
+
[13.4730712, 14.4730712, 15.4730712],
|
| 1306 |
+
[21.0, 22.0, 23.0],
|
| 1307 |
+
[28.5269288, 29.5269288, 30.5269288],
|
| 1308 |
+
[35.80713425, 36.80713425, 37.80713425]],
|
| 1309 |
+
[[0.0, 0.0, 0.0],
|
| 1310 |
+
[31.15479838, 32.15479838, 33.15479838],
|
| 1311 |
+
[35.45085207, 36.45085207, 37.45085207],
|
| 1312 |
+
[0.0, 0.0, 0.0],
|
| 1313 |
+
[0.0, 0.0, 0.0]]])
|
| 1314 |
+
|
| 1315 |
+
out = ndimage.rotate(data, angle=12, reshape=False)
|
| 1316 |
+
assert_array_almost_equal(out, expected)
|
| 1317 |
+
|
| 1318 |
+
def test_rotate_exact_180(self):
|
| 1319 |
+
a = np.tile(np.arange(5), (5, 1))
|
| 1320 |
+
b = ndimage.rotate(ndimage.rotate(a, 180), -180)
|
| 1321 |
+
assert_equal(a, b)
|
| 1322 |
+
|
| 1323 |
+
|
| 1324 |
+
def test_zoom_output_shape():
|
| 1325 |
+
"""Ticket #643"""
|
| 1326 |
+
x = np.arange(12).reshape((3, 4))
|
| 1327 |
+
ndimage.zoom(x, 2, output=np.zeros((6, 8)))
|
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py
ADDED
|
@@ -0,0 +1,1419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import (
|
| 5 |
+
assert_,
|
| 6 |
+
assert_allclose,
|
| 7 |
+
assert_almost_equal,
|
| 8 |
+
assert_array_almost_equal,
|
| 9 |
+
assert_array_equal,
|
| 10 |
+
assert_equal,
|
| 11 |
+
suppress_warnings,
|
| 12 |
+
)
|
| 13 |
+
import pytest
|
| 14 |
+
from pytest import raises as assert_raises
|
| 15 |
+
|
| 16 |
+
import scipy.ndimage as ndimage
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
from . import types
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class Test_measurements_stats:
|
| 23 |
+
"""ndimage._measurements._stats() is a utility used by other functions."""
|
| 24 |
+
|
| 25 |
+
def test_a(self):
|
| 26 |
+
x = [0, 1, 2, 6]
|
| 27 |
+
labels = [0, 0, 1, 1]
|
| 28 |
+
index = [0, 1]
|
| 29 |
+
for shp in [(4,), (2, 2)]:
|
| 30 |
+
x = np.array(x).reshape(shp)
|
| 31 |
+
labels = np.array(labels).reshape(shp)
|
| 32 |
+
counts, sums = ndimage._measurements._stats(
|
| 33 |
+
x, labels=labels, index=index)
|
| 34 |
+
assert_array_equal(counts, [2, 2])
|
| 35 |
+
assert_array_equal(sums, [1.0, 8.0])
|
| 36 |
+
|
| 37 |
+
def test_b(self):
|
| 38 |
+
# Same data as test_a, but different labels. The label 9 exceeds the
|
| 39 |
+
# length of 'labels', so this test will follow a different code path.
|
| 40 |
+
x = [0, 1, 2, 6]
|
| 41 |
+
labels = [0, 0, 9, 9]
|
| 42 |
+
index = [0, 9]
|
| 43 |
+
for shp in [(4,), (2, 2)]:
|
| 44 |
+
x = np.array(x).reshape(shp)
|
| 45 |
+
labels = np.array(labels).reshape(shp)
|
| 46 |
+
counts, sums = ndimage._measurements._stats(
|
| 47 |
+
x, labels=labels, index=index)
|
| 48 |
+
assert_array_equal(counts, [2, 2])
|
| 49 |
+
assert_array_equal(sums, [1.0, 8.0])
|
| 50 |
+
|
| 51 |
+
def test_a_centered(self):
|
| 52 |
+
x = [0, 1, 2, 6]
|
| 53 |
+
labels = [0, 0, 1, 1]
|
| 54 |
+
index = [0, 1]
|
| 55 |
+
for shp in [(4,), (2, 2)]:
|
| 56 |
+
x = np.array(x).reshape(shp)
|
| 57 |
+
labels = np.array(labels).reshape(shp)
|
| 58 |
+
counts, sums, centers = ndimage._measurements._stats(
|
| 59 |
+
x, labels=labels, index=index, centered=True)
|
| 60 |
+
assert_array_equal(counts, [2, 2])
|
| 61 |
+
assert_array_equal(sums, [1.0, 8.0])
|
| 62 |
+
assert_array_equal(centers, [0.5, 8.0])
|
| 63 |
+
|
| 64 |
+
def test_b_centered(self):
|
| 65 |
+
x = [0, 1, 2, 6]
|
| 66 |
+
labels = [0, 0, 9, 9]
|
| 67 |
+
index = [0, 9]
|
| 68 |
+
for shp in [(4,), (2, 2)]:
|
| 69 |
+
x = np.array(x).reshape(shp)
|
| 70 |
+
labels = np.array(labels).reshape(shp)
|
| 71 |
+
counts, sums, centers = ndimage._measurements._stats(
|
| 72 |
+
x, labels=labels, index=index, centered=True)
|
| 73 |
+
assert_array_equal(counts, [2, 2])
|
| 74 |
+
assert_array_equal(sums, [1.0, 8.0])
|
| 75 |
+
assert_array_equal(centers, [0.5, 8.0])
|
| 76 |
+
|
| 77 |
+
def test_nonint_labels(self):
|
| 78 |
+
x = [0, 1, 2, 6]
|
| 79 |
+
labels = [0.0, 0.0, 9.0, 9.0]
|
| 80 |
+
index = [0.0, 9.0]
|
| 81 |
+
for shp in [(4,), (2, 2)]:
|
| 82 |
+
x = np.array(x).reshape(shp)
|
| 83 |
+
labels = np.array(labels).reshape(shp)
|
| 84 |
+
counts, sums, centers = ndimage._measurements._stats(
|
| 85 |
+
x, labels=labels, index=index, centered=True)
|
| 86 |
+
assert_array_equal(counts, [2, 2])
|
| 87 |
+
assert_array_equal(sums, [1.0, 8.0])
|
| 88 |
+
assert_array_equal(centers, [0.5, 8.0])
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class Test_measurements_select:
|
| 92 |
+
"""ndimage._measurements._select() is a utility used by other functions."""
|
| 93 |
+
|
| 94 |
+
def test_basic(self):
|
| 95 |
+
x = [0, 1, 6, 2]
|
| 96 |
+
cases = [
|
| 97 |
+
([0, 0, 1, 1], [0, 1]), # "Small" integer labels
|
| 98 |
+
([0, 0, 9, 9], [0, 9]), # A label larger than len(labels)
|
| 99 |
+
([0.0, 0.0, 7.0, 7.0], [0.0, 7.0]), # Non-integer labels
|
| 100 |
+
]
|
| 101 |
+
for labels, index in cases:
|
| 102 |
+
result = ndimage._measurements._select(
|
| 103 |
+
x, labels=labels, index=index)
|
| 104 |
+
assert_(len(result) == 0)
|
| 105 |
+
result = ndimage._measurements._select(
|
| 106 |
+
x, labels=labels, index=index, find_max=True)
|
| 107 |
+
assert_(len(result) == 1)
|
| 108 |
+
assert_array_equal(result[0], [1, 6])
|
| 109 |
+
result = ndimage._measurements._select(
|
| 110 |
+
x, labels=labels, index=index, find_min=True)
|
| 111 |
+
assert_(len(result) == 1)
|
| 112 |
+
assert_array_equal(result[0], [0, 2])
|
| 113 |
+
result = ndimage._measurements._select(
|
| 114 |
+
x, labels=labels, index=index, find_min=True,
|
| 115 |
+
find_min_positions=True)
|
| 116 |
+
assert_(len(result) == 2)
|
| 117 |
+
assert_array_equal(result[0], [0, 2])
|
| 118 |
+
assert_array_equal(result[1], [0, 3])
|
| 119 |
+
assert_equal(result[1].dtype.kind, 'i')
|
| 120 |
+
result = ndimage._measurements._select(
|
| 121 |
+
x, labels=labels, index=index, find_max=True,
|
| 122 |
+
find_max_positions=True)
|
| 123 |
+
assert_(len(result) == 2)
|
| 124 |
+
assert_array_equal(result[0], [1, 6])
|
| 125 |
+
assert_array_equal(result[1], [1, 2])
|
| 126 |
+
assert_equal(result[1].dtype.kind, 'i')
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def test_label01():
    # A 0-d array with a nonzero value is a single feature.
    labeled, num = ndimage.label(np.ones([]))
    assert_array_almost_equal(labeled, 1)
    assert_equal(num, 1)


def test_label02():
    # A 0-d array of zero has no features.
    labeled, num = ndimage.label(np.zeros([]))
    assert_array_almost_equal(labeled, 0)
    assert_equal(num, 0)


def test_label03():
    labeled, num = ndimage.label(np.ones([1]))
    assert_array_almost_equal(labeled, [1])
    assert_equal(num, 1)


def test_label04():
    labeled, num = ndimage.label(np.zeros([1]))
    assert_array_almost_equal(labeled, [0])
    assert_equal(num, 0)


def test_label05():
    # One contiguous run of ones labels as a single feature.
    labeled, num = ndimage.label(np.ones([5]))
    assert_array_almost_equal(labeled, [1, 1, 1, 1, 1])
    assert_equal(num, 1)


def test_label06():
    # Three separated runs get three distinct labels.
    labeled, num = ndimage.label(np.array([1, 0, 1, 1, 0, 1]))
    assert_array_almost_equal(labeled, [1, 0, 2, 2, 0, 3])
    assert_equal(num, 3)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def test_label07():
    # A fully-zero image has nothing to label.
    data = np.zeros((6, 6), dtype=int)
    labeled, num = ndimage.label(data)
    assert_array_almost_equal(labeled, np.zeros((6, 6)))
    assert_equal(num, 0)


def test_label08():
    # Four 4-connected components, labelled in scan order.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 1, 1, 0, 0],
                     [0, 0, 1, 1, 1, 0],
                     [1, 1, 0, 0, 0, 0],
                     [1, 1, 0, 0, 0, 0],
                     [0, 0, 0, 1, 1, 0]])
    expected = [[1, 0, 0, 0, 0, 0],
                [0, 0, 2, 2, 0, 0],
                [0, 0, 2, 2, 2, 0],
                [3, 3, 0, 0, 0, 0],
                [3, 3, 0, 0, 0, 0],
                [0, 0, 0, 4, 4, 0]]
    labeled, num = ndimage.label(data)
    assert_array_almost_equal(labeled, expected)
    assert_equal(num, 4)


def test_label09():
    # With full (8-) connectivity, two of the features above merge.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 1, 1, 0, 0],
                     [0, 0, 1, 1, 1, 0],
                     [1, 1, 0, 0, 0, 0],
                     [1, 1, 0, 0, 0, 0],
                     [0, 0, 0, 1, 1, 0]])
    expected = [[1, 0, 0, 0, 0, 0],
                [0, 0, 2, 2, 0, 0],
                [0, 0, 2, 2, 2, 0],
                [2, 2, 0, 0, 0, 0],
                [2, 2, 0, 0, 0, 0],
                [0, 0, 0, 3, 3, 0]]
    struct = ndimage.generate_binary_structure(2, 2)
    labeled, num = ndimage.label(data, struct)
    assert_array_almost_equal(labeled, expected)
    assert_equal(num, 3)


def test_label10():
    # Diagonally-touching pixels form one feature under full connectivity.
    data = np.array([[0, 0, 0, 0, 0, 0],
                     [0, 1, 1, 0, 1, 0],
                     [0, 1, 1, 1, 1, 0],
                     [0, 0, 0, 0, 0, 0]])
    struct = ndimage.generate_binary_structure(2, 2)
    labeled, num = ndimage.label(data, struct)
    assert_array_almost_equal(labeled, data)
    assert_equal(num, 1)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def test_label11():
    # Labelling must work for every supported input dtype.
    expected = [[1, 0, 0, 0, 0, 0],
                [0, 0, 2, 2, 0, 0],
                [0, 0, 2, 2, 2, 0],
                [3, 3, 0, 0, 0, 0],
                [3, 3, 0, 0, 0, 0],
                [0, 0, 0, 4, 4, 0]]
    for dt in types:
        data = np.array([[1, 0, 0, 0, 0, 0],
                         [0, 0, 1, 1, 0, 0],
                         [0, 0, 1, 1, 1, 0],
                         [1, 1, 0, 0, 0, 0],
                         [1, 1, 0, 0, 0, 0],
                         [0, 0, 0, 1, 1, 0]], dt)
        labeled, num = ndimage.label(data)
        assert_array_almost_equal(labeled, expected)
        assert_equal(num, 4)


def test_label11_inplace():
    # Same as test_label11, but labelling into the input array itself.
    expected = [[1, 0, 0, 0, 0, 0],
                [0, 0, 2, 2, 0, 0],
                [0, 0, 2, 2, 2, 0],
                [3, 3, 0, 0, 0, 0],
                [3, 3, 0, 0, 0, 0],
                [0, 0, 0, 4, 4, 0]]
    for dt in types:
        data = np.array([[1, 0, 0, 0, 0, 0],
                         [0, 0, 1, 1, 0, 0],
                         [0, 0, 1, 1, 1, 0],
                         [1, 1, 0, 0, 0, 0],
                         [1, 1, 0, 0, 0, 0],
                         [0, 0, 0, 1, 1, 0]], dt)
        num = ndimage.label(data, output=data)
        assert_array_almost_equal(data, expected)
        assert_equal(num, 4)


def test_label12():
    # The input here already equals its own labelling (a single feature).
    expected = [[0, 0, 0, 0, 1, 1],
                [0, 0, 0, 0, 0, 1],
                [0, 0, 1, 0, 1, 1],
                [0, 0, 1, 1, 1, 1],
                [0, 0, 0, 1, 1, 0]]
    for dt in types:
        data = np.array(expected, dt)
        labeled, num = ndimage.label(data)
        assert_array_almost_equal(labeled, expected)
        assert_equal(num, 1)


def test_label13():
    # One connected serpentine feature; input equals its labelling.
    expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
                [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
                [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
    for dt in types:
        data = np.array(expected, dt)
        labeled, num = ndimage.label(data)
        assert_array_almost_equal(labeled, expected)
        assert_equal(num, 1)
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def test_label_output_typed():
    # A preallocated output array of any supported dtype may be used.
    data = np.ones([5])
    for dt in types:
        out = np.zeros([5], dtype=dt)
        num = ndimage.label(data, output=out)
        assert_array_almost_equal(out, 1)
        assert_equal(num, 1)


def test_label_output_dtype():
    # Passing a dtype as `output` selects the dtype of the result array.
    data = np.ones([5])
    for dt in types:
        labeled, num = ndimage.label(data, output=dt)
        assert_array_almost_equal(labeled, 1)
        assert labeled.dtype == dt


def test_label_output_wrong_size():
    # An output buffer whose shape does not match the input is rejected.
    data = np.ones([5])
    for dt in types:
        out = np.zeros([10], dt)
        assert_raises((RuntimeError, ValueError),
                      ndimage.label, data, output=out)
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def test_label_structuring_elements():
    # Inputs, structuring elements and expected labellings are stored as
    # flat text files next to this test module; results are listed for
    # every (input, strel) combination in row-major order.
    here = os.path.dirname(__file__)

    def _load(name, shape):
        return np.loadtxt(os.path.join(here, "data", name)).reshape(shape)

    data = _load("label_inputs.txt", (-1, 7, 7))
    strels = _load("label_strels.txt", (-1, 3, 3))
    results = _load("label_results.txt", (-1, 7, 7))
    r = 0
    for d in data:
        for s in strels:
            assert_equal(ndimage.label(d, s)[0], results[r])
            r += 1
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def test_ticket_742():
    # find_objects on a large labelled volume used to crash on platforms
    # where intp is wider than a C int.
    def _check(img, thresh=.7, size=4):
        mask = img > thresh
        ndim = len(mask.shape)
        labeled, _count = ndimage.label(
            mask, ndimage.generate_binary_structure(ndim, ndim))
        ndimage.find_objects(labeled)

    if np.dtype(np.intp) != np.dtype('i'):
        shape = (3, 1240, 1240)
        img = np.random.rand(np.prod(shape)).reshape(shape)
        # shouldn't crash
        _check(img)
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
def test_gh_issue_3025():
    """Github issue #3025 - improper merging of labels"""
    img = np.zeros((60, 320))
    img[:, :257] = 1
    img[:, 260:] = 1
    # A thin diagonal bridge connecting the two slabs; with full
    # connectivity everything must merge into a single feature.
    for row, col in [(36, 257), (35, 258), (35, 259)]:
        img[row, col] = 1
    _, num_features = ndimage.label(img, np.ones((3, 3)))
    assert num_features == 1
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
def test_label_default_dtype():
    # The default label dtype must be one find_objects can consume.
    rand_img = np.random.rand(10, 10)
    labeled, _num = ndimage.label(rand_img > 0.5)
    assert_(labeled.dtype in (np.int32, np.int64))
    # Shouldn't raise an exception
    ndimage.find_objects(labeled)
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def test_find_objects01():
    # 0-d nonzero input: one object described by an empty slice tuple.
    out = ndimage.find_objects(np.ones([], dtype=int))
    assert_(out == [()])


def test_find_objects02():
    # 0-d zero input: no objects at all.
    out = ndimage.find_objects(np.zeros([], dtype=int))
    assert_(out == [])


def test_find_objects03():
    out = ndimage.find_objects(np.ones([1], dtype=int))
    assert_equal(out, [(slice(0, 1, None),)])


def test_find_objects04():
    out = ndimage.find_objects(np.zeros([1], dtype=int))
    assert_equal(out, [])


def test_find_objects05():
    out = ndimage.find_objects(np.ones([5], dtype=int))
    assert_equal(out, [(slice(0, 5, None),)])


def test_find_objects06():
    # One slice tuple per label value, in label order.
    out = ndimage.find_objects(np.array([1, 0, 2, 2, 0, 3]))
    expected = [(slice(0, 1, None),),
                (slice(2, 4, None),),
                (slice(5, 6, None),)]
    assert_equal(out, expected)
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
def test_find_objects07():
    # No nonzero labels: no objects.
    data = np.zeros((6, 6), dtype=int)
    assert_equal(ndimage.find_objects(data), [])


def test_find_objects08():
    # Bounding boxes of four labelled regions.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 2, 2, 0, 0],
                     [0, 0, 2, 2, 2, 0],
                     [3, 3, 0, 0, 0, 0],
                     [3, 3, 0, 0, 0, 0],
                     [0, 0, 0, 4, 4, 0]])
    expected = [(slice(0, 1, None), slice(0, 1, None)),
                (slice(1, 3, None), slice(2, 5, None)),
                (slice(3, 5, None), slice(0, 2, None)),
                (slice(5, 6, None), slice(3, 5, None))]
    assert_equal(ndimage.find_objects(data), expected)


def test_find_objects09():
    # Label 3 is absent, so its slot in the result list is None.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 2, 2, 0, 0],
                     [0, 0, 2, 2, 2, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 4, 4, 0]])
    expected = [(slice(0, 1, None), slice(0, 1, None)),
                (slice(1, 3, None), slice(2, 5, None)),
                None,
                (slice(5, 6, None), slice(3, 5, None))]
    assert_equal(ndimage.find_objects(data), expected)
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def test_value_indices01():
|
| 464 |
+
"Test dictionary keys and entries"
|
| 465 |
+
data = np.array([[1, 0, 0, 0, 0, 0],
|
| 466 |
+
[0, 0, 2, 2, 0, 0],
|
| 467 |
+
[0, 0, 2, 2, 2, 0],
|
| 468 |
+
[0, 0, 0, 0, 0, 0],
|
| 469 |
+
[0, 0, 0, 0, 0, 0],
|
| 470 |
+
[0, 0, 0, 4, 4, 0]])
|
| 471 |
+
vi = ndimage.value_indices(data, ignore_value=0)
|
| 472 |
+
true_keys = [1, 2, 4]
|
| 473 |
+
assert_equal(list(vi.keys()), true_keys)
|
| 474 |
+
|
| 475 |
+
truevi = {}
|
| 476 |
+
for k in true_keys:
|
| 477 |
+
truevi[k] = np.where(data == k)
|
| 478 |
+
|
| 479 |
+
vi = ndimage.value_indices(data, ignore_value=0)
|
| 480 |
+
assert_equal(vi, truevi)
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
def test_value_indices02():
|
| 484 |
+
"Test input checking"
|
| 485 |
+
data = np.zeros((5, 4), dtype=np.float32)
|
| 486 |
+
msg = "Parameter 'arr' must be an integer array"
|
| 487 |
+
with assert_raises(ValueError, match=msg):
|
| 488 |
+
ndimage.value_indices(data)
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
def test_value_indices03():
|
| 492 |
+
"Test different input array shapes, from 1-D to 4-D"
|
| 493 |
+
for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]:
|
| 494 |
+
a = np.array((12*[1]+12*[2]+12*[3]), dtype=np.int32).reshape(shape)
|
| 495 |
+
trueKeys = np.unique(a)
|
| 496 |
+
vi = ndimage.value_indices(a)
|
| 497 |
+
assert_equal(list(vi.keys()), list(trueKeys))
|
| 498 |
+
for k in trueKeys:
|
| 499 |
+
trueNdx = np.where(a == k)
|
| 500 |
+
assert_equal(vi[k], trueNdx)
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
def test_sum01():
|
| 504 |
+
for type in types:
|
| 505 |
+
input = np.array([], type)
|
| 506 |
+
output = ndimage.sum(input)
|
| 507 |
+
assert_equal(output, 0.0)
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def test_sum02():
|
| 511 |
+
for type in types:
|
| 512 |
+
input = np.zeros([0, 4], type)
|
| 513 |
+
output = ndimage.sum(input)
|
| 514 |
+
assert_equal(output, 0.0)
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
def test_sum03():
|
| 518 |
+
for type in types:
|
| 519 |
+
input = np.ones([], type)
|
| 520 |
+
output = ndimage.sum(input)
|
| 521 |
+
assert_almost_equal(output, 1.0)
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
def test_sum04():
|
| 525 |
+
for type in types:
|
| 526 |
+
input = np.array([1, 2], type)
|
| 527 |
+
output = ndimage.sum(input)
|
| 528 |
+
assert_almost_equal(output, 3.0)
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
def test_sum05():
|
| 532 |
+
for type in types:
|
| 533 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 534 |
+
output = ndimage.sum(input)
|
| 535 |
+
assert_almost_equal(output, 10.0)
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
def test_sum06():
|
| 539 |
+
labels = np.array([], bool)
|
| 540 |
+
for type in types:
|
| 541 |
+
input = np.array([], type)
|
| 542 |
+
output = ndimage.sum(input, labels=labels)
|
| 543 |
+
assert_equal(output, 0.0)
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def test_sum07():
|
| 547 |
+
labels = np.ones([0, 4], bool)
|
| 548 |
+
for type in types:
|
| 549 |
+
input = np.zeros([0, 4], type)
|
| 550 |
+
output = ndimage.sum(input, labels=labels)
|
| 551 |
+
assert_equal(output, 0.0)
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
def test_sum08():
|
| 555 |
+
labels = np.array([1, 0], bool)
|
| 556 |
+
for type in types:
|
| 557 |
+
input = np.array([1, 2], type)
|
| 558 |
+
output = ndimage.sum(input, labels=labels)
|
| 559 |
+
assert_equal(output, 1.0)
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
def test_sum09():
|
| 563 |
+
labels = np.array([1, 0], bool)
|
| 564 |
+
for type in types:
|
| 565 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 566 |
+
output = ndimage.sum(input, labels=labels)
|
| 567 |
+
assert_almost_equal(output, 4.0)
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
def test_sum10():
|
| 571 |
+
labels = np.array([1, 0], bool)
|
| 572 |
+
input = np.array([[1, 2], [3, 4]], bool)
|
| 573 |
+
output = ndimage.sum(input, labels=labels)
|
| 574 |
+
assert_almost_equal(output, 2.0)
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
def test_sum11():
|
| 578 |
+
labels = np.array([1, 2], np.int8)
|
| 579 |
+
for type in types:
|
| 580 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 581 |
+
output = ndimage.sum(input, labels=labels,
|
| 582 |
+
index=2)
|
| 583 |
+
assert_almost_equal(output, 6.0)
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
def test_sum12():
|
| 587 |
+
labels = np.array([[1, 2], [2, 4]], np.int8)
|
| 588 |
+
for type in types:
|
| 589 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 590 |
+
output = ndimage.sum(input, labels=labels, index=[4, 8, 2])
|
| 591 |
+
assert_array_almost_equal(output, [4.0, 0.0, 5.0])
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
def test_sum_labels():
|
| 595 |
+
labels = np.array([[1, 2], [2, 4]], np.int8)
|
| 596 |
+
for type in types:
|
| 597 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 598 |
+
output_sum = ndimage.sum(input, labels=labels, index=[4, 8, 2])
|
| 599 |
+
output_labels = ndimage.sum_labels(
|
| 600 |
+
input, labels=labels, index=[4, 8, 2])
|
| 601 |
+
|
| 602 |
+
assert (output_sum == output_labels).all()
|
| 603 |
+
assert_array_almost_equal(output_labels, [4.0, 0.0, 5.0])
|
| 604 |
+
|
| 605 |
+
|
| 606 |
+
def test_mean01():
|
| 607 |
+
labels = np.array([1, 0], bool)
|
| 608 |
+
for type in types:
|
| 609 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 610 |
+
output = ndimage.mean(input, labels=labels)
|
| 611 |
+
assert_almost_equal(output, 2.0)
|
| 612 |
+
|
| 613 |
+
|
| 614 |
+
def test_mean02():
|
| 615 |
+
labels = np.array([1, 0], bool)
|
| 616 |
+
input = np.array([[1, 2], [3, 4]], bool)
|
| 617 |
+
output = ndimage.mean(input, labels=labels)
|
| 618 |
+
assert_almost_equal(output, 1.0)
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
def test_mean03():
|
| 622 |
+
labels = np.array([1, 2])
|
| 623 |
+
for type in types:
|
| 624 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 625 |
+
output = ndimage.mean(input, labels=labels,
|
| 626 |
+
index=2)
|
| 627 |
+
assert_almost_equal(output, 3.0)
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
def test_mean04():
|
| 631 |
+
labels = np.array([[1, 2], [2, 4]], np.int8)
|
| 632 |
+
with np.errstate(all='ignore'):
|
| 633 |
+
for type in types:
|
| 634 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 635 |
+
output = ndimage.mean(input, labels=labels,
|
| 636 |
+
index=[4, 8, 2])
|
| 637 |
+
assert_array_almost_equal(output[[0, 2]], [4.0, 2.5])
|
| 638 |
+
assert_(np.isnan(output[1]))
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
def test_minimum01():
|
| 642 |
+
labels = np.array([1, 0], bool)
|
| 643 |
+
for type in types:
|
| 644 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 645 |
+
output = ndimage.minimum(input, labels=labels)
|
| 646 |
+
assert_almost_equal(output, 1.0)
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
def test_minimum02():
|
| 650 |
+
labels = np.array([1, 0], bool)
|
| 651 |
+
input = np.array([[2, 2], [2, 4]], bool)
|
| 652 |
+
output = ndimage.minimum(input, labels=labels)
|
| 653 |
+
assert_almost_equal(output, 1.0)
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
def test_minimum03():
|
| 657 |
+
labels = np.array([1, 2])
|
| 658 |
+
for type in types:
|
| 659 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 660 |
+
output = ndimage.minimum(input, labels=labels,
|
| 661 |
+
index=2)
|
| 662 |
+
assert_almost_equal(output, 2.0)
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
def test_minimum04():
|
| 666 |
+
labels = np.array([[1, 2], [2, 3]])
|
| 667 |
+
for type in types:
|
| 668 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 669 |
+
output = ndimage.minimum(input, labels=labels,
|
| 670 |
+
index=[2, 3, 8])
|
| 671 |
+
assert_array_almost_equal(output, [2.0, 4.0, 0.0])
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
def test_maximum01():
|
| 675 |
+
labels = np.array([1, 0], bool)
|
| 676 |
+
for type in types:
|
| 677 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 678 |
+
output = ndimage.maximum(input, labels=labels)
|
| 679 |
+
assert_almost_equal(output, 3.0)
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
def test_maximum02():
|
| 683 |
+
labels = np.array([1, 0], bool)
|
| 684 |
+
input = np.array([[2, 2], [2, 4]], bool)
|
| 685 |
+
output = ndimage.maximum(input, labels=labels)
|
| 686 |
+
assert_almost_equal(output, 1.0)
|
| 687 |
+
|
| 688 |
+
|
| 689 |
+
def test_maximum03():
|
| 690 |
+
labels = np.array([1, 2])
|
| 691 |
+
for type in types:
|
| 692 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 693 |
+
output = ndimage.maximum(input, labels=labels,
|
| 694 |
+
index=2)
|
| 695 |
+
assert_almost_equal(output, 4.0)
|
| 696 |
+
|
| 697 |
+
|
| 698 |
+
def test_maximum04():
|
| 699 |
+
labels = np.array([[1, 2], [2, 3]])
|
| 700 |
+
for type in types:
|
| 701 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 702 |
+
output = ndimage.maximum(input, labels=labels,
|
| 703 |
+
index=[2, 3, 8])
|
| 704 |
+
assert_array_almost_equal(output, [3.0, 4.0, 0.0])
|
| 705 |
+
|
| 706 |
+
|
| 707 |
+
def test_maximum05():
|
| 708 |
+
# Regression test for ticket #501 (Trac)
|
| 709 |
+
x = np.array([-3, -2, -1])
|
| 710 |
+
assert_equal(ndimage.maximum(x), -1)
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
def test_median01():
    # Per-label medians via an explicit index list.
    a = np.array([[1, 2, 0, 1],
                  [5, 3, 0, 4],
                  [0, 0, 0, 7],
                  [9, 3, 0, 0]])
    labels = np.array([[1, 1, 0, 2],
                       [1, 1, 0, 2],
                       [0, 0, 0, 2],
                       [3, 3, 0, 0]])
    res = ndimage.median(a, labels=labels, index=[1, 2, 3])
    assert_array_almost_equal(res, [2.5, 4.0, 6.0])


def test_median02():
    # No labels: median over every element, zeros included.
    a = np.array([[1, 2, 0, 1],
                  [5, 3, 0, 4],
                  [0, 0, 0, 7],
                  [9, 3, 0, 0]])
    assert_almost_equal(ndimage.median(a), 1.0)


def test_median03():
    # Labels without an index: median over all labelled elements.
    a = np.array([[1, 2, 0, 1],
                  [5, 3, 0, 4],
                  [0, 0, 0, 7],
                  [9, 3, 0, 0]])
    labels = np.array([[1, 1, 0, 2],
                       [1, 1, 0, 2],
                       [0, 0, 0, 2],
                       [3, 3, 0, 0]])
    assert_almost_equal(ndimage.median(a, labels=labels), 3.0)


def test_median_gh12836_bool():
    # test boolean addition fix on example from gh-12836
    a = np.asarray([1, 1], dtype=bool)
    res = ndimage.median(a, labels=np.ones((2,)), index=[1])
    assert_array_almost_equal(res, [1.0])


def test_median_no_int_overflow():
    # test integer overflow fix on example from gh-12836
    a = np.asarray([65, 70], dtype=np.int8)
    res = ndimage.median(a, labels=np.ones((2,)), index=[1])
    assert_array_almost_equal(res, [67.5])
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
def test_variance01():
|
| 763 |
+
with np.errstate(all='ignore'):
|
| 764 |
+
for type in types:
|
| 765 |
+
input = np.array([], type)
|
| 766 |
+
with suppress_warnings() as sup:
|
| 767 |
+
sup.filter(RuntimeWarning, "Mean of empty slice")
|
| 768 |
+
output = ndimage.variance(input)
|
| 769 |
+
assert_(np.isnan(output))
|
| 770 |
+
|
| 771 |
+
|
| 772 |
+
def test_variance02():
|
| 773 |
+
for type in types:
|
| 774 |
+
input = np.array([1], type)
|
| 775 |
+
output = ndimage.variance(input)
|
| 776 |
+
assert_almost_equal(output, 0.0)
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
def test_variance03():
|
| 780 |
+
for type in types:
|
| 781 |
+
input = np.array([1, 3], type)
|
| 782 |
+
output = ndimage.variance(input)
|
| 783 |
+
assert_almost_equal(output, 1.0)
|
| 784 |
+
|
| 785 |
+
|
| 786 |
+
def test_variance04():
|
| 787 |
+
input = np.array([1, 0], bool)
|
| 788 |
+
output = ndimage.variance(input)
|
| 789 |
+
assert_almost_equal(output, 0.25)
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
def test_variance05():
|
| 793 |
+
labels = [2, 2, 3]
|
| 794 |
+
for type in types:
|
| 795 |
+
input = np.array([1, 3, 8], type)
|
| 796 |
+
output = ndimage.variance(input, labels, 2)
|
| 797 |
+
assert_almost_equal(output, 1.0)
|
| 798 |
+
|
| 799 |
+
|
| 800 |
+
def test_variance06():
|
| 801 |
+
labels = [2, 2, 3, 3, 4]
|
| 802 |
+
with np.errstate(all='ignore'):
|
| 803 |
+
for type in types:
|
| 804 |
+
input = np.array([1, 3, 8, 10, 8], type)
|
| 805 |
+
output = ndimage.variance(input, labels, [2, 3, 4])
|
| 806 |
+
assert_array_almost_equal(output, [1.0, 1.0, 0.0])
|
| 807 |
+
|
| 808 |
+
|
| 809 |
+
def test_standard_deviation01():
|
| 810 |
+
with np.errstate(all='ignore'):
|
| 811 |
+
for type in types:
|
| 812 |
+
input = np.array([], type)
|
| 813 |
+
with suppress_warnings() as sup:
|
| 814 |
+
sup.filter(RuntimeWarning, "Mean of empty slice")
|
| 815 |
+
output = ndimage.standard_deviation(input)
|
| 816 |
+
assert_(np.isnan(output))
|
| 817 |
+
|
| 818 |
+
|
| 819 |
+
def test_standard_deviation02():
|
| 820 |
+
for type in types:
|
| 821 |
+
input = np.array([1], type)
|
| 822 |
+
output = ndimage.standard_deviation(input)
|
| 823 |
+
assert_almost_equal(output, 0.0)
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
def test_standard_deviation03():
|
| 827 |
+
for type in types:
|
| 828 |
+
input = np.array([1, 3], type)
|
| 829 |
+
output = ndimage.standard_deviation(input)
|
| 830 |
+
assert_almost_equal(output, np.sqrt(1.0))
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
def test_standard_deviation04():
|
| 834 |
+
input = np.array([1, 0], bool)
|
| 835 |
+
output = ndimage.standard_deviation(input)
|
| 836 |
+
assert_almost_equal(output, 0.5)
|
| 837 |
+
|
| 838 |
+
|
| 839 |
+
def test_standard_deviation05():
|
| 840 |
+
labels = [2, 2, 3]
|
| 841 |
+
for type in types:
|
| 842 |
+
input = np.array([1, 3, 8], type)
|
| 843 |
+
output = ndimage.standard_deviation(input, labels, 2)
|
| 844 |
+
assert_almost_equal(output, 1.0)
|
| 845 |
+
|
| 846 |
+
|
| 847 |
+
def test_standard_deviation06():
|
| 848 |
+
labels = [2, 2, 3, 3, 4]
|
| 849 |
+
with np.errstate(all='ignore'):
|
| 850 |
+
for type in types:
|
| 851 |
+
input = np.array([1, 3, 8, 10, 8], type)
|
| 852 |
+
output = ndimage.standard_deviation(input, labels, [2, 3, 4])
|
| 853 |
+
assert_array_almost_equal(output, [1.0, 1.0, 0.0])
|
| 854 |
+
|
| 855 |
+
|
| 856 |
+
def test_standard_deviation07():
|
| 857 |
+
labels = [1]
|
| 858 |
+
with np.errstate(all='ignore'):
|
| 859 |
+
for type in types:
|
| 860 |
+
input = np.array([-0.00619519], type)
|
| 861 |
+
output = ndimage.standard_deviation(input, labels, [1])
|
| 862 |
+
assert_array_almost_equal(output, [0])
|
| 863 |
+
|
| 864 |
+
|
| 865 |
+
def test_minimum_position01():
|
| 866 |
+
labels = np.array([1, 0], bool)
|
| 867 |
+
for type in types:
|
| 868 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 869 |
+
output = ndimage.minimum_position(input, labels=labels)
|
| 870 |
+
assert_equal(output, (0, 0))
|
| 871 |
+
|
| 872 |
+
|
| 873 |
+
def test_minimum_position02():
|
| 874 |
+
for type in types:
|
| 875 |
+
input = np.array([[5, 4, 2, 5],
|
| 876 |
+
[3, 7, 0, 2],
|
| 877 |
+
[1, 5, 1, 1]], type)
|
| 878 |
+
output = ndimage.minimum_position(input)
|
| 879 |
+
assert_equal(output, (1, 2))
|
| 880 |
+
|
| 881 |
+
|
| 882 |
+
def test_minimum_position03():
|
| 883 |
+
input = np.array([[5, 4, 2, 5],
|
| 884 |
+
[3, 7, 0, 2],
|
| 885 |
+
[1, 5, 1, 1]], bool)
|
| 886 |
+
output = ndimage.minimum_position(input)
|
| 887 |
+
assert_equal(output, (1, 2))
|
| 888 |
+
|
| 889 |
+
|
| 890 |
+
def test_minimum_position04():
|
| 891 |
+
input = np.array([[5, 4, 2, 5],
|
| 892 |
+
[3, 7, 1, 2],
|
| 893 |
+
[1, 5, 1, 1]], bool)
|
| 894 |
+
output = ndimage.minimum_position(input)
|
| 895 |
+
assert_equal(output, (0, 0))
|
| 896 |
+
|
| 897 |
+
|
| 898 |
+
def test_minimum_position05():
|
| 899 |
+
labels = [1, 2, 0, 4]
|
| 900 |
+
for type in types:
|
| 901 |
+
input = np.array([[5, 4, 2, 5],
|
| 902 |
+
[3, 7, 0, 2],
|
| 903 |
+
[1, 5, 2, 3]], type)
|
| 904 |
+
output = ndimage.minimum_position(input, labels)
|
| 905 |
+
assert_equal(output, (2, 0))
|
| 906 |
+
|
| 907 |
+
|
| 908 |
+
def test_minimum_position06():
|
| 909 |
+
labels = [1, 2, 3, 4]
|
| 910 |
+
for type in types:
|
| 911 |
+
input = np.array([[5, 4, 2, 5],
|
| 912 |
+
[3, 7, 0, 2],
|
| 913 |
+
[1, 5, 1, 1]], type)
|
| 914 |
+
output = ndimage.minimum_position(input, labels, 2)
|
| 915 |
+
assert_equal(output, (0, 1))
|
| 916 |
+
|
| 917 |
+
|
| 918 |
+
def test_minimum_position07():
|
| 919 |
+
labels = [1, 2, 3, 4]
|
| 920 |
+
for type in types:
|
| 921 |
+
input = np.array([[5, 4, 2, 5],
|
| 922 |
+
[3, 7, 0, 2],
|
| 923 |
+
[1, 5, 1, 1]], type)
|
| 924 |
+
output = ndimage.minimum_position(input, labels,
|
| 925 |
+
[2, 3])
|
| 926 |
+
assert_equal(output[0], (0, 1))
|
| 927 |
+
assert_equal(output[1], (1, 2))
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
def test_maximum_position01():
|
| 931 |
+
labels = np.array([1, 0], bool)
|
| 932 |
+
for type in types:
|
| 933 |
+
input = np.array([[1, 2], [3, 4]], type)
|
| 934 |
+
output = ndimage.maximum_position(input,
|
| 935 |
+
labels=labels)
|
| 936 |
+
assert_equal(output, (1, 0))
|
| 937 |
+
|
| 938 |
+
|
| 939 |
+
def test_maximum_position02():
|
| 940 |
+
for type in types:
|
| 941 |
+
input = np.array([[5, 4, 2, 5],
|
| 942 |
+
[3, 7, 8, 2],
|
| 943 |
+
[1, 5, 1, 1]], type)
|
| 944 |
+
output = ndimage.maximum_position(input)
|
| 945 |
+
assert_equal(output, (1, 2))
|
| 946 |
+
|
| 947 |
+
|
| 948 |
+
def test_maximum_position03():
|
| 949 |
+
input = np.array([[5, 4, 2, 5],
|
| 950 |
+
[3, 7, 8, 2],
|
| 951 |
+
[1, 5, 1, 1]], bool)
|
| 952 |
+
output = ndimage.maximum_position(input)
|
| 953 |
+
assert_equal(output, (0, 0))
|
| 954 |
+
|
| 955 |
+
|
| 956 |
+
def test_maximum_position04():
|
| 957 |
+
labels = [1, 2, 0, 4]
|
| 958 |
+
for type in types:
|
| 959 |
+
input = np.array([[5, 4, 2, 5],
|
| 960 |
+
[3, 7, 8, 2],
|
| 961 |
+
[1, 5, 1, 1]], type)
|
| 962 |
+
output = ndimage.maximum_position(input, labels)
|
| 963 |
+
assert_equal(output, (1, 1))
|
| 964 |
+
|
| 965 |
+
|
| 966 |
+
def test_maximum_position05():
|
| 967 |
+
labels = [1, 2, 0, 4]
|
| 968 |
+
for type in types:
|
| 969 |
+
input = np.array([[5, 4, 2, 5],
|
| 970 |
+
[3, 7, 8, 2],
|
| 971 |
+
[1, 5, 1, 1]], type)
|
| 972 |
+
output = ndimage.maximum_position(input, labels, 1)
|
| 973 |
+
assert_equal(output, (0, 0))
|
| 974 |
+
|
| 975 |
+
|
| 976 |
+
def test_maximum_position06():
|
| 977 |
+
labels = [1, 2, 0, 4]
|
| 978 |
+
for type in types:
|
| 979 |
+
input = np.array([[5, 4, 2, 5],
|
| 980 |
+
[3, 7, 8, 2],
|
| 981 |
+
[1, 5, 1, 1]], type)
|
| 982 |
+
output = ndimage.maximum_position(input, labels,
|
| 983 |
+
[1, 2])
|
| 984 |
+
assert_equal(output[0], (0, 0))
|
| 985 |
+
assert_equal(output[1], (1, 1))
|
| 986 |
+
|
| 987 |
+
|
| 988 |
+
def test_maximum_position07():
|
| 989 |
+
# Test float labels
|
| 990 |
+
labels = np.array([1.0, 2.5, 0.0, 4.5])
|
| 991 |
+
for type in types:
|
| 992 |
+
input = np.array([[5, 4, 2, 5],
|
| 993 |
+
[3, 7, 8, 2],
|
| 994 |
+
[1, 5, 1, 1]], type)
|
| 995 |
+
output = ndimage.maximum_position(input, labels,
|
| 996 |
+
[1.0, 4.5])
|
| 997 |
+
assert_equal(output[0], (0, 0))
|
| 998 |
+
assert_equal(output[1], (0, 3))
|
| 999 |
+
|
| 1000 |
+
|
| 1001 |
+
def test_extrema01():
    # extrema() must agree with the four individual functions.
    labels = np.array([1, 0], bool)
    for dt in types:
        arr = np.array([[1, 2], [3, 4]], dt)
        combined = ndimage.extrema(arr, labels=labels)
        mn = ndimage.minimum(arr, labels=labels)
        mx = ndimage.maximum(arr, labels=labels)
        mn_pos = ndimage.minimum_position(arr, labels=labels)
        mx_pos = ndimage.maximum_position(arr, labels=labels)
        assert_equal(combined, (mn, mx, mn_pos, mx_pos))


def test_extrema02():
    labels = np.array([1, 2])
    for dt in types:
        arr = np.array([[1, 2], [3, 4]], dt)
        combined = ndimage.extrema(arr, labels=labels, index=2)
        mn = ndimage.minimum(arr, labels=labels, index=2)
        mx = ndimage.maximum(arr, labels=labels, index=2)
        mn_pos = ndimage.minimum_position(arr, labels=labels, index=2)
        mx_pos = ndimage.maximum_position(arr, labels=labels, index=2)
        assert_equal(combined, (mn, mx, mn_pos, mx_pos))


def test_extrema03():
    labels = np.array([[1, 2], [2, 3]])
    for dt in types:
        arr = np.array([[1, 2], [3, 4]], dt)
        combined = ndimage.extrema(arr, labels=labels, index=[2, 3, 8])
        mn = ndimage.minimum(arr, labels=labels, index=[2, 3, 8])
        mx = ndimage.maximum(arr, labels=labels, index=[2, 3, 8])
        mn_pos = ndimage.minimum_position(arr, labels=labels,
                                          index=[2, 3, 8])
        mx_pos = ndimage.maximum_position(arr, labels=labels,
                                          index=[2, 3, 8])
        assert_array_almost_equal(combined[0], mn)
        assert_array_almost_equal(combined[1], mx)
        assert_array_almost_equal(combined[2], mn_pos)
        assert_array_almost_equal(combined[3], mx_pos)


def test_extrema04():
    labels = [1, 2, 0, 4]
    for dt in types:
        arr = np.array([[5, 4, 2, 5],
                        [3, 7, 8, 2],
                        [1, 5, 1, 1]], dt)
        combined = ndimage.extrema(arr, labels, [1, 2])
        mn = ndimage.minimum(arr, labels, [1, 2])
        mx = ndimage.maximum(arr, labels, [1, 2])
        mn_pos = ndimage.minimum_position(arr, labels, [1, 2])
        mx_pos = ndimage.maximum_position(arr, labels, [1, 2])
        assert_array_almost_equal(combined[0], mn)
        assert_array_almost_equal(combined[1], mx)
        assert_array_almost_equal(combined[2], mn_pos)
        assert_array_almost_equal(combined[3], mx_pos)
|
| 1069 |
+
|
| 1070 |
+
|
| 1071 |
+
def test_center_of_mass01():
|
| 1072 |
+
expected = [0.0, 0.0]
|
| 1073 |
+
for type in types:
|
| 1074 |
+
input = np.array([[1, 0], [0, 0]], type)
|
| 1075 |
+
output = ndimage.center_of_mass(input)
|
| 1076 |
+
assert_array_almost_equal(output, expected)
|
| 1077 |
+
|
| 1078 |
+
|
| 1079 |
+
def test_center_of_mass02():
|
| 1080 |
+
expected = [1, 0]
|
| 1081 |
+
for type in types:
|
| 1082 |
+
input = np.array([[0, 0], [1, 0]], type)
|
| 1083 |
+
output = ndimage.center_of_mass(input)
|
| 1084 |
+
assert_array_almost_equal(output, expected)
|
| 1085 |
+
|
| 1086 |
+
|
| 1087 |
+
def test_center_of_mass03():
|
| 1088 |
+
expected = [0, 1]
|
| 1089 |
+
for type in types:
|
| 1090 |
+
input = np.array([[0, 1], [0, 0]], type)
|
| 1091 |
+
output = ndimage.center_of_mass(input)
|
| 1092 |
+
assert_array_almost_equal(output, expected)
|
| 1093 |
+
|
| 1094 |
+
|
| 1095 |
+
def test_center_of_mass04():
|
| 1096 |
+
expected = [1, 1]
|
| 1097 |
+
for type in types:
|
| 1098 |
+
input = np.array([[0, 0], [0, 1]], type)
|
| 1099 |
+
output = ndimage.center_of_mass(input)
|
| 1100 |
+
assert_array_almost_equal(output, expected)
|
| 1101 |
+
|
| 1102 |
+
|
| 1103 |
+
def test_center_of_mass05():
|
| 1104 |
+
expected = [0.5, 0.5]
|
| 1105 |
+
for type in types:
|
| 1106 |
+
input = np.array([[1, 1], [1, 1]], type)
|
| 1107 |
+
output = ndimage.center_of_mass(input)
|
| 1108 |
+
assert_array_almost_equal(output, expected)
|
| 1109 |
+
|
| 1110 |
+
|
| 1111 |
+
def test_center_of_mass06():
|
| 1112 |
+
expected = [0.5, 0.5]
|
| 1113 |
+
input = np.array([[1, 2], [3, 1]], bool)
|
| 1114 |
+
output = ndimage.center_of_mass(input)
|
| 1115 |
+
assert_array_almost_equal(output, expected)
|
| 1116 |
+
|
| 1117 |
+
|
| 1118 |
+
def test_center_of_mass07():
|
| 1119 |
+
labels = [1, 0]
|
| 1120 |
+
expected = [0.5, 0.0]
|
| 1121 |
+
input = np.array([[1, 2], [3, 1]], bool)
|
| 1122 |
+
output = ndimage.center_of_mass(input, labels)
|
| 1123 |
+
assert_array_almost_equal(output, expected)
|
| 1124 |
+
|
| 1125 |
+
|
| 1126 |
+
def test_center_of_mass08():
|
| 1127 |
+
labels = [1, 2]
|
| 1128 |
+
expected = [0.5, 1.0]
|
| 1129 |
+
input = np.array([[5, 2], [3, 1]], bool)
|
| 1130 |
+
output = ndimage.center_of_mass(input, labels, 2)
|
| 1131 |
+
assert_array_almost_equal(output, expected)
|
| 1132 |
+
|
| 1133 |
+
|
| 1134 |
+
def test_center_of_mass09():
|
| 1135 |
+
labels = [1, 2]
|
| 1136 |
+
expected = [(0.5, 0.0), (0.5, 1.0)]
|
| 1137 |
+
input = np.array([[1, 2], [1, 1]], bool)
|
| 1138 |
+
output = ndimage.center_of_mass(input, labels, [1, 2])
|
| 1139 |
+
assert_array_almost_equal(output, expected)
|
| 1140 |
+
|
| 1141 |
+
|
| 1142 |
+
def test_histogram01():
|
| 1143 |
+
expected = np.ones(10)
|
| 1144 |
+
input = np.arange(10)
|
| 1145 |
+
output = ndimage.histogram(input, 0, 10, 10)
|
| 1146 |
+
assert_array_almost_equal(output, expected)
|
| 1147 |
+
|
| 1148 |
+
|
| 1149 |
+
def test_histogram02():
|
| 1150 |
+
labels = [1, 1, 1, 1, 2, 2, 2, 2]
|
| 1151 |
+
expected = [0, 2, 0, 1, 1]
|
| 1152 |
+
input = np.array([1, 1, 3, 4, 3, 3, 3, 3])
|
| 1153 |
+
output = ndimage.histogram(input, 0, 4, 5, labels, 1)
|
| 1154 |
+
assert_array_almost_equal(output, expected)
|
| 1155 |
+
|
| 1156 |
+
|
| 1157 |
+
def test_histogram03():
|
| 1158 |
+
labels = [1, 0, 1, 1, 2, 2, 2, 2]
|
| 1159 |
+
expected1 = [0, 1, 0, 1, 1]
|
| 1160 |
+
expected2 = [0, 0, 0, 3, 0]
|
| 1161 |
+
input = np.array([1, 1, 3, 4, 3, 5, 3, 3])
|
| 1162 |
+
output = ndimage.histogram(input, 0, 4, 5, labels, (1, 2))
|
| 1163 |
+
|
| 1164 |
+
assert_array_almost_equal(output[0], expected1)
|
| 1165 |
+
assert_array_almost_equal(output[1], expected2)
|
| 1166 |
+
|
| 1167 |
+
|
| 1168 |
+
def test_stat_funcs_2d():
|
| 1169 |
+
a = np.array([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]])
|
| 1170 |
+
lbl = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]])
|
| 1171 |
+
|
| 1172 |
+
mean = ndimage.mean(a, labels=lbl, index=[1, 2])
|
| 1173 |
+
assert_array_equal(mean, [7.0, 4.0])
|
| 1174 |
+
|
| 1175 |
+
var = ndimage.variance(a, labels=lbl, index=[1, 2])
|
| 1176 |
+
assert_array_equal(var, [2.5, 1.0])
|
| 1177 |
+
|
| 1178 |
+
std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
|
| 1179 |
+
assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))
|
| 1180 |
+
|
| 1181 |
+
med = ndimage.median(a, labels=lbl, index=[1, 2])
|
| 1182 |
+
assert_array_equal(med, [7.0, 4.0])
|
| 1183 |
+
|
| 1184 |
+
min = ndimage.minimum(a, labels=lbl, index=[1, 2])
|
| 1185 |
+
assert_array_equal(min, [5, 3])
|
| 1186 |
+
|
| 1187 |
+
max = ndimage.maximum(a, labels=lbl, index=[1, 2])
|
| 1188 |
+
assert_array_equal(max, [9, 5])
|
| 1189 |
+
|
| 1190 |
+
|
| 1191 |
+
class TestWatershedIft:
|
| 1192 |
+
|
| 1193 |
+
def test_watershed_ift01(self):
|
| 1194 |
+
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
| 1195 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1196 |
+
[0, 1, 0, 0, 0, 1, 0],
|
| 1197 |
+
[0, 1, 0, 0, 0, 1, 0],
|
| 1198 |
+
[0, 1, 0, 0, 0, 1, 0],
|
| 1199 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1200 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1201 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
| 1202 |
+
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
|
| 1203 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1204 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1205 |
+
[0, 0, 0, 1, 0, 0, 0],
|
| 1206 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1207 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1208 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1209 |
+
[0, 0, 0, 0, 0, 0, 0]], np.int8)
|
| 1210 |
+
out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1],
|
| 1211 |
+
[1, 1, 1],
|
| 1212 |
+
[1, 1, 1]])
|
| 1213 |
+
expected = [[-1, -1, -1, -1, -1, -1, -1],
|
| 1214 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1215 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1216 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1217 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1218 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1219 |
+
[-1, -1, -1, -1, -1, -1, -1],
|
| 1220 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
| 1221 |
+
assert_array_almost_equal(out, expected)
|
| 1222 |
+
|
| 1223 |
+
def test_watershed_ift02(self):
|
| 1224 |
+
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
| 1225 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1226 |
+
[0, 1, 0, 0, 0, 1, 0],
|
| 1227 |
+
[0, 1, 0, 0, 0, 1, 0],
|
| 1228 |
+
[0, 1, 0, 0, 0, 1, 0],
|
| 1229 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1230 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1231 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
| 1232 |
+
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
|
| 1233 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1234 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1235 |
+
[0, 0, 0, 1, 0, 0, 0],
|
| 1236 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1237 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1238 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1239 |
+
[0, 0, 0, 0, 0, 0, 0]], np.int8)
|
| 1240 |
+
out = ndimage.watershed_ift(data, markers)
|
| 1241 |
+
expected = [[-1, -1, -1, -1, -1, -1, -1],
|
| 1242 |
+
[-1, -1, 1, 1, 1, -1, -1],
|
| 1243 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1244 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1245 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1246 |
+
[-1, -1, 1, 1, 1, -1, -1],
|
| 1247 |
+
[-1, -1, -1, -1, -1, -1, -1],
|
| 1248 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
| 1249 |
+
assert_array_almost_equal(out, expected)
|
| 1250 |
+
|
| 1251 |
+
def test_watershed_ift03(self):
|
| 1252 |
+
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
| 1253 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1254 |
+
[0, 1, 0, 1, 0, 1, 0],
|
| 1255 |
+
[0, 1, 0, 1, 0, 1, 0],
|
| 1256 |
+
[0, 1, 0, 1, 0, 1, 0],
|
| 1257 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1258 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
| 1259 |
+
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
| 1260 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1261 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1262 |
+
[0, 0, 2, 0, 3, 0, 0],
|
| 1263 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1264 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1265 |
+
[0, 0, 0, 0, 0, 0, -1]], np.int8)
|
| 1266 |
+
out = ndimage.watershed_ift(data, markers)
|
| 1267 |
+
expected = [[-1, -1, -1, -1, -1, -1, -1],
|
| 1268 |
+
[-1, -1, 2, -1, 3, -1, -1],
|
| 1269 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
| 1270 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
| 1271 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
| 1272 |
+
[-1, -1, 2, -1, 3, -1, -1],
|
| 1273 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
| 1274 |
+
assert_array_almost_equal(out, expected)
|
| 1275 |
+
|
| 1276 |
+
def test_watershed_ift04(self):
|
| 1277 |
+
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
| 1278 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1279 |
+
[0, 1, 0, 1, 0, 1, 0],
|
| 1280 |
+
[0, 1, 0, 1, 0, 1, 0],
|
| 1281 |
+
[0, 1, 0, 1, 0, 1, 0],
|
| 1282 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1283 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
| 1284 |
+
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
| 1285 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1286 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1287 |
+
[0, 0, 2, 0, 3, 0, 0],
|
| 1288 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1289 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1290 |
+
[0, 0, 0, 0, 0, 0, -1]],
|
| 1291 |
+
np.int8)
|
| 1292 |
+
out = ndimage.watershed_ift(data, markers,
|
| 1293 |
+
structure=[[1, 1, 1],
|
| 1294 |
+
[1, 1, 1],
|
| 1295 |
+
[1, 1, 1]])
|
| 1296 |
+
expected = [[-1, -1, -1, -1, -1, -1, -1],
|
| 1297 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
| 1298 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
| 1299 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
| 1300 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
| 1301 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
| 1302 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
| 1303 |
+
assert_array_almost_equal(out, expected)
|
| 1304 |
+
|
| 1305 |
+
def test_watershed_ift05(self):
|
| 1306 |
+
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
| 1307 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1308 |
+
[0, 1, 0, 1, 0, 1, 0],
|
| 1309 |
+
[0, 1, 0, 1, 0, 1, 0],
|
| 1310 |
+
[0, 1, 0, 1, 0, 1, 0],
|
| 1311 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1312 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
| 1313 |
+
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
| 1314 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1315 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1316 |
+
[0, 0, 3, 0, 2, 0, 0],
|
| 1317 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1318 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1319 |
+
[0, 0, 0, 0, 0, 0, -1]],
|
| 1320 |
+
np.int8)
|
| 1321 |
+
out = ndimage.watershed_ift(data, markers,
|
| 1322 |
+
structure=[[1, 1, 1],
|
| 1323 |
+
[1, 1, 1],
|
| 1324 |
+
[1, 1, 1]])
|
| 1325 |
+
expected = [[-1, -1, -1, -1, -1, -1, -1],
|
| 1326 |
+
[-1, 3, 3, 2, 2, 2, -1],
|
| 1327 |
+
[-1, 3, 3, 2, 2, 2, -1],
|
| 1328 |
+
[-1, 3, 3, 2, 2, 2, -1],
|
| 1329 |
+
[-1, 3, 3, 2, 2, 2, -1],
|
| 1330 |
+
[-1, 3, 3, 2, 2, 2, -1],
|
| 1331 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
| 1332 |
+
assert_array_almost_equal(out, expected)
|
| 1333 |
+
|
| 1334 |
+
def test_watershed_ift06(self):
|
| 1335 |
+
data = np.array([[0, 1, 0, 0, 0, 1, 0],
|
| 1336 |
+
[0, 1, 0, 0, 0, 1, 0],
|
| 1337 |
+
[0, 1, 0, 0, 0, 1, 0],
|
| 1338 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1339 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1340 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
| 1341 |
+
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
|
| 1342 |
+
[0, 0, 0, 1, 0, 0, 0],
|
| 1343 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1344 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1345 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1346 |
+
[0, 0, 0, 0, 0, 0, 0]], np.int8)
|
| 1347 |
+
out = ndimage.watershed_ift(data, markers,
|
| 1348 |
+
structure=[[1, 1, 1],
|
| 1349 |
+
[1, 1, 1],
|
| 1350 |
+
[1, 1, 1]])
|
| 1351 |
+
expected = [[-1, 1, 1, 1, 1, 1, -1],
|
| 1352 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1353 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1354 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1355 |
+
[-1, -1, -1, -1, -1, -1, -1],
|
| 1356 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
| 1357 |
+
assert_array_almost_equal(out, expected)
|
| 1358 |
+
|
| 1359 |
+
def test_watershed_ift07(self):
|
| 1360 |
+
shape = (7, 6)
|
| 1361 |
+
data = np.zeros(shape, dtype=np.uint8)
|
| 1362 |
+
data = data.transpose()
|
| 1363 |
+
data[...] = np.array([[0, 1, 0, 0, 0, 1, 0],
|
| 1364 |
+
[0, 1, 0, 0, 0, 1, 0],
|
| 1365 |
+
[0, 1, 0, 0, 0, 1, 0],
|
| 1366 |
+
[0, 1, 1, 1, 1, 1, 0],
|
| 1367 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1368 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
| 1369 |
+
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
|
| 1370 |
+
[0, 0, 0, 1, 0, 0, 0],
|
| 1371 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1372 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1373 |
+
[0, 0, 0, 0, 0, 0, 0],
|
| 1374 |
+
[0, 0, 0, 0, 0, 0, 0]], np.int8)
|
| 1375 |
+
out = np.zeros(shape, dtype=np.int16)
|
| 1376 |
+
out = out.transpose()
|
| 1377 |
+
ndimage.watershed_ift(data, markers,
|
| 1378 |
+
structure=[[1, 1, 1],
|
| 1379 |
+
[1, 1, 1],
|
| 1380 |
+
[1, 1, 1]],
|
| 1381 |
+
output=out)
|
| 1382 |
+
expected = [[-1, 1, 1, 1, 1, 1, -1],
|
| 1383 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1384 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1385 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
| 1386 |
+
[-1, -1, -1, -1, -1, -1, -1],
|
| 1387 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
| 1388 |
+
assert_array_almost_equal(out, expected)
|
| 1389 |
+
|
| 1390 |
+
def test_watershed_ift08(self):
|
| 1391 |
+
# Test cost larger than uint8. See gh-10069.
|
| 1392 |
+
data = np.array([[256, 0],
|
| 1393 |
+
[0, 0]], np.uint16)
|
| 1394 |
+
markers = np.array([[1, 0],
|
| 1395 |
+
[0, 0]], np.int8)
|
| 1396 |
+
out = ndimage.watershed_ift(data, markers)
|
| 1397 |
+
expected = [[1, 1],
|
| 1398 |
+
[1, 1]]
|
| 1399 |
+
assert_array_almost_equal(out, expected)
|
| 1400 |
+
|
| 1401 |
+
def test_watershed_ift09(self):
|
| 1402 |
+
# Test large cost. See gh-19575
|
| 1403 |
+
data = np.array([[np.iinfo(np.uint16).max, 0],
|
| 1404 |
+
[0, 0]], np.uint16)
|
| 1405 |
+
markers = np.array([[1, 0],
|
| 1406 |
+
[0, 0]], np.int8)
|
| 1407 |
+
out = ndimage.watershed_ift(data, markers)
|
| 1408 |
+
expected = [[1, 1],
|
| 1409 |
+
[1, 1]]
|
| 1410 |
+
assert_allclose(out, expected)
|
| 1411 |
+
|
| 1412 |
+
|
| 1413 |
+
@pytest.mark.parametrize("dt", [np.intc, np.uintc])
|
| 1414 |
+
def test_gh_19423(dt):
|
| 1415 |
+
rng = np.random.default_rng(123)
|
| 1416 |
+
max_val = 8
|
| 1417 |
+
image = rng.integers(low=0, high=max_val, size=(10, 12)).astype(dtype=dt)
|
| 1418 |
+
val_idx = ndimage.value_indices(image)
|
| 1419 |
+
assert len(val_idx.keys()) == max_val
|
parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (4.49 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc
ADDED
|
Binary file (666 Bytes). View file
|
|
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace distributed {
|
| 9 |
+
namespace autograd {
|
| 10 |
+
|
| 11 |
+
// Used to request other workers to clean up their autograd context.
|
| 12 |
+
class TORCH_API CleanupAutogradContextReq : public rpc::RpcCommandBase {
|
| 13 |
+
public:
|
| 14 |
+
explicit CleanupAutogradContextReq(int64_t context_id);
|
| 15 |
+
// Serialization and deserialization methods.
|
| 16 |
+
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
|
| 17 |
+
static std::unique_ptr<CleanupAutogradContextReq> fromMessage(
|
| 18 |
+
const rpc::Message& message);
|
| 19 |
+
|
| 20 |
+
// Retrieve the context id we are cleaning up with this message.
|
| 21 |
+
int64_t getContextId();
|
| 22 |
+
|
| 23 |
+
private:
|
| 24 |
+
int64_t context_id_;
|
| 25 |
+
};
|
| 26 |
+
|
| 27 |
+
} // namespace autograd
|
| 28 |
+
} // namespace distributed
|
| 29 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_agent.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace distributed {
|
| 9 |
+
namespace autograd {
|
| 10 |
+
|
| 11 |
+
// Represents an RPC that includes autograd information. This class basically
|
| 12 |
+
// wraps another `RpcCommandBase` object which represents the actual RPC and has
|
| 13 |
+
// additional autograd information associated with that RPC.
|
| 14 |
+
class TORCH_API RpcWithAutograd final : public rpc::RpcCommandBase {
|
| 15 |
+
public:
|
| 16 |
+
// Used when we are sending an RPC over the wire.
|
| 17 |
+
RpcWithAutograd(
|
| 18 |
+
rpc::worker_id_t fromWorkerId,
|
| 19 |
+
rpc::MessageType messageType,
|
| 20 |
+
const AutogradMetadata& autogradMetadata,
|
| 21 |
+
c10::intrusive_ptr<rpc::Message> wrappedMessage,
|
| 22 |
+
rpc::DeviceMap deviceMap = {});
|
| 23 |
+
|
| 24 |
+
// Used when receiving an RPC over the wire.
|
| 25 |
+
RpcWithAutograd(
|
| 26 |
+
rpc::worker_id_t fromWorkerId,
|
| 27 |
+
rpc::MessageType messageType,
|
| 28 |
+
const AutogradMetadata& autogradMetadata,
|
| 29 |
+
std::unique_ptr<rpc::RpcCommandBase> wrappedRpc,
|
| 30 |
+
rpc::MessageType wrappedMessageType,
|
| 31 |
+
std::vector<torch::Tensor> tensors,
|
| 32 |
+
rpc::DeviceMap deviceMap = {});
|
| 33 |
+
|
| 34 |
+
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
|
| 35 |
+
|
| 36 |
+
static std::unique_ptr<RpcWithAutograd> fromMessage(
|
| 37 |
+
const rpc::Message& message);
|
| 38 |
+
|
| 39 |
+
// Retrieves tensors as part of this RPC, which need to be considered for
|
| 40 |
+
// autograd computations.
|
| 41 |
+
std::vector<torch::Tensor>& tensors();
|
| 42 |
+
|
| 43 |
+
const AutogradMetadata& autogradMetadata() const;
|
| 44 |
+
|
| 45 |
+
RpcCommandBase& wrappedRpc();
|
| 46 |
+
|
| 47 |
+
void setWrappedRpc(std::unique_ptr<RpcCommandBase> wrappedRpc);
|
| 48 |
+
|
| 49 |
+
std::unique_ptr<RpcCommandBase> moveWrappedRpc() &&;
|
| 50 |
+
|
| 51 |
+
// Message type of the wrapped RPC.
|
| 52 |
+
rpc::MessageType wrappedMessageType() const;
|
| 53 |
+
|
| 54 |
+
// Retrieve the worker id from which the RPC originated.
|
| 55 |
+
rpc::worker_id_t fromWorkerId() const;
|
| 56 |
+
|
| 57 |
+
// Retrieve the device map.
|
| 58 |
+
const rpc::DeviceMap& deviceMap();
|
| 59 |
+
|
| 60 |
+
private:
|
| 61 |
+
// WorkerId from which this RPC originated. This is necessary for knowing
|
| 62 |
+
// which worker we need to contact during the backward pass.
|
| 63 |
+
rpc::worker_id_t fromWorkerId_;
|
| 64 |
+
|
| 65 |
+
// Message type for this call.
|
| 66 |
+
rpc::MessageType messageType_;
|
| 67 |
+
|
| 68 |
+
AutogradMetadata autogradMetadata_;
|
| 69 |
+
|
| 70 |
+
// Since wrappedMessage_ is destructively constructed from wrappedRpc_,
|
| 71 |
+
// they are valid exclusively. They are used for different purpose.
|
| 72 |
+
// wrappedRpc_ is used while constructing receive rpcWithAutograd;
|
| 73 |
+
// wrappedMessage_ is used while constructing send rpcWithAutograd;
|
| 74 |
+
|
| 75 |
+
// When receive rpcWithAutograd is constructed fromMessage, it is valid;
|
| 76 |
+
// When send rpcWithAutograd is constructed before toMessage, it is nullptr;
|
| 77 |
+
std::unique_ptr<RpcCommandBase> wrappedRpc_;
|
| 78 |
+
|
| 79 |
+
// Serialized message representing wrappedRpc_. Used mostly as a cache to
|
| 80 |
+
// avoid serializing the request twice.
|
| 81 |
+
// When receive rpcWithAutograd is constructed fromMessage, it is nullptr;
|
| 82 |
+
// When send rpcWithAutograd is constructed before toMessage, it is valid;
|
| 83 |
+
c10::intrusive_ptr<rpc::Message> wrappedMessage_;
|
| 84 |
+
|
| 85 |
+
// message type of the wrappedMessage, this is stored separately since
|
| 86 |
+
// wrappedMessage_ is not always guaranteed to be populated.
|
| 87 |
+
rpc::MessageType wrappedMessageType_;
|
| 88 |
+
|
| 89 |
+
// Tensors part of the wrappedRpc that need to be considered for autograd.
|
| 90 |
+
std::vector<torch::Tensor> tensors_;
|
| 91 |
+
|
| 92 |
+
// Device mapping for tensors that are sent across an RPC to another node.
|
| 93 |
+
rpc::DeviceMap deviceMap_;
|
| 94 |
+
};
|
| 95 |
+
|
| 96 |
+
} // namespace autograd
|
| 97 |
+
} // namespace distributed
|
| 98 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace distributed {
|
| 9 |
+
namespace autograd {
|
| 10 |
+
|
| 11 |
+
// Internal system RPC to invoke distributed backward pass on remote nodes when
|
| 12 |
+
// 'rref.backward()' is invoked.
|
| 13 |
+
class TORCH_API RRefBackwardReq : public rpc::RpcCommandBase {
|
| 14 |
+
public:
|
| 15 |
+
RRefBackwardReq(
|
| 16 |
+
const rpc::RRefId& rrefId,
|
| 17 |
+
int64_t autogradContextId,
|
| 18 |
+
bool retainGraph = false);
|
| 19 |
+
|
| 20 |
+
const rpc::RRefId& getRRefId() const;
|
| 21 |
+
|
| 22 |
+
int64_t getAutogradContextId() const;
|
| 23 |
+
|
| 24 |
+
bool retainGraph() const;
|
| 25 |
+
|
| 26 |
+
// Serialization and deserialization methods.
|
| 27 |
+
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
|
| 28 |
+
static std::unique_ptr<RRefBackwardReq> fromMessage(
|
| 29 |
+
const rpc::Message& message);
|
| 30 |
+
|
| 31 |
+
private:
|
| 32 |
+
const rpc::RRefId rrefId_;
|
| 33 |
+
const int64_t autogradContextId_;
|
| 34 |
+
const bool retainGraph_;
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
} // namespace autograd
|
| 38 |
+
} // namespace distributed
|
| 39 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp
ADDED
|
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <condition_variable>
|
| 4 |
+
#include <memory>
|
| 5 |
+
#include <mutex>
|
| 6 |
+
#include <stdexcept>
|
| 7 |
+
#include <unordered_map>
|
| 8 |
+
#include <utility>
|
| 9 |
+
#include <vector>
|
| 10 |
+
|
| 11 |
+
#include <ATen/ATen.h>
|
| 12 |
+
#include <c10/macros/Macros.h>
|
| 13 |
+
|
| 14 |
+
#include <torch/csrc/distributed/c10d/Types.hpp>
|
| 15 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 16 |
+
#include <torch/csrc/distributed/c10d/Work.hpp>
|
| 17 |
+
#include <torch/csrc/distributed/c10d/debug.h>
|
| 18 |
+
|
| 19 |
+
constexpr auto kBackendDefaultTimeout =
|
| 20 |
+
std::chrono::milliseconds(30 * 60 * 1000);
|
| 21 |
+
|
| 22 |
+
namespace c10d {
|
| 23 |
+
|
| 24 |
+
class TORCH_API Backend : public torch::CustomClassHolder {
|
| 25 |
+
public:
|
| 26 |
+
// Backend Options is a base struct that defines the basic options
|
| 27 |
+
// when constructing a Backend. Each Backend subclass should
|
| 28 |
+
// extend this struct and define its options if it wants to provide more
|
| 29 |
+
// config options (beyond basic ones defined here) to end user.
|
| 30 |
+
struct TORCH_API Options : torch::CustomClassHolder {
|
| 31 |
+
explicit Options(
|
| 32 |
+
std::string backend,
|
| 33 |
+
std::chrono::milliseconds timeout = kBackendDefaultTimeout)
|
| 34 |
+
: timeout(timeout), backend(std::move(backend)) {}
|
| 35 |
+
~Options() override = default;
|
| 36 |
+
|
| 37 |
+
std::chrono::milliseconds timeout;
|
| 38 |
+
|
| 39 |
+
// backend name
|
| 40 |
+
const std::string backend;
|
| 41 |
+
};
|
| 42 |
+
|
| 43 |
+
explicit Backend(int rank, int size);
|
| 44 |
+
~Backend() override = 0;
|
| 45 |
+
|
| 46 |
+
int getRank() const {
|
| 47 |
+
return rank_;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
int getSize() const {
|
| 51 |
+
return size_;
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
// Returns an unique opaque ID of this backend that can be used to correlate
|
| 55 |
+
// with its collectives.
|
| 56 |
+
int64_t getID() const {
|
| 57 |
+
return reinterpret_cast<std::intptr_t>(this);
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
virtual void startCoalescing() {
|
| 61 |
+
TORCH_CHECK(
|
| 62 |
+
false,
|
| 63 |
+
c10::str(
|
| 64 |
+
"Backend ",
|
| 65 |
+
getBackendName(),
|
| 66 |
+
" does not implement startCoalescing"));
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
virtual c10::intrusive_ptr<Work> endCoalescing() {
|
| 70 |
+
TORCH_CHECK(
|
| 71 |
+
false,
|
| 72 |
+
c10::str(
|
| 73 |
+
"Backend ", getBackendName(), " does not implement endCoalescing"));
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
// Subclasses must override this method to return the backend name
|
| 77 |
+
virtual const std::string getBackendName() const {
|
| 78 |
+
TORCH_INTERNAL_ASSERT(false, "getBackendName is not implemented.");
|
| 79 |
+
};
|
| 80 |
+
|
| 81 |
+
virtual c10::intrusive_ptr<Work> broadcast(
|
| 82 |
+
std::vector<at::Tensor>& /* tensors */,
|
| 83 |
+
const BroadcastOptions& /* opts */ = BroadcastOptions()) {
|
| 84 |
+
TORCH_CHECK(
|
| 85 |
+
false,
|
| 86 |
+
c10::str("Backend ", getBackendName(), " does not support broadcast"));
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
virtual c10::intrusive_ptr<Work> allreduce(
|
| 90 |
+
std::vector<at::Tensor>& /* tensors */,
|
| 91 |
+
const AllreduceOptions& /* opts */ = AllreduceOptions()) {
|
| 92 |
+
TORCH_CHECK(
|
| 93 |
+
false,
|
| 94 |
+
c10::str("Backend ", getBackendName(), " does not support allreduce"));
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
virtual c10::intrusive_ptr<Work> allreduce_sparse(
|
| 98 |
+
std::vector<at::Tensor>& /* tensors */,
|
| 99 |
+
const AllreduceOptions& /* opts */ = AllreduceOptions()) {
|
| 100 |
+
TORCH_CHECK(
|
| 101 |
+
false,
|
| 102 |
+
c10::str(
|
| 103 |
+
"Backend ",
|
| 104 |
+
getBackendName(),
|
| 105 |
+
" does not support allreduce sparse"));
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
virtual c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 109 |
+
std::vector<at::Tensor>& /* tensors */,
|
| 110 |
+
const AllreduceCoalescedOptions& /* opts */ =
|
| 111 |
+
AllreduceCoalescedOptions()) {
|
| 112 |
+
TORCH_CHECK(
|
| 113 |
+
false,
|
| 114 |
+
c10::str(
|
| 115 |
+
"Backend ",
|
| 116 |
+
getBackendName(),
|
| 117 |
+
" does not support allreduce_coalesced"));
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
virtual c10::intrusive_ptr<Work> reduce(
|
| 121 |
+
std::vector<at::Tensor>& /* tensors */,
|
| 122 |
+
const ReduceOptions& /* opts */ = ReduceOptions()) {
|
| 123 |
+
TORCH_CHECK(
|
| 124 |
+
false,
|
| 125 |
+
c10::str("Backend ", getBackendName(), " does not support reduce"));
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
virtual c10::intrusive_ptr<Work> allgather(
|
| 129 |
+
std::vector<std::vector<at::Tensor>>& /* outputTensors */,
|
| 130 |
+
std::vector<at::Tensor>& /* inputTensors */,
|
| 131 |
+
const AllgatherOptions& /* opts */ = AllgatherOptions()) {
|
| 132 |
+
TORCH_CHECK(
|
| 133 |
+
false,
|
| 134 |
+
c10::str("Backend ", getBackendName(), " does not support allgather"));
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
// Gathers a single tensor inputBuffer into a single buffer outputBuffer that
|
| 138 |
+
// is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE.
|
| 139 |
+
// For implementers of ProcessGroup API and advanced users only.
|
| 140 |
+
// Note: this function will be deprecated in near future.
|
| 141 |
+
virtual c10::intrusive_ptr<Work> _allgather_base(
|
| 142 |
+
at::Tensor& /* outputBuffer */,
|
| 143 |
+
at::Tensor& /* inputBuffer */,
|
| 144 |
+
const AllgatherOptions& /* opts */ = AllgatherOptions()) {
|
| 145 |
+
TORCH_CHECK(
|
| 146 |
+
false,
|
| 147 |
+
c10::str(
|
| 148 |
+
"Backend ", getBackendName(), " does not support _allgather_base"));
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
// This function is deprecated and will be moved out of Backend to comms:
|
| 152 |
+
// * do not add dependencies on this function,
|
| 153 |
+
// * do not implement it in your Backend, implement _allgather_base
|
| 154 |
+
// instead.
|
| 155 |
+
virtual c10::intrusive_ptr<Work> allgather_coalesced(
|
| 156 |
+
std::vector<std::vector<at::Tensor>>& /* outputTensorLists */,
|
| 157 |
+
std::vector<at::Tensor>& /* inputTensors */,
|
| 158 |
+
const AllgatherOptions& /* opts */ = AllgatherOptions()) {
|
| 159 |
+
TORCH_CHECK(
|
| 160 |
+
false,
|
| 161 |
+
c10::str(
|
| 162 |
+
"Backend ",
|
| 163 |
+
getBackendName(),
|
| 164 |
+
" does not support allgather_coalesced"));
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
// This function is a coalesced version of `allgather_into_tensor` (currently
|
| 168 |
+
// still named as `_allgather_base`). Each tensor in the vector corresponds to
|
| 169 |
+
// an input/output of one `allgather_into_tensor` operation.
|
| 170 |
+
virtual c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
|
| 171 |
+
std::vector<at::Tensor>& /* outputs */,
|
| 172 |
+
std::vector<at::Tensor>& /* inputs */,
|
| 173 |
+
const AllgatherOptions& /* opts */ = AllgatherOptions()) {
|
| 174 |
+
TORCH_CHECK(
|
| 175 |
+
false,
|
| 176 |
+
c10::str(
|
| 177 |
+
"Backend ",
|
| 178 |
+
getBackendName(),
|
| 179 |
+
" does not support allgather_into_tensor_coalesced"));
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
virtual c10::intrusive_ptr<Work> gather(
|
| 183 |
+
std::vector<std::vector<at::Tensor>>& /* outputTensors */,
|
| 184 |
+
std::vector<at::Tensor>& /* inputTensors */,
|
| 185 |
+
const GatherOptions& /* opts */ = GatherOptions()) {
|
| 186 |
+
TORCH_CHECK(
|
| 187 |
+
false,
|
| 188 |
+
c10::str("Backend ", getBackendName(), " does not support gather"));
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
virtual c10::intrusive_ptr<Work> scatter(
|
| 192 |
+
std::vector<at::Tensor>& /* outputTensors */,
|
| 193 |
+
std::vector<std::vector<at::Tensor>>& /* inputTensors */,
|
| 194 |
+
const ScatterOptions& /* opts */ = ScatterOptions()) {
|
| 195 |
+
TORCH_CHECK(
|
| 196 |
+
false,
|
| 197 |
+
c10::str("Backend ", getBackendName(), " does not support scatter"));
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
virtual c10::intrusive_ptr<Work> reduce_scatter(
|
| 201 |
+
std::vector<at::Tensor>& /* outputTensors */,
|
| 202 |
+
std::vector<std::vector<at::Tensor>>& /* inputTensors */,
|
| 203 |
+
const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
|
| 204 |
+
TORCH_CHECK(
|
| 205 |
+
false,
|
| 206 |
+
c10::str(
|
| 207 |
+
"Backend ", getBackendName(), " does not support reduce_scatter"));
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
virtual c10::intrusive_ptr<Work> _reduce_scatter_base(
|
| 211 |
+
at::Tensor& /* outputBuffer */,
|
| 212 |
+
at::Tensor& /* inputBuffer */,
|
| 213 |
+
const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
|
| 214 |
+
TORCH_CHECK(
|
| 215 |
+
false,
|
| 216 |
+
c10::str(
|
| 217 |
+
"Backend ",
|
| 218 |
+
getBackendName(),
|
| 219 |
+
" does not support _reduce_scatter_base"));
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
// This function is a coalesced version of `reduce_scatter_tensor` (currently
|
| 223 |
+
// still named as `_reduce_scatter_base`). Each tensor in the vector
|
| 224 |
+
// corresponds to an input/output of one `reduce_scatter_tensor` operation.
|
| 225 |
+
virtual c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
|
| 226 |
+
std::vector<at::Tensor>& /* outputs */,
|
| 227 |
+
std::vector<at::Tensor>& /* inputs */,
|
| 228 |
+
const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
|
| 229 |
+
TORCH_CHECK(
|
| 230 |
+
false,
|
| 231 |
+
c10::str(
|
| 232 |
+
"Backend ",
|
| 233 |
+
getBackendName(),
|
| 234 |
+
" does not support reduce_scatter_tensor_coalesced"));
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
virtual c10::intrusive_ptr<Work> alltoall_base(
|
| 238 |
+
at::Tensor& /* outputBuffer */,
|
| 239 |
+
at::Tensor& /* inputBuffer */,
|
| 240 |
+
std::vector<int64_t>& /* outputSplitSizes */,
|
| 241 |
+
std::vector<int64_t>& /* inputSplitSizes */,
|
| 242 |
+
const AllToAllOptions& /* opts */ = AllToAllOptions()) {
|
| 243 |
+
TORCH_CHECK(
|
| 244 |
+
false,
|
| 245 |
+
c10::str(
|
| 246 |
+
"Backend ", getBackendName(), " does not support alltoall_base"));
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
virtual c10::intrusive_ptr<Work> alltoall(
|
| 250 |
+
std::vector<at::Tensor>& /* outputTensors */,
|
| 251 |
+
std::vector<at::Tensor>& /* inputTensors */,
|
| 252 |
+
const AllToAllOptions& opts = AllToAllOptions()) {
|
| 253 |
+
TORCH_CHECK(
|
| 254 |
+
false,
|
| 255 |
+
c10::str("Backend ", getBackendName(), " does not support alltoall"));
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
virtual void monitoredBarrier(
|
| 259 |
+
const BarrierOptions& /* unused */,
|
| 260 |
+
bool /* unused */ = false) {
|
| 261 |
+
auto backendName = getBackendName();
|
| 262 |
+
TORCH_CHECK(
|
| 263 |
+
false,
|
| 264 |
+
c10::str(
|
| 265 |
+
"Backend ",
|
| 266 |
+
backendName,
|
| 267 |
+
" does not support monitoredBarrier, only GLOO supports monitored barrier."));
|
| 268 |
+
}
|
| 269 |
+
|
| 270 |
+
// Agrees on an initial sequence number for the whole group by having rank 0
|
| 271 |
+
// create it and broadcast it to other ranks using the store. Only implemented
|
| 272 |
+
// for GLOO and NCCL backends currently.
|
| 273 |
+
virtual void setSequenceNumberForGroup() {
|
| 274 |
+
auto backendName = getBackendName();
|
| 275 |
+
TORCH_CHECK(
|
| 276 |
+
false,
|
| 277 |
+
c10::str(
|
| 278 |
+
"Backend ",
|
| 279 |
+
backendName,
|
| 280 |
+
" does not yet support sequence numbers."));
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
// Retrieves the current sequence number for the whole group, which should be
|
| 284 |
+
// in sync. If the returned number is not consistent across the group, it
|
| 285 |
+
// may indicate that there is some sort of collective desynchronization.
|
| 286 |
+
virtual uint64_t getSequenceNumberForGroup() {
|
| 287 |
+
auto backendName = getBackendName();
|
| 288 |
+
TORCH_CHECK(
|
| 289 |
+
false,
|
| 290 |
+
c10::str(
|
| 291 |
+
"Backend ",
|
| 292 |
+
backendName,
|
| 293 |
+
" does not yet support sequence numbers."));
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
virtual c10::intrusive_ptr<Work> send(
|
| 297 |
+
std::vector<at::Tensor>& /* tensors */,
|
| 298 |
+
int /* dstRank */,
|
| 299 |
+
int /* tag */) {
|
| 300 |
+
TORCH_CHECK(
|
| 301 |
+
false,
|
| 302 |
+
c10::str("Backend ", getBackendName(), " does not support send"));
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
virtual c10::intrusive_ptr<Work> recv(
|
| 306 |
+
std::vector<at::Tensor>& /* tensors */,
|
| 307 |
+
int /* srcRank */,
|
| 308 |
+
int /* tag */) {
|
| 309 |
+
TORCH_CHECK(
|
| 310 |
+
false,
|
| 311 |
+
c10::str("Backend ", getBackendName(), " does not support recv"));
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
virtual c10::intrusive_ptr<Work> recvAnysource(
|
| 315 |
+
std::vector<at::Tensor>& /* tensors */,
|
| 316 |
+
int /* tag */) {
|
| 317 |
+
TORCH_CHECK(
|
| 318 |
+
false,
|
| 319 |
+
c10::str(
|
| 320 |
+
"Backend ", getBackendName(), " does not support recvAnysource"));
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
virtual c10::intrusive_ptr<Work> barrier(
|
| 324 |
+
const BarrierOptions& /* opts */ = BarrierOptions()) {
|
| 325 |
+
TORCH_CHECK(
|
| 326 |
+
false,
|
| 327 |
+
c10::str("Backend ", getBackendName(), " does not support barrier"));
|
| 328 |
+
}
|
| 329 |
+
|
| 330 |
+
virtual void registerOnCompletionHook(
|
| 331 |
+
std::function<void(std::shared_ptr<WorkInfo>)>&& hook) {
|
| 332 |
+
TORCH_CHECK(
|
| 333 |
+
false,
|
| 334 |
+
"Only ProcessGrouppNCCL supports onCompletion hook, but got ",
|
| 335 |
+
getBackendName(),
|
| 336 |
+
" backend.");
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
virtual void waitForPendingWorks() {
|
| 340 |
+
TORCH_CHECK(
|
| 341 |
+
false,
|
| 342 |
+
"Only ProcessGrouppNCCL supports waitForPendingWorks, but got ",
|
| 343 |
+
getBackendName(),
|
| 344 |
+
" backend.");
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
virtual void enableCollectivesTiming() {
|
| 348 |
+
TORCH_CHECK(
|
| 349 |
+
false,
|
| 350 |
+
"Backend ",
|
| 351 |
+
getBackendName(),
|
| 352 |
+
" is missing implementation of enableCollectivesTiming.");
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
bool hasHooks() const {
|
| 356 |
+
return onCompletionHook_ != nullptr;
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
// Do not call this directly, use ProcessGroup::setGroupName instead.
|
| 360 |
+
void setGroupName(const std::string& name) {
|
| 361 |
+
pg_name_ = name;
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
const std::string& getGroupName() const {
|
| 365 |
+
return pg_name_;
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
protected:
|
| 369 |
+
// Implementations of this interface need to call this to setup
|
| 370 |
+
// appropriate logging etc.
|
| 371 |
+
void init();
|
| 372 |
+
|
| 373 |
+
const int rank_;
|
| 374 |
+
const int size_;
|
| 375 |
+
// Debug level setting. It is parsed once when ProcessGroup is constructed and
|
| 376 |
+
// remains the same across use of this process group.
|
| 377 |
+
DebugLevel dist_debug_level_;
|
| 378 |
+
std::string pg_name_;
|
| 379 |
+
|
| 380 |
+
std::function<void(std::shared_ptr<WorkInfo>)> onCompletionHook_;
|
| 381 |
+
};
|
| 382 |
+
|
| 383 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 2 |
+
|
| 3 |
+
namespace c10d_functional {
|
| 4 |
+
|
| 5 |
+
void register_process_group(
|
| 6 |
+
const std::string& tag,
|
| 7 |
+
c10::intrusive_ptr<c10d::ProcessGroup> pg);
|
| 8 |
+
|
| 9 |
+
c10::intrusive_ptr<c10d::ProcessGroup> resolve_process_group(
|
| 10 |
+
const std::string& tag);
|
| 11 |
+
|
| 12 |
+
} // namespace c10d_functional
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 4 |
+
|
| 5 |
+
namespace c10d {
|
| 6 |
+
|
| 7 |
+
C10_EXPORT void register_process_group(
|
| 8 |
+
const std::string& group_name,
|
| 9 |
+
c10::intrusive_ptr<c10d::ProcessGroup> group);
|
| 10 |
+
|
| 11 |
+
C10_EXPORT c10::intrusive_ptr<c10d::ProcessGroup> resolve_process_group(
|
| 12 |
+
const std::string& group_name);
|
| 13 |
+
|
| 14 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <sys/types.h>
|
| 4 |
+
|
| 5 |
+
#include <condition_variable>
|
| 6 |
+
#include <mutex>
|
| 7 |
+
#include <unordered_map>
|
| 8 |
+
|
| 9 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 10 |
+
|
| 11 |
+
namespace c10d {
|
| 12 |
+
|
| 13 |
+
class TORCH_API HashStore : public Store {
|
| 14 |
+
public:
|
| 15 |
+
~HashStore() override = default;
|
| 16 |
+
|
| 17 |
+
void set(const std::string& key, const std::vector<uint8_t>& data) override;
|
| 18 |
+
|
| 19 |
+
std::vector<uint8_t> compareSet(
|
| 20 |
+
const std::string& key,
|
| 21 |
+
const std::vector<uint8_t>& expectedValue,
|
| 22 |
+
const std::vector<uint8_t>& desiredValue) override;
|
| 23 |
+
|
| 24 |
+
std::vector<uint8_t> get(const std::string& key) override;
|
| 25 |
+
|
| 26 |
+
void wait(const std::vector<std::string>& keys) override {
|
| 27 |
+
wait(keys, Store::kDefaultTimeout);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
void wait(
|
| 31 |
+
const std::vector<std::string>& keys,
|
| 32 |
+
const std::chrono::milliseconds& timeout) override;
|
| 33 |
+
|
| 34 |
+
int64_t add(const std::string& key, int64_t value) override;
|
| 35 |
+
|
| 36 |
+
int64_t getNumKeys() override;
|
| 37 |
+
|
| 38 |
+
bool check(const std::vector<std::string>& keys) override;
|
| 39 |
+
|
| 40 |
+
bool deleteKey(const std::string& key) override;
|
| 41 |
+
|
| 42 |
+
void append(const std::string& key, const std::vector<uint8_t>& value)
|
| 43 |
+
override;
|
| 44 |
+
|
| 45 |
+
std::vector<std::vector<uint8_t>> multiGet(
|
| 46 |
+
const std::vector<std::string>& keys) override;
|
| 47 |
+
|
| 48 |
+
void multiSet(
|
| 49 |
+
const std::vector<std::string>& keys,
|
| 50 |
+
const std::vector<std::vector<uint8_t>>& values) override;
|
| 51 |
+
|
| 52 |
+
// Returns true if this store support append, multiGet and multiSet
|
| 53 |
+
bool hasExtendedApi() const override;
|
| 54 |
+
|
| 55 |
+
protected:
|
| 56 |
+
std::unordered_map<std::string, std::vector<uint8_t>> map_;
|
| 57 |
+
std::mutex m_;
|
| 58 |
+
std::condition_variable cv_;
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <ATen/record_function.h>
|
| 5 |
+
#include <c10/macros/Macros.h>
|
| 6 |
+
#include <c10/util/ThreadLocalDebugInfo.h>
|
| 7 |
+
#include <string>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
namespace torch {
|
| 11 |
+
|
| 12 |
+
class TORCH_API ParamCommsDebugInfo : public c10::DebugInfoBase {
|
| 13 |
+
public:
|
| 14 |
+
ParamCommsDebugInfo() = default;
|
| 15 |
+
ParamCommsDebugInfo(
|
| 16 |
+
int rank,
|
| 17 |
+
std::string&& colName,
|
| 18 |
+
int inNelems,
|
| 19 |
+
int outNelems,
|
| 20 |
+
at::ScalarType dType,
|
| 21 |
+
std::vector<int64_t> inSplitSizes,
|
| 22 |
+
std::vector<int64_t> outSplitSizes,
|
| 23 |
+
int worldSize);
|
| 24 |
+
|
| 25 |
+
~ParamCommsDebugInfo() override = default;
|
| 26 |
+
|
| 27 |
+
int getRank() const {
|
| 28 |
+
return rank_;
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
int getWorldSize() const {
|
| 32 |
+
return worldSize_;
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
const std::string getColumnName() const {
|
| 36 |
+
return columnName_;
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
int getInMessageNelems() const {
|
| 40 |
+
return inMessageNelems_;
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
int getOutMessageNelems() const {
|
| 44 |
+
return outMessageNelems_;
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
at::ScalarType getDType() const {
|
| 48 |
+
return dType_;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
const std::vector<int64_t>& getInputSplitSizes() const {
|
| 52 |
+
return inputSplitSizes_;
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
const std::vector<int64_t>& getOutputSplitSizes() const {
|
| 56 |
+
return outputSplitSizes_;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
private:
|
| 60 |
+
int rank_{};
|
| 61 |
+
int worldSize_{};
|
| 62 |
+
std::string columnName_;
|
| 63 |
+
int inMessageNelems_{};
|
| 64 |
+
int outMessageNelems_{};
|
| 65 |
+
at::ScalarType dType_ = at::kByte;
|
| 66 |
+
std::vector<int64_t> inputSplitSizes_;
|
| 67 |
+
std::vector<int64_t> outputSplitSizes_;
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
#define RECORD_PARAM_COMMS( \
|
| 71 |
+
seq, \
|
| 72 |
+
pg_ptr, \
|
| 73 |
+
rank, \
|
| 74 |
+
colName, \
|
| 75 |
+
inNelems, \
|
| 76 |
+
outNelems, \
|
| 77 |
+
dType, \
|
| 78 |
+
inSplitSizes, \
|
| 79 |
+
outSplitSizes, \
|
| 80 |
+
worldSize) \
|
| 81 |
+
auto paramCommsInfo = std::make_shared<torch::ParamCommsDebugInfo>( \
|
| 82 |
+
rank, \
|
| 83 |
+
colName, \
|
| 84 |
+
inNelems, \
|
| 85 |
+
outNelems, \
|
| 86 |
+
dType, \
|
| 87 |
+
inSplitSizes, \
|
| 88 |
+
outSplitSizes, \
|
| 89 |
+
worldSize); \
|
| 90 |
+
c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \
|
| 91 |
+
std::initializer_list<const c10::IValue> paramList = { \
|
| 92 |
+
c10::IValue(seq), \
|
| 93 |
+
c10::IValue(pg_ptr), \
|
| 94 |
+
rank, \
|
| 95 |
+
colName, \
|
| 96 |
+
inSplitSizes, \
|
| 97 |
+
outSplitSizes, \
|
| 98 |
+
worldSize}; \
|
| 99 |
+
c10::ArrayRef<const c10::IValue> paramInputs(paramList); \
|
| 100 |
+
RECORD_FUNCTION(at::kParamCommsCallName, paramInputs);
|
| 101 |
+
|
| 102 |
+
#define RECORD_PARAM_COMMS_DATA( \
|
| 103 |
+
seq, \
|
| 104 |
+
pg_ptr, \
|
| 105 |
+
InputTensors, \
|
| 106 |
+
OutputTensors, \
|
| 107 |
+
rank, \
|
| 108 |
+
colName, \
|
| 109 |
+
inNelems, \
|
| 110 |
+
outNelems, \
|
| 111 |
+
dType, \
|
| 112 |
+
inSplitSizes, \
|
| 113 |
+
outSplitSizes, \
|
| 114 |
+
worldSize) \
|
| 115 |
+
auto paramCommsInfo = std::make_shared<torch::ParamCommsDebugInfo>( \
|
| 116 |
+
rank, \
|
| 117 |
+
colName, \
|
| 118 |
+
inNelems, \
|
| 119 |
+
outNelems, \
|
| 120 |
+
dType, \
|
| 121 |
+
inSplitSizes, \
|
| 122 |
+
outSplitSizes, \
|
| 123 |
+
worldSize); \
|
| 124 |
+
c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \
|
| 125 |
+
std::initializer_list<const c10::IValue> paramList = { \
|
| 126 |
+
c10::IValue(InputTensors), \
|
| 127 |
+
c10::IValue(seq), \
|
| 128 |
+
c10::IValue(pg_ptr), \
|
| 129 |
+
rank, \
|
| 130 |
+
colName, \
|
| 131 |
+
inSplitSizes, \
|
| 132 |
+
outSplitSizes, \
|
| 133 |
+
worldSize}; \
|
| 134 |
+
c10::ArrayRef<const c10::IValue> paramInputs(paramList); \
|
| 135 |
+
RECORD_FUNCTION_WITH_INPUTS_OUTPUTS( \
|
| 136 |
+
at::kParamCommsCallName, \
|
| 137 |
+
paramInputs, \
|
| 138 |
+
std::vector<c10::IValue>(1, c10::IValue(OutputTensors)));
|
| 139 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 4 |
+
#include <memory>
|
| 5 |
+
|
| 6 |
+
namespace c10d {
|
| 7 |
+
|
| 8 |
+
class TORCH_API PrefixStore : public Store {
|
| 9 |
+
public:
|
| 10 |
+
explicit PrefixStore(std::string prefix, c10::intrusive_ptr<Store> store);
|
| 11 |
+
|
| 12 |
+
using Store::set;
|
| 13 |
+
void set(const std::string& key, const std::vector<uint8_t>& value) override;
|
| 14 |
+
|
| 15 |
+
using Store::compareSet;
|
| 16 |
+
std::vector<uint8_t> compareSet(
|
| 17 |
+
const std::string& key,
|
| 18 |
+
const std::vector<uint8_t>& expectedValue,
|
| 19 |
+
const std::vector<uint8_t>& desiredValue) override;
|
| 20 |
+
|
| 21 |
+
std::vector<uint8_t> get(const std::string& key) override;
|
| 22 |
+
|
| 23 |
+
int64_t add(const std::string& key, int64_t value) override;
|
| 24 |
+
|
| 25 |
+
bool deleteKey(const std::string& key) override;
|
| 26 |
+
|
| 27 |
+
int64_t getNumKeys() override;
|
| 28 |
+
|
| 29 |
+
bool check(const std::vector<std::string>& keys) override;
|
| 30 |
+
|
| 31 |
+
void wait(const std::vector<std::string>& keys) override;
|
| 32 |
+
|
| 33 |
+
void wait(
|
| 34 |
+
const std::vector<std::string>& keys,
|
| 35 |
+
const std::chrono::milliseconds& timeout) override;
|
| 36 |
+
|
| 37 |
+
const std::chrono::milliseconds& getTimeout() const noexcept override;
|
| 38 |
+
|
| 39 |
+
void setTimeout(const std::chrono::milliseconds& timeout) override;
|
| 40 |
+
|
| 41 |
+
void append(const std::string& key, const std::vector<uint8_t>& value)
|
| 42 |
+
override;
|
| 43 |
+
|
| 44 |
+
std::vector<std::vector<uint8_t>> multiGet(
|
| 45 |
+
const std::vector<std::string>& keys) override;
|
| 46 |
+
|
| 47 |
+
void multiSet(
|
| 48 |
+
const std::vector<std::string>& keys,
|
| 49 |
+
const std::vector<std::vector<uint8_t>>& values) override;
|
| 50 |
+
|
| 51 |
+
// Returns true if this store support append, multiGet and multiSet
|
| 52 |
+
bool hasExtendedApi() const override;
|
| 53 |
+
|
| 54 |
+
c10::intrusive_ptr<Store> getUnderlyingStore();
|
| 55 |
+
|
| 56 |
+
protected:
|
| 57 |
+
std::string prefix_;
|
| 58 |
+
c10::intrusive_ptr<Store> store_;
|
| 59 |
+
|
| 60 |
+
std::string joinKey(const std::string& key);
|
| 61 |
+
std::vector<std::string> joinKeys(const std::vector<std::string>& keys);
|
| 62 |
+
};
|
| 63 |
+
|
| 64 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp
ADDED
|
@@ -0,0 +1,918 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_NCCL
|
| 4 |
+
|
| 5 |
+
#include <chrono>
|
| 6 |
+
#include <iostream>
|
| 7 |
+
#include <list>
|
| 8 |
+
#include <mutex>
|
| 9 |
+
#include <thread>
|
| 10 |
+
#include <unordered_map>
|
| 11 |
+
|
| 12 |
+
#include <torch/csrc/distributed/c10d/Backend.hpp>
|
| 13 |
+
#include <torch/csrc/distributed/c10d/NCCLUtils.hpp>
|
| 14 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 15 |
+
|
| 16 |
+
#include <ATen/DynamicLibrary.h>
|
| 17 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 18 |
+
#include <ATen/cuda/CUDAEvent.h>
|
| 19 |
+
#include <c10/core/Stream.h>
|
| 20 |
+
#include <c10/core/StreamGuard.h>
|
| 21 |
+
#include <c10/cuda/CUDACachingAllocator.h>
|
| 22 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 23 |
+
#include <c10/cuda/CUDAStream.h>
|
| 24 |
+
|
| 25 |
+
#include <torch/custom_class.h>
|
| 26 |
+
|
| 27 |
+
namespace c10d {
|
| 28 |
+
// Environment variable which controls whether we perform a NCCL healt check
|
| 29 |
+
// which ensures communicators are healthy at the beginning of init.
|
| 30 |
+
static std::vector<std::string> TORCH_ENABLE_NCCL_HEALTH_CHECK = {
|
| 31 |
+
"TORCH_ENABLE_NCCL_HEALTH_CHECK",
|
| 32 |
+
"ENABLE_NCCL_HEALTH_CHECK"};
|
| 33 |
+
|
| 34 |
+
// Environment variable which controls whether or not wait() is blocking or
|
| 35 |
+
// non-blocking.
|
| 36 |
+
static std::vector<std::string> TORCH_NCCL_BLOCKING_WAIT = {
|
| 37 |
+
"TORCH_NCCL_BLOCKING_WAIT",
|
| 38 |
+
"NCCL_BLOCKING_WAIT"};
|
| 39 |
+
|
| 40 |
+
// Environment variable which controls whether or not we perform Async Error
|
| 41 |
+
// Handling with NCCL.
|
| 42 |
+
static std::vector<std::string> TORCH_NCCL_ASYNC_ERROR_HANDLING = {
|
| 43 |
+
"TORCH_NCCL_ASYNC_ERROR_HANDLING",
|
| 44 |
+
"NCCL_ASYNC_ERROR_HANDLING"};
|
| 45 |
+
|
| 46 |
+
// Environment Variable to control whether dumping debug info on watchdog
|
| 47 |
+
// timeout is enabled. This variable must be set together with
|
| 48 |
+
// TORCH_NCCL_ENABLE_MONITORING=1 and TORCH_NCCL_TRACE_BUFFER_SIZE > 0.
|
| 49 |
+
static std::vector<std::string> TORCH_NCCL_DUMP_ON_TIMEOUT = {
|
| 50 |
+
"TORCH_NCCL_DUMP_ON_TIMEOUT"};
|
| 51 |
+
|
| 52 |
+
// Environment Variable to control whether Desync Debug is enabled.
|
| 53 |
+
// This variable must be set together with TORCH_NCCL_ASYNC_ERROR_HANDLING.
|
| 54 |
+
static std::vector<std::string> TORCH_NCCL_DESYNC_DEBUG = {
|
| 55 |
+
"TORCH_NCCL_DESYNC_DEBUG",
|
| 56 |
+
"NCCL_DESYNC_DEBUG"};
|
| 57 |
+
|
| 58 |
+
static std::vector<std::string> TORCH_NCCL_ENABLE_TIMING = {
|
| 59 |
+
"TORCH_NCCL_ENABLE_TIMING",
|
| 60 |
+
"NCCL_ENABLE_TIMING"};
|
| 61 |
+
|
| 62 |
+
static std::vector<std::string> TORCH_NCCL_ENABLE_MONITORING = {
|
| 63 |
+
"TORCH_NCCL_ENABLE_MONITORING"};
|
| 64 |
+
|
| 65 |
+
static std::vector<std::string> TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC = {
|
| 66 |
+
"TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"};
|
| 67 |
+
|
| 68 |
+
static std::vector<std::string> TORCH_NCCL_TRACE_BUFFER_SIZE = {
|
| 69 |
+
"TORCH_NCCL_TRACE_BUFFER_SIZE"};
|
| 70 |
+
|
| 71 |
+
constexpr const char* NCCL_BACKEND_NAME = "nccl";
|
| 72 |
+
|
| 73 |
+
constexpr auto kProcessGroupNCCLDefaultTimeout =
|
| 74 |
+
std::chrono::milliseconds(10 * 60 * 1000);
|
| 75 |
+
|
| 76 |
+
// NoHandling: do not handle asynchronous NCCL errors
|
| 77 |
+
// TearDown: tear down process upon error, see `WorkNCCL::handleException`
|
| 78 |
+
// CleanUpOnly: just clean up collectives and abort communicators without
|
| 79 |
+
// tearing down process SkipCleanUp: (this is a temporary option and can be
|
| 80 |
+
// removed in future) tear down process without cleaning up NCCL communicators.
|
| 81 |
+
// This should be used as a last resort in case `ncclCommAbort` itself is
|
| 82 |
+
// hanging
|
| 83 |
+
enum ErrorHandlingMode {
|
| 84 |
+
NoHandling = 0,
|
| 85 |
+
TearDown = 1,
|
| 86 |
+
CleanUpOnly = 2,
|
| 87 |
+
SkipCleanUp = 3
|
| 88 |
+
};
|
| 89 |
+
|
| 90 |
+
#define SHOULD_CLEAN_UP(a) (a != NoHandling && a != SkipCleanUp)
|
| 91 |
+
|
| 92 |
+
#define SHOULD_TEAR_DOWN(a) (a != NoHandling && a != CleanUpOnly)
|
| 93 |
+
|
| 94 |
+
// If set, ProcessGroupNCCL doesn't use recordStream calls to ensure
// caching allocator safety for tensors used on both user-facing and
// internal comm streams.
// Instead, it stashes live references to those tensors until after
// user-facing streams are synced with comm streams.
// See stashed_for_allocator_safety_ below.
// Marked const: the list of accepted env-var names is read-only.
static const std::vector<std::string> TORCH_NCCL_AVOID_RECORD_STREAMS = {
    "TORCH_NCCL_AVOID_RECORD_STREAMS"};
// If set, ProcessGroupNCCL registers postAlloc and preFree hooks to cuda
// cache allocator so that whenever a tensor is allocated or freed,
// ProcessGroupNCCL can register/deregister the tensor on all available NCCL
// communicators. Includes a deprecated alias without the TORCH_ prefix.
// Marked const: the list of accepted env-var names is read-only.
static const std::vector<std::string>
    TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK = {
        "TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK",
        "NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK"};
// ProcessGroupNCCL implements NCCL bindings for c10d.
//
// All functions of the class are expected to be called in the same order
// across all processes in the process group. This is the only way that we
// can guarantee to match up the same calls among all processes.
//
// All NCCL functions provided by this class are asynchronous functions. More
// specifically, each NCCL call is scheduled on a separate CUDA stream that is
// different from the current CUDA stream. This is for the purpose of
// potentially achieving concurrency and better performance. As a result,
// it is the callers' responsibility to make sure that the CUDA stream their
// code works on needs to wait for the NCCL operation from
// this class.
//
// This can be done by calling:
//
// either WorkNCCL::wait() or WorkNCCL::synchronize(); both achieve the same
// functionality and are synonyms.
//
// Also note that WorkNCCL::finishedGPUExecution() is a helper function only
// provided by ProcessGroupNCCL to check if the NCCL operation of WorkNCCL has
// finished execution on the GPU (not just scheduled).
//
// Example on using the NCCL process group
//
//   ProcessGroupNCCL pg(store, rank, size);
//   std::shared_ptr<WorkNCCL> work = pg.allreduce(tensors);
//
//   // At this point, the NCCL kernel has already been queued successfully.
//   // Now, let the current stream wait for NCCL to finish; this function is
//   // an async operation as well.
//
//   work->wait()
//
//   // Now continue on other work in the current stream.
class TORCH_API ProcessGroupNCCL : public Backend {
|
| 146 |
+
public:
|
| 147 |
+
class WorkNCCL : public Work, public std::enable_shared_from_this<WorkNCCL> {
|
| 148 |
+
public:
|
| 149 |
+
friend struct WorkInfo;
|
| 150 |
+
|
| 151 |
+
// Constructor takes a list of CUDA devices
|
| 152 |
+
WorkNCCL(
|
| 153 |
+
const std::vector<at::Device>& devices,
|
| 154 |
+
int rank,
|
| 155 |
+
OpType opType,
|
| 156 |
+
uint64_t seq,
|
| 157 |
+
const char* profilingTitle = nullptr,
|
| 158 |
+
const c10::optional<std::vector<at::Tensor>>& inputs = c10::nullopt,
|
| 159 |
+
bool desyncDebug = false,
|
| 160 |
+
bool enableTiming = false);
|
| 161 |
+
// Copy constructor doing partial copy without outputs_. Cleanup thread
|
| 162 |
+
// monitors and removes finished works. However it will deadlock when
|
| 163 |
+
// destructs outputs_ tensors who are view tensors in autograd graph.
|
| 164 |
+
WorkNCCL(const WorkNCCL& w);
|
| 165 |
+
|
| 166 |
+
~WorkNCCL() override;
|
| 167 |
+
|
| 168 |
+
// Checks if the NCCL kernel has started to execute.
|
| 169 |
+
bool isStarted();
|
| 170 |
+
|
| 171 |
+
// Checks if request has completed. In this specific case of NCCL, it checks
|
| 172 |
+
// if the NCCL operation has completed on the GPU in its own NCCL stream.
|
| 173 |
+
// Non-blocking operation.
|
| 174 |
+
bool isCompleted() override;
|
| 175 |
+
|
| 176 |
+
bool isSuccess() const override;
|
| 177 |
+
|
| 178 |
+
// Same as calling synchronize() for NCCL work.
|
| 179 |
+
bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
|
| 180 |
+
|
| 181 |
+
void abort() override;
|
| 182 |
+
|
| 183 |
+
// Let current stream wait on the completing of the NCCL work
|
| 184 |
+
// Throws on exceptions. Blocking operation, which will wait for work
|
| 185 |
+
// completion.
|
| 186 |
+
void synchronize() override;
|
| 187 |
+
|
| 188 |
+
// Synchronize streams by blocking each on the NCCL stream
|
| 189 |
+
void synchronizeStreams();
|
| 190 |
+
|
| 191 |
+
// Helper function to handle exception (throw if needed).
|
| 192 |
+
void handleException(ErrorHandlingMode asyncErrorHandling);
|
| 193 |
+
|
| 194 |
+
// Helper function that checks if the NCCL kernels have finished
|
| 195 |
+
// execution on the GPUs
|
| 196 |
+
bool finishedGPUExecution();
|
| 197 |
+
|
| 198 |
+
// Get a Future object that will be marked as completed internally.
|
| 199 |
+
c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
|
| 200 |
+
|
| 201 |
+
float getDuration() const override;
|
| 202 |
+
|
| 203 |
+
uint64_t getSequencenumber() const override;
|
| 204 |
+
|
| 205 |
+
// Helper function that sets an exception_ptr on the WorkNCCL object.
|
| 206 |
+
void setException(std::exception_ptr exception_ptr);
|
| 207 |
+
|
| 208 |
+
// Helper function that returns True if the WorkNCCL object has timed out
|
| 209 |
+
// and False otherwise.
|
| 210 |
+
// In case of timeout, set exception on the WorkNCCL object.
|
| 211 |
+
bool checkTimeout(
|
| 212 |
+
c10::optional<std::chrono::milliseconds> timeout = c10::nullopt);
|
| 213 |
+
|
| 214 |
+
std::vector<at::Tensor> result() override;
|
| 215 |
+
|
| 216 |
+
protected:
|
| 217 |
+
// The cached list of CUDA devices to operate on
|
| 218 |
+
std::vector<at::Device> devices_;
|
| 219 |
+
|
| 220 |
+
// The start CUDA events of NCCL operator tracking this work item on
|
| 221 |
+
// multiple CUDA devices. These start CUDA events are needed by desync
|
| 222 |
+
// debugging if enabled.
|
| 223 |
+
std::shared_ptr<std::vector<at::cuda::CUDAEvent>> ncclStartEvents_;
|
| 224 |
+
|
| 225 |
+
// The end CUDA events of NCCL operator tracking this work item on
|
| 226 |
+
// multiple CUDA devices.
|
| 227 |
+
std::shared_ptr<std::vector<at::cuda::CUDAEvent>> ncclEndEvents_;
|
| 228 |
+
|
| 229 |
+
// The NCCL communicators used for this work item.
|
| 230 |
+
std::vector<std::shared_ptr<NCCLComm>> ncclComms_;
|
| 231 |
+
|
| 232 |
+
// Tensors used for barrier op
|
| 233 |
+
std::vector<at::Tensor> barrierTensors_;
|
| 234 |
+
|
| 235 |
+
// Clone of blockingWait_ from ProcessGroupNCCL.
|
| 236 |
+
bool blockingWait_ = false;
|
| 237 |
+
|
| 238 |
+
// Clone of avoidRecordStreams_ from ProcessGroupNCCL.
|
| 239 |
+
bool avoidRecordStreams_ = false;
|
| 240 |
+
|
| 241 |
+
// Clone of opTimeout_ from ProcessGroupNCCL.
|
| 242 |
+
std::chrono::milliseconds opTimeout_;
|
| 243 |
+
|
| 244 |
+
// Time point representing when the work started.
|
| 245 |
+
std::chrono::time_point<std::chrono::steady_clock> workStartTime_;
|
| 246 |
+
|
| 247 |
+
// Record the collective sequential number.
|
| 248 |
+
uint64_t seq_;
|
| 249 |
+
|
| 250 |
+
// Indicates if the nccl start event has been updated to the store trace.
|
| 251 |
+
// This will be used by desync debug.
|
| 252 |
+
bool startTraceUpdated_{false};
|
| 253 |
+
|
| 254 |
+
// Record collective sizes for debug. We only record the size on the first
|
| 255 |
+
// device as multi-device per process is deprecated
|
| 256 |
+
size_t numelIn_ = -1;
|
| 257 |
+
size_t numelOut_ = -1;
|
| 258 |
+
|
| 259 |
+
// Wrapper method for the static checkForNCCLErrors which can be overridden
|
| 260 |
+
// for tests.
|
| 261 |
+
virtual std::exception_ptr checkForNCCLErrors(
|
| 262 |
+
const std::vector<std::shared_ptr<NCCLComm>>& ncclComms) const;
|
| 263 |
+
|
| 264 |
+
friend std::ostream& operator<<(
|
| 265 |
+
std::ostream& output,
|
| 266 |
+
const WorkNCCL& workNCCL);
|
| 267 |
+
|
| 268 |
+
private:
|
| 269 |
+
// Helper function for synchronize
|
| 270 |
+
void synchronizeInternal(std::chrono::milliseconds timeout);
|
| 271 |
+
|
| 272 |
+
// Checks for NCCL errors and sets an appropriate exception_ptr.
|
| 273 |
+
void checkAndSetException();
|
| 274 |
+
|
| 275 |
+
// Just checks whether GPU execution has started, without modifying
|
| 276 |
+
// exception_ptr.
|
| 277 |
+
bool startedGPUExecutionInternal() const;
|
| 278 |
+
|
| 279 |
+
// Just checks whether GPU execution has completed, without modifying
|
| 280 |
+
// exception_ptr.
|
| 281 |
+
bool finishedGPUExecutionInternal() const;
|
| 282 |
+
|
| 283 |
+
// Reference to the store so that we can write aborted communicators
|
| 284 |
+
// to the store.
|
| 285 |
+
c10::intrusive_ptr<Store> store_;
|
| 286 |
+
|
| 287 |
+
// Store a reference to NCCL collective's outputs, used by result and to
|
| 288 |
+
// give a more descriptive message when representing the Work as a string.
|
| 289 |
+
std::shared_ptr<std::vector<at::Tensor>> outputs_;
|
| 290 |
+
|
| 291 |
+
// TORCH_NCCL_AVOID_RECORD_STREAMS implementation helper.
|
| 292 |
+
// Stores references to participating non-output tensors (ie inputs,
|
| 293 |
+
// flattened intermediates).
|
| 294 |
+
// We'll clear this list in synchronizeStreams, just after user-facing
|
| 295 |
+
// stream(s) are synced with the nccl work stream(s).
|
| 296 |
+
// By keeping these refs (as well as outputs_) alive until after the
|
| 297 |
+
// collective's work rejoins the user-facing streams, we achieve
|
| 298 |
+
// caching allocator safety without any recordStream calls.
|
| 299 |
+
// For in-place collectives, some refs stashed here may alias outputs_,
|
| 300 |
+
// but that doesn't do any harm.
|
| 301 |
+
std::shared_ptr<std::vector<at::Tensor>> stashed_for_allocator_safety_;
|
| 302 |
+
|
| 303 |
+
// The future returned by getFuture.
|
| 304 |
+
c10::intrusive_ptr<at::ivalue::Future> future_;
|
| 305 |
+
|
| 306 |
+
bool timingEnabled_;
|
| 307 |
+
// unique id used to tell the trace buffer that this
|
| 308 |
+
// work has completed
|
| 309 |
+
c10::optional<uint64_t> trace_id_;
|
| 310 |
+
friend class ProcessGroupNCCL;
|
| 311 |
+
};
|
| 312 |
+
|
| 313 |
+
class CoalescedWorkNCCL
|
| 314 |
+
: public Work,
|
| 315 |
+
public std::enable_shared_from_this<CoalescedWorkNCCL> {
|
| 316 |
+
public:
|
| 317 |
+
// Constructor takes a list of WorkNCCL works
|
| 318 |
+
CoalescedWorkNCCL(
|
| 319 |
+
std::vector<ProcessGroupNCCL::WorkNCCL> works,
|
| 320 |
+
int rank,
|
| 321 |
+
OpType opType);
|
| 322 |
+
|
| 323 |
+
~CoalescedWorkNCCL() override;
|
| 324 |
+
|
| 325 |
+
// Same as calling synchronize() for NCCL work.
|
| 326 |
+
bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
|
| 327 |
+
|
| 328 |
+
protected:
|
| 329 |
+
// The cached list of CUDA devices to operate on
|
| 330 |
+
std::vector<ProcessGroupNCCL::WorkNCCL> works_;
|
| 331 |
+
|
| 332 |
+
friend class ProcessGroupNCCL;
|
| 333 |
+
};
|
| 334 |
+
|
| 335 |
+
struct Options : Backend::Options {
|
| 336 |
+
// NOTE: timeout in ProcessGroupNCCL::Options denote the timeout for
|
| 337 |
+
// operations. This is only used when blockingWait_ is enabled.
|
| 338 |
+
explicit Options(bool is_high_priority_stream = false);
|
| 339 |
+
|
| 340 |
+
// return intrusive_ptr of the object
|
| 341 |
+
static c10::intrusive_ptr<Options> create(
|
| 342 |
+
bool is_high_priority_stream = false) {
|
| 343 |
+
return c10::make_intrusive<Options>(is_high_priority_stream);
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
// Schedule NCCL operations on high priority CUDA streams
|
| 347 |
+
bool is_high_priority_stream;
|
| 348 |
+
|
| 349 |
+
#ifdef NCCL_HAS_COMM_NONBLOCKING
|
| 350 |
+
// Configure ranks
|
| 351 |
+
ncclConfig_t config = NCCL_CONFIG_INITIALIZER;
|
| 352 |
+
#endif
|
| 353 |
+
|
| 354 |
+
// Optional "parent" backend and color to create communicators from
|
| 355 |
+
// via `ncclCommSplit`
|
| 356 |
+
std::shared_ptr<ProcessGroupNCCL> split_from;
|
| 357 |
+
int64_t split_color{0};
|
| 358 |
+
};
|
| 359 |
+
|
| 360 |
+
// If you wish to create multiple process groups, each with a potentially
|
| 361 |
+
// different rank and size, you can do so by passing a new store instance
|
| 362 |
+
// to each one. If you have only a single store object, you can
|
| 363 |
+
// use the `c10d::PrefixStore` to derive scoped instances.
|
| 364 |
+
// This is also what the Python API in torch.distributed does.
|
| 365 |
+
//
|
| 366 |
+
// The process group instance keeps a reference to the store because
|
| 367 |
+
// it may be used long after the constructor runs. In fact, the constructor
|
| 368 |
+
// doesn't create any NCCL communicators. A single NCCL communicator can
|
| 369 |
+
// only be used on a specific set of devices, and are therefore created
|
| 370 |
+
// on-demand when a collective runs. If another collective is executed later,
|
| 371 |
+
// against a different set of devices, the process group creates another NCCL
|
| 372 |
+
// communicator. These NCCL communicators are cached and reused if possible.
|
| 373 |
+
//
|
| 374 |
+
ProcessGroupNCCL(
|
| 375 |
+
const c10::intrusive_ptr<Store>& store,
|
| 376 |
+
int rank,
|
| 377 |
+
int size,
|
| 378 |
+
c10::intrusive_ptr<Options> options = Options::create());
|
| 379 |
+
|
| 380 |
+
// This constructor includes the deprecated `groupName` argument.
|
| 381 |
+
// If you have existing code that uses the `groupName`, you can replace
|
| 382 |
+
// it by specifying a `c10d::PrefixStore(groupName, store)` for store.
|
| 383 |
+
C10_DEPRECATED ProcessGroupNCCL(
|
| 384 |
+
const c10::intrusive_ptr<Store>& store,
|
| 385 |
+
int rank,
|
| 386 |
+
int size,
|
| 387 |
+
const std::string& groupName,
|
| 388 |
+
c10::intrusive_ptr<Options> options = Options::create())
|
| 389 |
+
: ProcessGroupNCCL(store, rank, size, options) {}
|
| 390 |
+
|
| 391 |
+
~ProcessGroupNCCL() override;
|
| 392 |
+
|
| 393 |
+
c10::intrusive_ptr<Options> getOptions() {
|
| 394 |
+
return options_;
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
const std::string getBackendName() const override {
|
| 398 |
+
return std::string(NCCL_BACKEND_NAME);
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
void startCoalescing() override;
|
| 402 |
+
|
| 403 |
+
c10::intrusive_ptr<Work> endCoalescing() override;
|
| 404 |
+
|
| 405 |
+
c10::intrusive_ptr<Work> broadcast(
|
| 406 |
+
std::vector<at::Tensor>& tensors,
|
| 407 |
+
const BroadcastOptions& opts = BroadcastOptions()) override;
|
| 408 |
+
|
| 409 |
+
c10::intrusive_ptr<Work> _broadcast_oop(
|
| 410 |
+
std::vector<at::Tensor>& outputTensors,
|
| 411 |
+
std::vector<at::Tensor>& inputTensors,
|
| 412 |
+
const BroadcastOptions& opts = BroadcastOptions());
|
| 413 |
+
|
| 414 |
+
c10::intrusive_ptr<Work> allreduce_sparse(
|
| 415 |
+
std::vector<at::Tensor>& tensors,
|
| 416 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 417 |
+
|
| 418 |
+
c10::intrusive_ptr<Work> allreduce(
|
| 419 |
+
std::vector<at::Tensor>& tensors,
|
| 420 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 421 |
+
|
| 422 |
+
c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 423 |
+
std::vector<at::Tensor>& tensors,
|
| 424 |
+
const AllreduceCoalescedOptions& opts =
|
| 425 |
+
AllreduceCoalescedOptions()) override;
|
| 426 |
+
|
| 427 |
+
c10::intrusive_ptr<Work> reduce(
|
| 428 |
+
std::vector<at::Tensor>& tensors,
|
| 429 |
+
const ReduceOptions& opts = ReduceOptions()) override;
|
| 430 |
+
|
| 431 |
+
c10::intrusive_ptr<Work> _reduce_oop(
|
| 432 |
+
std::vector<at::Tensor>& outputTensors,
|
| 433 |
+
std::vector<at::Tensor>& inputTensors,
|
| 434 |
+
const ReduceOptions& opts = ReduceOptions());
|
| 435 |
+
|
| 436 |
+
c10::intrusive_ptr<Work> allgather(
|
| 437 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 438 |
+
std::vector<at::Tensor>& inputTensors,
|
| 439 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 440 |
+
|
| 441 |
+
c10::intrusive_ptr<Work> _allgather_base(
|
| 442 |
+
at::Tensor& outputbuffer,
|
| 443 |
+
at::Tensor& inputbuffer,
|
| 444 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 445 |
+
|
| 446 |
+
c10::intrusive_ptr<Work> allgather_coalesced(
|
| 447 |
+
std::vector<std::vector<at::Tensor>>& outputTensorLists,
|
| 448 |
+
std::vector<at::Tensor>& inputTensors,
|
| 449 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 450 |
+
|
| 451 |
+
c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
|
| 452 |
+
std::vector<at::Tensor>& outputs,
|
| 453 |
+
std::vector<at::Tensor>& inputs,
|
| 454 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 455 |
+
|
| 456 |
+
c10::intrusive_ptr<Work> reduce_scatter(
|
| 457 |
+
std::vector<at::Tensor>& outputTensors,
|
| 458 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 459 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 460 |
+
|
| 461 |
+
c10::intrusive_ptr<Work> _reduce_scatter_base(
|
| 462 |
+
at::Tensor& outputTensor,
|
| 463 |
+
at::Tensor& inputTensor,
|
| 464 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 465 |
+
|
| 466 |
+
c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
|
| 467 |
+
std::vector<at::Tensor>& outputs,
|
| 468 |
+
std::vector<at::Tensor>& inputs,
|
| 469 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 470 |
+
|
| 471 |
+
c10::intrusive_ptr<Work> barrier(
|
| 472 |
+
const BarrierOptions& opts = BarrierOptions()) override;
|
| 473 |
+
|
| 474 |
+
c10::intrusive_ptr<Work> alltoall_base(
|
| 475 |
+
at::Tensor& outputTensor,
|
| 476 |
+
at::Tensor& inputTensor,
|
| 477 |
+
std::vector<int64_t>& outputSplitSizes,
|
| 478 |
+
std::vector<int64_t>& inputSplitSizes,
|
| 479 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 480 |
+
|
| 481 |
+
c10::intrusive_ptr<Work> alltoall(
|
| 482 |
+
std::vector<at::Tensor>& outputTensors,
|
| 483 |
+
std::vector<at::Tensor>& inputTensors,
|
| 484 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 485 |
+
|
| 486 |
+
c10::intrusive_ptr<Work> send(
|
| 487 |
+
std::vector<at::Tensor>& tensors,
|
| 488 |
+
int dstRank,
|
| 489 |
+
int tag) override;
|
| 490 |
+
|
| 491 |
+
c10::intrusive_ptr<Work> recv(
|
| 492 |
+
std::vector<at::Tensor>& tensors,
|
| 493 |
+
int srcRank,
|
| 494 |
+
int tag) override;
|
| 495 |
+
|
| 496 |
+
void groupStart();
|
| 497 |
+
|
| 498 |
+
void groupEnd();
|
| 499 |
+
|
| 500 |
+
void groupEndNonblocking(std::vector<std::shared_ptr<NCCLComm>> comms);
|
| 501 |
+
|
| 502 |
+
// Unsupported Ops
|
| 503 |
+
c10::intrusive_ptr<Work> gather(
|
| 504 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 505 |
+
std::vector<at::Tensor>& inputTensors,
|
| 506 |
+
const GatherOptions& opts = GatherOptions()) override;
|
| 507 |
+
|
| 508 |
+
c10::intrusive_ptr<Work> scatter(
|
| 509 |
+
std::vector<at::Tensor>& outputTensors,
|
| 510 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 511 |
+
const ScatterOptions& opts = ScatterOptions()) override;
|
| 512 |
+
|
| 513 |
+
c10::intrusive_ptr<Work> recvAnysource(
|
| 514 |
+
std::vector<at::Tensor>& tensors,
|
| 515 |
+
int tag) override;
|
| 516 |
+
|
| 517 |
+
// Agrees on an initial sequence number for the whole group by having rank 0
|
| 518 |
+
// create it and broadcast it to other ranks using the store.
|
| 519 |
+
void setSequenceNumberForGroup() override;
|
| 520 |
+
|
| 521 |
+
// Retrieves the current sequence number for the whole group, which should be
|
| 522 |
+
// in sync. If the returned number is not consistent across the group, it
|
| 523 |
+
// may indicate that there is some sort of collective desynchronization.
|
| 524 |
+
uint64_t getSequenceNumberForGroup() override;
|
| 525 |
+
|
| 526 |
+
// Return the total number of splits the communicators held by this process
|
| 527 |
+
// group have performed.
|
| 528 |
+
uint64_t getCommSplitCounter() const;
|
| 529 |
+
|
| 530 |
+
void registerOnCompletionHook(
|
| 531 |
+
std::function<void(std::shared_ptr<WorkInfo>)>&& hook) override;
|
| 532 |
+
void waitForPendingWorks() override;
|
| 533 |
+
|
| 534 |
+
void enableCollectivesTiming() override;
|
| 535 |
+
|
| 536 |
+
// Provide an API for users to define their own ways to store NCCL debug info.
|
| 537 |
+
void registerDebugInfoWriter(std::unique_ptr<DebugInfoWriter> writer);
|
| 538 |
+
|
| 539 |
+
// Provides an API to abort the ProcessGroup (similar to ncclCommAbort)
|
| 540 |
+
// instead of relying on ProcessGroupNCCL destructor.
|
| 541 |
+
void abort(c10::optional<std::string> abortReason = c10::nullopt);
|
| 542 |
+
|
| 543 |
+
void shutdown();
|
| 544 |
+
|
| 545 |
+
protected:
|
| 546 |
+
// Helper that broadcasts nccl unique ID to all ranks through the store
|
| 547 |
+
void broadcastUniqueNCCLID(
|
| 548 |
+
ncclUniqueId* ncclID,
|
| 549 |
+
bool isSingleP2POp,
|
| 550 |
+
const std::string& devicesKey,
|
| 551 |
+
int p2pRank);
|
| 552 |
+
|
| 553 |
+
// Helper that either looks up the cached NCCL communicators or creates
|
| 554 |
+
// a new set of NCCL communicators as a cache entry
|
| 555 |
+
std::vector<std::shared_ptr<NCCLComm>>& getNCCLComm(
|
| 556 |
+
const std::string& devicesKey,
|
| 557 |
+
const std::vector<at::Device>& devices,
|
| 558 |
+
OpType opType,
|
| 559 |
+
int p2pRank = 0,
|
| 560 |
+
bool isSendRecvSelf = false);
|
| 561 |
+
|
| 562 |
+
// Wrapper method which can be overridden for tests.
|
| 563 |
+
virtual std::exception_ptr checkForNCCLErrors(
|
| 564 |
+
const std::vector<std::shared_ptr<NCCLComm>>& ncclComms);
|
| 565 |
+
|
| 566 |
+
virtual c10::intrusive_ptr<ProcessGroupNCCL::WorkNCCL> initWork(
|
| 567 |
+
std::vector<at::Device> devices,
|
| 568 |
+
int rank,
|
| 569 |
+
OpType opType,
|
| 570 |
+
const char* profilingTitle = nullptr,
|
| 571 |
+
const std::vector<at::Tensor>& inputs = {},
|
| 572 |
+
const std::vector<at::Tensor>& outputs = {});
|
| 573 |
+
|
| 574 |
+
virtual c10::intrusive_ptr<ProcessGroupNCCL::CoalescedWorkNCCL>
|
| 575 |
+
initCoalescedWork(
|
| 576 |
+
const std::vector<c10::intrusive_ptr<Work>>& works,
|
| 577 |
+
int rank,
|
| 578 |
+
OpType opType);
|
| 579 |
+
|
| 580 |
+
private:
|
| 581 |
+
// Helper that encapsulates work shared across all collective communication
|
| 582 |
+
// primitives. The callbacks have the following signatures:
|
| 583 |
+
//
|
| 584 |
+
// ncclResult_t fn(at::Tensor& input, at::Tensor& output,
|
| 585 |
+
// ncclComm_t, at::cuda::CUDAStream&);
|
| 586 |
+
// void {pre,post}(std::vector<at::cuda::CUDAStream&>);
|
| 587 |
+
template <typename Fn>
|
| 588 |
+
c10::intrusive_ptr<Work> collective(
|
| 589 |
+
std::vector<at::Tensor>& input,
|
| 590 |
+
std::vector<at::Tensor>& output,
|
| 591 |
+
Fn fn,
|
| 592 |
+
OpType opType,
|
| 593 |
+
const char* profilingTitle = nullptr,
|
| 594 |
+
bool avoidRecordStreams = false);
|
| 595 |
+
|
| 596 |
+
template <typename Fn, typename PreProcess, typename PostProcess>
|
| 597 |
+
c10::intrusive_ptr<Work> collective(
|
| 598 |
+
std::vector<at::Tensor>& input,
|
| 599 |
+
std::vector<at::Tensor>& output,
|
| 600 |
+
Fn fn,
|
| 601 |
+
PreProcess pre,
|
| 602 |
+
PostProcess post,
|
| 603 |
+
OpType opType,
|
| 604 |
+
const char* profilingTitle = nullptr,
|
| 605 |
+
bool avoidRecordStreams = false);
|
| 606 |
+
|
| 607 |
+
// Helper that encapsulates work shared across point-to-point communication
|
| 608 |
+
// primitives. It is the same structure as the helper used for collective
|
| 609 |
+
// communication primitives.
|
| 610 |
+
template <typename Fn>
|
| 611 |
+
c10::intrusive_ptr<Work> pointToPoint(
|
| 612 |
+
std::vector<at::Tensor>& tensor,
|
| 613 |
+
Fn fn,
|
| 614 |
+
int peer,
|
| 615 |
+
OpType opType,
|
| 616 |
+
const char* profilingTitle = nullptr);
|
| 617 |
+
template <typename Fn, typename PreProcess, typename PostProcess>
|
| 618 |
+
c10::intrusive_ptr<Work> pointToPoint(
|
| 619 |
+
std::vector<at::Tensor>& tensor,
|
| 620 |
+
Fn fn,
|
| 621 |
+
int peer,
|
| 622 |
+
OpType opType,
|
| 623 |
+
PreProcess pre,
|
| 624 |
+
PostProcess post,
|
| 625 |
+
const char* profilingTitle);
|
| 626 |
+
|
| 627 |
+
c10::intrusive_ptr<Work> allreduce_impl(
|
| 628 |
+
std::vector<at::Tensor>& tensors,
|
| 629 |
+
const AllreduceOptions& opts = AllreduceOptions());
|
| 630 |
+
|
| 631 |
+
// Checks for NCCL errors on each of the communicators and returns an
|
| 632 |
+
// appropriate exception_ptr (nullptr if no errors).
|
| 633 |
+
static std::exception_ptr checkForNCCLErrorsInternal(
|
| 634 |
+
const std::vector<std::shared_ptr<NCCLComm>>& ncclComms);
|
| 635 |
+
|
| 636 |
+
// Function that runs as part of a separate thread and checks for errors on
|
| 637 |
+
// NCCL communicators. We need a separate thread to check for NCCL errors
|
| 638 |
+
// since we can't rely on the user calling certain methods like wait(),
|
| 639 |
+
// isCompleted() etc. to detect and remediate errors. In addition to this, we
|
| 640 |
+
// need a mechanism to safely abort and remove NCCL communicators from our
|
| 641 |
+
// cache. This can be done cleanly by having a thread for the ProcessGroupNCCL
|
| 642 |
+
// class. Attempting to modify the communicator cache from the WorkNCCL class
|
| 643 |
+
// might run into issues with object lifetime since the ProcessGroupNCCL
|
| 644 |
+
// object might get destroyed before the WorkNCCL object.
|
| 645 |
+
void ncclCommWatchdog();
|
| 646 |
+
|
| 647 |
+
// Performs a health check by initializing dummy NCCL communicators and then
|
| 648 |
+
// destroying them. This will help indicate and signal any NCCL-related issues
|
| 649 |
+
// prior to the first collective. The actual initialization and subsequent
|
| 650 |
+
// destruction is ran on a separate thread and the main thread is signalled
|
| 651 |
+
// about timeouts/errors to report to the application.
|
| 652 |
+
void runHealthCheck();
|
| 653 |
+
|
| 654 |
+
// Destroys initialized NCCL communicators in devNCCLComMap_ given by input
|
| 655 |
+
// key. Throws if there are no communicators to destroy. Also removes
|
| 656 |
+
// communicators from the cache and clears used device indices.
|
| 657 |
+
void destroyNCCLComms(const std::string& devNCCLCommMapKey);
|
| 658 |
+
|
| 659 |
+
// Watchdog's inside loop.
|
| 660 |
+
// Takes care of cleaning up completed work, and aborting upon failure or
|
| 661 |
+
// timeout.
|
| 662 |
+
void workCleanupLoop();
|
| 663 |
+
|
| 664 |
+
void runHookLoop();
|
| 665 |
+
|
| 666 |
+
// In the timeout case and we will dump debug info such as the NCCL flight
|
| 667 |
+
// recorder to storage. Down the road, if we have more complicated or blocking
|
| 668 |
+
// operations, we might need to use a side thread to do it.
|
| 669 |
+
void dumpDebuggingInfo();
|
| 670 |
+
|
| 671 |
+
// Desync debug helper
|
| 672 |
+
void logWorkStart(WorkNCCL& work);
|
| 673 |
+
|
| 674 |
+
// Desync debug helper
|
| 675 |
+
void logWorkEnd(WorkNCCL& work);
|
| 676 |
+
|
| 677 |
+
protected:
|
| 678 |
+
// Function that runs as part of a separate thread aside from watchdog
|
| 679 |
+
// thread because we need to check the heartbeat from watchdog thread
|
| 680 |
+
// so that when we get stuck in some NCCL/CUDA calls,
|
| 681 |
+
// we can dump the debugging information and abort the process.
|
| 682 |
+
virtual void heartbeatMonitor();
|
| 683 |
+
|
| 684 |
+
// Function that directly trigger std::abort so that the whole process
|
| 685 |
+
// gets terminated.
|
| 686 |
+
virtual void terminateProcess(std::string errMsg);
|
| 687 |
+
|
| 688 |
+
// Check the writeDebugInfo_ flag and if it is true, we do nothing.
|
| 689 |
+
// If not, we first set the flag to be true and return a thread which will
|
| 690 |
+
// get and write the debug info into storage.
|
| 691 |
+
c10::optional<std::thread> tryWriteDebugInfo();
|
| 692 |
+
|
| 693 |
+
// When watchdog timeout, this function will be called and return debug info
|
| 694 |
+
// for users. For now we only get information from retrieveDesyncReport.
|
| 695 |
+
// We are working on enabling more useful debug information for watchdog
|
| 696 |
+
// timeout.
|
| 697 |
+
virtual std::string getNCCLWatchdogDebugInfo();
|
| 698 |
+
|
| 699 |
+
static const int64_t kWatchdogThreadSleepMillis;
|
| 700 |
+
|
| 701 |
+
// The store is used to broadcast the NCCL unique ID of rank 0.
|
| 702 |
+
c10::intrusive_ptr<Store> store_;
|
| 703 |
+
|
| 704 |
+
bool storeError_{false};
|
| 705 |
+
|
| 706 |
+
const c10::intrusive_ptr<Options> options_;
|
| 707 |
+
|
| 708 |
+
// The number of NCCL communicators that have been created during
|
| 709 |
+
// the lifetime of this process group. This sequence number is
|
| 710 |
+
// used to scope keys used in the store.
|
| 711 |
+
uint64_t ncclCommCounter_{0};
|
| 712 |
+
|
| 713 |
+
// The store keys to trace the last NCCL collective kernel CUDA events - start
|
| 714 |
+
// event and end event respectively. These are used to do desync root cause
|
| 715 |
+
// analysis.
|
| 716 |
+
const std::string traceKeyStart_;
|
| 717 |
+
const std::string traceKeyEnd_;
|
| 718 |
+
|
| 719 |
+
// The NCCL communicator that the process group has cached.
|
| 720 |
+
//
|
| 721 |
+
// For collective operations:
|
| 722 |
+
// The key is a list of GPU devices that an operation is operating on
|
| 723 |
+
// The GPU devices are stored in a device sequence and the cache NCCL
|
| 724 |
+
// communicator is associated with this GPU device sequence
|
| 725 |
+
//
|
| 726 |
+
// e.g. If the process group op only uses device 0, then the value of
|
| 727 |
+
// the used device string stored (value of the hashmap) would be "0".
|
| 728 |
+
//
|
| 729 |
+
// If the process group op uses device 0 - 7 and the each tensor of the
|
| 730 |
+
// input tensor list is on device, 0, 1, 2, 3, 4, 5, 6, 7 separately,
|
| 731 |
+
// then the value of the used device string (key) stored would be
|
| 732 |
+
// "0,1,2,3,4,5,6,7"
|
| 733 |
+
//
|
| 734 |
+
// If the process group op uses device 0 - 7 and the each tensor of the
|
| 735 |
+
// input tensor list is on device, 0, 4, 5, 6, 7, 1, 2, 3 separately,
|
| 736 |
+
// then the value of the used device string stored would be
|
| 737 |
+
// "0,4,5,6,7,1,2,3"
|
| 738 |
+
//
|
| 739 |
+
// Note that the order of the device for the tensor list matters.
|
| 740 |
+
//
|
| 741 |
+
// For point-to-point operations:
|
| 742 |
+
// The key is a string of my current rank and the peer process rank.
|
| 743 |
+
// e.g. If process 1 and process 2 are involved in a point-to-point
|
| 744 |
+
// communication, the key will be "1:2" on both processes. Note: this is for
|
| 745 |
+
// the scenario where there is only 1 GPU per process. When it comes to
|
| 746 |
+
// multiple GPUs per process, this part may need to redesigned.
|
| 747 |
+
std::unordered_map<std::string, std::vector<std::shared_ptr<NCCLComm>>>
|
| 748 |
+
devNCCLCommMap_;
|
| 749 |
+
|
| 750 |
+
// The NCCL communicators currently in process of being initialized.
|
| 751 |
+
std::unordered_map<std::string, std::vector<std::shared_ptr<NCCLComm>>>
|
| 752 |
+
inInitializationCommMap_;
|
| 753 |
+
|
| 754 |
+
// Map from ncclUniqueId to appropriate communicator.
|
| 755 |
+
std::unordered_map<std::string, std::vector<std::shared_ptr<NCCLComm>>>
|
| 756 |
+
ncclIdToCommMap_;
|
| 757 |
+
|
| 758 |
+
// Mutex to guard maps like devNCCLCommMap_ and ncclIdToCommMap_.
|
| 759 |
+
std::mutex mutex_;
|
| 760 |
+
|
| 761 |
+
// Heartbeat of watchdog thread.
|
| 762 |
+
uint64_t heartbeat_;
|
| 763 |
+
|
| 764 |
+
// The time interval used for deciding whether there is no watchdog heartbeat.
|
| 765 |
+
int heartbeatTimeoutInSec_;
|
| 766 |
+
|
| 767 |
+
// Size of ring buffer where we store NCCL Traces for debugging.
|
| 768 |
+
int ncclTraceBufferSize_;
|
| 769 |
+
|
| 770 |
+
// We gate the heartbeat monitor thread so that we can roll it out gradually.
|
| 771 |
+
std::atomic<bool> monitorThreadEnabled_;
|
| 772 |
+
|
| 773 |
+
// Monitor thread which checks the heartbeat of Watchdog thread.
|
| 774 |
+
// If the monitor thread finds there is no heartbeat, it will dump debug info
|
| 775 |
+
// and then kill the watchdog thread to avoid hang.
|
| 776 |
+
std::thread ncclHeartbeatMonitorThread_;
|
| 777 |
+
|
| 778 |
+
// Watchdog thread which looks for errors on the cached NCCL communicators.
|
| 779 |
+
std::thread ncclCommWatchdogThread_;
|
| 780 |
+
|
| 781 |
+
std::thread onCompletionHookThread_;
|
| 782 |
+
|
| 783 |
+
// Whether or not we should terminate the watchdog and workCleanup threads.
|
| 784 |
+
std::atomic<bool> terminateProcessGroup_;
|
| 785 |
+
|
| 786 |
+
// Whether or not we should terminate the heartbeat monitoring threads.
|
| 787 |
+
std::atomic<bool> terminateHeartbeatMonitorThread_;
|
| 788 |
+
|
| 789 |
+
// Whether we are in the shutdown mode when we are trying to get debug info,
|
| 790 |
+
// such as desync report.
|
| 791 |
+
std::atomic<bool> collectiveDebugInfoMode_;
|
| 792 |
+
|
| 793 |
+
// Whether there are hooks pending to be fired
|
| 794 |
+
std::atomic<bool> hasPendingHooks_;
|
| 795 |
+
|
| 796 |
+
// Mutex to Guard workMetaList_
|
| 797 |
+
std::mutex workMetaListMutex_;
|
| 798 |
+
|
| 799 |
+
// Mutex to Guard monitorWakeUpCV_
|
| 800 |
+
std::mutex monitorMutex_;
|
| 801 |
+
|
| 802 |
+
bool writeDebugInfo_ = false;
|
| 803 |
+
|
| 804 |
+
// Mutex to Guard the check of writeDebugInfo_
|
| 805 |
+
std::mutex writeDebugInfoMutex_;
|
| 806 |
+
|
| 807 |
+
// Condition Variable for watchdog thread sleep
|
| 808 |
+
std::condition_variable workMetaListCV_;
|
| 809 |
+
|
| 810 |
+
// Condition Variable for monitor thread to wake up early
|
| 811 |
+
std::condition_variable monitorWakeUpCV_;
|
| 812 |
+
|
| 813 |
+
// Vector to Store WorkNCCL pointers
|
| 814 |
+
std::list<ProcessGroupNCCL::WorkNCCL> workMetaList_;
|
| 815 |
+
|
| 816 |
+
// Mutex to Guard workMetaList_
|
| 817 |
+
std::mutex completedWorkListMutex_;
|
| 818 |
+
|
| 819 |
+
// Condition Variable for watchdog thread sleep
|
| 820 |
+
std::condition_variable completedWorkListCV_;
|
| 821 |
+
|
| 822 |
+
std::list<ProcessGroupNCCL::WorkNCCL> completedWorkList_;
|
| 823 |
+
|
| 824 |
+
// Add Work Pointer to workVector
|
| 825 |
+
void workEnqueue(c10::intrusive_ptr<ProcessGroupNCCL::WorkNCCL>);
|
| 826 |
+
|
| 827 |
+
// The CUDA streams used by NCCL kernels
|
| 828 |
+
std::unordered_map<std::string, std::vector<at::cuda::CUDAStream>>
|
| 829 |
+
ncclStreams_;
|
| 830 |
+
|
| 831 |
+
// The CUDA events used to sync NCCL streams
|
| 832 |
+
std::unordered_map<std::string, std::vector<at::cuda::CUDAEvent>> ncclEvents_;
|
| 833 |
+
|
| 834 |
+
// Device Indexes used for all collectives in this group
|
| 835 |
+
std::set<int> usedDeviceIdxs_;
|
| 836 |
+
|
| 837 |
+
// Flag to denote if a coalescing groupStart/groupEnd block is active
|
| 838 |
+
int coalescing_state_ = 0;
|
| 839 |
+
|
| 840 |
+
// Stores device indexes for all collectives run inside a coalescing block
|
| 841 |
+
std::vector<std::vector<at::Device>> coalescedDevices_;
|
| 842 |
+
|
| 843 |
+
// Stores communicators for all collectives run inside a coalescing block
|
| 844 |
+
std::vector<std::vector<std::shared_ptr<NCCLComm>>> coalescedComms_;
|
| 845 |
+
|
| 846 |
+
// map from the key: "group name + pg counter (ID)" to the
|
| 847 |
+
// unique NCCL ID count. This needs to be group and pg specific
|
| 848 |
+
//
|
| 849 |
+
// For each process group, we need a uniform unique NCCL ID counter to ensure
|
| 850 |
+
// that NCCL operation in this process group can be completed successfully.
|
| 851 |
+
// Since each process group ID belongs to a group name, the key to this map
|
| 852 |
+
// is a combination of group name and ProcessGroupNCCL ID.
|
| 853 |
+
static std::unordered_map<std::string, ssize_t> pgUniqueNCCLIDCnt_;
|
| 854 |
+
|
| 855 |
+
// map from group name to the pg counter (ID) within that group
|
| 856 |
+
//
|
| 857 |
+
// For each group with the "group name" (which is the key), we need to
|
| 858 |
+
// keep track of a unique process group ID when creating a new
|
| 859 |
+
// ProcessGroupNCCL for this "group name". Therefore, the value of this
|
| 860 |
+
// map keeps the unique ProcessGroupNCCL's ID for a specific group with
|
| 861 |
+
// the "group name". The reason we need a per-group process group ID counter
|
| 862 |
+
// is that different group can have different ranks and we need ensure that
|
| 863 |
+
// each group has its own uniform process group ID for all its ranks.
|
| 864 |
+
static std::unordered_map<std::string, ssize_t> processGroupCounterMap_;
|
| 865 |
+
|
| 866 |
+
// Whether or not wait() and synchronize() are blocking operations that wait
|
| 867 |
+
// for the operation to complete.
|
| 868 |
+
bool blockingWait_ = false;
|
| 869 |
+
|
| 870 |
+
// Whether or not to hook the cache allocator to register all allocated
|
| 871 |
+
// tensors
|
| 872 |
+
bool useTensorRegisterAllocatorHook_ = false;
|
| 873 |
+
|
| 874 |
+
// Whether or not the workCleanupThread is used to perform async error
|
| 875 |
+
// handling.
|
| 876 |
+
ErrorHandlingMode asyncErrorHandling_ = NoHandling;
|
| 877 |
+
|
| 878 |
+
// Whether or not to enable timeout root cause analysis.
|
| 879 |
+
bool desyncDebug_;
|
| 880 |
+
|
| 881 |
+
// Whether or not to dump debug info on timeout
|
| 882 |
+
bool dumpOnTimeout_;
|
| 883 |
+
|
| 884 |
+
// Whether or not to create start CUDAEvent and enable timing for start
|
| 885 |
+
// and end events. Note that enableTiming_ is always true if desyncDebug_
|
| 886 |
+
// is set to true.
|
| 887 |
+
std::atomic<bool> enableTiming_;
|
| 888 |
+
|
| 889 |
+
// Whether or not TORCH_NCCL_AVOID_RECORD_STREAMS was set
|
| 890 |
+
bool avoidRecordStreams_ = false;
|
| 891 |
+
|
| 892 |
+
// Set of communicators that this process group has aborted and their
|
| 893 |
+
// ncclUniqueId has been written to the store. We don't need a lock
|
| 894 |
+
// for this map since only the watchdog thread accesses this set. The
|
| 895 |
+
// set contains the string representation of ncclUniqueId.
|
| 896 |
+
std::unordered_set<std::string> abortedComms_;
|
| 897 |
+
|
| 898 |
+
// The number of active ncclGroupStart() calls. This counter will be increased
|
| 899 |
+
// by 1 when ncclGroupStart() is called and decreased by 1 when ncclGroupEnd()
|
| 900 |
+
// is called.
|
| 901 |
+
static thread_local uint64_t ncclActiveGroupCounter_;
|
| 902 |
+
|
| 903 |
+
// Counting for the sequential number of NCCL collective call.
|
| 904 |
+
uint64_t seq_{0};
|
| 905 |
+
|
| 906 |
+
std::exception_ptr watchDogException_ = nullptr;
|
| 907 |
+
|
| 908 |
+
// The callback function to store NCCL debug info.
|
| 909 |
+
std::unique_ptr<DebugInfoWriter> debugInfoWriter_ = nullptr;
|
| 910 |
+
|
| 911 |
+
size_t uid_;
|
| 912 |
+
};
|
| 913 |
+
|
| 914 |
+
TORCH_API std::string dump_nccl_trace();
|
| 915 |
+
|
| 916 |
+
} // namespace c10d
|
| 917 |
+
|
| 918 |
+
#endif // USE_C10D_NCCL
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <vector>
|
| 4 |
+
|
| 5 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 6 |
+
|
| 7 |
+
namespace c10d {
|
| 8 |
+
|
| 9 |
+
constexpr const char* ROUND_ROBIN_BACKEND_NAME = "round_robin";
|
| 10 |
+
|
| 11 |
+
// ProcessGroupRoundRobin implements simple load balancing.
|
| 12 |
+
//
|
| 13 |
+
// It is constructed with multiple processes groups. Each call is dispatched to
|
| 14 |
+
// one of the specified process groups in a round robin fashion. Each process
|
| 15 |
+
// group instance must have the same rank and size.
|
| 16 |
+
//
|
| 17 |
+
// All functions of the class are expected to be called in the same order
|
| 18 |
+
// across all processes in the process group. This is the only way that we
|
| 19 |
+
// can guarantee to match up the same calls among all processes.
|
| 20 |
+
//
|
| 21 |
+
class TORCH_API ProcessGroupRoundRobin final : public ProcessGroup {
|
| 22 |
+
public:
|
| 23 |
+
explicit ProcessGroupRoundRobin(
|
| 24 |
+
int rank,
|
| 25 |
+
int size,
|
| 26 |
+
std::vector<c10::intrusive_ptr<ProcessGroup>> processGroups);
|
| 27 |
+
|
| 28 |
+
~ProcessGroupRoundRobin() override;
|
| 29 |
+
|
| 30 |
+
const std::string getBackendName() const override {
|
| 31 |
+
return std::string(ROUND_ROBIN_BACKEND_NAME);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
c10::intrusive_ptr<Work> broadcast(
|
| 35 |
+
std::vector<at::Tensor>& tensors,
|
| 36 |
+
const BroadcastOptions& opts = BroadcastOptions()) override;
|
| 37 |
+
|
| 38 |
+
c10::intrusive_ptr<Work> allreduce(
|
| 39 |
+
std::vector<at::Tensor>& tensors,
|
| 40 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 41 |
+
|
| 42 |
+
c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 43 |
+
std::vector<at::Tensor>& tensors,
|
| 44 |
+
const AllreduceCoalescedOptions& opts =
|
| 45 |
+
AllreduceCoalescedOptions()) override;
|
| 46 |
+
|
| 47 |
+
c10::intrusive_ptr<Work> reduce(
|
| 48 |
+
std::vector<at::Tensor>& tensors,
|
| 49 |
+
const ReduceOptions& opts = ReduceOptions()) override;
|
| 50 |
+
|
| 51 |
+
c10::intrusive_ptr<Work> allgather(
|
| 52 |
+
std::vector<std::vector<at::Tensor>>& outputs,
|
| 53 |
+
std::vector<at::Tensor>& inputs,
|
| 54 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 55 |
+
|
| 56 |
+
c10::intrusive_ptr<Work> _allgather_base(
|
| 57 |
+
at::Tensor& outputBuffer,
|
| 58 |
+
at::Tensor& inputBuffer,
|
| 59 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 60 |
+
|
| 61 |
+
c10::intrusive_ptr<Work> allgather_coalesced(
|
| 62 |
+
std::vector<std::vector<at::Tensor>>& outputTensorLists,
|
| 63 |
+
std::vector<at::Tensor>& inputTensors,
|
| 64 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 65 |
+
|
| 66 |
+
c10::intrusive_ptr<Work> gather(
|
| 67 |
+
std::vector<std::vector<at::Tensor>>& outputs,
|
| 68 |
+
std::vector<at::Tensor>& inputs,
|
| 69 |
+
const GatherOptions& opts = GatherOptions()) override;
|
| 70 |
+
|
| 71 |
+
c10::intrusive_ptr<Work> scatter(
|
| 72 |
+
std::vector<at::Tensor>& outputs,
|
| 73 |
+
std::vector<std::vector<at::Tensor>>& inputs,
|
| 74 |
+
const ScatterOptions& opts = ScatterOptions()) override;
|
| 75 |
+
|
| 76 |
+
c10::intrusive_ptr<Work> reduce_scatter(
|
| 77 |
+
std::vector<at::Tensor>& outputs,
|
| 78 |
+
std::vector<std::vector<at::Tensor>>& inputs,
|
| 79 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 80 |
+
|
| 81 |
+
c10::intrusive_ptr<Work> alltoall_base(
|
| 82 |
+
at::Tensor& outputTensor,
|
| 83 |
+
at::Tensor& inputTensor,
|
| 84 |
+
std::vector<int64_t>& outputSplitSizes,
|
| 85 |
+
std::vector<int64_t>& inputSplitSizes,
|
| 86 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 87 |
+
|
| 88 |
+
c10::intrusive_ptr<Work> send(
|
| 89 |
+
std::vector<at::Tensor>& tensors,
|
| 90 |
+
int dstRank,
|
| 91 |
+
int tag) override;
|
| 92 |
+
|
| 93 |
+
c10::intrusive_ptr<Work> recv(
|
| 94 |
+
std::vector<at::Tensor>& tensors,
|
| 95 |
+
int srcRank,
|
| 96 |
+
int tag) override;
|
| 97 |
+
|
| 98 |
+
c10::intrusive_ptr<Work> recvAnysource(
|
| 99 |
+
std::vector<at::Tensor>& tensors,
|
| 100 |
+
int tag) override;
|
| 101 |
+
|
| 102 |
+
c10::intrusive_ptr<Work> barrier(
|
| 103 |
+
const BarrierOptions& opts = BarrierOptions()) override;
|
| 104 |
+
|
| 105 |
+
private:
|
| 106 |
+
std::vector<c10::intrusive_ptr<ProcessGroup>> processGroups_;
|
| 107 |
+
std::vector<c10::intrusive_ptr<ProcessGroup>>::const_iterator iterator_;
|
| 108 |
+
|
| 109 |
+
// Returns the next ProcessGroup to use.
|
| 110 |
+
const c10::intrusive_ptr<ProcessGroup>& next();
|
| 111 |
+
};
|
| 112 |
+
|
| 113 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_UCC
|
| 4 |
+
|
| 5 |
+
#include <torch/csrc/distributed/c10d/UCCUtils.hpp>
|
| 6 |
+
|
| 7 |
+
#include <exception>
|
| 8 |
+
#include <memory>
|
| 9 |
+
#include <mutex>
|
| 10 |
+
#include <queue>
|
| 11 |
+
#include <thread>
|
| 12 |
+
#include <vector>
|
| 13 |
+
|
| 14 |
+
#include <torch/csrc/distributed/c10d/Backend.hpp>
|
| 15 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 16 |
+
#include <torch/csrc/distributed/c10d/Types.hpp>
|
| 17 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 18 |
+
#ifdef USE_CUDA
|
| 19 |
+
#include <ATen/cuda/CUDAEvent.h>
|
| 20 |
+
#include <c10/cuda/CUDAStream.h>
|
| 21 |
+
#endif
|
| 22 |
+
|
| 23 |
+
namespace c10d {
|
| 24 |
+
|
| 25 |
+
#define TORCH_UCC_DEVICE_NOT_SET -2
|
| 26 |
+
|
| 27 |
+
#ifdef USE_CUDA
|
| 28 |
+
#define SAVE_TENSORS(_TENSORS, _DATA) \
|
| 29 |
+
do { \
|
| 30 |
+
if ((_TENSORS)[0].device().is_cuda()) { \
|
| 31 |
+
for (const auto i : c10::irange((_TENSORS).size())) { \
|
| 32 |
+
c10::cuda::CUDACachingAllocator::recordStream( \
|
| 33 |
+
(_TENSORS)[i].storage().data_ptr(), (*stream)); \
|
| 34 |
+
} \
|
| 35 |
+
} else { \
|
| 36 |
+
(_DATA) = (_TENSORS); \
|
| 37 |
+
} \
|
| 38 |
+
} while (0)
|
| 39 |
+
|
| 40 |
+
#else
|
| 41 |
+
#define SAVE_TENSORS(_TENSORS, _DATA) (_DATA) = (_TENSORS);
|
| 42 |
+
#endif
|
| 43 |
+
|
| 44 |
+
constexpr const char* UCC_BACKEND_NAME = "ucc";
|
| 45 |
+
|
| 46 |
+
struct event_pool_t {
|
| 47 |
+
#ifdef USE_CUDA
|
| 48 |
+
std::queue<std::unique_ptr<at::cuda::CUDAEvent>> event_pool;
|
| 49 |
+
#endif
|
| 50 |
+
std::mutex event_pool_mutex;
|
| 51 |
+
};
|
| 52 |
+
|
| 53 |
+
class Comm;
|
| 54 |
+
|
| 55 |
+
// UCC does not support multiple CUDA devices per process.
|
| 56 |
+
class TORCH_API ProcessGroupUCC : public Backend {
|
| 57 |
+
private:
|
| 58 |
+
void set_timeout(ucc_coll_args_t& args);
|
| 59 |
+
|
| 60 |
+
public:
|
| 61 |
+
class WorkData {
|
| 62 |
+
public:
|
| 63 |
+
std::vector<at::Tensor> src;
|
| 64 |
+
std::vector<at::Tensor> dst;
|
| 65 |
+
std::vector<at::Tensor> flat;
|
| 66 |
+
WorkData() {}
|
| 67 |
+
virtual ~WorkData() = default;
|
| 68 |
+
};
|
| 69 |
+
class AlltoallWorkData : public WorkData {
|
| 70 |
+
public:
|
| 71 |
+
AlltoallWorkData(int size)
|
| 72 |
+
: send_lengths(size),
|
| 73 |
+
send_offsets(size),
|
| 74 |
+
recv_lengths(size),
|
| 75 |
+
recv_offsets(size) {}
|
| 76 |
+
std::vector<uint64_t> send_lengths;
|
| 77 |
+
std::vector<uint64_t> send_offsets;
|
| 78 |
+
std::vector<uint64_t> recv_lengths;
|
| 79 |
+
std::vector<uint64_t> recv_offsets;
|
| 80 |
+
};
|
| 81 |
+
|
| 82 |
+
class AllgathervWorkData : public WorkData {
|
| 83 |
+
public:
|
| 84 |
+
AllgathervWorkData(int size) : recv_lengths(size), recv_offsets(size) {}
|
| 85 |
+
std::vector<uint64_t> recv_lengths;
|
| 86 |
+
std::vector<uint64_t> recv_offsets;
|
| 87 |
+
};
|
| 88 |
+
|
| 89 |
+
class ScattervWorkData : public WorkData {
|
| 90 |
+
public:
|
| 91 |
+
ScattervWorkData(int size) : send_lengths(size), send_offsets(size) {}
|
| 92 |
+
std::vector<uint64_t> send_lengths;
|
| 93 |
+
std::vector<uint64_t> send_offsets;
|
| 94 |
+
};
|
| 95 |
+
|
| 96 |
+
class ProgressEntry {
|
| 97 |
+
friend class ProcessGroupUCC;
|
| 98 |
+
friend class Comm;
|
| 99 |
+
|
| 100 |
+
public:
|
| 101 |
+
ProgressEntry(CommBase* comm, ucc_coll_req_h request)
|
| 102 |
+
: status_(UCC_INPROGRESS), comm_(comm), request_(request) {}
|
| 103 |
+
// Finalizes UCC status or exception of collective request.
|
| 104 |
+
void finalize(std::exception_ptr eptr = nullptr);
|
| 105 |
+
ucc_status_t status_;
|
| 106 |
+
CommBase* comm_;
|
| 107 |
+
ucc_coll_req_h request_;
|
| 108 |
+
std::unique_ptr<WorkData> data;
|
| 109 |
+
c10::intrusive_ptr<c10::ivalue::Future> future_;
|
| 110 |
+
std::exception_ptr eptr_;
|
| 111 |
+
};
|
| 112 |
+
|
| 113 |
+
class WorkUCC : public Work {
|
| 114 |
+
friend class ProcessGroupUCC;
|
| 115 |
+
friend class Comm;
|
| 116 |
+
|
| 117 |
+
public:
|
| 118 |
+
WorkUCC(
|
| 119 |
+
OpType opType,
|
| 120 |
+
uint64_t seq,
|
| 121 |
+
const char* prof_title,
|
| 122 |
+
const c10::optional<std::vector<at::Tensor>>& inputs,
|
| 123 |
+
const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger)
|
| 124 |
+
: Work(-1, opType, prof_title, inputs), logger_(logger), seq_(seq) {}
|
| 125 |
+
~WorkUCC();
|
| 126 |
+
void setException();
|
| 127 |
+
void setAndThrowException();
|
| 128 |
+
bool isCompleted() override;
|
| 129 |
+
bool isSuccess() const override;
|
| 130 |
+
bool wait(std::chrono::milliseconds timeout = kUnsetTimeout) override;
|
| 131 |
+
c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
|
| 132 |
+
std::vector<at::Tensor> result() override;
|
| 133 |
+
int sourceRank() const override;
|
| 134 |
+
#ifdef USE_CUDA
|
| 135 |
+
std::unique_ptr<at::cuda::CUDAEvent> fence = nullptr;
|
| 136 |
+
event_pool_t* ep = nullptr;
|
| 137 |
+
#endif
|
| 138 |
+
int sourceRank_;
|
| 139 |
+
|
| 140 |
+
protected:
|
| 141 |
+
std::shared_ptr<ProgressEntry> entry_;
|
| 142 |
+
c10::intrusive_ptr<ProcessGroupUCCLogger> logger_;
|
| 143 |
+
uint64_t seq_;
|
| 144 |
+
|
| 145 |
+
private:
|
| 146 |
+
// The future returned by getFuture.
|
| 147 |
+
c10::intrusive_ptr<at::ivalue::Future> future_;
|
| 148 |
+
// Store a reference to collective's outputs, used by result
|
| 149 |
+
std::shared_ptr<std::vector<at::Tensor>> outputs_;
|
| 150 |
+
};
|
| 151 |
+
|
| 152 |
+
explicit ProcessGroupUCC(
|
| 153 |
+
const c10::intrusive_ptr<Store>& store,
|
| 154 |
+
int rank = -1,
|
| 155 |
+
int size = -1,
|
| 156 |
+
std::chrono::duration<float> timeout = kBackendDefaultTimeout);
|
| 157 |
+
|
| 158 |
+
void initComm(c10::Device dev);
|
| 159 |
+
|
| 160 |
+
~ProcessGroupUCC() override;
|
| 161 |
+
|
| 162 |
+
const std::string getBackendName() const override {
|
| 163 |
+
return std::string(UCC_BACKEND_NAME);
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
#ifdef USE_CUDA
|
| 167 |
+
std::unique_ptr<at::cuda::CUDAEvent> getPooledEvent();
|
| 168 |
+
#endif
|
| 169 |
+
|
| 170 |
+
// Performs a health check by initializing dummy UCC & UCX communicators and
|
| 171 |
+
// then destroying them. This will help indicate and signal any
|
| 172 |
+
// UCC/UCX-related issues prior to the first collective. The actual
|
| 173 |
+
// initialization and subsequent destruction is ran on a separate thread and
|
| 174 |
+
// the main thread is signalled about timeouts/errors to report to the
|
| 175 |
+
// application.
|
| 176 |
+
void runHealthCheck();
|
| 177 |
+
|
| 178 |
+
template <typename PreProcess, typename PostProcess>
|
| 179 |
+
c10::intrusive_ptr<Work> collective_post(
|
| 180 |
+
OpType opType,
|
| 181 |
+
PreProcess preproc,
|
| 182 |
+
PostProcess postproc,
|
| 183 |
+
ucc_coll_args_t& coll,
|
| 184 |
+
std::unique_ptr<ProcessGroupUCC::WorkData> data,
|
| 185 |
+
c10::Device dev,
|
| 186 |
+
std::vector<at::Tensor>& inputTensors,
|
| 187 |
+
std::vector<at::Tensor>& outputTensors,
|
| 188 |
+
const char* prof_title);
|
| 189 |
+
|
| 190 |
+
c10::intrusive_ptr<Work> broadcast(
|
| 191 |
+
std::vector<at::Tensor>& data,
|
| 192 |
+
const BroadcastOptions& opts = BroadcastOptions()) override;
|
| 193 |
+
|
| 194 |
+
c10::intrusive_ptr<Work> allreduce(
|
| 195 |
+
std::vector<at::Tensor>& tensors,
|
| 196 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 197 |
+
|
| 198 |
+
c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 199 |
+
std::vector<at::Tensor>& tensors,
|
| 200 |
+
const AllreduceCoalescedOptions& opts =
|
| 201 |
+
AllreduceCoalescedOptions()) override;
|
| 202 |
+
|
| 203 |
+
c10::intrusive_ptr<Work> reduce(
|
| 204 |
+
std::vector<at::Tensor>& tensors,
|
| 205 |
+
const ReduceOptions& opts = ReduceOptions()) override;
|
| 206 |
+
|
| 207 |
+
c10::intrusive_ptr<Work> allgather(
|
| 208 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 209 |
+
std::vector<at::Tensor>& inputTensors,
|
| 210 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 211 |
+
|
| 212 |
+
c10::intrusive_ptr<Work> _allgather_base(
|
| 213 |
+
at::Tensor& outputBuffer,
|
| 214 |
+
at::Tensor& inputBuffer,
|
| 215 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 216 |
+
|
| 217 |
+
c10::intrusive_ptr<Work> barrier(
|
| 218 |
+
const BarrierOptions& opts = BarrierOptions()) override;
|
| 219 |
+
|
| 220 |
+
c10::intrusive_ptr<Work> gather(
|
| 221 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 222 |
+
std::vector<at::Tensor>& inputTensors,
|
| 223 |
+
const GatherOptions& opts = GatherOptions()) override;
|
| 224 |
+
|
| 225 |
+
c10::intrusive_ptr<Work> scatter(
|
| 226 |
+
std::vector<at::Tensor>& outputTensors,
|
| 227 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 228 |
+
const ScatterOptions& opts = ScatterOptions()) override;
|
| 229 |
+
|
| 230 |
+
c10::intrusive_ptr<Work> reduce_scatter(
|
| 231 |
+
std::vector<at::Tensor>& outputTensors,
|
| 232 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 233 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 234 |
+
|
| 235 |
+
c10::intrusive_ptr<Work> alltoall_base(
|
| 236 |
+
at::Tensor& outputTensor,
|
| 237 |
+
at::Tensor& inputTensor,
|
| 238 |
+
std::vector<int64_t>& outputSplitSizes,
|
| 239 |
+
std::vector<int64_t>& inputSplitSizes,
|
| 240 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 241 |
+
|
| 242 |
+
c10::intrusive_ptr<Work> alltoall(
|
| 243 |
+
std::vector<at::Tensor>& outputTensors,
|
| 244 |
+
std::vector<at::Tensor>& inputTensors,
|
| 245 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 246 |
+
|
| 247 |
+
c10::intrusive_ptr<Work> send(
|
| 248 |
+
std::vector<at::Tensor>& tensors,
|
| 249 |
+
int dstRank,
|
| 250 |
+
int tag) override;
|
| 251 |
+
|
| 252 |
+
c10::intrusive_ptr<Work> recv(
|
| 253 |
+
std::vector<at::Tensor>& tensors,
|
| 254 |
+
int srcRank,
|
| 255 |
+
int tag) override;
|
| 256 |
+
|
| 257 |
+
// Counting for the sequential number of UCC collective_post call.
|
| 258 |
+
uint64_t seq_{0};
|
| 259 |
+
|
| 260 |
+
// Agrees on an initial sequence number for the whole group by having rank 0
|
| 261 |
+
// create it and broadcast it to other ranks using the store.
|
| 262 |
+
void setSequenceNumberForGroup() override;
|
| 263 |
+
|
| 264 |
+
// Retrieves the current sequence number for the whole group, which should be
|
| 265 |
+
// in sync. If the returned number is not consistent across the group, it
|
| 266 |
+
// may indicate that there is some sort of collective desynchronization.
|
| 267 |
+
uint64_t getSequenceNumberForGroup() override;
|
| 268 |
+
|
| 269 |
+
static c10::intrusive_ptr<Backend> createProcessGroupUCC(
|
| 270 |
+
const c10::intrusive_ptr<::c10d::Store>& store,
|
| 271 |
+
int rank,
|
| 272 |
+
int size,
|
| 273 |
+
const std::chrono::duration<float>& timeout);
|
| 274 |
+
|
| 275 |
+
protected:
|
| 276 |
+
const std::chrono::duration<float> timeout_;
|
| 277 |
+
std::shared_ptr<torch_ucc_oob_coll_info_t> oob;
|
| 278 |
+
std::shared_ptr<Comm> comm = {nullptr};
|
| 279 |
+
uint32_t comm_id;
|
| 280 |
+
ucc_team_h team{nullptr};
|
| 281 |
+
ucc_ee_h cuda_ee{nullptr};
|
| 282 |
+
ucc_ee_h cuda_ee_p2p[2]{nullptr, nullptr};
|
| 283 |
+
|
| 284 |
+
#ifdef USE_CUDA
|
| 285 |
+
std::unique_ptr<at::cuda::CUDAStream> stream = nullptr;
|
| 286 |
+
std::unique_ptr<at::cuda::CUDAStream> stream_p2p[2] = {nullptr, nullptr};
|
| 287 |
+
event_pool_t ep;
|
| 288 |
+
#endif
|
| 289 |
+
c10::intrusive_ptr<ProcessGroupUCCLogger> logger;
|
| 290 |
+
};
|
| 291 |
+
|
| 292 |
+
class Comm {
|
| 293 |
+
c10::intrusive_ptr<ProcessGroupUCCLogger> logger;
|
| 294 |
+
std::shared_ptr<torch_ucc_oob_coll_info_t> oob;
|
| 295 |
+
CommUCC ucc_comm;
|
| 296 |
+
std::mutex mutex;
|
| 297 |
+
std::thread progress_thread;
|
| 298 |
+
std::condition_variable queue_produce_cv;
|
| 299 |
+
std::condition_variable queue_consume_cv;
|
| 300 |
+
std::deque<std::shared_ptr<ProcessGroupUCC::ProgressEntry>> progress_queue;
|
| 301 |
+
bool stop_progress_loop;
|
| 302 |
+
bool collective_inprogress;
|
| 303 |
+
torch_ucc_phase_t finalize_phase;
|
| 304 |
+
|
| 305 |
+
public:
|
| 306 |
+
c10::DeviceIndex cuda_device_index;
|
| 307 |
+
Comm(
|
| 308 |
+
const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger,
|
| 309 |
+
std::shared_ptr<torch_ucc_oob_coll_info_t> oob,
|
| 310 |
+
c10::Device dev,
|
| 311 |
+
bool is_health_check);
|
| 312 |
+
|
| 313 |
+
~Comm();
|
| 314 |
+
|
| 315 |
+
void ucc_create_team(
|
| 316 |
+
ucc_team_h& team,
|
| 317 |
+
std::shared_ptr<torch_ucc_oob_coll_info_t> oob);
|
| 318 |
+
|
| 319 |
+
void ucc_destroy_team(ucc_team_h& team);
|
| 320 |
+
|
| 321 |
+
c10::intrusive_ptr<Work> enqueue_p2p(
|
| 322 |
+
OpType opType,
|
| 323 |
+
ucc_coll_req_h request,
|
| 324 |
+
const char* prof_title);
|
| 325 |
+
|
| 326 |
+
#ifdef USE_CUDA
|
| 327 |
+
void enqueue_cuda_collective(
|
| 328 |
+
std::unique_ptr<ProcessGroupUCC::WorkData> data,
|
| 329 |
+
c10::intrusive_ptr<ProcessGroupUCC::WorkUCC> work,
|
| 330 |
+
ucc_coll_args_t& coll,
|
| 331 |
+
ucc_team_h team,
|
| 332 |
+
ucc_ee_h ee);
|
| 333 |
+
#endif
|
| 334 |
+
|
| 335 |
+
void enqueue_collective(
|
| 336 |
+
std::unique_ptr<ProcessGroupUCC::WorkData> data,
|
| 337 |
+
c10::intrusive_ptr<ProcessGroupUCC::WorkUCC> work,
|
| 338 |
+
ucc_coll_args_t& coll,
|
| 339 |
+
ucc_team_h team);
|
| 340 |
+
|
| 341 |
+
static std::shared_ptr<Comm> get_comm(
|
| 342 |
+
uint32_t& id,
|
| 343 |
+
c10::Device dev,
|
| 344 |
+
std::shared_ptr<torch_ucc_oob_coll_info_t> oob,
|
| 345 |
+
const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger,
|
| 346 |
+
bool is_health_check = false);
|
| 347 |
+
|
| 348 |
+
void progress_loop();
|
| 349 |
+
};
|
| 350 |
+
|
| 351 |
+
} // namespace c10d
|
| 352 |
+
|
| 353 |
+
#endif // USE_C10D_UCC
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_GLOO
|
| 4 |
+
|
| 5 |
+
#include <torch/csrc/distributed/c10d/ProcessGroupGloo.hpp>
|
| 6 |
+
#include <torch/csrc/distributed/c10d/Types.hpp>
|
| 7 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 8 |
+
|
| 9 |
+
namespace c10d {
|
| 10 |
+
|
| 11 |
+
class TORCH_API ProcessGroupWrapper : public Backend {
|
| 12 |
+
public:
|
| 13 |
+
explicit ProcessGroupWrapper(
|
| 14 |
+
c10::intrusive_ptr<Backend> backend,
|
| 15 |
+
c10::intrusive_ptr<Backend> glooBackend);
|
| 16 |
+
|
| 17 |
+
const std::string getBackendName() const override;
|
| 18 |
+
|
| 19 |
+
c10::intrusive_ptr<Work> broadcast(
|
| 20 |
+
std::vector<at::Tensor>& data,
|
| 21 |
+
const BroadcastOptions& opts = BroadcastOptions()) override;
|
| 22 |
+
|
| 23 |
+
c10::intrusive_ptr<Work> allreduce(
|
| 24 |
+
std::vector<at::Tensor>& data,
|
| 25 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 26 |
+
|
| 27 |
+
c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 28 |
+
std::vector<at::Tensor>& tensors,
|
| 29 |
+
const AllreduceCoalescedOptions& opts =
|
| 30 |
+
AllreduceCoalescedOptions()) override;
|
| 31 |
+
|
| 32 |
+
c10::intrusive_ptr<Work> reduce(
|
| 33 |
+
std::vector<at::Tensor>& tensors,
|
| 34 |
+
const ReduceOptions& opts = ReduceOptions()) override;
|
| 35 |
+
|
| 36 |
+
c10::intrusive_ptr<Work> allgather(
|
| 37 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 38 |
+
std::vector<at::Tensor>& inputTensors,
|
| 39 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 40 |
+
|
| 41 |
+
c10::intrusive_ptr<Work> _allgather_base(
|
| 42 |
+
at::Tensor& outputBuffer,
|
| 43 |
+
at::Tensor& inputBuffer,
|
| 44 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 45 |
+
|
| 46 |
+
// This function is deprecated and will be moved out of ProcessGroup to comms:
|
| 47 |
+
// * do not add dependencies on this function,
|
| 48 |
+
// * do not implement it in your ProcessGroup, implement _allgather_base
|
| 49 |
+
// instead.
|
| 50 |
+
c10::intrusive_ptr<Work> allgather_coalesced(
|
| 51 |
+
std::vector<std::vector<at::Tensor>>& outputTensorLists,
|
| 52 |
+
std::vector<at::Tensor>& inputTensors,
|
| 53 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 54 |
+
|
| 55 |
+
c10::intrusive_ptr<Work> gather(
|
| 56 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 57 |
+
std::vector<at::Tensor>& inputTensors,
|
| 58 |
+
const GatherOptions& opts = GatherOptions()) override;
|
| 59 |
+
|
| 60 |
+
c10::intrusive_ptr<Work> scatter(
|
| 61 |
+
std::vector<at::Tensor>& outputTensors,
|
| 62 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 63 |
+
const ScatterOptions& opts = ScatterOptions()) override;
|
| 64 |
+
|
| 65 |
+
c10::intrusive_ptr<Work> reduce_scatter(
|
| 66 |
+
std::vector<at::Tensor>& outputTensors,
|
| 67 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 68 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 69 |
+
|
| 70 |
+
c10::intrusive_ptr<Work> alltoall_base(
|
| 71 |
+
at::Tensor& outputTensor,
|
| 72 |
+
at::Tensor& inputTensor,
|
| 73 |
+
std::vector<int64_t>& outputSplitSizes,
|
| 74 |
+
std::vector<int64_t>& inputSplitSizes,
|
| 75 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 76 |
+
|
| 77 |
+
c10::intrusive_ptr<Work> alltoall(
|
| 78 |
+
std::vector<at::Tensor>& outputTensors,
|
| 79 |
+
std::vector<at::Tensor>& inputTensors,
|
| 80 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 81 |
+
|
| 82 |
+
void monitoredBarrier(const BarrierOptions& opts, bool waitAllRanks = false)
|
| 83 |
+
override;
|
| 84 |
+
|
| 85 |
+
// Agrees on an initial sequence number for the whole group by having rank 0
|
| 86 |
+
// create it and broadcast it to other ranks using the store. Only implemented
|
| 87 |
+
// for GLOO and NCCL backends currently.
|
| 88 |
+
// dont implement this
|
| 89 |
+
void setSequenceNumberForGroup() override;
|
| 90 |
+
|
| 91 |
+
// Retrieves the current sequence number for the whole group, which should be
|
| 92 |
+
// in sync. If the returned number is not consistent across the group, it
|
| 93 |
+
// may indicate that there is some sort of collective desynchronization.
|
| 94 |
+
uint64_t getSequenceNumberForGroup() override; // just call underlying
|
| 95 |
+
|
| 96 |
+
c10::intrusive_ptr<Work> send(
|
| 97 |
+
std::vector<at::Tensor>& tensors,
|
| 98 |
+
int dstRank,
|
| 99 |
+
int tag) override;
|
| 100 |
+
|
| 101 |
+
c10::intrusive_ptr<Work> recv(
|
| 102 |
+
std::vector<at::Tensor>& tensors,
|
| 103 |
+
int srcRank,
|
| 104 |
+
int tag) override;
|
| 105 |
+
|
| 106 |
+
c10::intrusive_ptr<Work> recvAnysource(
|
| 107 |
+
std::vector<at::Tensor>& tensors,
|
| 108 |
+
int tag) override;
|
| 109 |
+
|
| 110 |
+
c10::intrusive_ptr<Work> barrier(
|
| 111 |
+
const BarrierOptions& opts = BarrierOptions()) override;
|
| 112 |
+
|
| 113 |
+
c10::intrusive_ptr<Work> _reduce_scatter_base(
|
| 114 |
+
at::Tensor& outputBuffer,
|
| 115 |
+
at::Tensor& inputBuffer,
|
| 116 |
+
const ReduceScatterOptions& opts) override;
|
| 117 |
+
|
| 118 |
+
void startCoalescing() override;
|
| 119 |
+
|
| 120 |
+
c10::intrusive_ptr<Work> endCoalescing() override;
|
| 121 |
+
|
| 122 |
+
c10::intrusive_ptr<Backend> getWrappedPg() const;
|
| 123 |
+
|
| 124 |
+
private:
|
| 125 |
+
// Underlying process group that actual application collectives will be
|
| 126 |
+
// dispatched to
|
| 127 |
+
c10::intrusive_ptr<Backend> backend_;
|
| 128 |
+
// Gloo process group responsible for internal coordination such as monitored
|
| 129 |
+
// barrier, sequence number checking, collective fingerprint collecting.
|
| 130 |
+
c10::intrusive_ptr<Backend> glooBackend_;
|
| 131 |
+
// Conducts several checks to ensure that the underlying collective is well
|
| 132 |
+
// formed with the goal of notifying the user about incorrect collective use
|
| 133 |
+
// in the application.
|
| 134 |
+
void runCollectiveChecks(
|
| 135 |
+
OpType op_type,
|
| 136 |
+
const std::vector<at::Tensor>& tensors);
|
| 137 |
+
};
|
| 138 |
+
} // namespace c10d
|
| 139 |
+
|
| 140 |
+
#endif // USE_C10D_GLOO
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
#pragma once
|
| 3 |
+
|
| 4 |
+
#include <shared_mutex>
|
| 5 |
+
|
| 6 |
+
#include <torch/csrc/autograd/function.h>
|
| 7 |
+
|
| 8 |
+
namespace c10d {
|
| 9 |
+
|
| 10 |
+
// `RankLocal` maintains a unique instance of T for each non-autograd thread.
|
| 11 |
+
// For non-autograd threads, `RankLocal<T>::get()` functions similar to
|
| 12 |
+
// thread_local. For autograd threads, `RankLocal<T>::get()` returns the
|
| 13 |
+
// instance of T corresponding to the enqueuing non-autograd thread. The
|
| 14 |
+
// mechanism allows for rank-specific context shared between forward and
|
| 15 |
+
// backward. It works for both the one-rank-per-process and one-rank-per-thread
|
| 16 |
+
// scenarios.
|
| 17 |
+
//
|
| 18 |
+
// NOTE: RankLocal doesn't make the underlying objects thread-safe.
|
| 19 |
+
template <typename T>
|
| 20 |
+
class RankLocal {
|
| 21 |
+
public:
|
| 22 |
+
RankLocal(const RankLocal&) = delete;
|
| 23 |
+
RankLocal& operator=(const RankLocal&) = delete;
|
| 24 |
+
|
| 25 |
+
static T& get() {
|
| 26 |
+
// Fast path: non-autograd threads can simply return
|
| 27 |
+
// the object reference cached in TLS.
|
| 28 |
+
if (cached_ != nullptr) {
|
| 29 |
+
return *cached_;
|
| 30 |
+
}
|
| 31 |
+
const auto node = torch::autograd::get_current_node();
|
| 32 |
+
auto fwd_thread_id = node == nullptr ? at::RecordFunction::currentThreadId()
|
| 33 |
+
: node->thread_id();
|
| 34 |
+
// Optimistically aquire the read lock first, since most likely we are in
|
| 35 |
+
// an autograd thread and the object has already been constructed.
|
| 36 |
+
{
|
| 37 |
+
std::shared_lock read_lock(lock_);
|
| 38 |
+
auto it = thread_id_to_rank_local_.find(fwd_thread_id);
|
| 39 |
+
if (it != thread_id_to_rank_local_.end()) {
|
| 40 |
+
// Cache for non-autograd threads
|
| 41 |
+
if (node == nullptr) {
|
| 42 |
+
cached_ = &it->second;
|
| 43 |
+
}
|
| 44 |
+
return it->second;
|
| 45 |
+
}
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
std::unique_lock write_lock(lock_);
|
| 49 |
+
auto [it, _] = thread_id_to_rank_local_.try_emplace(fwd_thread_id);
|
| 50 |
+
// Cache for non-autograd threads
|
| 51 |
+
if (node == nullptr) {
|
| 52 |
+
cached_ = &it->second;
|
| 53 |
+
}
|
| 54 |
+
return it->second;
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
private:
|
| 58 |
+
RankLocal(){};
|
| 59 |
+
thread_local static T* cached_;
|
| 60 |
+
static std::unordered_map<uint64_t, T> thread_id_to_rank_local_;
|
| 61 |
+
static std::shared_mutex lock_;
|
| 62 |
+
};
|
| 63 |
+
|
| 64 |
+
template <typename T>
|
| 65 |
+
thread_local T* RankLocal<T>::cached_ = nullptr;
|
| 66 |
+
|
| 67 |
+
template <typename T>
|
| 68 |
+
std::unordered_map<uint64_t, T> RankLocal<T>::thread_id_to_rank_local_;
|
| 69 |
+
|
| 70 |
+
template <typename T>
|
| 71 |
+
std::shared_mutex RankLocal<T>::lock_;
|
| 72 |
+
|
| 73 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstddef>
|
| 4 |
+
#include <cstdint>
|
| 5 |
+
#include <memory>
|
| 6 |
+
|
| 7 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 8 |
+
|
| 9 |
+
namespace c10d {
|
| 10 |
+
namespace detail {
|
| 11 |
+
|
| 12 |
+
class TCPServer;
|
| 13 |
+
|
| 14 |
+
class TCPClient;
|
| 15 |
+
|
| 16 |
+
struct SocketAddress {
|
| 17 |
+
std::string host{};
|
| 18 |
+
std::uint16_t port{};
|
| 19 |
+
};
|
| 20 |
+
|
| 21 |
+
class Counter {
|
| 22 |
+
public:
|
| 23 |
+
void update(double val);
|
| 24 |
+
std::unordered_map<std::string, double> observe() const;
|
| 25 |
+
|
| 26 |
+
double mean() const noexcept {
|
| 27 |
+
return mean_;
|
| 28 |
+
}
|
| 29 |
+
int64_t count() const noexcept {
|
| 30 |
+
return count_;
|
| 31 |
+
}
|
| 32 |
+
double variance() const noexcept {
|
| 33 |
+
return m2_ / count_;
|
| 34 |
+
}
|
| 35 |
+
double sample_variance() const noexcept {
|
| 36 |
+
return m2_ / (count_ - 1);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
private:
|
| 40 |
+
int64_t count_ = 0;
|
| 41 |
+
double mean_ = 0;
|
| 42 |
+
double m2_ = 0;
|
| 43 |
+
};
|
| 44 |
+
|
| 45 |
+
} // namespace detail
|
| 46 |
+
|
| 47 |
+
struct TCPStoreOptions {
|
| 48 |
+
static constexpr std::uint16_t kDefaultPort = 29500;
|
| 49 |
+
|
| 50 |
+
std::uint16_t port = kDefaultPort;
|
| 51 |
+
bool isServer = false;
|
| 52 |
+
c10::optional<std::size_t> numWorkers = c10::nullopt;
|
| 53 |
+
bool waitWorkers = true;
|
| 54 |
+
std::chrono::milliseconds timeout = Store::kDefaultTimeout;
|
| 55 |
+
|
| 56 |
+
// A boolean value indicating whether multiple store instances can be
|
| 57 |
+
// initialized with the same host:port pair.
|
| 58 |
+
bool multiTenant = false;
|
| 59 |
+
|
| 60 |
+
// If specified, and if isServer is true, the underlying TCPServer will take
|
| 61 |
+
// over the bound socket associated to this fd. This option is useful to avoid
|
| 62 |
+
// port assignment races in certain scenarios.
|
| 63 |
+
c10::optional<int> masterListenFd = c10::nullopt;
|
| 64 |
+
|
| 65 |
+
// A boolean value indicating whether to use the experimental libUV backend.
|
| 66 |
+
bool useLibUV = false;
|
| 67 |
+
};
|
| 68 |
+
|
| 69 |
+
class TORCH_API TCPStore : public Store {
|
| 70 |
+
public:
|
| 71 |
+
explicit TCPStore(std::string host, const TCPStoreOptions& opts = {});
|
| 72 |
+
|
| 73 |
+
[[deprecated("Use TCPStore(host, opts) instead.")]] explicit TCPStore(
|
| 74 |
+
const std::string& masterAddr,
|
| 75 |
+
std::uint16_t masterPort,
|
| 76 |
+
c10::optional<int> numWorkers = c10::nullopt,
|
| 77 |
+
bool isServer = false,
|
| 78 |
+
const std::chrono::milliseconds& timeout = kDefaultTimeout,
|
| 79 |
+
bool waitWorkers = true);
|
| 80 |
+
|
| 81 |
+
~TCPStore() override;
|
| 82 |
+
|
| 83 |
+
void set(const std::string& key, const std::vector<uint8_t>& value) override;
|
| 84 |
+
|
| 85 |
+
std::vector<uint8_t> compareSet(
|
| 86 |
+
const std::string& key,
|
| 87 |
+
const std::vector<uint8_t>& expectedValue,
|
| 88 |
+
const std::vector<uint8_t>& desiredValue) override;
|
| 89 |
+
|
| 90 |
+
std::vector<uint8_t> get(const std::string& key) override;
|
| 91 |
+
|
| 92 |
+
int64_t add(const std::string& key, int64_t value) override;
|
| 93 |
+
|
| 94 |
+
bool deleteKey(const std::string& key) override;
|
| 95 |
+
|
| 96 |
+
bool check(const std::vector<std::string>& keys) override;
|
| 97 |
+
|
| 98 |
+
int64_t getNumKeys() override;
|
| 99 |
+
|
| 100 |
+
void wait(const std::vector<std::string>& keys) override;
|
| 101 |
+
|
| 102 |
+
void wait(
|
| 103 |
+
const std::vector<std::string>& keys,
|
| 104 |
+
const std::chrono::milliseconds& timeout) override;
|
| 105 |
+
|
| 106 |
+
void append(const std::string& key, const std::vector<uint8_t>& value)
|
| 107 |
+
override;
|
| 108 |
+
|
| 109 |
+
std::vector<std::vector<uint8_t>> multiGet(
|
| 110 |
+
const std::vector<std::string>& keys) override;
|
| 111 |
+
|
| 112 |
+
void multiSet(
|
| 113 |
+
const std::vector<std::string>& keys,
|
| 114 |
+
const std::vector<std::vector<uint8_t>>& values) override;
|
| 115 |
+
|
| 116 |
+
bool hasExtendedApi() const override;
|
| 117 |
+
|
| 118 |
+
// Waits for all workers to join.
|
| 119 |
+
void waitForWorkers();
|
| 120 |
+
|
| 121 |
+
// Returns the hostname used by the TCPStore.
|
| 122 |
+
const std::string& getHost() const noexcept {
|
| 123 |
+
return addr_.host;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
// Returns the port used by the TCPStore.
|
| 127 |
+
std::uint16_t getPort() const noexcept {
|
| 128 |
+
return addr_.port;
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
std::unordered_map<std::string, std::unordered_map<std::string, double>>
|
| 132 |
+
collectClientCounters() const noexcept;
|
| 133 |
+
|
| 134 |
+
bool isLibUvBackend() const noexcept {
|
| 135 |
+
return usingLibUv_;
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
private:
|
| 139 |
+
int64_t incrementValueBy(const std::string& key, int64_t delta);
|
| 140 |
+
|
| 141 |
+
void validate(void);
|
| 142 |
+
|
| 143 |
+
std::vector<uint8_t> doGet(const std::string& key);
|
| 144 |
+
|
| 145 |
+
void doWait(
|
| 146 |
+
c10::ArrayRef<std::string> keys,
|
| 147 |
+
std::chrono::milliseconds timeout);
|
| 148 |
+
|
| 149 |
+
detail::SocketAddress addr_;
|
| 150 |
+
std::shared_ptr<detail::TCPServer> server_;
|
| 151 |
+
std::unique_ptr<detail::TCPClient> client_;
|
| 152 |
+
c10::optional<std::size_t> numWorkers_;
|
| 153 |
+
|
| 154 |
+
const std::string initKey_ = "init/";
|
| 155 |
+
const std::string keyPrefix_ = "/";
|
| 156 |
+
std::mutex activeOpLock_;
|
| 157 |
+
std::unordered_map<std::string, detail::Counter> clientCounters_;
|
| 158 |
+
bool usingLibUv_ = false;
|
| 159 |
+
};
|
| 160 |
+
|
| 161 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStoreBackend.hpp
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <chrono>
|
| 4 |
+
#include <thread>
|
| 5 |
+
#include <vector>
|
| 6 |
+
|
| 7 |
+
#include <torch/csrc/distributed/c10d/TCPStore.hpp>
|
| 8 |
+
#include <torch/csrc/distributed/c10d/socket.h>
|
| 9 |
+
|
| 10 |
+
#ifdef _WIN32
|
| 11 |
+
#include <io.h>
|
| 12 |
+
#include <winsock2.h>
|
| 13 |
+
#else
|
| 14 |
+
#include <poll.h>
|
| 15 |
+
#include <unistd.h>
|
| 16 |
+
#endif
|
| 17 |
+
|
| 18 |
+
namespace c10d {
|
| 19 |
+
namespace detail {
|
| 20 |
+
|
| 21 |
+
// Magic number for client validation.
|
| 22 |
+
static const uint32_t validationMagicNumber = 0x3C85F7CE;
|
| 23 |
+
|
| 24 |
+
enum class QueryType : uint8_t {
|
| 25 |
+
VALIDATE,
|
| 26 |
+
SET,
|
| 27 |
+
COMPARE_SET,
|
| 28 |
+
GET,
|
| 29 |
+
ADD,
|
| 30 |
+
CHECK,
|
| 31 |
+
WAIT,
|
| 32 |
+
GETNUMKEYS,
|
| 33 |
+
DELETE_KEY,
|
| 34 |
+
APPEND,
|
| 35 |
+
MULTI_GET,
|
| 36 |
+
MULTI_SET,
|
| 37 |
+
CANCEL_WAIT,
|
| 38 |
+
};
|
| 39 |
+
|
| 40 |
+
enum class CheckResponseType : uint8_t { READY, NOT_READY };
|
| 41 |
+
|
| 42 |
+
enum class WaitResponseType : uint8_t { STOP_WAITING, WAIT_CANCELED };
|
| 43 |
+
|
| 44 |
+
// Abstract base class to handle thread state for TCPStoreMasterDaemon.
|
| 45 |
+
// Contains the windows/unix implementations to signal a
|
| 46 |
+
// shutdown sequence for the thread
|
| 47 |
+
class BackgroundThread {
|
| 48 |
+
public:
|
| 49 |
+
explicit BackgroundThread();
|
| 50 |
+
|
| 51 |
+
virtual ~BackgroundThread() = 0;
|
| 52 |
+
virtual std::uint16_t port() const = 0;
|
| 53 |
+
|
| 54 |
+
void start();
|
| 55 |
+
bool stop_requested();
|
| 56 |
+
|
| 57 |
+
protected:
|
| 58 |
+
void dispose();
|
| 59 |
+
virtual void run() = 0;
|
| 60 |
+
virtual void stop() = 0;
|
| 61 |
+
bool is_running() {
|
| 62 |
+
return is_running_.load();
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
private:
|
| 66 |
+
std::atomic<bool> is_running_;
|
| 67 |
+
std::thread daemonThread_{};
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
std::unique_ptr<BackgroundThread> create_tcpstore_backend(
|
| 71 |
+
const TCPStoreOptions& opts);
|
| 72 |
+
std::unique_ptr<BackgroundThread> create_libuv_tcpstore_backend(
|
| 73 |
+
const TCPStoreOptions& opts);
|
| 74 |
+
bool is_libuv_tcpstore_backend_available();
|
| 75 |
+
|
| 76 |
+
} // namespace detail
|
| 77 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h
ADDED
|
@@ -0,0 +1,543 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/ApproximateClock.h>
|
| 4 |
+
#include <c10/util/irange.h>
|
| 5 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 6 |
+
#include <torch/csrc/distributed/c10d/Types.hpp>
|
| 7 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 8 |
+
#include <torch/csrc/profiler/combined_traceback.h>
|
| 9 |
+
|
| 10 |
+
#include <sys/types.h>
|
| 11 |
+
|
| 12 |
+
#include <cstdlib>
|
| 13 |
+
#include <string>
|
| 14 |
+
#include <system_error>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace c10d {
|
| 18 |
+
|
| 19 |
+
/* Trace Utils Related to TORCH_NCCL_DESYNC_DEBUG */
|
| 20 |
+
|
| 21 |
+
inline std::string getTraceStartKey(const std::string& pgName, int rank) {
|
| 22 |
+
return pgName + "_" + std::to_string(rank) + "_trace_start";
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
inline std::string getTraceEndKey(const std::string& pgName, int rank) {
|
| 26 |
+
return pgName + "_" + std::to_string(rank) + "_trace_end";
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
inline bool traceUpdate(
|
| 30 |
+
c10::intrusive_ptr<Store>& store,
|
| 31 |
+
const std::string& key,
|
| 32 |
+
uint64_t seq,
|
| 33 |
+
const std::string& col) {
|
| 34 |
+
std::vector<uint8_t> value(col.size() + sizeof(seq) + 1);
|
| 35 |
+
memcpy(value.data(), &seq, sizeof(seq));
|
| 36 |
+
memcpy(value.data() + sizeof(seq), col.data(), col.size());
|
| 37 |
+
try {
|
| 38 |
+
store->set(key, value);
|
| 39 |
+
return true;
|
| 40 |
+
} catch (...) {
|
| 41 |
+
LOG(ERROR) << "Store is down while updating #" << seq << " with key "
|
| 42 |
+
<< key;
|
| 43 |
+
return false;
|
| 44 |
+
}
|
| 45 |
+
return true;
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
enum TraceDebugEvent {
|
| 49 |
+
kEventStart,
|
| 50 |
+
kEventEnd,
|
| 51 |
+
};
|
| 52 |
+
// <seq, <rank, <col, start/end>>>
|
| 53 |
+
using TraceMap =
|
| 54 |
+
std::map<uint64_t, std::map<int, std::pair<std::string, TraceDebugEvent>>>;
|
| 55 |
+
|
| 56 |
+
inline std::string ranksToString(const std::vector<int>& ranks) {
|
| 57 |
+
std::string str;
|
| 58 |
+
for (int rank : ranks) {
|
| 59 |
+
if (str.empty()) {
|
| 60 |
+
str = std::to_string(rank);
|
| 61 |
+
} else {
|
| 62 |
+
str += ", " + std::to_string(rank);
|
| 63 |
+
}
|
| 64 |
+
}
|
| 65 |
+
return str;
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
inline std::string ranksFromTrace(
|
| 69 |
+
const std::vector<std::pair<int, std::string>>& items) {
|
| 70 |
+
std::string ranks;
|
| 71 |
+
for (auto& p : items) {
|
| 72 |
+
if (ranks.empty()) {
|
| 73 |
+
ranks = std::to_string(p.first);
|
| 74 |
+
} else {
|
| 75 |
+
ranks += ", " + std::to_string(p.first);
|
| 76 |
+
}
|
| 77 |
+
}
|
| 78 |
+
return ranks;
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
inline std::string analyzeMissingRanks(const std::vector<int>& missingRanks) {
|
| 82 |
+
return c10::str(
|
| 83 |
+
"\n\t - To our best knowledge, ranks [",
|
| 84 |
+
ranksToString(missingRanks),
|
| 85 |
+
"] are the lagging ranks that caused this timeout. "
|
| 86 |
+
"They never joined any collectives");
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
inline std::string analyzeLaggingRanks(const TraceMap& traceMap) {
|
| 90 |
+
uint64_t lagSeq = traceMap.begin()->first;
|
| 91 |
+
std::vector<int> startRanks;
|
| 92 |
+
std::vector<int> endRanks;
|
| 93 |
+
for (auto& p : traceMap.begin()->second) {
|
| 94 |
+
if (p.second.second == kEventStart) {
|
| 95 |
+
startRanks.push_back(p.first);
|
| 96 |
+
} else {
|
| 97 |
+
endRanks.push_back(p.first);
|
| 98 |
+
}
|
| 99 |
+
}
|
| 100 |
+
std::string report =
|
| 101 |
+
"\n\t - To our best knowledge, the lagging/dead/mismatched ranks "
|
| 102 |
+
"that caused the desync are:";
|
| 103 |
+
if (startRanks.size()) {
|
| 104 |
+
report += c10::str(
|
| 105 |
+
"\n\t - [",
|
| 106 |
+
ranksToString(startRanks),
|
| 107 |
+
"] joined but didn't finish collective #",
|
| 108 |
+
lagSeq,
|
| 109 |
+
" (count from 1)");
|
| 110 |
+
}
|
| 111 |
+
if (endRanks.size()) {
|
| 112 |
+
report += c10::str(
|
| 113 |
+
"\n\t [",
|
| 114 |
+
ranksToString(endRanks),
|
| 115 |
+
"] finished collective #",
|
| 116 |
+
lagSeq,
|
| 117 |
+
", but didn't join collective #",
|
| 118 |
+
lagSeq + 1,
|
| 119 |
+
" (count from 1)");
|
| 120 |
+
}
|
| 121 |
+
return report;
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
inline std::string dumpSnapshot(TraceMap& traceMap) {
|
| 125 |
+
std::string report = "\n\t - Snapshot of ranks' latest states:";
|
| 126 |
+
for (auto& tracePair : traceMap) {
|
| 127 |
+
uint64_t seq = tracePair.first;
|
| 128 |
+
std::map<int, std::pair<std::string, TraceDebugEvent>>& subMap =
|
| 129 |
+
tracePair.second;
|
| 130 |
+
|
| 131 |
+
std::unordered_map<std::string, std::vector<int>> collectivesStart;
|
| 132 |
+
std::unordered_map<std::string, std::vector<int>> collectivesEnd;
|
| 133 |
+
for (auto& p : subMap) {
|
| 134 |
+
int rank = p.first;
|
| 135 |
+
const std::string& col = p.second.first;
|
| 136 |
+
if (p.second.second == kEventStart) {
|
| 137 |
+
collectivesStart[col].push_back(rank);
|
| 138 |
+
} else {
|
| 139 |
+
collectivesEnd[col].push_back(rank);
|
| 140 |
+
}
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
if (collectivesStart.size()) {
|
| 144 |
+
report += c10::str("\n\t #", seq, " started ranks:");
|
| 145 |
+
for (auto& mapPair : collectivesStart) {
|
| 146 |
+
report += c10::str(
|
| 147 |
+
"\n\t [",
|
| 148 |
+
ranksToString(mapPair.second),
|
| 149 |
+
"] started ",
|
| 150 |
+
mapPair.first);
|
| 151 |
+
}
|
| 152 |
+
}
|
| 153 |
+
if (collectivesEnd.size()) {
|
| 154 |
+
report += c10::str("\n\t #", seq, " finished ranks:");
|
| 155 |
+
for (auto& mapPair : collectivesEnd) {
|
| 156 |
+
report += c10::str(
|
| 157 |
+
"\n\t [",
|
| 158 |
+
ranksToString(mapPair.second),
|
| 159 |
+
"] finished ",
|
| 160 |
+
mapPair.first);
|
| 161 |
+
}
|
| 162 |
+
}
|
| 163 |
+
}
|
| 164 |
+
return report;
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
inline bool parseTraceValue(
|
| 168 |
+
c10::intrusive_ptr<Store>& store,
|
| 169 |
+
const std::string& key,
|
| 170 |
+
uint64_t& seq,
|
| 171 |
+
std::string& col) {
|
| 172 |
+
try {
|
| 173 |
+
std::vector<uint8_t> traceValue = store->get(key);
|
| 174 |
+
memcpy(&seq, traceValue.data(), sizeof(seq));
|
| 175 |
+
std::string colName((char*)traceValue.data() + sizeof(seq));
|
| 176 |
+
col = colName;
|
| 177 |
+
return true;
|
| 178 |
+
} catch (...) {
|
| 179 |
+
LOG(ERROR) << "Store is down while getting key " << key;
|
| 180 |
+
return false;
|
| 181 |
+
}
|
| 182 |
+
return true;
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
inline std::string retrieveDesyncReport(
|
| 186 |
+
c10::intrusive_ptr<Store>& store,
|
| 187 |
+
const std::string& pgName,
|
| 188 |
+
int myRank,
|
| 189 |
+
int worldSize) {
|
| 190 |
+
std::string report;
|
| 191 |
+
|
| 192 |
+
uint64_t thisSeq;
|
| 193 |
+
std::string thisCol;
|
| 194 |
+
|
| 195 |
+
std::vector<int> missingRanks;
|
| 196 |
+
TraceMap traceMap;
|
| 197 |
+
|
| 198 |
+
for (const auto rank : c10::irange(worldSize)) {
|
| 199 |
+
// Build traceMapStart.
|
| 200 |
+
uint64_t seqStart;
|
| 201 |
+
{
|
| 202 |
+
std::string traceKeyStart = getTraceStartKey(pgName, rank);
|
| 203 |
+
if (!store->check({traceKeyStart})) {
|
| 204 |
+
missingRanks.push_back(rank);
|
| 205 |
+
continue;
|
| 206 |
+
}
|
| 207 |
+
std::string col;
|
| 208 |
+
if (!parseTraceValue(store, traceKeyStart, seqStart, col)) {
|
| 209 |
+
return report;
|
| 210 |
+
}
|
| 211 |
+
traceMap[seqStart].emplace(rank, std::make_pair(col, kEventStart));
|
| 212 |
+
if (rank == myRank) {
|
| 213 |
+
thisSeq = seqStart;
|
| 214 |
+
thisCol = std::move(col);
|
| 215 |
+
}
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
// Build traceMapEnd.
|
| 219 |
+
{
|
| 220 |
+
std::string traceKeyEnd = getTraceEndKey(pgName, rank);
|
| 221 |
+
if (!store->check({traceKeyEnd})) {
|
| 222 |
+
continue;
|
| 223 |
+
}
|
| 224 |
+
uint64_t seq;
|
| 225 |
+
std::string col;
|
| 226 |
+
if (!parseTraceValue(store, traceKeyEnd, seq, col)) {
|
| 227 |
+
return report;
|
| 228 |
+
}
|
| 229 |
+
if (seq == seqStart) {
|
| 230 |
+
traceMap[seq][rank].second = kEventEnd;
|
| 231 |
+
}
|
| 232 |
+
}
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
TORCH_INTERNAL_ASSERT(
|
| 236 |
+
!missingRanks.empty() || !traceMap.empty(),
|
| 237 |
+
"Trace shouldn't be empty while enabled GLOO_ASYNC_TIMEOUT_DEBUG");
|
| 238 |
+
TORCH_INTERNAL_ASSERT(
|
| 239 |
+
!thisCol.empty(),
|
| 240 |
+
"Timeout rank [",
|
| 241 |
+
myRank,
|
| 242 |
+
"] must have collective tracking iteam in c10::Store trace");
|
| 243 |
+
TORCH_INTERNAL_ASSERT(
|
| 244 |
+
traceMap[thisSeq][myRank].second == kEventStart,
|
| 245 |
+
"Timeout rank [",
|
| 246 |
+
myRank,
|
| 247 |
+
"] last trace item must be kEventStart. thisSeq = ",
|
| 248 |
+
thisSeq,
|
| 249 |
+
", col = ",
|
| 250 |
+
thisCol);
|
| 251 |
+
|
| 252 |
+
report += c10::str(
|
| 253 |
+
"\n\t - [", myRank, "] Timeout at collective: ", thisCol, ", #", thisSeq);
|
| 254 |
+
|
| 255 |
+
if (!missingRanks.empty()) {
|
| 256 |
+
report += analyzeMissingRanks(missingRanks);
|
| 257 |
+
} else {
|
| 258 |
+
report += analyzeLaggingRanks(traceMap);
|
| 259 |
+
report += dumpSnapshot(traceMap);
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
return report;
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
/* Trace Utils Related to Flight Recorder */
|
| 266 |
+
|
| 267 |
+
/* Note: this is only used by PGNCCL (could be generalized in an ideal world but
|
| 268 |
+
* wasn't done that way, so isn't expected to be fully general at the moment) */
|
| 269 |
+
|
| 270 |
+
#ifdef USE_C10D_NCCL
|
| 271 |
+
|
| 272 |
+
DebugInfoWriter::DebugInfoWriter(int rank) {
|
| 273 |
+
std::string fileName = getCvarString(
|
| 274 |
+
{"TORCH_NCCL_DEBUG_INFO_TEMP_FILE"}, "/tmp/nccl_trace_rank_");
|
| 275 |
+
filename_ = c10::str(fileName, rank);
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
DebugInfoWriter::~DebugInfoWriter() = default;
|
| 279 |
+
|
| 280 |
+
void DebugInfoWriter::write(const std::string& ncclTrace) {
|
| 281 |
+
// Open a file for writing. The ios::binary flag is used to write data as
|
| 282 |
+
// binary.
|
| 283 |
+
std::ofstream file(filename_, std::ios::binary);
|
| 284 |
+
|
| 285 |
+
// Check if the file was opened successfully.
|
| 286 |
+
if (!file.is_open()) {
|
| 287 |
+
LOG(ERROR) << "Error opening file for writing NCCLPG debug info: "
|
| 288 |
+
<< filename_;
|
| 289 |
+
return;
|
| 290 |
+
}
|
| 291 |
+
|
| 292 |
+
file.write(ncclTrace.data(), ncclTrace.size());
|
| 293 |
+
LOG(INFO) << "Finished writing NCCLPG debug info to " << filename_;
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
inline std::string pickle_str(const c10::IValue& v) {
|
| 297 |
+
std::vector<char> result;
|
| 298 |
+
{
|
| 299 |
+
auto writer = [&](const char* data, size_t size) {
|
| 300 |
+
result.insert(result.end(), data, data + size);
|
| 301 |
+
};
|
| 302 |
+
torch::jit::Pickler pickler(
|
| 303 |
+
writer, nullptr, nullptr, nullptr, nullptr, false);
|
| 304 |
+
pickler.protocol();
|
| 305 |
+
pickler.pushIValue(v);
|
| 306 |
+
pickler.stop();
|
| 307 |
+
}
|
| 308 |
+
return std::string(result.begin(), result.end());
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
inline c10::Dict<c10::IValue, c10::IValue> new_dict() {
|
| 312 |
+
return c10::Dict<c10::IValue, c10::IValue>(
|
| 313 |
+
c10::AnyType::get(), c10::AnyType::get());
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
inline c10::List<c10::IValue> new_list() {
|
| 317 |
+
return c10::List<c10::IValue>(c10::AnyType::get());
|
| 318 |
+
}
|
| 319 |
+
|
| 320 |
+
// Process-wide circular buffer of recorded NCCL collectives, used to dump
// debug information (e.g. from the watchdog on timeout). Tracing is enabled
// only when TORCH_NCCL_TRACE_BUFFER_SIZE > 0.
struct NCCLTraceBuffer {
  // Singleton accessor.
  static NCCLTraceBuffer* get() {
    // intentionally leak on exit
    // because this will hold python state that may get destructed
    static NCCLTraceBuffer* instance = new NCCLTraceBuffer();
    return instance;
  }
  NCCLTraceBuffer() {
    // Buffer capacity; 0 (the default) disables tracing entirely.
    max_entries_ = getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0);
    // Whether to also capture C++ frames (more expensive than python-only).
    capture_cpp_stack_ = getCvarBool({"TORCH_NCCL_TRACE_CPP_STACK"}, false);
    enabled_ = max_entries_ > 0;
  }
  using EventList = std::vector<at::cuda::CUDAEvent>;
  // One recorded collective operation.
  struct Entry {
    size_t id_; // incremented id in the trace buffer
    // used to figure out where in the circular entries
    // buffer this entry will be located to
    // update state information
    size_t pg_id_;
    size_t seq_id_; // as tracked by the process group
    const char* profiling_name_;

    std::shared_ptr<torch::CapturedTraceback> traceback_;
    // we borrow pointers to start_ and end_ so we can query the state
    // on reporting. However, once the event is completed, the call
    // to `complete` will clear these.
    EventList *start_, *end_;

    // timestamp when the entry was created, likely close to the time the work
    // was 'enqueued'- not necessarily started
    c10::time_t time_created_;

    // "scheduled" -> "started" -> "completed"; advanced by update_state().
    const char* state_ = "scheduled";

    // size information for input/output tensors
    c10::SmallVector<int, 4> input_dims_;
    c10::SmallVector<int, 4> output_dims_;
    c10::SmallVector<int64_t, 8> sizes_; // flattened from inputs, outputs
    bool retired_ = false; // is this work entry no longer in the workMetaList_?
    // a retired but not completed event has timed out
  };

  bool enabled_ = false;
  bool capture_cpp_stack_ = false;
  std::mutex mutex_; // guards entries_, next_, id_
  std::vector<Entry> entries_;
  size_t max_entries_ = 0;
  size_t next_ = 0; // next slot to overwrite once the buffer is full
  size_t id_ = 0; // monotonically increasing id handed out by record()

  // Records a new entry and returns its id (for retire_id()), or nullopt
  // when tracing is disabled. `start`/`end` are borrowed pointers; they are
  // cleared when the entry is retired or dumped.
  c10::optional<size_t> record(
      size_t pg_id,
      size_t seq_id,
      const char* profiling_name,
      const std::vector<at::Tensor>& inputs,
      const std::vector<at::Tensor>& outputs,
      EventList* start,
      EventList* end) {
    if (!enabled_) {
      return c10::nullopt;
    }
    // Capture the stack before taking the lock; gathering can be slow.
    auto traceback =
        torch::CapturedTraceback::gather(true, true, capture_cpp_stack_);
    std::lock_guard<std::mutex> guard(mutex_);

    auto te = Entry{
        id_,
        pg_id,
        seq_id,
        profiling_name,
        std::move(traceback),
        std::move(start),
        std::move(end),
        c10::getTime()};

    // Only dims and flattened sizes are stored, never the tensors.
    for (const auto& input : inputs) {
      c10::IntArrayRef sizes = input.sizes();
      te.input_dims_.push_back(sizes.size());
      te.sizes_.insert(te.sizes_.end(), sizes.begin(), sizes.end());
    }

    for (const auto& output : outputs) {
      c10::IntArrayRef sizes = output.sizes();
      te.output_dims_.push_back(sizes.size());
      te.sizes_.insert(te.sizes_.end(), sizes.begin(), sizes.end());
    }

    // Grow until the buffer reaches capacity, then overwrite circularly.
    if (entries_.size() < max_entries_) {
      entries_.emplace_back(std::move(te));
    } else {
      entries_[next_++] = std::move(te);
      if (next_ == max_entries_) {
        next_ = 0;
      }
    }
    return id_++;
  }

  // Polls the entry's CUDA events and advances its state:
  // all start events complete -> "started"; all end events complete ->
  // "completed". Caller is expected to hold mutex_ (or own the snapshot).
  void update_state(Entry& r) {
    if (r.start_ != nullptr) {
      bool started = true;
      for (auto& ev : *r.start_) {
        if (!ev.query()) {
          started = false;
          break;
        }
      }
      if (started) {
        r.state_ = "started";
      }
    }
    if (r.end_ != nullptr) {
      bool completed = true;
      for (auto& ev : *r.end_) {
        if (!ev.query()) {
          completed = false;
          break;
        }
      }
      if (completed) {
        r.state_ = "completed";
      }
    }
  }

  // Returns a snapshot of the buffer in chronological order (oldest first,
  // starting from next_), refreshing each entry's state and dropping the
  // borrowed event pointers so the copies cannot dangle.
  std::vector<Entry> dump_entries() {
    std::lock_guard<std::mutex> guard(mutex_);
    std::vector<Entry> result;
    result.reserve(entries_.size());
    result.insert(result.end(), entries_.begin() + next_, entries_.end());
    result.insert(result.end(), entries_.begin(), entries_.begin() + next_);
    // query any remaining events
    for (auto& r : result) {
      update_state(r);
      r.start_ = r.end_ = nullptr;
    }
    return result;
  }

  // Marks the entry as retired (no longer tracked by the watchdog),
  // refreshing its state one last time and clearing the borrowed event
  // pointers. The id check guards against the slot having been overwritten.
  void retire_id(c10::optional<size_t> id) {
    if (!enabled_ || !id) {
      return;
    }
    std::lock_guard<std::mutex> guard(mutex_);
    auto& entry = entries_.at(*id % max_entries_);
    if (entry.id_ == *id) {
      update_state(entry);
      entry.retired_ = true;
      entry.start_ = entry.end_ = nullptr;
    }
  }

  // Serializes the buffer to a pickled list of dicts (one per entry, with
  // symbolized stack frames) for consumption from Python.
  std::string dump() {
    auto result = dump_entries();
    auto entries = new_list();
    // Interned dict keys, created once per dump.
    c10::IValue pg_id_s = "pg_id";
    c10::IValue seq_id_s = "seq_id";
    c10::IValue profiling_name_s = "profiling_name";
    c10::IValue input_sizes_s = "input_sizes";
    c10::IValue output_sizes_s = "output_sizes";
    c10::IValue time_created_s = "time_created_us";

    c10::IValue frames_s = "frames";
    c10::IValue state_s = "state";
    c10::IValue line_s = "line";
    c10::IValue name_s = "name";
    c10::IValue filename_s = "filename";
    c10::IValue retired_s = "retired";

    // Symbolize all tracebacks in one batch; frames are deduplicated and
    // entries reference them by index.
    std::vector<torch::CapturedTraceback*> tracebacks;
    for (auto& e : result) {
      tracebacks.push_back(e.traceback_.get());
    }
    torch::SymbolizedTracebacks stracebacks = torch::symbolize(tracebacks);
    std::vector<c10::IValue> all_frames;
    for (const auto& f : stracebacks.all_frames) {
      auto d = new_dict();
      d.insert(name_s, f.funcname);
      d.insert(filename_s, f.filename);
      d.insert(line_s, int64_t(f.lineno));
      all_frames.emplace_back(std::move(d));
    }

    for (auto i : c10::irange(result.size())) {
      auto& e = result.at(i);
      auto& tb = stracebacks.tracebacks.at(i);
      auto dict = new_dict();
      dict.insert(pg_id_s, int64_t(e.pg_id_));
      dict.insert(seq_id_s, int64_t(e.seq_id_));
      dict.insert(profiling_name_s, e.profiling_name_);
      // time_created_ is in ns (c10::getTime); exported as microseconds.
      dict.insert(time_created_s, int64_t(e.time_created_ / 1000));

      // Unflatten sizes_: consumes one run of `dim` values per tensor.
      auto it = e.sizes_.begin();
      auto read_sizes = [&](const c10::SmallVector<int, 4>& dims) {
        auto sizes = new_list();
        for (auto dim : dims) {
          auto arg_sizes = new_list();
          for (auto i : c10::irange(dim)) {
            (void)i;
            arg_sizes.push_back(*it++);
          }
          sizes.push_back(arg_sizes);
        }
        return sizes;
      };

      dict.insert(input_sizes_s, read_sizes(e.input_dims_));
      dict.insert(output_sizes_s, read_sizes(e.output_dims_));
      dict.insert(state_s, e.state_);
      dict.insert(retired_s, e.retired_);

      auto frames = new_list();
      for (int64_t frame : tb) {
        frames.push_back(all_frames.at(frame));
      }
      dict.insert(frames_s, frames);
      entries.push_back(dict);
    }
    return pickle_str(entries);
  }
};
|
| 541 |
+
|
| 542 |
+
#endif
|
| 543 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 4 |
+
|
| 5 |
+
#include <chrono>
|
| 6 |
+
#include <cstdint>
|
| 7 |
+
|
| 8 |
+
#include <ATen/core/Tensor.h>
|
| 9 |
+
#include <ATen/core/ivalue.h>
|
| 10 |
+
|
| 11 |
+
#include <c10/macros/Macros.h>
|
| 12 |
+
#include <c10/util/intrusive_ptr.h>
|
| 13 |
+
|
| 14 |
+
namespace c10d {
|
| 15 |
+
|
| 16 |
+
// Base class for supplementary data potentially needed by ReduceOps
// (held type-erased in ReduceOp::supplement_ and downcast at point of use).
struct TORCH_API _SupplementBase : torch::CustomClassHolder {
  ~_SupplementBase() override = default;
};
|
| 20 |
+
|
| 21 |
+
// Supplementary data specific to NCCL PREMUL_SUM
// The point of use in ProcessGroupNCCL knows how to unpack it.
// Exactly one of double_factor / tensor_factor is meaningful, depending on
// which constructor was used.
struct NCCLPreMulSumSupplement : _SupplementBase {
  double double_factor{0.0};
  at::Tensor tensor_factor;
  // NOTE(review): single-argument ctors are implicit — presumably for
  // convenience in makeNCCLPreMulSum; confirm before marking explicit.
  NCCLPreMulSumSupplement(double f) : double_factor{f} {}
  NCCLPreMulSumSupplement(at::Tensor t) : tensor_factor{std::move(t)} {
    // The scaling factor must be a scalar (single-element) tensor.
    TORCH_CHECK_EQ(tensor_factor.numel(), 1);
  }
};
|
| 31 |
+
|
| 32 |
+
// Other ReduceOps that need different supplementary data can also
// derive from _SupplementBase.
struct TORCH_API ReduceOp : torch::CustomClassHolder {
  // note(crcrpar): RedOpType could be defined outside of `ReduceOp`
  enum RedOpType : uint8_t {
    SUM = 0,
    AVG = 1,
    PRODUCT = 2,
    MIN = 3,
    MAX = 4,
    BAND = 5, // Bitwise AND
    BOR = 6, // Bitwise OR
    BXOR = 7, // Bitwise XOR
    PREMUL_SUM = 8, // Multiply by a user-supplied constant before summing.
    UNUSED = 9
  };

  ReduceOp() = default;

  // Implicit conversion from the enum. PREMUL_SUM requires a supplement and
  // must be built through makeNCCLPreMulSum instead.
  ReduceOp(RedOpType op) : op_(op) {
    TORCH_INTERNAL_ASSERT(
        op_ != PREMUL_SUM,
        "Use `torch.distributed._make_nccl_premul_sum` to create an instance of ReduceOp with PREMUL_SUM");
  }

  // NOTE(review): this branch looks inverted — when a supplement IS
  // provided, only op_ is set and the supplement is dropped; when it is
  // absent, the (null) supplement is stored and op_ keeps its SUM default.
  // makeNCCLPreMulSum below sets both fields directly and appears to be the
  // supported construction path; confirm intent before relying on this ctor.
  ReduceOp(
      RedOpType op,
      c10::intrusive_ptr<_SupplementBase> optional_supplement) {
    if (optional_supplement.get()) {
      op_ = op;
    } else {
      supplement_ = optional_supplement;
    }
  }

  // The heap resource supplement_, if it exists, is managed by a
  // c10::intrusive_ptr, so constructors and operator= can be simple
  ReduceOp(const ReduceOp& other)
      : op_(other.op_), supplement_(other.supplement_) {}

  const ReduceOp& operator=(const ReduceOp& other) {
    op_ = other.op_;
    supplement_ = other.supplement_;
    return *this;
  }

  // Implicit conversion back to the plain enum (ignores any supplement).
  operator RedOpType() const {
    return op_;
  }

  // NOTE(review): these comparison operators are non-const and compare only
  // op_, never the supplement; confirm before adding const.
  bool operator==(const std::uint8_t other) {
    TORCH_INTERNAL_ASSERT(other < 9, "Invalid other op value");
    return other == op_;
  }

  bool operator==(const ReduceOp::RedOpType other) {
    return *this == static_cast<std::uint8_t>(other);
  }

  // todo(crcrpar): Handle `RedOpType::PREMUL_SUM` with its scaling factor.
  bool operator==(const ReduceOp& other) {
    return *this == other.op_;
  }

  RedOpType op_ = SUM;
  // supplement_ is "type-erased" storage for optional supplementary
  // data the op might need.
  // The point of use will know the derived type supplement_ really is,
  // and downcast its pointer to extract the data as the needed type(s).
  // Right now, only PREMUL_SUM needs supplementary data, but the same
  // mechanism could extend to support other nontrivial reduce ops with
  // different supplementary payloads.
  c10::intrusive_ptr<_SupplementBase> supplement_;
};
|
| 106 |
+
|
| 107 |
+
template <typename T>
|
| 108 |
+
ReduceOp makeNCCLPreMulSum(const T& factor) {
|
| 109 |
+
ReduceOp rop;
|
| 110 |
+
rop.op_ = ReduceOp::PREMUL_SUM;
|
| 111 |
+
rop.supplement_ = c10::make_intrusive<NCCLPreMulSumSupplement>(factor);
|
| 112 |
+
return rop;
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
// Sentinel meaning "no per-op timeout set"; backends substitute their own
// default when they see this value.
constexpr auto kUnsetTimeout = std::chrono::milliseconds(-1);

// Per-collective option aggregates passed into the Backend/ProcessGroup
// collective entry points.

struct BroadcastOptions {
  int64_t rootRank = 0;
  int64_t rootTensor = 0;
  std::chrono::milliseconds timeout = kUnsetTimeout;
  bool asyncOp = true;
};

struct AllreduceOptions {
  ReduceOp reduceOp = ReduceOp::SUM;
  std::chrono::milliseconds timeout = kUnsetTimeout;
  // Only used by sparse allreduce; absent for dense tensors.
  c10::optional<at::Tensor> sparseIndices = c10::nullopt;
};

struct AllreduceCoalescedOptions : AllreduceOptions {};

struct ReduceOptions {
  ReduceOp reduceOp = ReduceOp::SUM;
  int64_t rootRank = 0;
  int64_t rootTensor = 0;
  std::chrono::milliseconds timeout = kUnsetTimeout;
};

struct AllgatherOptions {
  std::chrono::milliseconds timeout = kUnsetTimeout;
  bool asyncOp = true;
};

struct GatherOptions {
  int64_t rootRank = 0;
  std::chrono::milliseconds timeout = kUnsetTimeout;
};

struct ScatterOptions {
  int64_t rootRank = 0;
  std::chrono::milliseconds timeout = kUnsetTimeout;
  bool asyncOp = true;
};

struct ReduceScatterOptions {
  ReduceOp reduceOp = ReduceOp::SUM;
  std::chrono::milliseconds timeout = kUnsetTimeout;
  bool asyncOp = true;
};

struct AllToAllOptions {
  std::chrono::milliseconds timeout = kUnsetTimeout;
};

struct BarrierOptions {
  std::vector<int64_t> device_ids;
  std::chrono::milliseconds timeout = kUnsetTimeout;
  c10::optional<at::Device> device;
};

// Options handed to third-party (pluggable) backend constructors.
struct DistributedBackendOptions {
  c10::intrusive_ptr<::c10d::Store> store;
  int group_rank;
  int group_size;
  std::chrono::duration<float> timeout;
  std::string group_id;
  std::vector<int64_t> global_ranks_in_group;
};
|
| 179 |
+
|
| 180 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_UCC
|
| 4 |
+
|
| 5 |
+
#include <torch/csrc/distributed/c10d/UCCUtils.hpp>
|
| 6 |
+
|
| 7 |
+
namespace c10d {
|
| 8 |
+
|
| 9 |
+
// Forwards one collective's metadata to the comms tracer, but only when
// comms logging is enabled in torch_ucc_config (cheap no-op otherwise).
// Wrapped in do/while(0) so it behaves as a single statement.
#define RECORD_COMMS_TRACE( \
    _comms_tracer, _work, _opType, _rank, _comm_size, _inTensors, _outTensors) \
  do { \
    if (torch_ucc_config.enable_comms_logger) { \
      _comms_tracer->recordComms( \
          opTypeToString(_opType), \
          (uintptr_t)_work.get(), \
          _rank, \
          _comm_size, \
          _inTensors, \
          _outTensors); \
    } \
  } while (0)
|
| 22 |
+
|
| 23 |
+
// interfaces to collect communication traces
// Accumulates one human-readable string per recorded collective; optional
// info (root, split sizes) is staged via recordOptionalInfo() and folded
// into the next recordComms() call.
class TORCH_API CommTraceLogger : public torch::CustomClassHolder {
 private:
  std::vector<std::string> comms_trace_; // collected trace lines
  std::vector<std::string> curBlocks_; /* unused */
  std::vector<int64_t> curOutSplitSizes_; // staged for next recordComms
  std::vector<int64_t> curInSplitSizes_; // staged for next recordComms
  int curRoot_ = -1; // staged root rank; -1 = not applicable
  unsigned long seqnum = 0; // sequence number of recorded collectives

 public:
  void setCurBlock(const std::string& name); /* unused */
  void popBlock(); /* unused */
  // record root info if applicable, e.g., broadcast, gather, scatter
  void recordOptionalInfo(int root = -1);
  // record input/output splits of Alltoallv
  void recordOptionalInfo(
      const std::vector<int64_t>& outputSplitSizes = {},
      const std::vector<int64_t>& inputSplitSizes = {});
  // record essential comms information
  void recordComms(
      const std::string& collName,
      const uintptr_t workReq = 0,
      const int rank = -1,
      const int world_size = -1,
      const std::vector<at::Tensor>& inputTensors = {},
      const std::vector<at::Tensor>& outputTensor = {});
  // return collected comms traces
  std::vector<std::string>& getCommsTrace() {
    return comms_trace_;
  }
};
|
| 55 |
+
|
| 56 |
+
} // namespace c10d
|
| 57 |
+
|
| 58 |
+
#endif // USE_C10D_UCC
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_UCC
|
| 4 |
+
|
| 5 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 6 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 7 |
+
#include <ucc/api/ucc.h>
|
| 8 |
+
|
| 9 |
+
namespace c10d {
|
| 10 |
+
|
| 11 |
+
// Macro to generate the error message on a non-successful UCC return value.
// Assembles "[file:line] <prefix> <msg>, error code N: <ucc string>, system
// error code E" into _err. Expects a `logger` in the calling scope.
#define TORCH_UCC_GET_ERROR_MSG(_err, _error_msg, _result) \
  do { \
    _err = c10::str( \
        "[", \
        std::string(__FILE__), \
        ":", \
        std::to_string(__LINE__), \
        "] ", \
        logger->getLogPrefix(), \
        _error_msg, \
        ", error code ", \
        _result, \
        ": ", \
        ucc_status_string(_result), \
        ", system error code ", \
        errno); \
  } while (0)
|
| 29 |
+
|
| 30 |
+
// Macro to throw on a non-successful UCC return value.
// Evaluates _cmd once; on any status other than UCC_OK, formats the error
// via TORCH_UCC_GET_ERROR_MSG and throws through TORCH_CHECK.
#define TORCH_UCC_CHECK(_cmd, _error_msg) \
  do { \
    ucc_status_t result = _cmd; \
    if (result != UCC_OK) { \
      std::string err; \
      TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \
      TORCH_CHECK(false, err); \
    } \
  } while (0)
|
| 40 |
+
|
| 41 |
+
// Macro and throw on a non-successful UCC return value and free its request.
// Like TORCH_UCC_CHECK, but finalizes the in-flight collective request (if
// any) before throwing, so it is not leaked.
#define TORCH_UCC_CHECK_REQUEST(_request, _cmd, _error_msg) \
  do { \
    ucc_status_t result = _cmd; \
    if (result != UCC_OK) { \
      std::string err; \
      TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \
      if (_request != nullptr) { \
        ucc_collective_finalize(_request); \
      } \
      TORCH_CHECK(false, err); \
    } \
  } while (0)
|
| 54 |
+
|
| 55 |
+
// Macros to print logs with unified format
// Each prefixes the message with the logger's per-phase prefix; all expect
// a `logger` in the calling scope. DEBUG goes through VLOG(1).
#define TORCH_UCC_LOG_ERROR(_phase, _msg) \
  LOG(ERROR) << logger->getLogPrefix(_phase) << "[ERROR] " << _msg;
#define TORCH_UCC_LOG_INFO(_phase, _msg) \
  LOG(INFO) << logger->getLogPrefix(_phase) << "[INFO] " << _msg;
#define TORCH_UCC_LOG_DEBUG(_phase, _msg) \
  VLOG(1) << logger->getLogPrefix(_phase) << "[DEBUG] " << _msg;
|
| 62 |
+
|
| 63 |
+
// Lifecycle phase of the UCC process group, used to tag log lines
// (see ucc_phase_map for the printable names).
enum torch_ucc_phase_t {
  TORCH_UCC_UNKNOWN = -1,
  TORCH_UCC_INIT,
  TORCH_UCC_HEALTH_CHECK,
  TORCH_UCC_READY,
  TORCH_UCC_COLL_POST,
  TORCH_UCC_COLL_PROGRESS,
  TORCH_UCC_FINALIZE,
};
|
| 72 |
+
|
| 73 |
+
// Printable name for each lifecycle phase, used in log prefixes.
// NOTE(review): a non-inline `const std::map` defined in a header gives
// every including TU its own copy; consider `inline const` (C++17) —
// confirm against the project's build standard before changing.
const std::map<torch_ucc_phase_t, std::string> ucc_phase_map = {
    {TORCH_UCC_UNKNOWN, "UNKNOWN"},
    {TORCH_UCC_INIT, "INIT"},
    {TORCH_UCC_HEALTH_CHECK, "HEALTH_CHECK"},
    {TORCH_UCC_READY, "READY"},
    {TORCH_UCC_COLL_POST, "COLL_POST"},
    {TORCH_UCC_COLL_PROGRESS, "COLL_PROGRESS"},
    {TORCH_UCC_FINALIZE, "FINALIZE"},
};
|
| 82 |
+
|
| 83 |
+
class CommTraceLogger;
|
| 84 |
+
|
| 85 |
+
// Logger shared across a UCC process group: produces phase-tagged log
// prefixes and owns the optional comms tracer.
class TORCH_API ProcessGroupUCCLogger : public torch::CustomClassHolder {
 public:
  ProcessGroupUCCLogger();
  ProcessGroupUCCLogger(std::string log_prefix, torch_ucc_phase_t phase);

  // Returns the log prefix for `phase`; defaults to the current phase
  // when called with TORCH_UCC_UNKNOWN.
  std::string getLogPrefix(torch_ucc_phase_t phase = TORCH_UCC_UNKNOWN);
  void setLogPrefix(std::string log_prefix);
  // Updates the current lifecycle phase used for tagging.
  inline void setPhase(torch_ucc_phase_t phase) {
    local_phase = phase;
  }

  // Lazily creates the comms tracer; flushComms writes out what it
  // collected.
  void initCommsTracer();
  void flushComms(int rank, int world_size);
  std::shared_ptr<CommTraceLogger> trace_generator = nullptr;

 protected:
  std::string log_prefix;
  torch_ucc_phase_t local_phase = TORCH_UCC_UNKNOWN;
  bool initialized_CommTraceLogger = false; // guards initCommsTracer
};
|
| 105 |
+
|
| 106 |
+
struct torch_ucc_oob_coll_info_t {
|
| 107 |
+
c10::intrusive_ptr<Store> store;
|
| 108 |
+
uint32_t comm_id;
|
| 109 |
+
int rank;
|
| 110 |
+
int size;
|
| 111 |
+
void* rbuf;
|
| 112 |
+
size_t msglen;
|
| 113 |
+
std::string getKey(std::string key) {
|
| 114 |
+
return std::to_string(comm_id) + key;
|
| 115 |
+
}
|
| 116 |
+
};
|
| 117 |
+
|
| 118 |
+
// Abstract base for the communication engine driven by the UCC progress
// machinery; holds the shared logger.
class CommBase {
 public:
  CommBase(const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger_)
      : logger(logger_) {}
  // Drives outstanding operations forward (non-blocking poll).
  virtual void progress() = 0;
  // Releases a completed collective request.
  virtual void free_request(ucc_coll_req_h request) = 0;
  virtual ~CommBase() {}
  c10::intrusive_ptr<ProcessGroupUCCLogger> logger;
};
|
| 127 |
+
// UCC-backed implementation of CommBase; owns the UCC library and context
// handles created from the out-of-band bootstrap info.
class CommUCC : public CommBase {
 public:
  ucc_lib_h lib{nullptr};
  ucc_context_h context{nullptr};

 public:
  void progress() override;
  CommUCC(
      std::shared_ptr<torch_ucc_oob_coll_info_t> oob,
      const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger);
  void free_request(ucc_coll_req_h request) override;
  ~CommUCC();
};
|
| 140 |
+
|
| 141 |
+
// Out-of-band allgather callbacks handed to UCC for bootstrap.
// Presumably backed by the c10d Store carried in torch_ucc_oob_coll_info_t
// (whose rbuf/msglen fields mirror these parameters) — confirm in the
// corresponding .cpp.
ucc_status_t oob_allgather(
    void* sbuf,
    void* rbuf,
    size_t msglen,
    void* coll_info,
    void** req);

// Polls an in-flight OOB allgather request for completion.
ucc_status_t oob_allgather_test(void* req);

// Releases an OOB allgather request.
ucc_status_t oob_allgather_free(void* req);
|
| 151 |
+
|
| 152 |
+
// trim: remove spaces before and after the string view
|
| 153 |
+
// implementation borrowed from https://stackoverflow.com/a/17976541
|
| 154 |
+
inline c10::string_view trim(c10::string_view s) {
|
| 155 |
+
auto wsfront = std::find_if_not(
|
| 156 |
+
s.begin(), s.end(), [](int c) { return std::isspace(c); });
|
| 157 |
+
auto wsback = std::find_if_not(s.rbegin(), s.rend(), [](int c) {
|
| 158 |
+
return std::isspace(c);
|
| 159 |
+
}).base();
|
| 160 |
+
return (
|
| 161 |
+
wsback <= wsfront ? "" : s.substr(wsfront - s.begin(), wsback - wsfront));
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
// Returns a lower-cased copy of `s`.
// Fix: std::tolower has undefined behavior for negative char values; cast
// the input through unsigned char, and cast the int result back to char
// explicitly instead of narrowing implicitly.
inline std::string tolower(c10::string_view s) {
  std::string result;
  result.reserve(s.size());
  for (auto c : s) {
    result.push_back(
        static_cast<char>(std::tolower(static_cast<unsigned char>(c))));
  }
  return result;
}
|
| 172 |
+
|
| 173 |
+
// Splits a comma-separated list into lower-cased, whitespace-trimmed
// tokens. Takes `list` by value because it is consumed destructively.
inline std::vector<std::string> parse_list(std::string list) {
  std::vector<std::string> result;
  list = tolower(trim(list));
  while (!list.empty()) {
    const auto end_pos = list.find_first_of(',');
    // `token` is a view into the temporary returned by substr(); it is
    // copied into a std::string within the same full expression below,
    // before that temporary is destroyed.
    const auto token = trim(list.substr(0, end_pos));
    result.push_back(std::string(token));
    // Drop the consumed token (and its comma, if any) and continue.
    list = (end_pos != c10::string_view::npos) ? list.substr(end_pos + 1) : "";
  }
  return result;
}
|
| 184 |
+
|
| 185 |
+
} // namespace c10d
|
| 186 |
+
|
| 187 |
+
#endif // USE_C10D_UCC
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 4 |
+
|
| 5 |
+
namespace c10d {
|
| 6 |
+
namespace tcputil {
|
| 7 |
+
|
| 8 |
+
#define CONNECT_SOCKET_OFFSET 2
|
| 9 |
+
|
| 10 |
+
// Thin wrapper over POSIX poll(2); mirrors the signature of the WinSock
// variant so callers are platform-agnostic.
inline int poll(struct pollfd* fds, unsigned long nfds, int timeout) {
  return ::poll(fds, nfds, timeout);
}
|
| 13 |
+
|
| 14 |
+
// Appends a pollfd entry watching `events` on `socket` (revents
// zero-initialized by the aggregate init).
inline void addPollfd(
    std::vector<struct pollfd>& fds,
    int socket,
    short events) {
  fds.push_back({.fd = socket, .events = events});
}
|
| 20 |
+
|
| 21 |
+
// Builds a single pollfd entry for `socket`/`events`.
inline struct ::pollfd getPollfd(int socket, short events) {
  struct ::pollfd res = {.fd = socket, .events = events};
  return res;
}
|
| 25 |
+
|
| 26 |
+
} // namespace tcputil
|
| 27 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 4 |
+
|
| 5 |
+
namespace c10d {
|
| 6 |
+
namespace tcputil {
|
| 7 |
+
|
| 8 |
+
#define CONNECT_SOCKET_OFFSET 1
|
| 9 |
+
|
| 10 |
+
// Windows counterpart of the Unix wrapper: delegates to WSAPoll with the
// same signature, so callers are platform-agnostic.
inline int poll(struct pollfd* fdArray, unsigned long fds, int timeout) {
  return WSAPoll(fdArray, fds, timeout);
}
|
| 13 |
+
|
| 14 |
+
// Appends a pollfd entry watching `events` on `socket`; the int fd is cast
// to the WinSock SOCKET type (remaining fields zero-initialized).
inline void addPollfd(
    std::vector<struct pollfd>& fds,
    int socket,
    short events) {
  fds.push_back({(SOCKET)socket, events});
}
|
| 20 |
+
|
| 21 |
+
// Builds a single pollfd entry for `socket`/`events` (SOCKET cast as above).
inline struct ::pollfd getPollfd(int socket, short events) {
  struct ::pollfd res = {(SOCKET)socket, events};
  return res;
}
|
| 25 |
+
|
| 26 |
+
} // namespace tcputil
|
| 27 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ATen.h>
|
| 4 |
+
#include <stdexcept>
|
| 5 |
+
#include <vector>
|
| 6 |
+
|
| 7 |
+
constexpr auto kNoTimeout = std::chrono::milliseconds(0);
|
| 8 |
+
|
| 9 |
+
namespace c10d {
|
| 10 |
+
|
| 11 |
+
constexpr const char* const kSeqNumStoreKey = "SEQ_NUM_STORE_KEY";
|
| 12 |
+
|
| 13 |
+
// Kind of collective / point-to-point operation a Work object represents.
// NOTE(review): values appear in logs and profiling output; presumably
// stable identifiers — confirm before renumbering.
enum class OpType : std::uint8_t {
  BROADCAST = 0,
  ALLREDUCE = 1,
  ALLREDUCE_COALESCED = 2,
  REDUCE = 3,
  ALLGATHER = 4,
  _ALLGATHER_BASE = 5,
  ALLGATHER_COALESCED = 6,
  GATHER = 7,
  SCATTER = 8,
  REDUCE_SCATTER = 9,
  ALLTOALL_BASE = 10,
  ALLTOALL = 11,
  SEND = 12,
  RECV = 13,
  RECVANYSOURCE = 14,
  BARRIER = 15,
  _REDUCE_SCATTER_BASE = 16,
  COALESCED = 17,
  _ALLREDUCE_SPARSE = 18,
  UNKNOWN = 100,
};
|
| 35 |
+
|
| 36 |
+
// Converts OpType to human readable string.
TORCH_API std::string opTypeToString(OpType opType);

// Whether or not an OP is an p2p op (SEND, RECV, RECVANYSOURCE)
// `batchP2P` selects the batched-p2p classification variant.
TORCH_API bool isP2POp(OpType opType, bool batchP2P = false);
|
| 41 |
+
|
| 42 |
+
// Please do not use Work API, it is going away, to be
|
| 43 |
+
// replaced by ivalue::Future.
|
| 44 |
+
// Python binding for this class might change, please do not assume
|
| 45 |
+
// this will be bound using pybind.
|
| 46 |
+
class TORCH_API Work : public torch::CustomClassHolder {
|
| 47 |
+
public:
|
| 48 |
+
Work(
|
| 49 |
+
int rank = -1,
|
| 50 |
+
OpType opType = OpType::UNKNOWN,
|
| 51 |
+
const char* profilingTitle = nullptr,
|
| 52 |
+
const c10::optional<std::vector<at::Tensor>>& inputTensors =
|
| 53 |
+
c10::nullopt);
|
| 54 |
+
|
| 55 |
+
~Work() override;
|
| 56 |
+
|
| 57 |
+
// Checks if request has completed. Non-blocking operation.
|
| 58 |
+
virtual bool isCompleted();
|
| 59 |
+
|
| 60 |
+
// Returns if the work completed successfully.
|
| 61 |
+
// If false, the exception function can be called to get details.
|
| 62 |
+
virtual bool isSuccess() const;
|
| 63 |
+
|
| 64 |
+
// Returns exception if isSuccess() returned false.
|
| 65 |
+
virtual std::exception_ptr exception() const;
|
| 66 |
+
|
| 67 |
+
// Returns source rank if this objects represents a recv-from-any.
|
| 68 |
+
virtual int sourceRank() const;
|
| 69 |
+
|
| 70 |
+
// Returns result tensors, if applicable.
|
| 71 |
+
// If work is not supposed to have result, we return empty list.
|
| 72 |
+
virtual std::vector<at::Tensor> result();
|
| 73 |
+
|
| 74 |
+
// Ensures that operations on the output tensors that are invoked
|
| 75 |
+
// after this function returns are correctly sequenced after the
|
| 76 |
+
// asynchronous completion of this work.
|
| 77 |
+
//
|
| 78 |
+
// For CUDA tensors, it inserts stream synchronization such that
|
| 79 |
+
// the streams of the caller wait for completion of the
|
| 80 |
+
// asynchronous operations on the destination tensors.
|
| 81 |
+
//
|
| 82 |
+
// For CPU tensors, it is currently a nop.
|
| 83 |
+
//
|
| 84 |
+
// This function should only be used if the caller polls for
|
| 85 |
+
// completion through the `isCompleted` function, it has returned
|
| 86 |
+
// true, and the `isSuccess` function also has returned true.
|
| 87 |
+
//
|
| 88 |
+
virtual void synchronize();
|
| 89 |
+
|
| 90 |
+
// Waits until request completes. Blocking operation.
|
| 91 |
+
// Throws if the work completed with an exception.
|
| 92 |
+
// Returns false if the work is aborted.
|
| 93 |
+
// Otherwise, it always returns true, indicating the work is completed.
|
| 94 |
+
//
|
| 95 |
+
// Functionally equivalent to:
|
| 96 |
+
//
|
| 97 |
+
// while (!isCompleted()) { /* nop */ }
|
| 98 |
+
// auto success = isSuccess();
|
| 99 |
+
// if (!success) { std::rethrow_exception(exception()); }
|
| 100 |
+
// return success;
|
| 101 |
+
//
|
| 102 |
+
virtual bool wait(std::chrono::milliseconds timeout = kNoTimeout);
|
| 103 |
+
|
| 104 |
+
virtual void abort();
|
| 105 |
+
|
| 106 |
+
// Returns a Future object that will be associated with the completion of
|
| 107 |
+
// work. Only NCCL backend is currently supported.
|
| 108 |
+
virtual c10::intrusive_ptr<c10::ivalue::Future> getFuture();
|
| 109 |
+
|
| 110 |
+
virtual float getDuration() const;
|
| 111 |
+
|
| 112 |
+
virtual uint64_t getSequencenumber() const;
|
| 113 |
+
|
| 114 |
+
OpType retrieveOpType() const;
|
| 115 |
+
|
| 116 |
+
static c10::intrusive_ptr<Work> create_from_future(
|
| 117 |
+
const c10::intrusive_ptr<c10::ivalue::Future>&);
|
| 118 |
+
|
| 119 |
+
protected:
|
| 120 |
+
// Completes the work object and optionally sets the exception in a
|
| 121 |
+
// thread-safe manner. Notifies all waiting condition variables as well.
|
| 122 |
+
void finish(std::exception_ptr exception = nullptr);
|
| 123 |
+
|
| 124 |
+
// Similar to finish, but throws an exception if one is already set or
|
| 125 |
+
// provided by the user.
|
| 126 |
+
void finishAndThrow(std::exception_ptr exception);
|
| 127 |
+
|
| 128 |
+
mutable std::mutex mutex_;
|
| 129 |
+
std::condition_variable cv_;
|
| 130 |
+
bool completed_ = false;
|
| 131 |
+
std::exception_ptr exception_;
|
| 132 |
+
|
| 133 |
+
// Current rank of the node.
|
| 134 |
+
const int rank_;
|
| 135 |
+
|
| 136 |
+
// Operation type that this work object refers to.
|
| 137 |
+
OpType opType_;
|
| 138 |
+
|
| 139 |
+
// When profiling, the callback to record end of operation event. This
|
| 140 |
+
// callback needs to be called when collective operation is complete.
|
| 141 |
+
std::function<void()> recordFunctionEndCallback_;
|
| 142 |
+
};
|
| 143 |
+
|
| 144 |
+
struct TORCH_API WorkInfo {
|
| 145 |
+
WorkInfo(
|
| 146 |
+
const OpType& opType,
|
| 147 |
+
const std::chrono::time_point<std::chrono::system_clock>& timeStarted,
|
| 148 |
+
const std::chrono::time_point<std::chrono::system_clock>& timeFinished,
|
| 149 |
+
const std::chrono::duration<float>& activeDuration)
|
| 150 |
+
: opType(opType),
|
| 151 |
+
timeStarted(timeStarted),
|
| 152 |
+
timeFinished(timeFinished),
|
| 153 |
+
activeDuration(activeDuration) {}
|
| 154 |
+
|
| 155 |
+
OpType opType;
|
| 156 |
+
std::chrono::time_point<std::chrono::system_clock> timeStarted;
|
| 157 |
+
std::chrono::time_point<std::chrono::system_clock> timeFinished;
|
| 158 |
+
std::chrono::duration<float> activeDuration;
|
| 159 |
+
};
|
| 160 |
+
|
| 161 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace distributed {
|
| 7 |
+
namespace c10d {
|
| 8 |
+
|
| 9 |
+
PyMethodDef* python_functions();
|
| 10 |
+
|
| 11 |
+
} // namespace c10d
|
| 12 |
+
} // namespace distributed
|
| 13 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ATen.h>
|
| 4 |
+
#include <ATen/core/ivalue.h>
|
| 5 |
+
#include <torch/csrc/Export.h>
|
| 6 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 7 |
+
#include <utility>
|
| 8 |
+
|
| 9 |
+
namespace c10d {
|
| 10 |
+
|
| 11 |
+
// Broadcast many tensors to all processes in the process group.
|
| 12 |
+
TORCH_API void broadcast_coalesced(
|
| 13 |
+
const c10::intrusive_ptr<c10d::ProcessGroup>& process_group,
|
| 14 |
+
at::TensorList tensors,
|
| 15 |
+
size_t buffer_size,
|
| 16 |
+
int rank = 0);
|
| 17 |
+
|
| 18 |
+
// This class passes bucket contents tensor to DDP communication hook.
|
| 19 |
+
class TORCH_API GradBucket {
|
| 20 |
+
public:
|
| 21 |
+
explicit GradBucket(
|
| 22 |
+
size_t index,
|
| 23 |
+
size_t bucket_count,
|
| 24 |
+
at::Tensor tensor,
|
| 25 |
+
std::vector<size_t> offsets,
|
| 26 |
+
std::vector<size_t> lengths,
|
| 27 |
+
std::vector<c10::IntArrayRef> sizes_vec,
|
| 28 |
+
std::vector<at::Tensor> parameters,
|
| 29 |
+
c10::optional<at::Tensor> sparse_grad_indices)
|
| 30 |
+
: index_(index),
|
| 31 |
+
bucket_count_(bucket_count),
|
| 32 |
+
buffer_(std::move(tensor)),
|
| 33 |
+
offsets_(std::move(offsets)),
|
| 34 |
+
lengths_(std::move(lengths)),
|
| 35 |
+
sizes_vec_(std::move(sizes_vec)),
|
| 36 |
+
parameters_(std::move(parameters)),
|
| 37 |
+
sparse_grad_indices_(std::move(sparse_grad_indices)) {}
|
| 38 |
+
|
| 39 |
+
// Returns the index of the bucket, which is unique across all the buckets.
|
| 40 |
+
size_t getIndex() const {
|
| 41 |
+
return index_;
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
const at::Tensor& getBuffer() const {
|
| 45 |
+
return buffer_;
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
// Returns a mutable buffer compared with the above method.
|
| 49 |
+
at::Tensor& getBufferRef() {
|
| 50 |
+
return buffer_;
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
// Overwrites the buffer at a specific index.
|
| 54 |
+
void setBuffer(at::Tensor& buffer) {
|
| 55 |
+
buffer_ = buffer;
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// Each tensor in the list that getGradients corresponds to a
|
| 59 |
+
// parameter.
|
| 60 |
+
std::vector<at::Tensor> getGradients() const;
|
| 61 |
+
|
| 62 |
+
// Returns model parameters belonging to this bucket. They are returned in the
|
| 63 |
+
// same order as gradient tensors via getGradients(). For example,
|
| 64 |
+
// getParameters[i] will have its gradient stored in
|
| 65 |
+
// getGradients[i]
|
| 66 |
+
const std::vector<at::Tensor> getParameters() const {
|
| 67 |
+
return parameters_;
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
// Returns whther this bucket is the last bucket to allreduce in an iteration.
|
| 71 |
+
bool isLast() const {
|
| 72 |
+
return index_ == bucket_count_ - 1;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
c10::optional<at::Tensor>& getSparseGradIndices() {
|
| 76 |
+
return sparse_grad_indices_;
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
private:
|
| 80 |
+
size_t index_;
|
| 81 |
+
size_t bucket_count_;
|
| 82 |
+
at::Tensor buffer_;
|
| 83 |
+
|
| 84 |
+
// Per-variable info in buffer_.
|
| 85 |
+
std::vector<size_t> offsets_;
|
| 86 |
+
std::vector<size_t> lengths_;
|
| 87 |
+
std::vector<c10::IntArrayRef> sizes_vec_;
|
| 88 |
+
|
| 89 |
+
// Model parameters for this bucket.
|
| 90 |
+
const std::vector<at::Tensor> parameters_;
|
| 91 |
+
|
| 92 |
+
// Predefined sparse indices for this bucket (only used for sparse tensors).
|
| 93 |
+
// The gradients will be updated to have indices with these tensor values
|
| 94 |
+
c10::optional<at::Tensor> sparse_grad_indices_;
|
| 95 |
+
};
|
| 96 |
+
|
| 97 |
+
// Base class of both `PythonCommHook` and `CppCommHook`.
|
| 98 |
+
// Requires implementing 1) `runHook` method that communicates gradients
|
| 99 |
+
// asynchronously, and 2) `parseHookResult` method that converts the hook
|
| 100 |
+
// result into a tensor.
|
| 101 |
+
class TORCH_API CommHookInterface {
|
| 102 |
+
public:
|
| 103 |
+
virtual ~CommHookInterface() = default;
|
| 104 |
+
|
| 105 |
+
// Passes the input grad bucket to the registered communication hook.
|
| 106 |
+
// Once the tensor in the bucket are ready, kicks off the hook asynchronously
|
| 107 |
+
// and returns a future that holds the communication results.
|
| 108 |
+
virtual c10::intrusive_ptr<c10::ivalue::Future> runHook(
|
| 109 |
+
GradBucket& bucket) = 0;
|
| 110 |
+
|
| 111 |
+
// Returns the resulting tensor once the communication hook result is
|
| 112 |
+
// ready. The resulting tensor will then be copied to the grads of
|
| 113 |
+
// individual parameters.
|
| 114 |
+
virtual at::Tensor parseHookResult(const c10::IValue& result) = 0;
|
| 115 |
+
};
|
| 116 |
+
|
| 117 |
+
namespace detail {
|
| 118 |
+
// This helper function is called both by CppCommHookInterface below and inside
|
| 119 |
+
// reducer.
|
| 120 |
+
TORCH_API at::Tensor parseCppCommHookResult(const c10::IValue& result);
|
| 121 |
+
} // namespace detail
|
| 122 |
+
|
| 123 |
+
// This CppCommHook interface only requires implementing runHook method that
|
| 124 |
+
// potentially uses a state.
|
| 125 |
+
template <typename T>
|
| 126 |
+
class CppCommHookInterface : public CommHookInterface {
|
| 127 |
+
public:
|
| 128 |
+
explicit CppCommHookInterface(T state) : state_(std::move(state)) {}
|
| 129 |
+
|
| 130 |
+
~CppCommHookInterface() override = default;
|
| 131 |
+
|
| 132 |
+
at::Tensor parseHookResult(const c10::IValue& result) override {
|
| 133 |
+
return detail::parseCppCommHookResult(result);
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
protected:
|
| 137 |
+
T state_;
|
| 138 |
+
};
|
| 139 |
+
|
| 140 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 4 |
+
#include <torch/csrc/distributed/c10d/comm.hpp>
|
| 5 |
+
|
| 6 |
+
namespace c10d {
|
| 7 |
+
|
| 8 |
+
enum class BuiltinCommHookType {
|
| 9 |
+
ALLREDUCE = 1,
|
| 10 |
+
FP16_COMPRESS = 2,
|
| 11 |
+
};
|
| 12 |
+
|
| 13 |
+
class AllReduceCommHook
|
| 14 |
+
: public CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>> {
|
| 15 |
+
public:
|
| 16 |
+
explicit AllReduceCommHook(const c10::intrusive_ptr<ProcessGroup>& state)
|
| 17 |
+
: CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>>(state) {}
|
| 18 |
+
|
| 19 |
+
~AllReduceCommHook() override = default;
|
| 20 |
+
|
| 21 |
+
c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
|
| 22 |
+
};
|
| 23 |
+
|
| 24 |
+
class FP16CompressCommHook
|
| 25 |
+
: public CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>> {
|
| 26 |
+
public:
|
| 27 |
+
explicit FP16CompressCommHook(const c10::intrusive_ptr<ProcessGroup>& state)
|
| 28 |
+
: CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>>(state) {}
|
| 29 |
+
|
| 30 |
+
~FP16CompressCommHook() override = default;
|
| 31 |
+
|
| 32 |
+
c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
|
| 33 |
+
};
|
| 34 |
+
|
| 35 |
+
// Almost same as AllReduceCommHook, but without division inside the hook.
|
| 36 |
+
// This enables the optimization of fusing copy and division and saves one scan
|
| 37 |
+
// over all the input parameters, when no communication hook is provided by the
|
| 38 |
+
// user. Only used internally and not released as a public built-in
|
| 39 |
+
// communication hook.
|
| 40 |
+
class _AllReduceBySumCommHook
|
| 41 |
+
: public CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>> {
|
| 42 |
+
public:
|
| 43 |
+
explicit _AllReduceBySumCommHook(
|
| 44 |
+
const c10::intrusive_ptr<ProcessGroup>& state)
|
| 45 |
+
: CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>>(state) {}
|
| 46 |
+
|
| 47 |
+
~_AllReduceBySumCommHook() override = default;
|
| 48 |
+
|
| 49 |
+
c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
|
| 50 |
+
};
|
| 51 |
+
|
| 52 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
|
| 9 |
+
#include <stdexcept>
|
| 10 |
+
|
| 11 |
+
#include <c10/macros/Macros.h>
|
| 12 |
+
#include <c10/util/Exception.h>
|
| 13 |
+
|
| 14 |
+
// Utility macro similar to C10_THROW_ERROR, the major difference is that this
|
| 15 |
+
// macro handles exception types defined in the c10d namespace, whereas
|
| 16 |
+
// C10_THROW_ERROR requires an exception to be defined in the c10 namespace.
|
| 17 |
+
#define C10D_THROW_ERROR(err_type, msg) \
|
| 18 |
+
throw ::c10d::err_type( \
|
| 19 |
+
{__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
|
| 20 |
+
|
| 21 |
+
namespace c10d {
|
| 22 |
+
|
| 23 |
+
using c10::DistNetworkError;
|
| 24 |
+
|
| 25 |
+
class TORCH_API SocketError : public DistNetworkError {
|
| 26 |
+
using DistNetworkError::DistNetworkError;
|
| 27 |
+
};
|
| 28 |
+
|
| 29 |
+
class TORCH_API TimeoutError : public DistNetworkError {
|
| 30 |
+
using DistNetworkError::DistNetworkError;
|
| 31 |
+
};
|
| 32 |
+
|
| 33 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <c10/util/Logging.h>
|
| 2 |
+
#include <torch/csrc/distributed/c10d/reducer.hpp>
|
| 3 |
+
|
| 4 |
+
#include <mutex>
|
| 5 |
+
|
| 6 |
+
namespace c10d {
|
| 7 |
+
|
| 8 |
+
class TORCH_API Logger {
|
| 9 |
+
public:
|
| 10 |
+
explicit Logger(std::shared_ptr<c10d::Reducer> reducer);
|
| 11 |
+
// Set logging data that can be got during DistributedDataParallel
|
| 12 |
+
// construction time.
|
| 13 |
+
void set_construction_data_and_log(
|
| 14 |
+
const std::string& module_name,
|
| 15 |
+
const std::vector<int>& device_ids,
|
| 16 |
+
int output_device,
|
| 17 |
+
bool broadcast_buffers,
|
| 18 |
+
bool has_sync_bn,
|
| 19 |
+
bool static_graph);
|
| 20 |
+
|
| 21 |
+
void set_static_graph();
|
| 22 |
+
|
| 23 |
+
// An interface for users to get DDPLoggingData and log them
|
| 24 |
+
// in the applications. Explanation of logging fields are in
|
| 25 |
+
// "struct DDPLoggingData" of "torch/c10/util/Logging.h".
|
| 26 |
+
at::DDPLoggingData get_ddp_logging_data();
|
| 27 |
+
|
| 28 |
+
// Stream insertion operator for logging data to stream under
|
| 29 |
+
// TORCH_DISTRIBUTED_DEBUG.
|
| 30 |
+
friend std::ostream& operator<<(std::ostream& output, const Logger& logger);
|
| 31 |
+
|
| 32 |
+
~Logger() noexcept(false) {
|
| 33 |
+
// Log if DDP graph is static in Logger dtor instead of Reducer dtor since
|
| 34 |
+
// Logger is deleted before Reducer.
|
| 35 |
+
log_if_graph_static(reducer_->ddp_graph_static());
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
// Set environment variables.
|
| 39 |
+
void set_env_variables();
|
| 40 |
+
// Set parameters stats.
|
| 41 |
+
void set_parameter_stats();
|
| 42 |
+
// Get size of each bucket (Bytes).
|
| 43 |
+
std::vector<int64_t> get_bucket_sizes();
|
| 44 |
+
// Get variable indices for each bucket.
|
| 45 |
+
std::vector<std::vector<size_t>> get_per_bucket_variable_indices();
|
| 46 |
+
// Set comm. hook, if used
|
| 47 |
+
void set_comm_hook(const std::string& hook);
|
| 48 |
+
// Set running with uneven input detection (model.join() context manager)
|
| 49 |
+
void set_uneven_input_join();
|
| 50 |
+
|
| 51 |
+
// Reset performance stats at current iteration
|
| 52 |
+
void reset_performance_stats();
|
| 53 |
+
|
| 54 |
+
// Calculate avg stats using cpu timer and gpu timer
|
| 55 |
+
// that has been recorded in reducer.
|
| 56 |
+
void calculate_avg_time(
|
| 57 |
+
int64_t& avg_time,
|
| 58 |
+
int64_t& time_duration,
|
| 59 |
+
Timer& timer,
|
| 60 |
+
Timer::Event start_event,
|
| 61 |
+
Timer::Event end_event);
|
| 62 |
+
|
| 63 |
+
// Set the absolute time of the event that has been recorded in reducer.
|
| 64 |
+
void set_event_time(int64_t& event_time, Timer& timer, Timer::Event event);
|
| 65 |
+
// Set stats that can be collected only during
|
| 66 |
+
// training loop. It is called at the beginning of forward call
|
| 67 |
+
// to record the run time stats of sampled iterations that previously ran.
|
| 68 |
+
// GPU performance stats are collected only for single process
|
| 69 |
+
// single device program and single device module right now.
|
| 70 |
+
// TODO to support single process multiple devices and multi device modules,
|
| 71 |
+
// events need to be created and recorded on multiple devices.
|
| 72 |
+
void set_runtime_stats_and_log();
|
| 73 |
+
|
| 74 |
+
// Called when DDP/reducer is failing with an error. The
|
| 75 |
+
// logging data structure will have two fields filled: "has_error" indicating
|
| 76 |
+
// that this iteration encountered an error and other fields are not valid,
|
| 77 |
+
// and "error", a string which contains the error message that DDP failed
|
| 78 |
+
// with.
|
| 79 |
+
template <typename... Args>
|
| 80 |
+
void set_error_and_log(const std::string& ddp_error, const Args&... args) {
|
| 81 |
+
ddp_logging_data_->ints_map["has_error"] = 1;
|
| 82 |
+
auto err = c10::str(ddp_error, args...);
|
| 83 |
+
ddp_logging_data_->strs_map["error"] = err;
|
| 84 |
+
// Report the iteration we are erroring at so user knows how many examples
|
| 85 |
+
// successfully processed before this error was hit.
|
| 86 |
+
ddp_logging_data_->ints_map["iteration"] = reducer_->num_iterations_;
|
| 87 |
+
at::LogPyTorchDDPUsage(*ddp_logging_data_);
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
// When running without static graph, called when reducer is destroyed to log
|
| 91 |
+
// if graph was actually static and is a candidate for static graph
|
| 92 |
+
// optimization.
|
| 93 |
+
void log_if_graph_static(bool is_static);
|
| 94 |
+
|
| 95 |
+
private:
|
| 96 |
+
// ddp_logging_data_ is used to hold all the ddp related logging
|
| 97 |
+
// data fields.
|
| 98 |
+
std::unique_ptr<at::DDPLoggingData> ddp_logging_data_;
|
| 99 |
+
std::shared_ptr<c10d::Reducer> reducer_;
|
| 100 |
+
// track the number of iterations when runtime stats are collected so far.
|
| 101 |
+
long num_iterations_stats_recorded_ = 0;
|
| 102 |
+
};
|
| 103 |
+
|
| 104 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Meta Platforms, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
|
| 9 |
+
#include <string>
|
| 10 |
+
|
| 11 |
+
#include <c10/macros/Macros.h>
|
| 12 |
+
#include <c10/util/Logging.h>
|
| 13 |
+
#include <fmt/format.h>
|
| 14 |
+
|
| 15 |
+
namespace c10d {
|
| 16 |
+
namespace detail {
|
| 17 |
+
|
| 18 |
+
enum class LogLevel { Trace, Debug, Info, Warning, Error };
|
| 19 |
+
|
| 20 |
+
TORCH_API bool isLogLevelEnabled(LogLevel level) noexcept;
|
| 21 |
+
|
| 22 |
+
template <typename... T>
|
| 23 |
+
std::string formatLogMessage(fmt::string_view fmt, T&&... args) {
|
| 24 |
+
return fmt::vformat(fmt, fmt::make_format_args(args...));
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
} // namespace detail
|
| 28 |
+
} // namespace c10d
|
| 29 |
+
|
| 30 |
+
#define C10D_ERROR(...) \
|
| 31 |
+
LOG_IF( \
|
| 32 |
+
ERROR, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Error)) \
|
| 33 |
+
<< "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
|
| 34 |
+
|
| 35 |
+
#define C10D_WARNING(...) \
|
| 36 |
+
LOG_IF( \
|
| 37 |
+
WARNING, \
|
| 38 |
+
c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Warning)) \
|
| 39 |
+
<< "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
|
| 40 |
+
|
| 41 |
+
#define C10D_INFO(...) \
|
| 42 |
+
LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Info)) \
|
| 43 |
+
<< "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
|
| 44 |
+
|
| 45 |
+
#define C10D_DEBUG(...) \
|
| 46 |
+
LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Debug)) \
|
| 47 |
+
<< "[c10d - debug] " << c10d::detail::formatLogMessage(__VA_ARGS__)
|
| 48 |
+
|
| 49 |
+
#define C10D_TRACE(...) \
|
| 50 |
+
LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Trace)) \
|
| 51 |
+
<< "[c10d - trace] " << c10d::detail::formatLogMessage(__VA_ARGS__)
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/comm.hpp>
|
| 4 |
+
|
| 5 |
+
#include <ATen/ATen.h>
|
| 6 |
+
#include <ATen/core/ivalue.h>
|
| 7 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 8 |
+
#include <torch/csrc/utils/pybind.h>
|
| 9 |
+
|
| 10 |
+
namespace c10d {
|
| 11 |
+
|
| 12 |
+
class TORCH_PYTHON_API PythonCommHook : public CommHookInterface {
|
| 13 |
+
public:
|
| 14 |
+
// Takes a state and a callable hook. The inputs are Python objects.
|
| 15 |
+
// The state is passed to the hook in runHook method, and it can be used to
|
| 16 |
+
// maintain and update any state information during the execution of the hook.
|
| 17 |
+
// The hook performs user-specified processing and returns a future indicating
|
| 18 |
+
// asychronous communication of gradients.
|
| 19 |
+
PythonCommHook(py::object state, py::object hook)
|
| 20 |
+
: state_(std::move(state)), hook_(std::move(hook)) {}
|
| 21 |
+
|
| 22 |
+
~PythonCommHook() override;
|
| 23 |
+
|
| 24 |
+
c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
|
| 25 |
+
|
| 26 |
+
at::Tensor parseHookResult(const c10::IValue& result) override;
|
| 27 |
+
|
| 28 |
+
private:
|
| 29 |
+
// Only needed for stateful communication.
|
| 30 |
+
py::object state_;
|
| 31 |
+
py::object hook_;
|
| 32 |
+
};
|
| 33 |
+
|
| 34 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp
ADDED
|
@@ -0,0 +1,589 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/ScalarType.h>
|
| 4 |
+
#include <atomic>
|
| 5 |
+
#include <memory>
|
| 6 |
+
#include <mutex>
|
| 7 |
+
#include <tuple>
|
| 8 |
+
#include <unordered_map>
|
| 9 |
+
#include <vector>
|
| 10 |
+
|
| 11 |
+
#include <ATen/core/ivalue_inl.h>
|
| 12 |
+
#include <c10/macros/Macros.h>
|
| 13 |
+
#include <c10/util/intrusive_ptr.h>
|
| 14 |
+
#include <torch/csrc/autograd/function.h>
|
| 15 |
+
#include <torch/csrc/autograd/profiler.h>
|
| 16 |
+
#include <torch/csrc/autograd/variable.h>
|
| 17 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 18 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 19 |
+
#include <torch/csrc/distributed/c10d/comm.hpp>
|
| 20 |
+
#include <torch/csrc/distributed/c10d/debug.h>
|
| 21 |
+
#include <torch/csrc/distributed/c10d/default_comm_hooks.hpp>
|
| 22 |
+
#include <torch/csrc/distributed/c10d/reducer_timer.hpp>
|
| 23 |
+
#ifndef _WIN32
|
| 24 |
+
#include <torch/csrc/distributed/autograd/context/context.h>
|
| 25 |
+
#endif
|
| 26 |
+
|
| 27 |
+
namespace c10d {
|
| 28 |
+
|
| 29 |
+
constexpr int kDefaultFirstBucketBytes = int(1024 * 1024);
|
| 30 |
+
constexpr int kDefaultBucketBytesCap = int(25 * 1024 * 1024);
|
| 31 |
+
// Collect runtime stats once for every kDDPRuntimeLoggingSampleRate iterations.
|
| 32 |
+
constexpr int kDDPRuntimeLoggingSampleRate = 100;
|
| 33 |
+
|
| 34 |
+
// Forward declaration
|
| 35 |
+
class Logger;
|
| 36 |
+
|
| 37 |
+
// Local accumulator type for a single bucket.
|
| 38 |
+
struct BucketAccumulator {
|
| 39 |
+
std::vector<size_t> indices;
|
| 40 |
+
size_t size = 0;
|
| 41 |
+
size_t size_limit = 0;
|
| 42 |
+
};
|
| 43 |
+
|
| 44 |
+
// Reducer is the C++ core of DistributedDataParallel's gradient
// synchronization. It installs autograd hooks on the model parameters,
// collects ready gradients into flattened buckets, and launches allreduce
// (or a registered communication hook) per bucket during the backward pass.
class TORCH_API Reducer {
 public:
  // The constructor takes a list of variables (i.e. parameters) for this
  // process's single model replica (as DDP assumes single-process
  // single-device). The bucket assignment for this reducer, `bucket_indices`,
  // is specified as a list of buckets, each of which is specified as a list of
  // indices into the bucket's `variables` list.
  explicit Reducer(
      std::vector<at::Tensor> params,
      std::vector<std::vector<size_t>> bucket_indices,
      std::vector<size_t> per_bucket_size_limits,
      c10::intrusive_ptr<c10d::ProcessGroup> process_group,
      std::vector<bool> expect_sparse_gradients,
      int64_t bucket_bytes_cap,
      bool find_unused_parameters,
      bool gradient_as_bucket_view,
      std::unordered_map<size_t, std::string> param_names,
      int64_t first_bucket_bytes_cap);

  // NOTE(review): declared noexcept(false), i.e. the destructor is allowed to
  // throw — confirm the exact conditions against reducer.cpp.
  ~Reducer() noexcept(false);

  // To (re-)initialize bucket assignment, pass a list of buckets, each of
  // which is specified by a list of indices in the bucket's `variables` list.
  // This function performs validation that the variables within a bucket
  // all live on the same device and have the same dimensionality.
  void initialize_buckets(std::vector<std::vector<size_t>> bucket_indices);

  // Invoked as each parameter's gradient becomes ready during backward;
  // `index` identifies the variable (see also local_used_map_ below, which is
  // marked inside this hook).
  void autograd_hook(size_t index);

  // This function is called when the forward function has produced an output,
  // and the user wishes to reduce gradients in the backwards pass.
  // If they don't, and wish to accumulate gradients before reducing them,
  // a call to this function can simply be omitted.
  void prepare_for_backward(const std::vector<at::Tensor>& outputs);

  // Called at the beginning of forward() inside DistributedDataParallel,
  // right now it captures the starting time of forward in each iteration.
  void prepare_for_forward();

  // Returns the relative time in nanoseconds when gradients were ready,
  // with respect to the time `prepare_for_backward` was called. The
  // vector is for parameters for a single model replica.
  std::vector<int64_t> get_backward_stats() const {
    return backward_stats_;
  }

  // Registers a hook to the reducer. The hook is `CommHookInterface`
  // type to allow both Python and CPP hooks. This function can only
  // be called once before calling backward.
  // Cannot combine with the call of `register_builtin_comm_hook`.
  void register_comm_hook(std::unique_ptr<CommHookInterface> iface);

  // Registers a built-in C++ comm hook to the reducer. This function can only
  // be called once before calling backward.
  // Cannot combine with the call of `register_comm_hook`.
  void register_builtin_comm_hook(c10d::BuiltinCommHookType comm_hook_type);

  // Informs reducer that optimizer is running in backward, so gradients
  // don't need to be copied from buckets as the optimizer would've already
  // been applied.
  void set_optimizer_in_backward() {
    optim_in_backward_ = true;
  };

  // Runs allreduce or installed communication hook given GradBucket instance.
  c10::intrusive_ptr<c10::ivalue::Future> run_comm_hook(
      GradBucket& grad_bucket);

  // Runs default allreduce hook.
  c10::intrusive_ptr<c10::ivalue::Future> run_allreduce_hook(
      GradBucket& grad_bucket);

  // Returns gradient buckets in sequential order of buckets_. This is the order
  // in which buckets are reduced across processes. If return_zero_tensors=true,
  // will return zero tensors of the same shape instead of the true tensors.
  std::vector<c10d::GradBucket> get_grad_buckets(
      bool return_zero_tensors = true) const;

  // Rebuild buckets based on rebuilt_params_ and rebuilt_param_indices_
  // according to when tensors received grads in the backward pass.
  // TODO this function makes broadcast communication call and
  // could be overlapped with next forward() call, thus
  // it could be async. Will make it async when rebuilding buckets for
  // find_unused_parameters = true case, as we could rebuild buckets more than
  // once for find_unused_parameters = true case, where subgraphs are trained
  // and parameter indices order may change more frequently.
  // For find_unused_parameters = false case, buckets are only rebuilt once,
  // the performance cost is negligible. Returns true if the buckets were
  // rebuilt.
  bool rebuild_buckets();

  // Sets the sparse-allreduce index metadata (see sparse_metadata_ below).
  void setSparseMetadata(std::map<std::string, at::Tensor>& metadata);

  // Install futures that should be awaited at end of backwards. Currently these
  // are only used by user-defined custom buffer reduction hooks, but can be
  // generalized to any user-originating futures that need to be awaited.
  void install_futures(c10::List<c10::intrusive_ptr<c10::ivalue::Future>> futs);

  // Returns true if we should rebuild buckets, else false. We only rebuild
  // buckets once after the first iteration and never rebuild them if
  // find_unused_parameters_.
  inline bool should_rebuild_buckets() const {
    return (static_graph_ || !find_unused_parameters_) && !has_rebuilt_bucket_;
  }

  // Pushes all parameters to be rebuilt.
  void push_rebuilt_params_for_all_indices();

  // Creates and sets ForwardPassWorkHandle given a Work and the
  // corresponding tensor being reduced.
  void set_forward_pass_work_handle(
      c10::intrusive_ptr<c10d::Work> forwardPassWorkHandle,
      bool useStaticWorldSize);

  // Retrieve on-device tensors used to track locally unused parameters. It is
  // a tensor where index i = 1 if the Variable with that index has been used.
  at::Tensor get_local_used_map_on_device() const;

  // A function for users to set sample_rate of collecting
  // runtime stats. The time stats will be recorded for the
  // first 10 iterations, after 10 iterations time stats will be
  // recorded once every "sample_rate" training iterations.
  void set_ddp_runtime_logging_sample_rate(int sample_rate);

  // Specify the training graph is static.
  void set_static_graph();

  // Delay all reduce to be after all gradients' calculation is complete.
  void delay_all_reduce();

  // Sets the expected reduced-precision dtype used for bucket type checking
  // (see mixed_precision_param_dtype_ below).
  void set_mixed_precision_param_dtype(c10::ScalarType dtype);

  // Weak reference to associated DDP logger. The reference is weak to avoid
  // refcycle between reducer and logger.
  void set_logger(std::weak_ptr<c10d::Logger> logger);

  // When graph is not explicitly set by user as static and has unused
  // parameters, this will return whether the graph has been static until the
  // current iteration, which means unused params set has not changed.
  bool ddp_graph_static();

  // Removes autograd hooks registered by the Reducer on the model parameters.
  void remove_autograd_hooks();

  // Checks whether or not the reducer has finalized the current backward
  // iteration.
  void check_finalized();

  // Updates the underlying process group used by DDP with the new process
  // group.
  void update_process_group(
      c10::intrusive_ptr<c10d::ProcessGroup> new_process_group);

  // Resets reducer state.
  void reset_state();

 protected:
  // Forward declaration.
  struct Bucket;

  // Records the parameter at `index` for the next bucket rebuild (feeds
  // rebuilt_params_ / rebuilt_param_indices_ below).
  void push_rebuilt_params(const size_t& index);

  // Protects the reducer's internal state; mutable so const methods can lock.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  mutable std::mutex mutex_;
  // The model parameters this reducer manages, as passed to the constructor.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const std::vector<at::Tensor> params_;
  // Process group used for the collective communication calls.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  c10::intrusive_ptr<::c10d::ProcessGroup> process_group_;
  // Per-parameter flag: whether a sparse gradient is expected for that param.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::vector<bool> expect_sparse_gradients_;

  // Gradient-accumulator autograd nodes the hooks are attached to, one per
  // parameter.
  std::vector<std::shared_ptr<torch::autograd::Node>>
      grad_accumulators_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)
  // Maps a gradient-accumulator node back to its parameter index.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::unordered_map<torch::autograd::Node*, size_t> gradAccToVariableMap_;
  // The installed autograd hooks (key + owning node), kept so
  // remove_autograd_hooks() can undo the registration.
  std::vector<std::pair<uintptr_t, std::shared_ptr<torch::autograd::Node>>>
      hooks_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)

  // Whether the autograd hooks are expected to fire this iteration.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool expect_autograd_hooks_;
  // Whether finalize_backward() still needs to run for this iteration.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool require_finalize_;
  // Index of the next bucket expected to become ready (buckets are reduced in
  // a fixed order across processes).
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  size_t next_bucket_;

  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool has_marked_unused_parameters_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const bool find_unused_parameters_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const bool gradient_as_bucket_view_;
  // Indices of parameters found unused in the current iteration.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::vector<size_t> unused_parameters_;
  // Previous iteration's unused params, used for checking if unused parameters
  // change between iterations. Only filled during the first backwards call.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::vector<size_t> prev_iteration_unused_parameters_;
  // Whether graph is static or not. When user does not explicitly set static
  // graph, the only possible dynamism is set of unused parameters changing
  // between iterations which is tracked by this flag.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool ddp_graph_static_{true};
  // Locally used parameter maps indicating if parameters are used locally
  // during the current iteration or no_sync session if no_sync is on.
  // Each map is a one-dim int32 tensor of number of parameters. These tensors
  // are marked in autograd_hook to indicate the corresponding param has been
  // used, and get allreduced in the end of backward step of current iteration
  // or no_sync session for figuring out the globally unused parameters.
  //
  // local_used_map_:     CPU tensor for bookkeeping locally used params
  // local_used_map_dev_: dev tensor for reducing globally unused params
  at::Tensor local_used_map_;
  at::Tensor local_used_map_dev_;
  // Indicate that reduction is done and D2H copy is done as well.
  bool local_used_map_reduced_;

  // Weak pointer to associated DDP logger.
  std::weak_ptr<c10d::Logger> logger_;
  // List of futures installed by Reducer::install_futures that should be
  // awaited at the end of backwards pass.
  c10::optional<c10::List<c10::intrusive_ptr<c10::ivalue::Future>>>
      installed_futures_{c10::nullopt};
  // Mixed precision parameter dtype for bucket type checking.
  c10::optional<c10::ScalarType> mixed_precision_param_dtype_{c10::nullopt};

  // Work handle for allreduce on local_used_map_
  c10::intrusive_ptr<c10d::Work> local_used_work_;

  // Copies/flags a dense gradient into its bucket view.
  void mark_variable_ready_dense(size_t variable_index);

  // Handles a variable whose bucket expects a sparse gradient.
  void mark_variable_ready_sparse(size_t variable_index);

  // Marks a variable ready and, once its bucket is complete, triggers
  // mark_bucket_ready.
  void mark_variable_ready(size_t variable_index);

  // Kicks off reduction for ready buckets, in bucket order.
  void mark_bucket_ready(size_t bucket_index);

  // Unflattens a reduced dense bucket back into the variables' grads.
  void finalize_bucket_dense(Bucket& bucket);

  // Finishes the backward pass: awaits outstanding work/futures and restores
  // per-iteration invariants.
  void finalize_backward();

  // Returns list of model parameters corresponding to the given bucket.
  // bucket_index is a key to cache after buckets are rebuilt, after which this
  // mapping never changes.
  std::vector<at::Tensor> get_variables_for_bucket(
      size_t bucket_index,
      const Bucket& bucket) const;

  // Asserts that the reduction for the previous iteration has finished before
  // rebuilding buckets or kicking off the next one.
  void ensure_prior_reduction_finished();

  // Broadcast rebuilt buckets from rank 0 to other ranks before initializing
  // the buckets
  void sync_bucket_indices(std::vector<std::vector<size_t>>& bucket_indices);

  // We'd like to use DistAutogradContext::GradCallback here but dist autograd
  // doesn't exist under Windows. So we just directly use the concrete type but
  // to preserve and enforce our original intent we do a static assert when dist
  // autograd is available.
  using GradCallback = std::function<bool(at::Tensor&)>;
#ifndef _WIN32
  static_assert(
      std::is_same<
          GradCallback,
          torch::distributed::autograd::DistAutogradContext::GradCallback>::
          value,
      "");
#endif
  void runGradCallbackForVariable(at::Tensor& variable, GradCallback&& cb);

  // This function is called inside `initialize_buckets()`. It initializes both
  // `bucket_views_in` and `bucket_views_out` with views for each variable's
  // gradient into the bucket's flattened `gradients` tensor. Views serve as
  // entry points to `copy_()` each grad's data in/out of the flattened
  // `gradients` tensor.
  void initialize_bucket_views(Bucket& bucket);

  // This function is called inside `finalize_backward`, it happens only if
  // DDP communication hook was registered to recreate just bucket_views_out
  // with the result of `future_work`.
  void populate_bucket_views_out(Bucket& bucket, at::Tensor& tensor);

  // If gradient_as_bucket_view_ is false, after allreduce buckets,
  // copy bucket results back to grads.
  void copy_bucket_to_grad(
      at::Tensor& variable,
      Reducer::Bucket& bucket,
      size_t intra_bucket_index,
      bool global_unused);
  // Check layout of grad and bucket_view before copying the grad to bucket.
  void check_grad_layout(const at::Tensor& grad, const at::Tensor& bucket_view);

  // A bucket contains [1..N] gradients to be reduced, where the gradients
  // have the same dtype and device.
  // Coalescing gradients together before reducing can result in lower overhead
  // and/or faster time to completion. Coalescing requires the constituent
  // gradients to have the same dtype and device, and the resulting flattened
  // tensor uses that common dtype and device. The flattened tensor is filled
  // as the corresponding gradients are computed (triggered by autograd hooks),
  // and the buckets are reduced in a predetermined order consistent across
  // processes.
  struct Bucket {
    // Gradients of the bucket flattened into a 1-dimensional tensor
    at::Tensor gradients;

    // Views into the `gradients` tensor for each individual gradient
    // Each view is created with layout (size and stride) matching the
    // gradient's expected layout (see the "Gradient Layout Contract" in
    // torch/csrc/autograd/functions/accumulate_grad.h).
    // `bucket_views_in[i].copy_(grad)` and `grad.copy_(bucket_views_out[i])`
    // provide convenient ways to copy gradient data in/out of `gradients`,
    // respectively.
    // We keep both `bucket_views_in` and `bucket_views_out` because
    // registering a DDP communication hook may re-initialize
    // `bucket_views_out` with the value of the hook's `future_work` but we
    // still need separate views into the bucket's original flattened gradient
    // to copy in gradient data.
    std::vector<at::Tensor> bucket_views_in;
    std::vector<at::Tensor> bucket_views_out;

    // Variables whose gradients are held in this bucket
    // We use refcounted tensors here so that we can easily unflatten the
    // bucket's flattened `gradients` tensor into the participating variables
    // after reduction has completed.
    std::vector<at::Tensor> variables;

    // Per-variable offset/length into the flattened `gradients` tensor and
    // the corresponding `GradBucket` instance for communication hooks
    std::vector<size_t> offsets;
    std::vector<size_t> lengths;

    // Per-variable sizes slicing into the bucket's `gradients` tensor
    std::vector<c10::IntArrayRef> sizes_vec;

    // Number of gradients left to be computed before the bucket is ready to
    // be reduced
    size_t pending;

    // Global indices of participating variables in the bucket
    std::vector<size_t> variable_indices;

    // Future work handle for DDP communication hook
    // If no hook is registered, a temporary vanilla allreduce hook is used.
    c10::intrusive_ptr<at::ivalue::Future> future_work;

    // If this bucket should expect a single sparse gradient
    // If `true`, then this implies that `bucket.variables.size() == 1`.
    bool expect_sparse_gradient = false;

    // Sparse indices tensor
    c10::optional<at::Tensor> sparse_tensor_indices = c10::nullopt;

    // TODO(@pietern)
    // Memory copies from gradient tensors into the bucket are potentially
    // done on different CUDA streams. We record an event for every copy
    // so that we can synchronize with them prior to kicking off the reduction.
    // std::vector<at::cuda::CUDAEvent> events;
  };

  std::vector<Bucket> buckets_;

  // A variable locator locates a particular variable in the reducer's buckets
  struct VariableLocator {
    // Index of the bucket containing the variable in the `buckets_` vector
    size_t bucket_index;
    // Index of the variable in the bucket, which may be used consistently
    // across `bucket_views_in`, `bucket_views_out`, `variables`, `offsets`,
    // `lengths`, `sizes_vec`, and `variable_indices` in `Bucket`
    size_t intra_bucket_index;

    VariableLocator() = default;

    VariableLocator(size_t bucket_index_, size_t intra_bucket_index_)
        : bucket_index(bucket_index_),
          intra_bucket_index(intra_bucket_index_) {}
  };

  // Map the index of a variable to its location in the bucket structure.
  std::vector<VariableLocator> variable_locators_;

  // track the number of iterations to synchronize grads in training so far.
  long num_iterations_;
  // track distinct iteration of backward call. This is distinct from
  // num_iterations_, for example in the case of multiple forward before
  // backward.
  long num_bwd_calls_;
  // whether the first autograd hook for a distinct backward pass has been
  // called.
  bool first_autograd_hook_called_;
  // track the number of buckets that have been ready for
  // communication calls like allReduce or communication hooks.
  int num_buckets_ready_;

  // Timing information.
  int64_t backward_compute_start_time_ = -1;
  std::unique_ptr<Timer> timer_;

  // We collect the relative timestamp of every gradient being ready
  // when executing autograd. This can be used to derive a timeline of
  // the point in time buckets were ready, or ideal bucket assignment/ordering.
  std::vector<int64_t> backward_stats_;

  bool should_collect_runtime_stats();
  void record_forward_compute_start_time();
  void record_backward_compute_start_time();
  void record_backward_compute_end_time();
  void record_backward_comm_start_time();
  void record_backward_comm_end_time();

  int get_ddp_runtime_logging_sample_rate();
  int ddp_runtime_logging_sample_rate_ = kDDPRuntimeLoggingSampleRate;

  bool is_multi_device_module_ = false;

  // Following variables are to help build dynamic bucket order
  bool has_rebuilt_bucket_;
  std::vector<at::Tensor> rebuilt_params_;
  std::vector<int64_t> rebuilt_param_indices_;
  const int64_t bucket_bytes_cap_;

#ifndef _WIN32
  struct RpcContext {
    using ContextPtr = torch::distributed::autograd::ContextPtr;
    // The shared_ptr is to hold the context instance.
    ContextPtr context_ptr_holder;
    std::atomic<ContextPtr::element_type*> context_ptr{nullptr};

    void set(ContextPtr&& new_context_ptr);
  };
  RpcContext rpc_context_;
#endif

  // A struct containing work handle and tensor for allreduce scheduled in
  // forward pass, if applicable.
  struct ForwardPassAllreduceWork {
    c10::intrusive_ptr<c10d::Work> workHandle;
    at::Tensor resultTensor;
    // whether we should divide by the initial world_size or the number of
    // remaining DDP ranks.
    bool useStaticWorldSize;
  };

  // Handle for the currently scheduled allreduce in the forward pass, if
  // applicable.
  ForwardPassAllreduceWork forwardPassWorkHandle_;

  // Division factor for reduction of gradients.
  // Equal to the process group size, with an exception of handling uneven
  // input.
  int div_factor_;

  bool static_graph_;

  // Key: size_t (index), Value: the number of times that a variable's
  // autograd_hook() should be triggered before marking this variable's grad as
  // ready for communication. Map will not change after 1st iteration.
  std::unordered_map<size_t, int> numGradHooksTriggeredMap_;
  // Key: size_t (index), Value: the number of times that a variable's
  // autograd_hook() are left to be triggered before marking this variable's
  // grad as ready for communication. Map will change after 1st iteration to
  // track a grad is ready for communication or not.
  std::unordered_map<size_t, int> numGradHooksTriggeredMapPerIteration_;

 private:
  // reset counting for buckets before backward starts
  void reset_bucket_counting();
  // search unused parameters before backward starts
  void search_unused_parameters(
      const std::vector<torch::autograd::Variable>& outputs);
  void set_divide_factor();
  // kick off all reduce for the ready bucket
  void all_reduce_bucket(Bucket& bucket);
  // kick off all reduce to local used map, it can help find global unused
  // parameters
  void all_reduce_local_used_map();
  // initialize locally used parameter maps
  void initialize_local_used_map();
  // get current cuda stream
  const c10::Stream get_current_stream();
  bool dynamic_graph_find_unused();
  bool static_graph_first_iteration();
  bool static_graph_after_first_iteration();

  // comm_hook_ is used to access the DDP communication hook if registered.
  std::unique_ptr<CommHookInterface> comm_hook_;

  // Sparse metadata contains the indices that will be used
  // when calling into sparse allreduce.
  // This is only used in the sparse allreduce collective calls
  std::unique_ptr<std::map<std::string, at::Tensor>> sparse_metadata_;

  // Debug level setting. It is parsed once when Reducer is constructed, and
  // remains the same across a single invocation of DDP training.
  DebugLevel ddp_debug_level_;
  // Mapping of variable index to fully qualified name of model to notify users
  // about errors when certain parameters do not get gradient.
  std::unordered_map<size_t, std::string> param_names_;
  // Variable indices stored sequentially in order of when the gradient is ready
  // for the current backwards pass.
  std::vector<int> grad_ready_order_indices_;
  // Bytes capacity of first bucket, can be configured by user
  int64_t first_bucket_bytes_cap_;
  // Per iteration set of parameter indices that have been marked ready.
  std::unordered_set<size_t> perIterationReadyParams_;
  // Retrieves parameter names that have not been marked as ready as part of
  // previous iteration.
  std::vector<std::string> getUnmarkedParamsForIteration();
  // Retrieves parameter indices that have not been marked as ready as part of
  // previous iteration.
  std::vector<size_t> getUnmarkedParamIndicesForIteration();
  // Raises appropriate error if mark_variable_ready is called on the same
  // variable twice, which is unexpected.
  void checkAndRaiseMarkedTwiceError(size_t curVariableIndex);
  // Retrieves parameter corresponding to the given VariableIndex.
  at::Tensor& get_param_from_index(size_t index);

  // Cached bucket index to model parameter mapping. Populated after buckets
  // are rebuilt after which this mapping is static.
  mutable std::unordered_map<size_t, std::vector<at::Tensor>>
      cached_variables_for_bucket_;

  // True once set_optimizer_in_backward() has been called.
  bool optim_in_backward_{false};
  // Logger reads the reducer's internals for DDP logging/telemetry.
  friend class Logger;
};
|
| 568 |
+
|
| 569 |
+
// This is equivalent to take_tensors but returns indices into the
// tensor list argument for bucket assignment. Also, it is aware
// of device placement and will not allow buckets to span devices.
// The index of tensors[i] assigned to bucket is tensor_indices[i];
// when tensor_indices is empty, the index of tensors[i] assigned to
// bucket is i itself.
// Returns the per-bucket index lists and the per-bucket size limits.
TORCH_API std::tuple<std::vector<std::vector<size_t>>, std::vector<size_t>>
compute_bucket_assignment_by_size(
    const std::vector<at::Tensor>& tensors,
    const std::vector<size_t>& bucket_size,
    const std::vector<bool>& expect_sparse_gradient = {},
    const std::vector<int64_t>& tensor_indices = {},
    const c10::optional<std::weak_ptr<c10d::Logger>>& logger = {});
|
| 582 |
+
|
| 583 |
+
// Verify models across all processes are the same as the model on rank 0 with
// respect to the number of params and matching dtype/size/layout.
// The optional logger, when provided, is used to surface mismatch errors.
TORCH_API void verify_params_across_processes(
    const c10::intrusive_ptr<c10d::ProcessGroup>& process_group,
    const std::vector<at::Tensor>& params,
    const c10::optional<std::weak_ptr<c10d::Logger>>& logger);
|
| 589 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/util/ApproximateClock.h>
|
| 3 |
+
#include <torch/csrc/autograd/profiler.h>
|
| 4 |
+
|
| 5 |
+
namespace c10d {
|
| 6 |
+
// Sentinel value meaning a timestamp has not been recorded yet.
constexpr int kUnsetTime = -1;

// Returns the current host time in nanoseconds (thin wrapper over
// c10::getTime()).
inline int64_t current_time_in_nanos() {
  return c10::getTime();
}
|
| 11 |
+
|
| 12 |
+
// Records host-side timestamps for the DDP iteration milestones (forward
// start, backward compute start/end, backward communication start/end).
// The default record() implementation is CPU-only; device-specific timers
// subclass this (registered via TimerRegistry below) and must implement
// measureDifference().
class TORCH_API Timer {
 private:
  // The timestamp of forward call start time in each iteration.
  int64_t forward_start_time = kUnsetTime;
  // The timestamp of backward computation start and end time in each
  // iteration.
  int64_t backward_compute_start_time = kUnsetTime;
  int64_t backward_compute_end_time = kUnsetTime;
  // The timestamp of first communication call start time in each iteration.
  int64_t backward_comm_start_time = kUnsetTime;
  // The timestamp of last communication call end time in each iteration.
  int64_t backward_comm_end_time = kUnsetTime;

 public:
  // The milestones a Timer can record; each maps 1:1 to one of the
  // member timestamps above (see getTimeRef).
  enum class Event {
    kForwardStart,
    kBackwardComputeStart,
    kBackwardComputeEnd,
    kBackwardCommStart,
    kBackwardCommEnd,
  };

  // Record the current event, i.e., mark it as having occurred now. Default
  // CPU implementation.
  virtual void record(Event event) {
    getTimeRef(event) = current_time_in_nanos();
  }

  // Return the difference between when two events occurred, in nanoseconds.
  // Or nullopt if one of them hasn't been recorded.
  virtual c10::optional<int64_t> measureDifference(Event start, Event end) = 0;

  virtual ~Timer() = default;

  // Return host-side timestamp, or nullopt if it has not yet been recorded.
  c10::optional<int64_t> getTimestamp(Event event) {
    auto time = getTimeRef(event);
    if (time == kUnsetTime) {
      return c10::nullopt;
    } else {
      return time;
    }
  }

  // Return host-side time member variable corresponding to the given event.
  // Asserts (internal error) on an out-of-range enum value.
  int64_t& getTimeRef(Event event) {
    switch (event) {
      case Event::kForwardStart:
        return forward_start_time;
      case Event::kBackwardComputeStart:
        return backward_compute_start_time;
      case Event::kBackwardComputeEnd:
        return backward_compute_end_time;
      case Event::kBackwardCommStart:
        return backward_comm_start_time;
      case Event::kBackwardCommEnd:
        return backward_comm_end_time;
      default:
        TORCH_INTERNAL_ASSERT(false);
    }
  }
};
|
| 74 |
+
|
| 75 |
+
TORCH_DECLARE_TYPED_REGISTRY(
|
| 76 |
+
TimerRegistry,
|
| 77 |
+
c10::DeviceType,
|
| 78 |
+
Timer,
|
| 79 |
+
std::unique_ptr,
|
| 80 |
+
c10::Device);
|
| 81 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <c10/util/Optional.h>
|
| 5 |
+
#include <c10/util/irange.h>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
namespace c10d {
|
| 9 |
+
const int kUnsetSeqNum = 0;
|
| 10 |
+
|
| 11 |
+
// Number of bits per byte, used by toVec/fromVec below to shift byte i into
// place. Declared `inline constexpr` rather than in an unnamed namespace:
// an unnamed namespace in a header gives the constant internal linkage, and
// odr-using it from the inline function templates below would make those
// templates mean something different in every translation unit (ODR hazard;
// see C++ Core Guidelines SF.21).
inline constexpr int kByteOffset = 8;
|
| 14 |
+
|
| 15 |
+
// Converts from int to char vec to write in store
|
| 16 |
+
template <typename T>
|
| 17 |
+
inline std::vector<T> toVec(uint64_t num, int numBytes) {
|
| 18 |
+
std::vector<T> values;
|
| 19 |
+
// Read off bytes from right to left, pushing them into
|
| 20 |
+
// char array.
|
| 21 |
+
for (const auto i : c10::irange(numBytes)) {
|
| 22 |
+
uint8_t x = (num >> (kByteOffset * i)) & 0xff;
|
| 23 |
+
values.push_back(static_cast<T>(x));
|
| 24 |
+
}
|
| 25 |
+
return values;
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
// Converts from char vec (such as from store read) to int.
|
| 29 |
+
template <typename T>
|
| 30 |
+
inline uint64_t fromVec(const std::vector<T>& values) {
|
| 31 |
+
uint64_t num = 0;
|
| 32 |
+
// Set each byte at the correct location on num
|
| 33 |
+
for (const auto i : c10::irange(values.size())) {
|
| 34 |
+
uint8_t x = static_cast<uint8_t>(values[i]);
|
| 35 |
+
num |= (static_cast<int64_t>(x) << (kByteOffset * i));
|
| 36 |
+
}
|
| 37 |
+
return num;
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
// A copyable, optionally-set sequence counter. num_ is a c10::optional, so
// the value can be "unset"; accessors that require a value throw when it is
// unset (see per-method comments). The mutable mutex suggests the
// out-of-line definitions synchronize access to num_, including from const
// members -- NOTE(review): confirm in the .cpp.
class TORCH_API SequenceNum {
 public:
  // Default constructor; whether the value starts set or unset is defined in
  // the .cpp (kUnsetSeqNum above is presumably related -- confirm).
  SequenceNum();
  // Constructs with the value initialized to `num`.
  explicit SequenceNum(const uint64_t num);
  // Retrieve num_. Will throw if not set.
  uint64_t get() const;
  // Increment num_. Will throw if not set.
  void increment();
  // Increment num_ and return the old value. Will throw if not set.
  uint64_t getAndIncrement();
  // Sets num_
  void set(const uint64_t num);
  // Returns true if this SequenceNum is properly initialized with a value, else
  // false.
  bool isSet() const;

  // Copy operations are user-declared (rather than defaulted) because the
  // std::mutex member is not copyable; definitions live in the .cpp.
  SequenceNum& operator=(const SequenceNum& other);

  SequenceNum(const SequenceNum& other);

 private:
  // The wrapped counter; c10::nullopt when unset.
  c10::optional<uint64_t> num_;
  // Guards num_; mutable so const accessors can lock it.
  mutable std::mutex lock_;
};
|
| 64 |
+
|
| 65 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Meta Platforms, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
|
| 9 |
+
#include <chrono>
|
| 10 |
+
#include <cstdint>
|
| 11 |
+
#include <memory>
|
| 12 |
+
#include <string>
|
| 13 |
+
|
| 14 |
+
#include <c10/macros/Macros.h>
|
| 15 |
+
#include <c10/util/Exception.h>
|
| 16 |
+
#include <torch/csrc/distributed/c10d/exception.h>
|
| 17 |
+
|
| 18 |
+
namespace c10d {
|
| 19 |
+
namespace detail {
|
| 20 |
+
|
| 21 |
+
// Fluent-builder options for Socket::listen()/Socket::connect(). Each setter
// returns *this so calls can be chained, e.g.
//   SocketOptions{}.prefer_ipv6(false).connect_timeout(std::chrono::seconds{5})
class SocketOptions {
 public:
  // If true, IPv6 addresses are tried preferentially (exact resolution
  // behavior is defined in the implementation). Defaults to true.
  SocketOptions& prefer_ipv6(bool value) noexcept {
    prefer_ipv6_ = value;

    return *this;
  }

  bool prefer_ipv6() const noexcept {
    return prefer_ipv6_;
  }

  // Maximum time to spend establishing a connection. Defaults to 30 seconds.
  SocketOptions& connect_timeout(std::chrono::seconds value) noexcept {
    connect_timeout_ = value;

    return *this;
  }

  std::chrono::seconds connect_timeout() const noexcept {
    return connect_timeout_;
  }

 private:
  bool prefer_ipv6_ = true;
  std::chrono::seconds connect_timeout_{30};
};
|
| 47 |
+
|
| 48 |
+
class SocketImpl;
|
| 49 |
+
|
| 50 |
+
// Move-only owning handle to a native socket, implemented with the pimpl
// idiom (SocketImpl). A default-constructed Socket holds no impl_.
class Socket {
 public:
  // This function initializes the underlying socket library and must be called
  // before any other socket function.
  static void initialize();

  // Creates a socket listening on `port` with the given options.
  static Socket listen(std::uint16_t port, const SocketOptions& opts = {});

  // Adopts an already-created listening file descriptor. `expected_port` is
  // presumably validated against the fd's bound port -- confirm in the
  // implementation.
  static Socket listenFromFd(int fd, std::uint16_t expected_port);

  // Connects to `host`:`port` with the given options (e.g. connect_timeout).
  static Socket connect(
      const std::string& host,
      std::uint16_t port,
      const SocketOptions& opts = {});

  // Constructs an empty socket (impl_ is null).
  Socket() noexcept = default;

  // Non-copyable: the underlying handle has a single owner.
  Socket(const Socket& other) = delete;

  Socket& operator=(const Socket& other) = delete;

  Socket(Socket&& other) noexcept;

  Socket& operator=(Socket&& other) noexcept;

  ~Socket();

  // Accepts an incoming connection and returns it as a new Socket.
  Socket accept() const;

  // Raw native handle (file descriptor on POSIX).
  int handle() const noexcept;

  // Port this socket is bound/connected to.
  std::uint16_t port() const;

  // Waits up to `timeout` for the socket to become readable. Return-value
  // semantics (presumably true when input is available) are defined in the
  // implementation -- NOTE(review): confirm.
  bool waitForInput(std::chrono::milliseconds timeout);

 private:
  // Takes ownership of an already-constructed implementation object.
  explicit Socket(std::unique_ptr<SocketImpl>&& impl) noexcept;

  std::unique_ptr<SocketImpl> impl_;
};
|
| 90 |
+
|
| 91 |
+
} // namespace detail
|
| 92 |
+
|
| 93 |
+
} // namespace c10d
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rref_impl.h>
|
| 4 |
+
#include <torch/csrc/python_headers.h>
|
| 5 |
+
#include <torch/csrc/utils/pybind.h>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace distributed {
|
| 9 |
+
namespace rpc {
|
| 10 |
+
|
| 11 |
+
enum RRefProxyType { RPC_SYNC, RPC_ASYNC, REMOTE };
|
| 12 |
+
|
| 13 |
+
// Python wrapper of an RRef shared_ptr that supports Python
|
| 14 |
+
// pickle and unpickle.
|
| 15 |
+
class PYBIND11_EXPORT PyRRef {
 public:
  // The first ctor can only be called while holding GIL. See its implementation
  // for more explanations.
  explicit PyRRef(const py::object& value, const py::object& type_hint);
  // Wraps an existing (C++-side) RRef.
  explicit PyRRef(c10::intrusive_ptr<RRef> rref);
  PyRRef(const PyRRef&) = default;
  ~PyRRef();

  // Ownership / status queries, forwarded to the wrapped RRef.
  bool isOwner() const;
  bool confirmedByOwner() const;
  WorkerInfo owner() const;
  std::string ownerName() const;
  // Fetches the referenced value to the local process, with an optional
  // timeout in seconds.
  py::object toHere(
      const float timeoutSeconds =
          torch::distributed::rpc::kUnsetRpcTimeout) const;
  py::object localValue() const;
  std::string str() const;
  // Python pickle support: serialize to / reconstruct from a py::tuple.
  py::tuple pickle() const;
  static PyRRef unpickle(const py::tuple& t);
  c10::IValue toIValue() const;
  // Future that is associated with the creation of this RRef on the remote end.
  // This is only used to get the future corresponding to the rref for profiling
  // use cases.
  c10::intrusive_ptr<JitFuture> getFuture() const;
  // Keeps track of the future responsible for profiling owner creation
  // acknowledgement
  c10::intrusive_ptr<JitFuture> getProfilingFuture() const;
  // Sets the future responsible for profiling owner creation acknowledgement.
  // This future is set from python to be a future that returns when profiling
  // callbacks have been run.
  void setProfilingFuture(c10::intrusive_ptr<JitFuture> profilingFuture);

  // create a proxy on this RRef, which can be used to launch RPC on the owner
  // of this RRef to run functions on the object referenced by this RRef.
  py::object createRRefProxy(
      const RRefProxyType& mode,
      float timeoutSeconds = rpc::kUnsetRpcTimeout) const;

  // get the type of the data object referenced by this RRef. Timeout argument
  // is only used in the first invocation of this function as an argument to the
  // RPC to the owner node of the RRef.
  py::object getRRefType(
      float timeout = rpc::kUnsetRpcTimeout,
      bool blocking = true);

  // Run the backward pass with the RRef as the root.
  void backward(int64_t autogradContextId, bool retainGraph);

  // Helper static function to run backward on a given rref.
  static void backward(
      int64_t autogradContextId,
      bool retainGraph,
      const c10::intrusive_ptr<RRef>& rref);

  // Specialization of backward if the rref is an OwnerRRef.
  static void backwardOwnerRRef(
      int64_t autogradContextId,
      bool retainGraph,
      IValue value);

 private:
  // The wrapped C++ RRef.
  c10::intrusive_ptr<RRef> rref_;
  // Present only after setProfilingFuture() has been called.
  c10::optional<c10::intrusive_ptr<JitFuture>> profilingFuture_;
  // Optional cached Python type object; presumably populated lazily via
  // getRRefType -- NOTE(review): confirm in the .cpp.
  c10::optional<py::object> type_;
};
|
| 81 |
+
|
| 82 |
+
} // namespace rpc
|
| 83 |
+
} // namespace distributed
|
| 84 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_call.h
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
|
| 6 |
+
namespace torch {
|
| 7 |
+
namespace distributed {
|
| 8 |
+
namespace rpc {
|
| 9 |
+
|
| 10 |
+
// RPC call representing calling a Python function over RPC.
|
| 11 |
+
class TORCH_API PythonCall final : public RpcCommandBase {
 public:
  // Takes ownership of the serialized Python callable/arguments.
  PythonCall(SerializedPyObj&& serializedPyObj, bool isAsyncExecution);

  // Serializes this call into a Message. Rvalue-qualified: consumes *this.
  c10::intrusive_ptr<Message> toMessageImpl() && override;

  // Reconstructs a PythonCall from a received Message (inverse of the above).
  static std::unique_ptr<PythonCall> fromMessage(const Message& message);

  // Read-only access to the serialized Python payload.
  const SerializedPyObj& serializedPyObj() const;

  // Whether the call was flagged for asynchronous execution on the callee
  // side (exact semantics are defined by the RPC layer).
  inline bool isAsyncExecution() const {
    return isAsyncExecution_;
  }

 private:
  SerializedPyObj serializedPyObj_;
  const bool isAsyncExecution_;
};
|
| 29 |
+
|
| 30 |
+
} // namespace rpc
|
| 31 |
+
} // namespace distributed
|
| 32 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_remote_call.h
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 6 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 7 |
+
#include <vector>
|
| 8 |
+
|
| 9 |
+
namespace torch {
|
| 10 |
+
namespace distributed {
|
| 11 |
+
namespace rpc {
|
| 12 |
+
|
| 13 |
+
// RPC command representing a remote (RRef-creating) call of a Python
// function; carries the serialized callable plus the RRef/fork ids that
// identify the resulting remote reference.
class TORCH_API PythonRemoteCall : public RpcCommandBase {
 public:
  // Takes ownership of the serialized Python payload; retRRefId/retForkId are
  // IValues identifying the RRef to be created on the callee (their exact
  // encoding is defined by the RRef protocol -- NOTE(review): confirm).
  PythonRemoteCall(
      SerializedPyObj&& serializedPyObj,
      at::IValue retRRefId,
      at::IValue retForkId,
      const bool isAsyncExecution);

  // Read-only access to the serialized Python payload.
  inline const SerializedPyObj& serializedPyObj() const {
    return serializedPyObj_;
  }

  inline const at::IValue& retRRefId() const {
    return retRRefId_;
  }

  inline const at::IValue& retForkId() const {
    return retForkId_;
  }

  // Whether the call was flagged for asynchronous execution on the callee.
  inline bool isAsyncExecution() const {
    return isAsyncExecution_;
  }

  // Message (de)serialization; toMessageImpl is rvalue-qualified and
  // consumes *this.
  c10::intrusive_ptr<Message> toMessageImpl() && override;
  static std::unique_ptr<PythonRemoteCall> fromMessage(const Message& message);

 private:
  SerializedPyObj serializedPyObj_;
  const at::IValue retRRefId_;
  const at::IValue retForkId_;
  const bool isAsyncExecution_;
};
|
| 46 |
+
|
| 47 |
+
} // namespace rpc
|
| 48 |
+
} // namespace distributed
|
| 49 |
+
} // namespace torch
|