Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_cluster.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_cuts.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_cycles.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_dag.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_matching.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_smetric.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_swap.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_tournament.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-310.pyc +0 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/__init__.py +6 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/coding.py +413 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/decomposition.py +88 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/mst.py +1284 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/operations.py +105 -0
- mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/recognition.py +273 -0
- pllava/lib/python3.10/site-packages/numpy/core/__init__.pyi +2 -0
- pllava/lib/python3.10/site-packages/numpy/core/_add_newdocs.py +0 -0
- pllava/lib/python3.10/site-packages/numpy/core/_asarray.pyi +42 -0
- pllava/lib/python3.10/site-packages/numpy/core/_dtype_ctypes.py +117 -0
- pllava/lib/python3.10/site-packages/numpy/core/_exceptions.py +172 -0
- pllava/lib/python3.10/site-packages/numpy/core/_operand_flag_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- pllava/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so +3 -0
- pllava/lib/python3.10/site-packages/numpy/core/_type_aliases.pyi +13 -0
- pllava/lib/python3.10/site-packages/numpy/core/_ufunc_config.py +466 -0
- pllava/lib/python3.10/site-packages/numpy/core/_umath_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- pllava/lib/python3.10/site-packages/numpy/core/defchararray.py +2914 -0
- pllava/lib/python3.10/site-packages/numpy/core/einsumfunc.py +1443 -0
- pllava/lib/python3.10/site-packages/numpy/core/einsumfunc.pyi +187 -0
- pllava/lib/python3.10/site-packages/numpy/core/fromnumeric.py +0 -0
- pllava/lib/python3.10/site-packages/numpy/core/fromnumeric.pyi +1060 -0
- pllava/lib/python3.10/site-packages/numpy/core/function_base.py +551 -0
- pllava/lib/python3.10/site-packages/numpy/core/function_base.pyi +187 -0
- pllava/lib/python3.10/site-packages/numpy/core/getlimits.pyi +6 -0
- pllava/lib/python3.10/site-packages/numpy/core/numeric.pyi +660 -0
- pllava/lib/python3.10/site-packages/numpy/core/numerictypes.pyi +156 -0
- pllava/lib/python3.10/site-packages/numpy/core/records.pyi +234 -0
- pllava/lib/python3.10/site-packages/numpy/core/shape_base.pyi +123 -0
- pllava/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_scalar_ctors.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_simd_module.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/numpy/core/tests/_locales.py +74 -0
- pllava/lib/python3.10/site-packages/numpy/core/tests/test_api.py +615 -0
- pllava/lib/python3.10/site-packages/numpy/core/tests/test_array_coercion.py +898 -0
- pllava/lib/python3.10/site-packages/numpy/core/tests/test_arrayprint.py +1047 -0
- pllava/lib/python3.10/site-packages/numpy/core/tests/test_custom_dtypes.py +253 -0
.gitattributes
CHANGED
|
@@ -697,3 +697,4 @@ mplug_owl2/lib/python3.10/site-packages/pillow.libs/liblcms2-525547ec.so.2.0.16
|
|
| 697 |
mplug_owl2/lib/python3.10/site-packages/pillow.libs/libtiff-a92b430c.so.6.0.2 filter=lfs diff=lfs merge=lfs -text
|
| 698 |
mplug_owl2/lib/python3.10/site-packages/pillow.libs/libharfbuzz-07d0ad17.so.0.61010.0 filter=lfs diff=lfs merge=lfs -text
|
| 699 |
mplug_owl2/lib/python3.10/site-packages/idna/__pycache__/uts46data.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 697 |
mplug_owl2/lib/python3.10/site-packages/pillow.libs/libtiff-a92b430c.so.6.0.2 filter=lfs diff=lfs merge=lfs -text
|
| 698 |
mplug_owl2/lib/python3.10/site-packages/pillow.libs/libharfbuzz-07d0ad17.so.0.61010.0 filter=lfs diff=lfs merge=lfs -text
|
| 699 |
mplug_owl2/lib/python3.10/site-packages/idna/__pycache__/uts46data.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 700 |
+
pllava/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (181 Bytes). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-310.pyc
ADDED
|
Binary file (4.4 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_cluster.cpython-310.pyc
ADDED
|
Binary file (15.3 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_cuts.cpython-310.pyc
ADDED
|
Binary file (6.27 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_cycles.cpython-310.pyc
ADDED
|
Binary file (35.8 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_dag.cpython-310.pyc
ADDED
|
Binary file (29.8 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-310.pyc
ADDED
|
Binary file (831 Bytes). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-310.pyc
ADDED
|
Binary file (16 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_matching.cpython-310.pyc
ADDED
|
Binary file (18 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_smetric.cpython-310.pyc
ADDED
|
Binary file (446 Bytes). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-310.pyc
ADDED
|
Binary file (4.22 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_swap.cpython-310.pyc
ADDED
|
Binary file (8.5 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_tournament.cpython-310.pyc
ADDED
|
Binary file (6.14 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-310.pyc
ADDED
|
Binary file (3.9 kB). View file
|
|
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .branchings import *
|
| 2 |
+
from .coding import *
|
| 3 |
+
from .mst import *
|
| 4 |
+
from .recognition import *
|
| 5 |
+
from .operations import *
|
| 6 |
+
from .decomposition import *
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/coding.py
ADDED
|
@@ -0,0 +1,413 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Functions for encoding and decoding trees.
|
| 2 |
+
|
| 3 |
+
Since a tree is a highly restricted form of graph, it can be represented
|
| 4 |
+
concisely in several ways. This module includes functions for encoding
|
| 5 |
+
and decoding trees in the form of nested tuples and Prüfer
|
| 6 |
+
sequences. The former requires a rooted tree, whereas the latter can be
|
| 7 |
+
applied to unrooted trees. Furthermore, there is a bijection from Prüfer
|
| 8 |
+
sequences to labeled trees.
|
| 9 |
+
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from collections import Counter
|
| 13 |
+
from itertools import chain
|
| 14 |
+
|
| 15 |
+
import networkx as nx
|
| 16 |
+
from networkx.utils import not_implemented_for
|
| 17 |
+
|
| 18 |
+
__all__ = [
|
| 19 |
+
"from_nested_tuple",
|
| 20 |
+
"from_prufer_sequence",
|
| 21 |
+
"NotATree",
|
| 22 |
+
"to_nested_tuple",
|
| 23 |
+
"to_prufer_sequence",
|
| 24 |
+
]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class NotATree(nx.NetworkXException):
|
| 28 |
+
"""Raised when a function expects a tree (that is, a connected
|
| 29 |
+
undirected graph with no cycles) but gets a non-tree graph as input
|
| 30 |
+
instead.
|
| 31 |
+
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@not_implemented_for("directed")
|
| 36 |
+
@nx._dispatchable(graphs="T")
|
| 37 |
+
def to_nested_tuple(T, root, canonical_form=False):
|
| 38 |
+
"""Returns a nested tuple representation of the given tree.
|
| 39 |
+
|
| 40 |
+
The nested tuple representation of a tree is defined
|
| 41 |
+
recursively. The tree with one node and no edges is represented by
|
| 42 |
+
the empty tuple, ``()``. A tree with ``k`` subtrees is represented
|
| 43 |
+
by a tuple of length ``k`` in which each element is the nested tuple
|
| 44 |
+
representation of a subtree.
|
| 45 |
+
|
| 46 |
+
Parameters
|
| 47 |
+
----------
|
| 48 |
+
T : NetworkX graph
|
| 49 |
+
An undirected graph object representing a tree.
|
| 50 |
+
|
| 51 |
+
root : node
|
| 52 |
+
The node in ``T`` to interpret as the root of the tree.
|
| 53 |
+
|
| 54 |
+
canonical_form : bool
|
| 55 |
+
If ``True``, each tuple is sorted so that the function returns
|
| 56 |
+
a canonical form for rooted trees. This means "lighter" subtrees
|
| 57 |
+
will appear as nested tuples before "heavier" subtrees. In this
|
| 58 |
+
way, each isomorphic rooted tree has the same nested tuple
|
| 59 |
+
representation.
|
| 60 |
+
|
| 61 |
+
Returns
|
| 62 |
+
-------
|
| 63 |
+
tuple
|
| 64 |
+
A nested tuple representation of the tree.
|
| 65 |
+
|
| 66 |
+
Notes
|
| 67 |
+
-----
|
| 68 |
+
This function is *not* the inverse of :func:`from_nested_tuple`; the
|
| 69 |
+
only guarantee is that the rooted trees are isomorphic.
|
| 70 |
+
|
| 71 |
+
See also
|
| 72 |
+
--------
|
| 73 |
+
from_nested_tuple
|
| 74 |
+
to_prufer_sequence
|
| 75 |
+
|
| 76 |
+
Examples
|
| 77 |
+
--------
|
| 78 |
+
The tree need not be a balanced binary tree::
|
| 79 |
+
|
| 80 |
+
>>> T = nx.Graph()
|
| 81 |
+
>>> T.add_edges_from([(0, 1), (0, 2), (0, 3)])
|
| 82 |
+
>>> T.add_edges_from([(1, 4), (1, 5)])
|
| 83 |
+
>>> T.add_edges_from([(3, 6), (3, 7)])
|
| 84 |
+
>>> root = 0
|
| 85 |
+
>>> nx.to_nested_tuple(T, root)
|
| 86 |
+
(((), ()), (), ((), ()))
|
| 87 |
+
|
| 88 |
+
Continuing the above example, if ``canonical_form`` is ``True``, the
|
| 89 |
+
nested tuples will be sorted::
|
| 90 |
+
|
| 91 |
+
>>> nx.to_nested_tuple(T, root, canonical_form=True)
|
| 92 |
+
((), ((), ()), ((), ()))
|
| 93 |
+
|
| 94 |
+
Even the path graph can be interpreted as a tree::
|
| 95 |
+
|
| 96 |
+
>>> T = nx.path_graph(4)
|
| 97 |
+
>>> root = 0
|
| 98 |
+
>>> nx.to_nested_tuple(T, root)
|
| 99 |
+
((((),),),)
|
| 100 |
+
|
| 101 |
+
"""
|
| 102 |
+
|
| 103 |
+
def _make_tuple(T, root, _parent):
|
| 104 |
+
"""Recursively compute the nested tuple representation of the
|
| 105 |
+
given rooted tree.
|
| 106 |
+
|
| 107 |
+
``_parent`` is the parent node of ``root`` in the supertree in
|
| 108 |
+
which ``T`` is a subtree, or ``None`` if ``root`` is the root of
|
| 109 |
+
the supertree. This argument is used to determine which
|
| 110 |
+
neighbors of ``root`` are children and which is the parent.
|
| 111 |
+
|
| 112 |
+
"""
|
| 113 |
+
# Get the neighbors of `root` that are not the parent node. We
|
| 114 |
+
# are guaranteed that `root` is always in `T` by construction.
|
| 115 |
+
children = set(T[root]) - {_parent}
|
| 116 |
+
if len(children) == 0:
|
| 117 |
+
return ()
|
| 118 |
+
nested = (_make_tuple(T, v, root) for v in children)
|
| 119 |
+
if canonical_form:
|
| 120 |
+
nested = sorted(nested)
|
| 121 |
+
return tuple(nested)
|
| 122 |
+
|
| 123 |
+
# Do some sanity checks on the input.
|
| 124 |
+
if not nx.is_tree(T):
|
| 125 |
+
raise nx.NotATree("provided graph is not a tree")
|
| 126 |
+
if root not in T:
|
| 127 |
+
raise nx.NodeNotFound(f"Graph {T} contains no node {root}")
|
| 128 |
+
|
| 129 |
+
return _make_tuple(T, root, None)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
@nx._dispatchable(graphs=None, returns_graph=True)
|
| 133 |
+
def from_nested_tuple(sequence, sensible_relabeling=False):
|
| 134 |
+
"""Returns the rooted tree corresponding to the given nested tuple.
|
| 135 |
+
|
| 136 |
+
The nested tuple representation of a tree is defined
|
| 137 |
+
recursively. The tree with one node and no edges is represented by
|
| 138 |
+
the empty tuple, ``()``. A tree with ``k`` subtrees is represented
|
| 139 |
+
by a tuple of length ``k`` in which each element is the nested tuple
|
| 140 |
+
representation of a subtree.
|
| 141 |
+
|
| 142 |
+
Parameters
|
| 143 |
+
----------
|
| 144 |
+
sequence : tuple
|
| 145 |
+
A nested tuple representing a rooted tree.
|
| 146 |
+
|
| 147 |
+
sensible_relabeling : bool
|
| 148 |
+
Whether to relabel the nodes of the tree so that nodes are
|
| 149 |
+
labeled in increasing order according to their breadth-first
|
| 150 |
+
search order from the root node.
|
| 151 |
+
|
| 152 |
+
Returns
|
| 153 |
+
-------
|
| 154 |
+
NetworkX graph
|
| 155 |
+
The tree corresponding to the given nested tuple, whose root
|
| 156 |
+
node is node 0. If ``sensible_labeling`` is ``True``, nodes will
|
| 157 |
+
be labeled in breadth-first search order starting from the root
|
| 158 |
+
node.
|
| 159 |
+
|
| 160 |
+
Notes
|
| 161 |
+
-----
|
| 162 |
+
This function is *not* the inverse of :func:`to_nested_tuple`; the
|
| 163 |
+
only guarantee is that the rooted trees are isomorphic.
|
| 164 |
+
|
| 165 |
+
See also
|
| 166 |
+
--------
|
| 167 |
+
to_nested_tuple
|
| 168 |
+
from_prufer_sequence
|
| 169 |
+
|
| 170 |
+
Examples
|
| 171 |
+
--------
|
| 172 |
+
Sensible relabeling ensures that the nodes are labeled from the root
|
| 173 |
+
starting at 0::
|
| 174 |
+
|
| 175 |
+
>>> balanced = (((), ()), ((), ()))
|
| 176 |
+
>>> T = nx.from_nested_tuple(balanced, sensible_relabeling=True)
|
| 177 |
+
>>> edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]
|
| 178 |
+
>>> all((u, v) in T.edges() or (v, u) in T.edges() for (u, v) in edges)
|
| 179 |
+
True
|
| 180 |
+
|
| 181 |
+
"""
|
| 182 |
+
|
| 183 |
+
def _make_tree(sequence):
|
| 184 |
+
"""Recursively creates a tree from the given sequence of nested
|
| 185 |
+
tuples.
|
| 186 |
+
|
| 187 |
+
This function employs the :func:`~networkx.tree.join` function
|
| 188 |
+
to recursively join subtrees into a larger tree.
|
| 189 |
+
|
| 190 |
+
"""
|
| 191 |
+
# The empty sequence represents the empty tree, which is the
|
| 192 |
+
# (unique) graph with a single node. We mark the single node
|
| 193 |
+
# with an attribute that indicates that it is the root of the
|
| 194 |
+
# graph.
|
| 195 |
+
if len(sequence) == 0:
|
| 196 |
+
return nx.empty_graph(1)
|
| 197 |
+
# For a nonempty sequence, get the subtrees for each child
|
| 198 |
+
# sequence and join all the subtrees at their roots. After
|
| 199 |
+
# joining the subtrees, the root is node 0.
|
| 200 |
+
return nx.tree.join_trees([(_make_tree(child), 0) for child in sequence])
|
| 201 |
+
|
| 202 |
+
# Make the tree and remove the `is_root` node attribute added by the
|
| 203 |
+
# helper function.
|
| 204 |
+
T = _make_tree(sequence)
|
| 205 |
+
if sensible_relabeling:
|
| 206 |
+
# Relabel the nodes according to their breadth-first search
|
| 207 |
+
# order, starting from the root node (that is, the node 0).
|
| 208 |
+
bfs_nodes = chain([0], (v for u, v in nx.bfs_edges(T, 0)))
|
| 209 |
+
labels = {v: i for i, v in enumerate(bfs_nodes)}
|
| 210 |
+
# We would like to use `copy=False`, but `relabel_nodes` doesn't
|
| 211 |
+
# allow a relabel mapping that can't be topologically sorted.
|
| 212 |
+
T = nx.relabel_nodes(T, labels)
|
| 213 |
+
return T
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
@not_implemented_for("directed")
|
| 217 |
+
@nx._dispatchable(graphs="T")
|
| 218 |
+
def to_prufer_sequence(T):
|
| 219 |
+
r"""Returns the Prüfer sequence of the given tree.
|
| 220 |
+
|
| 221 |
+
A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and
|
| 222 |
+
*n* - 1, inclusive. The tree corresponding to a given Prüfer
|
| 223 |
+
sequence can be recovered by repeatedly joining a node in the
|
| 224 |
+
sequence with a node with the smallest potential degree according to
|
| 225 |
+
the sequence.
|
| 226 |
+
|
| 227 |
+
Parameters
|
| 228 |
+
----------
|
| 229 |
+
T : NetworkX graph
|
| 230 |
+
An undirected graph object representing a tree.
|
| 231 |
+
|
| 232 |
+
Returns
|
| 233 |
+
-------
|
| 234 |
+
list
|
| 235 |
+
The Prüfer sequence of the given tree.
|
| 236 |
+
|
| 237 |
+
Raises
|
| 238 |
+
------
|
| 239 |
+
NetworkXPointlessConcept
|
| 240 |
+
If the number of nodes in `T` is less than two.
|
| 241 |
+
|
| 242 |
+
NotATree
|
| 243 |
+
If `T` is not a tree.
|
| 244 |
+
|
| 245 |
+
KeyError
|
| 246 |
+
If the set of nodes in `T` is not {0, …, *n* - 1}.
|
| 247 |
+
|
| 248 |
+
Notes
|
| 249 |
+
-----
|
| 250 |
+
There is a bijection from labeled trees to Prüfer sequences. This
|
| 251 |
+
function is the inverse of the :func:`from_prufer_sequence`
|
| 252 |
+
function.
|
| 253 |
+
|
| 254 |
+
Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead
|
| 255 |
+
of from 0 to *n* - 1. This function requires nodes to be labeled in
|
| 256 |
+
the latter form. You can use :func:`~networkx.relabel_nodes` to
|
| 257 |
+
relabel the nodes of your tree to the appropriate format.
|
| 258 |
+
|
| 259 |
+
This implementation is from [1]_ and has a running time of
|
| 260 |
+
$O(n)$.
|
| 261 |
+
|
| 262 |
+
See also
|
| 263 |
+
--------
|
| 264 |
+
to_nested_tuple
|
| 265 |
+
from_prufer_sequence
|
| 266 |
+
|
| 267 |
+
References
|
| 268 |
+
----------
|
| 269 |
+
.. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu.
|
| 270 |
+
"An optimal algorithm for Prufer codes."
|
| 271 |
+
*Journal of Software Engineering and Applications* 2.02 (2009): 111.
|
| 272 |
+
<https://doi.org/10.4236/jsea.2009.22016>
|
| 273 |
+
|
| 274 |
+
Examples
|
| 275 |
+
--------
|
| 276 |
+
There is a bijection between Prüfer sequences and labeled trees, so
|
| 277 |
+
this function is the inverse of the :func:`from_prufer_sequence`
|
| 278 |
+
function:
|
| 279 |
+
|
| 280 |
+
>>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]
|
| 281 |
+
>>> tree = nx.Graph(edges)
|
| 282 |
+
>>> sequence = nx.to_prufer_sequence(tree)
|
| 283 |
+
>>> sequence
|
| 284 |
+
[3, 3, 3, 4]
|
| 285 |
+
>>> tree2 = nx.from_prufer_sequence(sequence)
|
| 286 |
+
>>> list(tree2.edges()) == edges
|
| 287 |
+
True
|
| 288 |
+
|
| 289 |
+
"""
|
| 290 |
+
# Perform some sanity checks on the input.
|
| 291 |
+
n = len(T)
|
| 292 |
+
if n < 2:
|
| 293 |
+
msg = "Prüfer sequence undefined for trees with fewer than two nodes"
|
| 294 |
+
raise nx.NetworkXPointlessConcept(msg)
|
| 295 |
+
if not nx.is_tree(T):
|
| 296 |
+
raise nx.NotATree("provided graph is not a tree")
|
| 297 |
+
if set(T) != set(range(n)):
|
| 298 |
+
raise KeyError("tree must have node labels {0, ..., n - 1}")
|
| 299 |
+
|
| 300 |
+
degree = dict(T.degree())
|
| 301 |
+
|
| 302 |
+
def parents(u):
|
| 303 |
+
return next(v for v in T[u] if degree[v] > 1)
|
| 304 |
+
|
| 305 |
+
index = u = next(k for k in range(n) if degree[k] == 1)
|
| 306 |
+
result = []
|
| 307 |
+
for i in range(n - 2):
|
| 308 |
+
v = parents(u)
|
| 309 |
+
result.append(v)
|
| 310 |
+
degree[v] -= 1
|
| 311 |
+
if v < index and degree[v] == 1:
|
| 312 |
+
u = v
|
| 313 |
+
else:
|
| 314 |
+
index = u = next(k for k in range(index + 1, n) if degree[k] == 1)
|
| 315 |
+
return result
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
@nx._dispatchable(graphs=None, returns_graph=True)
|
| 319 |
+
def from_prufer_sequence(sequence):
|
| 320 |
+
r"""Returns the tree corresponding to the given Prüfer sequence.
|
| 321 |
+
|
| 322 |
+
A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and
|
| 323 |
+
*n* - 1, inclusive. The tree corresponding to a given Prüfer
|
| 324 |
+
sequence can be recovered by repeatedly joining a node in the
|
| 325 |
+
sequence with a node with the smallest potential degree according to
|
| 326 |
+
the sequence.
|
| 327 |
+
|
| 328 |
+
Parameters
|
| 329 |
+
----------
|
| 330 |
+
sequence : list
|
| 331 |
+
A Prüfer sequence, which is a list of *n* - 2 integers between
|
| 332 |
+
zero and *n* - 1, inclusive.
|
| 333 |
+
|
| 334 |
+
Returns
|
| 335 |
+
-------
|
| 336 |
+
NetworkX graph
|
| 337 |
+
The tree corresponding to the given Prüfer sequence.
|
| 338 |
+
|
| 339 |
+
Raises
|
| 340 |
+
------
|
| 341 |
+
NetworkXError
|
| 342 |
+
If the Prüfer sequence is not valid.
|
| 343 |
+
|
| 344 |
+
Notes
|
| 345 |
+
-----
|
| 346 |
+
There is a bijection from labeled trees to Prüfer sequences. This
|
| 347 |
+
function is the inverse of the :func:`from_prufer_sequence` function.
|
| 348 |
+
|
| 349 |
+
Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead
|
| 350 |
+
of from 0 to *n* - 1. This function requires nodes to be labeled in
|
| 351 |
+
the latter form. You can use :func:`networkx.relabel_nodes` to
|
| 352 |
+
relabel the nodes of your tree to the appropriate format.
|
| 353 |
+
|
| 354 |
+
This implementation is from [1]_ and has a running time of
|
| 355 |
+
$O(n)$.
|
| 356 |
+
|
| 357 |
+
References
|
| 358 |
+
----------
|
| 359 |
+
.. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu.
|
| 360 |
+
"An optimal algorithm for Prufer codes."
|
| 361 |
+
*Journal of Software Engineering and Applications* 2.02 (2009): 111.
|
| 362 |
+
<https://doi.org/10.4236/jsea.2009.22016>
|
| 363 |
+
|
| 364 |
+
See also
|
| 365 |
+
--------
|
| 366 |
+
from_nested_tuple
|
| 367 |
+
to_prufer_sequence
|
| 368 |
+
|
| 369 |
+
Examples
|
| 370 |
+
--------
|
| 371 |
+
There is a bijection between Prüfer sequences and labeled trees, so
|
| 372 |
+
this function is the inverse of the :func:`to_prufer_sequence`
|
| 373 |
+
function:
|
| 374 |
+
|
| 375 |
+
>>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]
|
| 376 |
+
>>> tree = nx.Graph(edges)
|
| 377 |
+
>>> sequence = nx.to_prufer_sequence(tree)
|
| 378 |
+
>>> sequence
|
| 379 |
+
[3, 3, 3, 4]
|
| 380 |
+
>>> tree2 = nx.from_prufer_sequence(sequence)
|
| 381 |
+
>>> list(tree2.edges()) == edges
|
| 382 |
+
True
|
| 383 |
+
|
| 384 |
+
"""
|
| 385 |
+
n = len(sequence) + 2
|
| 386 |
+
# `degree` stores the remaining degree (plus one) for each node. The
|
| 387 |
+
# degree of a node in the decoded tree is one more than the number
|
| 388 |
+
# of times it appears in the code.
|
| 389 |
+
degree = Counter(chain(sequence, range(n)))
|
| 390 |
+
T = nx.empty_graph(n)
|
| 391 |
+
# `not_orphaned` is the set of nodes that have a parent in the
|
| 392 |
+
# tree. After the loop, there should be exactly two nodes that are
|
| 393 |
+
# not in this set.
|
| 394 |
+
not_orphaned = set()
|
| 395 |
+
index = u = next(k for k in range(n) if degree[k] == 1)
|
| 396 |
+
for v in sequence:
|
| 397 |
+
# check the validity of the prufer sequence
|
| 398 |
+
if v < 0 or v > n - 1:
|
| 399 |
+
raise nx.NetworkXError(
|
| 400 |
+
f"Invalid Prufer sequence: Values must be between 0 and {n-1}, got {v}"
|
| 401 |
+
)
|
| 402 |
+
T.add_edge(u, v)
|
| 403 |
+
not_orphaned.add(u)
|
| 404 |
+
degree[v] -= 1
|
| 405 |
+
if v < index and degree[v] == 1:
|
| 406 |
+
u = v
|
| 407 |
+
else:
|
| 408 |
+
index = u = next(k for k in range(index + 1, n) if degree[k] == 1)
|
| 409 |
+
# At this point, there must be exactly two orphaned nodes; join them.
|
| 410 |
+
orphans = set(T) - not_orphaned
|
| 411 |
+
u, v = orphans
|
| 412 |
+
T.add_edge(u, v)
|
| 413 |
+
return T
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/decomposition.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r"""Function for computing a junction tree of a graph."""
|
| 2 |
+
|
| 3 |
+
from itertools import combinations
|
| 4 |
+
|
| 5 |
+
import networkx as nx
|
| 6 |
+
from networkx.algorithms import chordal_graph_cliques, complete_to_chordal_graph, moral
|
| 7 |
+
from networkx.utils import not_implemented_for
|
| 8 |
+
|
| 9 |
+
__all__ = ["junction_tree"]
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@not_implemented_for("multigraph")
|
| 13 |
+
@nx._dispatchable(returns_graph=True)
|
| 14 |
+
def junction_tree(G):
|
| 15 |
+
r"""Returns a junction tree of a given graph.
|
| 16 |
+
|
| 17 |
+
A junction tree (or clique tree) is constructed from a (un)directed graph G.
|
| 18 |
+
The tree is constructed based on a moralized and triangulated version of G.
|
| 19 |
+
The tree's nodes consist of maximal cliques and sepsets of the revised graph.
|
| 20 |
+
The sepset of two cliques is the intersection of the nodes of these cliques,
|
| 21 |
+
e.g. the sepset of (A,B,C) and (A,C,E,F) is (A,C). These nodes are often called
|
| 22 |
+
"variables" in this literature. The tree is bipartite with each sepset
|
| 23 |
+
connected to its two cliques.
|
| 24 |
+
|
| 25 |
+
Junction Trees are not unique as the order of clique consideration determines
|
| 26 |
+
which sepsets are included.
|
| 27 |
+
|
| 28 |
+
The junction tree algorithm consists of five steps [1]_:
|
| 29 |
+
|
| 30 |
+
1. Moralize the graph
|
| 31 |
+
2. Triangulate the graph
|
| 32 |
+
3. Find maximal cliques
|
| 33 |
+
4. Build the tree from cliques, connecting cliques with shared
|
| 34 |
+
nodes, set edge-weight to number of shared variables
|
| 35 |
+
5. Find maximum spanning tree
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
Parameters
|
| 39 |
+
----------
|
| 40 |
+
G : networkx.Graph
|
| 41 |
+
Directed or undirected graph.
|
| 42 |
+
|
| 43 |
+
Returns
|
| 44 |
+
-------
|
| 45 |
+
junction_tree : networkx.Graph
|
| 46 |
+
The corresponding junction tree of `G`.
|
| 47 |
+
|
| 48 |
+
Raises
|
| 49 |
+
------
|
| 50 |
+
NetworkXNotImplemented
|
| 51 |
+
Raised if `G` is an instance of `MultiGraph` or `MultiDiGraph`.
|
| 52 |
+
|
| 53 |
+
References
|
| 54 |
+
----------
|
| 55 |
+
.. [1] Junction tree algorithm:
|
| 56 |
+
https://en.wikipedia.org/wiki/Junction_tree_algorithm
|
| 57 |
+
|
| 58 |
+
.. [2] Finn V. Jensen and Frank Jensen. 1994. Optimal
|
| 59 |
+
junction trees. In Proceedings of the Tenth international
|
| 60 |
+
conference on Uncertainty in artificial intelligence (UAI’94).
|
| 61 |
+
Morgan Kaufmann Publishers Inc., San Francisco, CA, USA, 360–366.
|
| 62 |
+
"""
|
| 63 |
+
|
| 64 |
+
clique_graph = nx.Graph()
|
| 65 |
+
|
| 66 |
+
if G.is_directed():
|
| 67 |
+
G = moral.moral_graph(G)
|
| 68 |
+
chordal_graph, _ = complete_to_chordal_graph(G)
|
| 69 |
+
|
| 70 |
+
cliques = [tuple(sorted(i)) for i in chordal_graph_cliques(chordal_graph)]
|
| 71 |
+
clique_graph.add_nodes_from(cliques, type="clique")
|
| 72 |
+
|
| 73 |
+
for edge in combinations(cliques, 2):
|
| 74 |
+
set_edge_0 = set(edge[0])
|
| 75 |
+
set_edge_1 = set(edge[1])
|
| 76 |
+
if not set_edge_0.isdisjoint(set_edge_1):
|
| 77 |
+
sepset = tuple(sorted(set_edge_0.intersection(set_edge_1)))
|
| 78 |
+
clique_graph.add_edge(edge[0], edge[1], weight=len(sepset), sepset=sepset)
|
| 79 |
+
|
| 80 |
+
junction_tree = nx.maximum_spanning_tree(clique_graph)
|
| 81 |
+
|
| 82 |
+
for edge in list(junction_tree.edges(data=True)):
|
| 83 |
+
junction_tree.add_node(edge[2]["sepset"], type="sepset")
|
| 84 |
+
junction_tree.add_edge(edge[0], edge[2]["sepset"])
|
| 85 |
+
junction_tree.add_edge(edge[1], edge[2]["sepset"])
|
| 86 |
+
junction_tree.remove_edge(edge[0], edge[1])
|
| 87 |
+
|
| 88 |
+
return junction_tree
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/mst.py
ADDED
|
@@ -0,0 +1,1284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Algorithms for calculating min/max spanning trees/forests.
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
from enum import Enum
|
| 8 |
+
from heapq import heappop, heappush
|
| 9 |
+
from itertools import count
|
| 10 |
+
from math import isnan
|
| 11 |
+
from operator import itemgetter
|
| 12 |
+
from queue import PriorityQueue
|
| 13 |
+
|
| 14 |
+
import networkx as nx
|
| 15 |
+
from networkx.utils import UnionFind, not_implemented_for, py_random_state
|
| 16 |
+
|
| 17 |
+
__all__ = [
|
| 18 |
+
"minimum_spanning_edges",
|
| 19 |
+
"maximum_spanning_edges",
|
| 20 |
+
"minimum_spanning_tree",
|
| 21 |
+
"maximum_spanning_tree",
|
| 22 |
+
"number_of_spanning_trees",
|
| 23 |
+
"random_spanning_tree",
|
| 24 |
+
"partition_spanning_tree",
|
| 25 |
+
"EdgePartition",
|
| 26 |
+
"SpanningTreeIterator",
|
| 27 |
+
]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class EdgePartition(Enum):
    """
    An enum to store the state of an edge partition. The enum is written to the
    edges of a graph before being passed to `kruskal_mst_edges`. Options are:

    - EdgePartition.OPEN
    - EdgePartition.INCLUDED
    - EdgePartition.EXCLUDED
    """

    OPEN = 0
    INCLUDED = 1
    EXCLUDED = 2
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@not_implemented_for("multigraph")
@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data")
def boruvka_mst_edges(
    G, minimum=True, weight="weight", keys=False, data=True, ignore_nan=False
):
    """Iterate over edges of a Borůvka's algorithm min/max spanning tree.

    Parameters
    ----------
    G : NetworkX Graph
        The edges of `G` must have distinct weights,
        otherwise the edges may not form a tree.

    minimum : bool (default: True)
        Find the minimum (True) or maximum (False) spanning tree.

    weight : string (default: 'weight')
        The name of the edge attribute holding the edge weights.

    keys : bool (default: False)
        This argument is ignored since this function is not
        implemented for multigraphs; it exists only for consistency
        with the other minimum spanning tree functions.

    data : bool (default: True)
        Flag for whether to yield edge attribute dicts.
        If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
        If False, yield edges `(u, v)`.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    """
    # Initialize a forest, assuming initially that it is the discrete
    # partition of the nodes of the graph.
    forest = UnionFind(G)

    def best_edge(component):
        """Returns the optimum (minimum or maximum) edge on the edge
        boundary of the given set of nodes.

        A return value of ``None`` indicates an empty boundary.

        """
        # Negating weights turns the max-tree search into a min search.
        sign = 1 if minimum else -1
        minwt = float("inf")
        boundary = None
        for e in nx.edge_boundary(G, component, data=True):
            wt = e[-1].get(weight, 1) * sign
            if isnan(wt):
                if ignore_nan:
                    continue
                msg = f"NaN found as an edge weight. Edge {e}"
                raise ValueError(msg)
            if wt < minwt:
                minwt = wt
                boundary = e
        return boundary

    # Determine the optimum edge in the edge boundary of each component
    # in the forest.
    best_edges = (best_edge(component) for component in forest.to_sets())
    best_edges = [edge for edge in best_edges if edge is not None]
    # If each entry was ``None``, that means the graph was disconnected,
    # so we are done generating the forest.
    while best_edges:
        # Determine the optimum edge in the edge boundary of each
        # component in the forest.
        #
        # This must be a sequence, not an iterator. In this list, the
        # same edge may appear twice, in different orientations (but
        # that's okay, since a union operation will be called on the
        # endpoints the first time it is seen, but not the second time).
        #
        # Any ``None`` indicates that the edge boundary for that
        # component was empty, so that part of the forest has been
        # completed.
        #
        # TODO This can be parallelized, both in the outer loop over
        # each component in the forest and in the computation of the
        # minimum. (Same goes for the identical lines outside the loop.)
        best_edges = (best_edge(component) for component in forest.to_sets())
        best_edges = [edge for edge in best_edges if edge is not None]
        # Join trees in the forest using the best edges, and yield that
        # edge, since it is part of the spanning tree.
        #
        # TODO This loop can be parallelized, to an extent (the union
        # operation must be atomic).
        for u, v, d in best_edges:
            if forest[u] != forest[v]:
                if data:
                    yield u, v, d
                else:
                    yield u, v
                forest.union(u, v)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@nx._dispatchable(
    edge_attrs={"weight": None, "partition": None}, preserve_edge_attrs="data"
)
def kruskal_mst_edges(
    G, minimum, weight="weight", keys=True, data=True, ignore_nan=False, partition=None
):
    """
    Iterate over edges of a Kruskal's algorithm min/max spanning tree.

    Parameters
    ----------
    G : NetworkX Graph
        The graph holding the tree of interest.

    minimum : bool (default: True)
        Find the minimum (True) or maximum (False) spanning tree.

    weight : string (default: 'weight')
        The name of the edge attribute holding the edge weights.

    keys : bool (default: True)
        If `G` is a multigraph, `keys` controls whether edge keys are yielded.
        Otherwise `keys` is ignored.

    data : bool (default: True)
        Flag for whether to yield edge attribute dicts.
        If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
        If False, yield edges `(u, v)`.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    partition : string (default: None)
        The name of the edge attribute holding the partition data, if it exists.
        Partition data is written to the edges using the `EdgePartition` enum.
        If a partition exists, all included edges and none of the excluded edges
        will appear in the final tree. Open edges may or may not be used.

    Yields
    ------
    edge tuple
        The edges as discovered by Kruskal's method. Each edge can
        take the following forms: `(u, v)`, `(u, v, d)` or `(u, v, k, d)`
        depending on the `key` and `data` parameters
    """
    subtrees = UnionFind()
    if G.is_multigraph():
        edges = G.edges(keys=True, data=True)
    else:
        edges = G.edges(data=True)

    # Order the edges of the graph with respect to the partition data.
    # Edges are considered in the following order:
    #
    # * Included edges (always first, so they are unioned before any
    #   open edge can connect their endpoints)
    # * Open edges sorted by weight (ascending for a minimum tree,
    #   descending for a maximum tree)
    #
    # Excluded edges are dropped entirely and can never appear in the tree.
    included_edges = []
    open_edges = []
    for e in edges:
        d = e[-1]
        wt = d.get(weight, 1)
        if isnan(wt):
            if ignore_nan:
                continue
            raise ValueError(f"NaN found as an edge weight. Edge {e}")

        # Prefix the weight so the open edges can be sorted on it below.
        edge = (wt,) + e
        if d.get(partition) == EdgePartition.INCLUDED:
            included_edges.append(edge)
        elif d.get(partition) == EdgePartition.EXCLUDED:
            continue
        else:
            open_edges.append(edge)

    if minimum:
        sorted_open_edges = sorted(open_edges, key=itemgetter(0))
    else:
        sorted_open_edges = sorted(open_edges, key=itemgetter(0), reverse=True)

    # Condense the lists into one
    included_edges.extend(sorted_open_edges)
    sorted_edges = included_edges
    del open_edges, sorted_open_edges, included_edges

    # Multigraphs need to handle edge keys in addition to edge data.
    if G.is_multigraph():
        for wt, u, v, k, d in sorted_edges:
            if subtrees[u] != subtrees[v]:
                if keys:
                    if data:
                        yield u, v, k, d
                    else:
                        yield u, v, k
                else:
                    if data:
                        yield u, v, d
                    else:
                        yield u, v
                subtrees.union(u, v)
    else:
        for wt, u, v, d in sorted_edges:
            if subtrees[u] != subtrees[v]:
                if data:
                    yield u, v, d
                else:
                    yield u, v
                subtrees.union(u, v)
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data")
def prim_mst_edges(G, minimum, weight="weight", keys=True, data=True, ignore_nan=False):
    """Iterate over edges of Prim's algorithm min/max spanning tree.

    Parameters
    ----------
    G : NetworkX Graph
        The graph holding the tree of interest.

    minimum : bool (default: True)
        Find the minimum (True) or maximum (False) spanning tree.

    weight : string (default: 'weight')
        The name of the edge attribute holding the edge weights.

    keys : bool (default: True)
        If `G` is a multigraph, `keys` controls whether edge keys are yielded.
        Otherwise `keys` is ignored.

    data : bool (default: True)
        Flag for whether to yield edge attribute dicts.
        If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
        If False, yield edges `(u, v)`.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    """
    is_multigraph = G.is_multigraph()
    # Bind the heap operations to locals: these run once per edge push/pop.
    push = heappush
    pop = heappop

    nodes = set(G)
    # Tie-breaking counter so heap entries never compare the (possibly
    # unorderable) node/data members when weights are equal.
    c = count()

    # Negating weights turns the max-tree search into a min search.
    sign = 1 if minimum else -1

    # Outer loop restarts Prim's algorithm once per connected component,
    # producing a spanning forest on disconnected graphs.
    while nodes:
        u = nodes.pop()
        frontier = []
        visited = {u}
        if is_multigraph:
            for v, keydict in G.adj[u].items():
                for k, d in keydict.items():
                    wt = d.get(weight, 1) * sign
                    if isnan(wt):
                        if ignore_nan:
                            continue
                        msg = f"NaN found as an edge weight. Edge {(u, v, k, d)}"
                        raise ValueError(msg)
                    push(frontier, (wt, next(c), u, v, k, d))
        else:
            for v, d in G.adj[u].items():
                wt = d.get(weight, 1) * sign
                if isnan(wt):
                    if ignore_nan:
                        continue
                    msg = f"NaN found as an edge weight. Edge {(u, v, d)}"
                    raise ValueError(msg)
                push(frontier, (wt, next(c), u, v, d))
        while nodes and frontier:
            if is_multigraph:
                W, _, u, v, k, d = pop(frontier)
            else:
                W, _, u, v, d = pop(frontier)
            # Skip stale heap entries whose target was reached earlier.
            if v in visited or v not in nodes:
                continue
            # Multigraphs need to handle edge keys in addition to edge data.
            if is_multigraph and keys:
                if data:
                    yield u, v, k, d
                else:
                    yield u, v, k
            else:
                if data:
                    yield u, v, d
                else:
                    yield u, v
            # update frontier
            visited.add(v)
            nodes.discard(v)
            if is_multigraph:
                for w, keydict in G.adj[v].items():
                    if w in visited:
                        continue
                    for k2, d2 in keydict.items():
                        new_weight = d2.get(weight, 1) * sign
                        if isnan(new_weight):
                            if ignore_nan:
                                continue
                            msg = f"NaN found as an edge weight. Edge {(v, w, k2, d2)}"
                            raise ValueError(msg)
                        push(frontier, (new_weight, next(c), v, w, k2, d2))
            else:
                for w, d2 in G.adj[v].items():
                    if w in visited:
                        continue
                    new_weight = d2.get(weight, 1) * sign
                    if isnan(new_weight):
                        if ignore_nan:
                            continue
                        msg = f"NaN found as an edge weight. Edge {(v, w, d2)}"
                        raise ValueError(msg)
                    push(frontier, (new_weight, next(c), v, w, d2))
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
# Map each user-facing algorithm name to its edge-generator function.
# Both the ASCII ("boruvka") and the diacritic ("borůvka") spellings are
# accepted; an unknown name raises ValueError in the spanning-edge wrappers.
ALGORITHMS = {
    "boruvka": boruvka_mst_edges,
    "borůvka": boruvka_mst_edges,
    "kruskal": kruskal_mst_edges,
    "prim": prim_mst_edges,
}
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data")
def minimum_spanning_edges(
    G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False
):
    """Generate edges in a minimum spanning forest of an undirected
    weighted graph.

    A minimum spanning tree is a subgraph of the graph (a tree)
    with the minimum sum of edge weights. A spanning forest is a
    union of the spanning trees for each connected component of the graph.

    Parameters
    ----------
    G : undirected Graph
        An undirected graph. If `G` is connected, then the algorithm finds a
        spanning tree. Otherwise, a spanning forest is found.

    algorithm : string
        The algorithm to use when finding a minimum spanning tree. Valid
        choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'.

    weight : string
        Edge data key to use for weight (default 'weight').

    keys : bool
        Whether to yield edge key in multigraphs in addition to the edge.
        If `G` is not a multigraph, this is ignored.

    data : bool, optional
        If True yield the edge data along with the edge.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    Returns
    -------
    edges : iterator
        An iterator over edges in a minimum spanning tree of `G`.
        Edges connecting nodes `u` and `v` are represented as tuples:
        `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)`

        If `G` is a multigraph, `keys` indicates whether the edge key `k` will
        be reported in the third position in the edge tuple. `data` indicates
        whether the edge datadict `d` will appear at the end of the edge tuple.

        If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True
        or `(u, v)` if `data` is False.

    Examples
    --------
    >>> from networkx.algorithms import tree

    Find minimum spanning edges by Kruskal's algorithm

    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)
    >>> mst = tree.minimum_spanning_edges(G, algorithm="kruskal", data=False)
    >>> edgelist = list(mst)
    >>> sorted(sorted(e) for e in edgelist)
    [[0, 1], [1, 2], [2, 3]]

    Find minimum spanning edges by Prim's algorithm

    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)
    >>> mst = tree.minimum_spanning_edges(G, algorithm="prim", data=False)
    >>> edgelist = list(mst)
    >>> sorted(sorted(e) for e in edgelist)
    [[0, 1], [1, 2], [2, 3]]

    Notes
    -----
    For Borůvka's algorithm, each edge must have a weight attribute, and
    each edge weight must be distinct.

    For the other algorithms, if the graph edges do not have a weight
    attribute a default weight of 1 will be used.

    Modified code from David Eppstein, April 2006
    http://www.ics.uci.edu/~eppstein/PADS/

    """
    try:
        algo = ALGORITHMS[algorithm]
    except KeyError as err:
        msg = f"{algorithm} is not a valid choice for an algorithm."
        raise ValueError(msg) from err

    return algo(
        G, minimum=True, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan
    )
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data")
def maximum_spanning_edges(
    G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False
):
    """Generate edges in a maximum spanning forest of an undirected
    weighted graph.

    A maximum spanning tree is a subgraph of the graph (a tree)
    with the maximum possible sum of edge weights. A spanning forest is a
    union of the spanning trees for each connected component of the graph.

    Parameters
    ----------
    G : undirected Graph
        An undirected graph. If `G` is connected, then the algorithm finds a
        spanning tree. Otherwise, a spanning forest is found.

    algorithm : string
        The algorithm to use when finding a maximum spanning tree. Valid
        choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'.

    weight : string
        Edge data key to use for weight (default 'weight').

    keys : bool
        Whether to yield edge key in multigraphs in addition to the edge.
        If `G` is not a multigraph, this is ignored.

    data : bool, optional
        If True yield the edge data along with the edge.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    Returns
    -------
    edges : iterator
        An iterator over edges in a maximum spanning tree of `G`.
        Edges connecting nodes `u` and `v` are represented as tuples:
        `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)`

        If `G` is a multigraph, `keys` indicates whether the edge key `k` will
        be reported in the third position in the edge tuple. `data` indicates
        whether the edge datadict `d` will appear at the end of the edge tuple.

        If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True
        or `(u, v)` if `data` is False.

    Examples
    --------
    >>> from networkx.algorithms import tree

    Find maximum spanning edges by Kruskal's algorithm

    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)
    >>> mst = tree.maximum_spanning_edges(G, algorithm="kruskal", data=False)
    >>> edgelist = list(mst)
    >>> sorted(sorted(e) for e in edgelist)
    [[0, 1], [0, 3], [1, 2]]

    Find maximum spanning edges by Prim's algorithm

    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)  # assign weight 2 to edge 0-3
    >>> mst = tree.maximum_spanning_edges(G, algorithm="prim", data=False)
    >>> edgelist = list(mst)
    >>> sorted(sorted(e) for e in edgelist)
    [[0, 1], [0, 3], [2, 3]]

    Notes
    -----
    For Borůvka's algorithm, each edge must have a weight attribute, and
    each edge weight must be distinct.

    For the other algorithms, if the graph edges do not have a weight
    attribute a default weight of 1 will be used.

    Modified code from David Eppstein, April 2006
    http://www.ics.uci.edu/~eppstein/PADS/

    """
    # Resolve the algorithm name; an unknown name becomes a ValueError with
    # the original KeyError chained as its cause.
    try:
        edge_generator = ALGORITHMS[algorithm]
    except KeyError as err:
        raise ValueError(
            f"{algorithm} is not a valid choice for an algorithm."
        ) from err

    # Delegate to the selected generator with ``minimum=False`` so it
    # produces a maximum (rather than minimum) spanning forest.
    return edge_generator(
        G, minimum=False, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan
    )
|
| 558 |
+
|
| 559 |
+
|
| 560 |
+
@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
def minimum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False):
    """Returns a minimum spanning tree or forest on an undirected graph `G`.

    Parameters
    ----------
    G : undirected graph
        An undirected graph. If `G` is connected, then the algorithm finds a
        spanning tree. Otherwise, a spanning forest is found.

    weight : str
        Data key to use for edge weights.

    algorithm : string
        The algorithm to use when finding a minimum spanning tree. Valid
        choices are 'kruskal', 'prim', or 'boruvka'. The default is
        'kruskal'.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    Returns
    -------
    G : NetworkX Graph
        A minimum spanning tree or forest.

    Examples
    --------
    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)
    >>> T = nx.minimum_spanning_tree(G)
    >>> sorted(T.edges(data=True))
    [(0, 1, {}), (1, 2, {}), (2, 3, {})]


    Notes
    -----
    For Borůvka's algorithm, each edge must have a weight attribute, and
    each edge weight must be distinct.

    For the other algorithms, if the graph edges do not have a weight
    attribute a default weight of 1 will be used.

    There may be more than one tree with the same minimum or maximum weight.
    See :mod:`networkx.tree.recognition` for more detailed definitions.

    Isolated nodes with self-loops are in the tree as edgeless isolated nodes.

    """
    # Build an empty graph of the same concrete class as the input, then
    # carry over graph-level attributes and every node (with node attrs) so
    # isolated nodes survive into the result.
    tree = G.__class__()
    tree.graph.update(G.graph)
    tree.add_nodes_from(G.nodes.items())
    # Consume the generator of minimum-spanning edges (with keys and data)
    # directly into the new graph.
    tree.add_edges_from(
        minimum_spanning_edges(
            G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan
        )
    )
    return tree
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
def partition_spanning_tree(
    G, minimum=True, weight="weight", partition="partition", ignore_nan=False
):
    """
    Find a spanning tree while respecting a partition of edges.

    Each edge may be flagged via the `EdgePartition` enum as `INCLUDED`
    (must appear in the returned tree), `EXCLUDED` (must not appear) or
    `OPEN` (free to use or not).

    This is the workhorse of :class:`SpanningTreeIterator`, which generates
    new partitions following the algorithm of Sörensen and Janssens [1]_.

    Parameters
    ----------
    G : undirected graph
        An undirected graph.

    minimum : bool (default: True)
        Whether to return the minimum spanning tree of the partition
        (``True``) or the maximum one (``False``).

    weight : str
        Data key to use for edge weights.

    partition : str
        The key for the edge attribute containing the partition
        data on the graph. Edges can be included, excluded or open using the
        `EdgePartition` enum.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    Returns
    -------
    G : NetworkX Graph
        A minimum spanning tree using all of the included edges in the graph
        and none of the excluded edges.

    References
    ----------
    .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning
           trees in order of increasing cost, Pesquisa Operacional, 2005-08,
           Vol. 25 (2), p. 219-229,
           https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en
    """
    # Build the result with the same concrete graph class as the input so
    # that (multi)graph behavior is preserved, then copy graph- and
    # node-level attributes before adding the partition-respecting edges.
    tree = G.__class__()
    tree.graph.update(G.graph)
    tree.add_nodes_from(G.nodes.items())
    tree.add_edges_from(
        kruskal_mst_edges(
            G,
            minimum,
            weight,
            keys=True,
            data=True,
            ignore_nan=ignore_nan,
            partition=partition,
        )
    )
    return tree
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
def maximum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False):
    """Returns a maximum spanning tree or forest on an undirected graph `G`.

    Parameters
    ----------
    G : undirected graph
        An undirected graph. If `G` is connected, then the algorithm finds a
        spanning tree. Otherwise, a spanning forest is found.

    weight : str
        Data key to use for edge weights.

    algorithm : string
        The algorithm to use when finding a maximum spanning tree. Valid
        choices are 'kruskal', 'prim', or 'boruvka'. The default is
        'kruskal'.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    Returns
    -------
    G : NetworkX Graph
        A maximum spanning tree or forest.

    Examples
    --------
    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)
    >>> T = nx.maximum_spanning_tree(G)
    >>> sorted(T.edges(data=True))
    [(0, 1, {}), (0, 3, {'weight': 2}), (1, 2, {})]

    Notes
    -----
    For Borůvka's algorithm, each edge must have a weight attribute, and
    each edge weight must be distinct.

    For the other algorithms, if the graph edges do not have a weight
    attribute a default weight of 1 will be used.

    There may be more than one tree with the same minimum or maximum weight.
    See :mod:`networkx.tree.recognition` for more detailed definitions.

    Isolated nodes with self-loops are in the tree as edgeless isolated nodes.

    """
    edges = maximum_spanning_edges(
        G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan
    )
    # Feed the edge generator directly to add_edges_from: materializing it
    # with list() first was unnecessary work and inconsistent with
    # minimum_spanning_tree, which consumes the generator lazily.
    T = G.__class__()  # Same graph class as G
    T.graph.update(G.graph)
    T.add_nodes_from(G.nodes.items())
    T.add_edges_from(edges)
    return T
|
| 745 |
+
|
| 746 |
+
|
| 747 |
+
@py_random_state(3)
@nx._dispatchable(preserve_edge_attrs=True, returns_graph=True)
def random_spanning_tree(G, weight=None, *, multiplicative=True, seed=None):
    """
    Sample a random spanning tree using the edges weights of `G`.

    This function supports two different methods for determining the
    probability of the graph. If ``multiplicative=True``, the probability
    is based on the product of edge weights, and if ``multiplicative=False``
    it is based on the sum of the edge weight. However, since it is
    easier to determine the total weight of all spanning trees for the
    multiplicative version, that is significantly faster and should be used if
    possible. Additionally, setting `weight` to `None` will cause a spanning tree
    to be selected with uniform probability.

    The function uses algorithm A8 in [1]_ .

    Parameters
    ----------
    G : nx.Graph
        An undirected version of the original graph.

    weight : string
        The edge key for the edge attribute holding edge weight.

    multiplicative : bool, default=True
        If `True`, the probability of each tree is the product of its edge weight
        over the sum of the product of all the spanning trees in the graph. If
        `False`, the probability is the sum of its edge weight over the sum of
        the sum of weights for all spanning trees in the graph.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    nx.Graph
        A spanning tree using the distribution defined by the weight of the tree.

    References
    ----------
    .. [1] V. Kulkarni, Generating random combinatorial objects, Journal of
       Algorithms, 11 (1990), pp. 185–207
    """

    def find_node(merged_nodes, node):
        """
        We can think of clusters of contracted nodes as having one
        representative in the graph. Each node which is not in merged_nodes
        is still its own representative. Since a representative can be later
        contracted, we need to recursively search though the dict to find
        the final representative, but once we know it we can use path
        compression to speed up the access of the representative for next time.

        This cannot be replaced by the standard NetworkX union_find since that
        data structure will merge nodes with less representing nodes into the
        one with more representing nodes but this function requires we merge
        them using the order that contract_edges contracts using.

        Parameters
        ----------
        merged_nodes : dict
            The dict storing the mapping from node to representative
        node
            The node whose representative we seek

        Returns
        -------
        The representative of the `node`
        """
        if node not in merged_nodes:
            # A node absent from the dict has never been contracted away,
            # so it represents itself.
            return node
        else:
            rep = find_node(merged_nodes, merged_nodes[node])
            # Path compression: point directly at the final representative.
            merged_nodes[node] = rep
            return rep

    def prepare_graph():
        """
        For the graph `G`, remove all edges not in the set `V` and then
        contract all edges in the set `U`.

        Returns
        -------
        A copy of `G` which has had all edges not in `V` removed and all edges
        in `U` contracted.
        """

        # The result is a MultiGraph version of G so that parallel edges are
        # allowed during edge contraction
        result = nx.MultiGraph(incoming_graph_data=G)

        # Remove all edges not in V
        edges_to_remove = set(result.edges()).difference(V)
        result.remove_edges_from(edges_to_remove)

        # Contract all edges in U
        #
        # Imagine that you have two edges to contract and they share an
        # endpoint like this:
        #                        [0] ----- [1] ----- [2]
        # If we contract (0, 1) first, the contraction function will always
        # delete the second node it is passed so the resulting graph would be
        #                             [0] ----- [2]
        # and edge (1, 2) no longer exists but (0, 2) would need to be contracted
        # in its place now. That is why I use the below dict as a merge-find
        # data structure with path compression to track how the nodes are merged.
        merged_nodes = {}

        for u, v in U:
            u_rep = find_node(merged_nodes, u)
            v_rep = find_node(merged_nodes, v)
            # We cannot contract a node with itself
            if u_rep == v_rep:
                continue
            nx.contracted_nodes(result, u_rep, v_rep, self_loops=False, copy=False)
            # contracted_nodes keeps its first argument, so v_rep is the one
            # that disappeared and must now map to u_rep.
            merged_nodes[v_rep] = u_rep

        return merged_nodes, result

    def spanning_tree_total_weight(G, weight):
        """
        Find the sum of weights of the spanning trees of `G` using the
        appropriate `method`.

        This is easy if the chosen method is 'multiplicative', since we can
        use Kirchhoff's Tree Matrix Theorem directly. However, with the
        'additive' method, this process is slightly more complex and less
        computationally efficient as we have to find the number of spanning
        trees which contain each possible edge in the graph.

        Parameters
        ----------
        G : NetworkX Graph
            The graph to find the total weight of all spanning trees on.

        weight : string
            The key for the weight edge attribute of the graph.

        Returns
        -------
        float
            The sum of either the multiplicative or additive weight for all
            spanning trees in the graph.
        """
        # NOTE: the 'method' is taken from the enclosing scope's
        # `multiplicative` flag rather than passed as a parameter.
        if multiplicative:
            return nx.total_spanning_tree_weight(G, weight)
        else:
            # There are two cases for the total spanning tree additive weight.
            # 1. There is one edge in the graph. Then the only spanning tree is
            #    that edge itself, which will have a total weight of that edge
            #    itself.
            if G.number_of_edges() == 1:
                return G.edges(data=weight).__iter__().__next__()[2]
            # 2. There are no edges or two or more edges in the graph. Then, we find the
            #    total weight of the spanning trees using the formula in the
            #    reference paper: take the weight of each edge and multiply it by
            #    the number of spanning trees which include that edge. This
            #    can be accomplished by contracting the edge and finding the
            #    multiplicative total spanning tree weight if the weight of each edge
            #    is assumed to be 1, which is conveniently built into networkx already,
            #    by calling total_spanning_tree_weight with weight=None.
            #    Note that with no edges the returned value is just zero.
            else:
                total = 0
                for u, v, w in G.edges(data=weight):
                    total += w * nx.total_spanning_tree_weight(
                        nx.contracted_edge(G, edge=(u, v), self_loops=False), None
                    )
                return total

    if G.number_of_nodes() < 2:
        # no edges in the spanning tree
        return nx.empty_graph(G.nodes)

    # U: edges chosen so far for the tree; V: edges still eligible.
    U = set()
    st_cached_value = 0
    V = set(G.edges())
    shuffled_edges = list(G.edges())
    seed.shuffle(shuffled_edges)

    for u, v in shuffled_edges:
        e_weight = G[u][v][weight] if weight is not None else 1
        node_map, prepared_G = prepare_graph()
        G_total_tree_weight = spanning_tree_total_weight(prepared_G, weight)
        # Add the edge to U so that we can compute the total tree weight
        # assuming we include that edge
        # Now, if (u, v) cannot exist in G because it is fully contracted out
        # of existence, then it by definition cannot influence G_e's Kirchhoff
        # value. But, we also cannot pick it.
        rep_edge = (find_node(node_map, u), find_node(node_map, v))
        # Check to see if the 'representative edge' for the current edge is
        # in prepared_G. If so, then we can pick it.
        if rep_edge in prepared_G.edges:
            prepared_G_e = nx.contracted_edge(
                prepared_G, edge=rep_edge, self_loops=False
            )
            G_e_total_tree_weight = spanning_tree_total_weight(prepared_G_e, weight)
            if multiplicative:
                # Probability the edge belongs to a tree sampled from the
                # multiplicative distribution (eq. from the reference paper).
                threshold = e_weight * G_e_total_tree_weight / G_total_tree_weight
            else:
                numerator = (
                    st_cached_value + e_weight
                ) * nx.total_spanning_tree_weight(prepared_G_e) + G_e_total_tree_weight
                denominator = (
                    st_cached_value * nx.total_spanning_tree_weight(prepared_G)
                    + G_total_tree_weight
                )
                threshold = numerator / denominator
        else:
            threshold = 0.0
        z = seed.uniform(0.0, 1.0)
        if z > threshold:
            # Remove the edge from V since we did not pick it.
            V.remove((u, v))
        else:
            # Add the edge to U since we picked it.
            st_cached_value += e_weight
            U.add((u, v))
            # If we decide to keep an edge, it may complete the spanning tree.
            if len(U) == G.number_of_nodes() - 1:
                spanning_tree = nx.Graph()
                spanning_tree.add_edges_from(U)
                return spanning_tree
    raise Exception(f"Something went wrong! Only {len(U)} edges in the spanning tree!")
|
| 973 |
+
|
| 974 |
+
|
| 975 |
+
class SpanningTreeIterator:
    """
    Iterate over all spanning trees of a graph in either increasing or
    decreasing cost.

    Notes
    -----
    This iterator uses the partition scheme from [1]_ (included edges,
    excluded edges and open edges) as well as a modified Kruskal's Algorithm
    to generate minimum spanning trees which respect the partition of edges.
    For spanning trees with the same weight, ties are broken arbitrarily.

    References
    ----------
    .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning
           trees in order of increasing cost, Pesquisa Operacional, 2005-08,
           Vol. 25 (2), p. 219-229,
           https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en
    """

    @dataclass(order=True)
    class Partition:
        """
        This dataclass represents a partition and stores a dict with the edge
        data and the weight of the minimum spanning tree of the partition dict.
        """

        # Ordering (and hence priority-queue position) is by mst_weight only;
        # partition_dict is excluded from comparison via compare=False.
        mst_weight: float
        partition_dict: dict = field(compare=False)

        def __copy__(self):
            # Shallow-copy the dict so queued partitions are not mutated by
            # later edits to this instance.
            return SpanningTreeIterator.Partition(
                self.mst_weight, self.partition_dict.copy()
            )

    def __init__(self, G, weight="weight", minimum=True, ignore_nan=False):
        """
        Initialize the iterator

        Parameters
        ----------
        G : nx.Graph
            The directed graph which we need to iterate trees over

        weight : String, default = "weight"
            The edge attribute used to store the weight of the edge

        minimum : bool, default = True
            Return the trees in increasing order while true and decreasing order
            while false.

        ignore_nan : bool, default = False
            If a NaN is found as an edge weight normally an exception is raised.
            If `ignore_nan is True` then that edge is ignored instead.
        """
        # Work on a copy: the iterator writes partition flags onto the edges.
        self.G = G.copy()
        self.G.__networkx_cache__ = None  # Disable caching
        self.weight = weight
        self.minimum = minimum
        self.ignore_nan = ignore_nan
        # Randomly create a key for an edge attribute to hold the partition data
        self.partition_key = (
            "SpanningTreeIterators super secret partition attribute name"
        )

    def __iter__(self):
        """
        Returns
        -------
        SpanningTreeIterator
            The iterator object for this graph
        """
        self.partition_queue = PriorityQueue()
        self._clear_partition(self.G)
        # Weight of the unconstrained MST seeds the queue with the empty
        # partition (all edges open).
        mst_weight = partition_spanning_tree(
            self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan
        ).size(weight=self.weight)

        # Negate the weight for maximum iteration so the min-priority queue
        # pops largest trees first.
        self.partition_queue.put(
            self.Partition(mst_weight if self.minimum else -mst_weight, {})
        )

        return self

    def __next__(self):
        """
        Returns
        -------
        (multi)Graph
            The spanning tree of next greatest weight, which ties broken
            arbitrarily.
        """
        if self.partition_queue.empty():
            # Release the working copy once exhausted.
            del self.G, self.partition_queue
            raise StopIteration

        partition = self.partition_queue.get()
        self._write_partition(partition)
        next_tree = partition_spanning_tree(
            self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan
        )
        # Split the current partition into children before returning the tree.
        self._partition(partition, next_tree)

        self._clear_partition(next_tree)
        return next_tree

    def _partition(self, partition, partition_tree):
        """
        Create new partitions based of the minimum spanning tree of the
        current minimum partition.

        Parameters
        ----------
        partition : Partition
            The Partition instance used to generate the current minimum spanning
            tree.
        partition_tree : nx.Graph
            The minimum spanning tree of the input partition.
        """
        # create two new partitions with the data from the input partition dict
        p1 = self.Partition(0, partition.partition_dict.copy())
        p2 = self.Partition(0, partition.partition_dict.copy())
        for e in partition_tree.edges:
            # determine if the edge was open or included
            if e not in partition.partition_dict:
                # This is an open edge
                p1.partition_dict[e] = EdgePartition.EXCLUDED
                p2.partition_dict[e] = EdgePartition.INCLUDED

                self._write_partition(p1)
                p1_mst = partition_spanning_tree(
                    self.G,
                    self.minimum,
                    self.weight,
                    self.partition_key,
                    self.ignore_nan,
                )
                p1_mst_weight = p1_mst.size(weight=self.weight)
                # Only enqueue p1 if it still admits a spanning tree (i.e. the
                # partition-respecting "tree" is connected).
                if nx.is_connected(p1_mst):
                    p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight
                    self.partition_queue.put(p1.__copy__())
                # Continue splitting from p2's state: e stays INCLUDED in all
                # subsequent child partitions of this loop.
                p1.partition_dict = p2.partition_dict.copy()

    def _write_partition(self, partition):
        """
        Writes the desired partition into the graph to calculate the minimum
        spanning tree.

        Parameters
        ----------
        partition : Partition
            A Partition dataclass describing a partition on the edges of the
            graph.
        """

        partition_dict = partition.partition_dict
        partition_key = self.partition_key
        G = self.G

        # Multigraphs need the key to identify edges unambiguously.
        edges = (
            G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True)
        )
        for *e, d in edges:
            # Edges absent from the partition dict are open by default.
            d[partition_key] = partition_dict.get(tuple(e), EdgePartition.OPEN)

    def _clear_partition(self, G):
        """
        Removes partition data from the graph
        """
        partition_key = self.partition_key
        edges = (
            G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True)
        )
        for *e, d in edges:
            if partition_key in d:
                del d[partition_key]
|
| 1151 |
+
|
| 1152 |
+
|
| 1153 |
+
@nx._dispatchable(edge_attrs="weight")
def number_of_spanning_trees(G, *, root=None, weight=None):
    """Returns the number of spanning trees in `G`.

    A spanning tree for an undirected graph is a tree that connects
    all nodes in the graph. For a directed graph, the analog of a
    spanning tree is called a (spanning) arborescence. The arborescence
    includes a unique directed path from the `root` node to each other node.
    The graph must be weakly connected, and the root must be a node
    that includes all nodes as successors [3]_. Note that to avoid
    discussing sink-roots and reverse-arborescences, we have reversed
    the edge orientation from [3]_ and use the in-degree laplacian.

    This function (when `weight` is `None`) returns the number of
    spanning trees for an undirected graph and the number of
    arborescences from a single root node for a directed graph.
    When `weight` is the name of an edge attribute which holds the
    weight value of each edge, the function returns the sum over
    all trees of the multiplicative weight of each tree. That is,
    the weight of the tree is the product of its edge weights.

    Kirchhoff's Tree Matrix Theorem states that any cofactor of the
    Laplacian matrix of a graph is the number of spanning trees in the
    graph. (Here we use cofactors for a diagonal entry so that the
    cofactor becomes the determinant of the matrix with one row
    and its matching column removed.) For a weighted Laplacian matrix,
    the cofactor is the sum across all spanning trees of the
    multiplicative weight of each tree. That is, the weight of each
    tree is the product of its edge weights. The theorem is also
    known as Kirchhoff's theorem [1]_ and the Matrix-Tree theorem [2]_.

    For directed graphs, a similar theorem (Tutte's Theorem) holds with
    the cofactor chosen to be the one with row and column removed that
    correspond to the root. The cofactor is the number of arborescences
    with the specified node as root. And the weighted version gives the
    sum of the arborescence weights with root `root`. The arborescence
    weight is the product of its edge weights.

    Parameters
    ----------
    G : NetworkX graph

    root : node
        A node in the directed graph `G` that has all nodes as descendants.
        (This is ignored for undirected graphs.)

    weight : string or None, optional (default=None)
        The name of the edge attribute holding the edge weight.
        If `None`, then each edge is assumed to have a weight of 1.

    Returns
    -------
    Number
        Undirected graphs:
            The number of spanning trees of the graph `G`.
            Or the sum of all spanning tree weights of the graph `G`
            where the weight of a tree is the product of its edge weights.
        Directed graphs:
            The number of arborescences of `G` rooted at node `root`.
            Or the sum of all arborescence weights of the graph `G` with
            specified root where the weight of an arborescence is the product
            of its edge weights.

    Raises
    ------
    NetworkXPointlessConcept
        If `G` does not contain any nodes.

    NetworkXError
        If the graph `G` is directed and the root node
        is not specified or is not in G.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> round(nx.number_of_spanning_trees(G))
    125

    >>> G = nx.Graph()
    >>> G.add_edge(1, 2, weight=2)
    >>> G.add_edge(1, 3, weight=1)
    >>> G.add_edge(2, 3, weight=1)
    >>> round(nx.number_of_spanning_trees(G, weight="weight"))
    5

    Notes
    -----
    Self-loops are excluded. Multi-edges are contracted in one edge
    equal to the sum of the weights.

    References
    ----------
    .. [1] Wikipedia
       "Kirchhoff's theorem."
       https://en.wikipedia.org/wiki/Kirchhoff%27s_theorem
    .. [2] Kirchhoff, G. R.
        Über die Auflösung der Gleichungen, auf welche man
        bei der Untersuchung der linearen Vertheilung
        Galvanischer Ströme geführt wird
        Annalen der Physik und Chemie, vol. 72, pp. 497-508, 1847.
    .. [3] Margoliash, J.
        "Matrix-Tree Theorem for Directed Graphs"
        https://www.math.uchicago.edu/~may/VIGRE/VIGRE2010/REUPapers/Margoliash.pdf
    """
    import numpy as np

    if len(G) == 0:
        raise nx.NetworkXPointlessConcept("Graph G must contain at least one node.")

    # undirected G
    if not nx.is_directed(G):
        # A disconnected graph has no spanning tree by definition.
        if not nx.is_connected(G):
            return 0
        G_laplacian = nx.laplacian_matrix(G, weight=weight).toarray()
        # Kirchhoff's theorem: any cofactor of the Laplacian counts the
        # spanning trees; delete the first row and column.
        return float(np.linalg.det(G_laplacian[1:, 1:]))

    # directed G
    if root is None:
        raise nx.NetworkXError("Input `root` must be provided when G is directed")
    if root not in G:
        raise nx.NetworkXError("The node root is not in the graph G.")
    if not nx.is_weakly_connected(G):
        return 0

    # Compute directed Laplacian matrix
    # Place the root first so deleting row/column 0 below removes the
    # root's row and column, as Tutte's theorem requires.
    nodelist = [root] + [n for n in G if n != root]
    A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight)
    # Column sums of A give the in-degrees (weighted in-degree if `weight`
    # is set), yielding the in-degree Laplacian D - A.
    D = np.diag(A.sum(axis=0))
    G_laplacian = D - A

    # Compute number of spanning trees
    return float(np.linalg.det(G_laplacian[1:, 1:]))
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/operations.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Operations on trees."""
|
| 2 |
+
|
| 3 |
+
from functools import partial
|
| 4 |
+
from itertools import accumulate, chain
|
| 5 |
+
|
| 6 |
+
import networkx as nx
|
| 7 |
+
|
| 8 |
+
__all__ = ["join_trees"]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Argument types don't match dispatching, but allow manual selection of backend
|
| 12 |
+
@nx._dispatchable(graphs=None, returns_graph=True)
def join_trees(rooted_trees, *, label_attribute=None, first_label=0):
    """Return a new rooted tree formed by joining `rooted_trees`.

    A fresh root node (labeled `first_label`) is created and connected to
    the root of every input tree.  All nodes are relabeled to consecutive
    integers; when `label_attribute` is given, each node's original label
    is preserved under that attribute in the new tree.

    Parameters
    ----------
    rooted_trees : list
        Pairs ``(tree, root)`` where ``tree`` is a NetworkX graph
        representing a tree and ``root`` is its root node.  The nodes of
        these trees are relabeled to integers.

    label_attribute : str
        If provided, the original node labels are stored in the new tree
        under this node attribute; otherwise they are discarded.

    first_label : int, optional (default=0)
        Label of the new root node.  The remaining nodes are numbered
        consecutively starting at ``first_label + 1``.

    Returns
    -------
    NetworkX graph
        The rooted tree resulting from joining the provided `rooted_trees`,
        with the new root labeled `first_label` and every subtree attached
        to it.

    Notes
    -----
    Trees are stored in NetworkX as NetworkX Graphs.  There is no specific
    enforcement of the fact that these are trees; :func:`networkx.is_tree`
    can be used to test each input.

    Graph, edge, and node attributes are propagated from the given rooted
    trees to the created tree.  Overlapping graph attributes from later
    trees overwrite those from earlier trees.

    Examples
    --------
    Join two full balanced binary trees of height *h* to get a full
    balanced binary tree of depth *h* + 1::

        >>> h = 4
        >>> left = nx.balanced_tree(2, h)
        >>> right = nx.balanced_tree(2, h)
        >>> joined_tree = nx.join_trees([(left, 0), (right, 0)])
        >>> nx.is_isomorphic(joined_tree, nx.balanced_tree(2, h + 1))
        True

    """
    if not rooted_trees:
        return nx.empty_graph(1)

    # Split the (tree, root) pairs into parallel sequences.
    trees, roots = zip(*rooted_trees)

    # The joined tree has the same type as the first input tree.
    joined = type(trees[0])()

    # Starting integer label for each subtree: cumulative node counts,
    # offset by one to leave `first_label` free for the new root.
    tree_sizes = (len(t) for t in trees[:-1])
    start_labels = list(accumulate(tree_sizes, initial=first_label + 1))

    # Determine where each root lands after relabeling.
    relabeled_roots = [
        start + list(tree.nodes()).index(root)
        for tree, root, start in zip(trees, roots, start_labels)
    ]

    # Relabel each subtree so the node sets become disjoint integer ranges.
    relabel = partial(
        nx.convert_node_labels_to_integers, label_attribute=label_attribute
    )
    relabeled_trees = [
        relabel(tree, first_label=start)
        for tree, start in zip(trees, start_labels)
    ]

    # Merge all nodes, edges, and attributes into the result.
    for tree in relabeled_trees:
        joined.update(tree)

    # Attach every subtree root to the fresh root node; `first_label`
    # is guaranteed unused by the relabeling scheme above.
    joined.add_node(first_label)
    joined.add_edges_from((first_label, root) for root in relabeled_roots)

    return joined
|
mplug_owl2/lib/python3.10/site-packages/networkx/algorithms/tree/recognition.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Recognition Tests
|
| 3 |
+
=================
|
| 4 |
+
|
| 5 |
+
A *forest* is an acyclic, undirected graph, and a *tree* is a connected forest.
|
| 6 |
+
Depending on the subfield, there are various conventions for generalizing these
|
| 7 |
+
definitions to directed graphs.
|
| 8 |
+
|
| 9 |
+
In one convention, directed variants of forest and tree are defined in an
|
| 10 |
+
identical manner, except that the direction of the edges is ignored. In effect,
|
| 11 |
+
each directed edge is treated as a single undirected edge. Then, additional
|
| 12 |
+
restrictions are imposed to define *branchings* and *arborescences*.
|
| 13 |
+
|
| 14 |
+
In another convention, directed variants of forest and tree correspond to
|
| 15 |
+
the previous convention's branchings and arborescences, respectively. Then two
|
| 16 |
+
new terms, *polyforest* and *polytree*, are defined to correspond to the other
|
| 17 |
+
convention's forest and tree.
|
| 18 |
+
|
| 19 |
+
Summarizing::
|
| 20 |
+
|
| 21 |
+
+-----------------------------+
|
| 22 |
+
| Convention A | Convention B |
|
| 23 |
+
+=============================+
|
| 24 |
+
| forest | polyforest |
|
| 25 |
+
| tree | polytree |
|
| 26 |
+
| branching | forest |
|
| 27 |
+
| arborescence | tree |
|
| 28 |
+
+-----------------------------+
|
| 29 |
+
|
| 30 |
+
Each convention has its reasons. The first convention emphasizes definitional
|
| 31 |
+
similarity in that directed forests and trees are only concerned with
|
| 32 |
+
acyclicity and do not have an in-degree constraint, just as their undirected
|
| 33 |
+
counterparts do not. The second convention emphasizes functional similarity
|
| 34 |
+
in the sense that the directed analog of a spanning tree is a spanning
|
| 35 |
+
arborescence. That is, take any spanning tree and choose one node as the root.
|
| 36 |
+
Then every edge is assigned a direction such there is a directed path from the
|
| 37 |
+
root to every other node. The result is a spanning arborescence.
|
| 38 |
+
|
| 39 |
+
NetworkX follows convention "A". Explicitly, these are:
|
| 40 |
+
|
| 41 |
+
undirected forest
|
| 42 |
+
An undirected graph with no undirected cycles.
|
| 43 |
+
|
| 44 |
+
undirected tree
|
| 45 |
+
A connected, undirected forest.
|
| 46 |
+
|
| 47 |
+
directed forest
|
| 48 |
+
A directed graph with no undirected cycles. Equivalently, the underlying
|
| 49 |
+
graph structure (which ignores edge orientations) is an undirected forest.
|
| 50 |
+
In convention B, this is known as a polyforest.
|
| 51 |
+
|
| 52 |
+
directed tree
|
| 53 |
+
A weakly connected, directed forest. Equivalently, the underlying graph
|
| 54 |
+
structure (which ignores edge orientations) is an undirected tree. In
|
| 55 |
+
convention B, this is known as a polytree.
|
| 56 |
+
|
| 57 |
+
branching
|
| 58 |
+
A directed forest with each node having, at most, one parent. So the maximum
|
| 59 |
+
in-degree is equal to 1. In convention B, this is known as a forest.
|
| 60 |
+
|
| 61 |
+
arborescence
|
| 62 |
+
A directed tree with each node having, at most, one parent. So the maximum
|
| 63 |
+
in-degree is equal to 1. In convention B, this is known as a tree.
|
| 64 |
+
|
| 65 |
+
For trees and arborescences, the adjective "spanning" may be added to designate
|
| 66 |
+
that the graph, when considered as a forest/branching, consists of a single
|
| 67 |
+
tree/arborescence that includes all nodes in the graph. It is true, by
|
| 68 |
+
definition, that every tree/arborescence is spanning with respect to the nodes
|
| 69 |
+
that define the tree/arborescence and so, it might seem redundant to introduce
|
| 70 |
+
the notion of "spanning". However, the nodes may represent a subset of
|
| 71 |
+
nodes from a larger graph, and it is in this context that the term "spanning"
|
| 72 |
+
becomes a useful notion.
|
| 73 |
+
|
| 74 |
+
"""
|
| 75 |
+
|
| 76 |
+
import networkx as nx
|
| 77 |
+
|
| 78 |
+
__all__ = ["is_arborescence", "is_branching", "is_forest", "is_tree"]
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@nx.utils.not_implemented_for("undirected")
@nx._dispatchable
def is_arborescence(G):
    """
    Returns True if `G` is an arborescence.

    An arborescence is a directed tree with maximum in-degree equal to 1.

    Parameters
    ----------
    G : graph
        The graph to test.

    Returns
    -------
    b : bool
        A boolean that is True if `G` is an arborescence.

    Examples
    --------
    >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (3, 4)])
    >>> nx.is_arborescence(G)
    True
    >>> G.remove_edge(0, 1)
    >>> G.add_edge(1, 2)  # maximum in-degree is 2
    >>> nx.is_arborescence(G)
    False

    Notes
    -----
    In another convention, an arborescence is known as a *tree*.

    See Also
    --------
    is_tree

    """
    if not is_tree(G):
        return False
    # Every node may have at most one parent.
    return all(indeg <= 1 for _, indeg in G.in_degree())
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@nx.utils.not_implemented_for("undirected")
@nx._dispatchable
def is_branching(G):
    """
    Returns True if `G` is a branching.

    A branching is a directed forest with maximum in-degree equal to 1.

    Parameters
    ----------
    G : directed graph
        The directed graph to test.

    Returns
    -------
    b : bool
        A boolean that is True if `G` is a branching.

    Examples
    --------
    >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)])
    >>> nx.is_branching(G)
    True
    >>> G.remove_edge(2, 3)
    >>> G.add_edge(3, 1)  # maximum in-degree is 2
    >>> nx.is_branching(G)
    False

    Notes
    -----
    In another convention, a branching is also known as a *forest*.

    See Also
    --------
    is_forest

    """
    if not is_forest(G):
        return False
    # Every node may have at most one parent.
    return all(indeg <= 1 for _, indeg in G.in_degree())
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
@nx._dispatchable
def is_forest(G):
    """
    Returns True if `G` is a forest.

    A forest is a graph with no undirected cycles.

    For directed graphs, `G` is a forest if the underlying graph is a forest.
    The underlying graph is obtained by treating each directed edge as a
    single undirected edge in a multigraph.

    Parameters
    ----------
    G : graph
        The graph to test.

    Returns
    -------
    b : bool
        A boolean that is True if `G` is a forest.

    Raises
    ------
    NetworkXPointlessConcept
        If `G` is empty.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)])
    >>> nx.is_forest(G)
    True
    >>> G.add_edge(4, 1)
    >>> nx.is_forest(G)
    False

    Notes
    -----
    In another convention, a directed forest is known as a *polyforest* and
    then *forest* corresponds to a *branching*.

    See Also
    --------
    is_branching

    """
    if len(G) == 0:
        raise nx.exception.NetworkXPointlessConcept("G has no nodes.")

    # Select the component notion appropriate for the graph's directedness.
    if G.is_directed():
        component_sets = nx.weakly_connected_components(G)
    else:
        component_sets = nx.connected_components(G)

    # Each connected component is acyclic iff it has exactly n - 1 edges.
    for nodes in component_sets:
        component = G.subgraph(nodes)
        if component.number_of_edges() != len(component) - 1:
            return False
    return True
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
@nx._dispatchable
def is_tree(G):
    """
    Returns True if `G` is a tree.

    A tree is a connected graph with no undirected cycles.

    For directed graphs, `G` is a tree if the underlying graph is a tree.
    The underlying graph is obtained by treating each directed edge as a
    single undirected edge in a multigraph.

    Parameters
    ----------
    G : graph
        The graph to test.

    Returns
    -------
    b : bool
        A boolean that is True if `G` is a tree.

    Raises
    ------
    NetworkXPointlessConcept
        If `G` is empty.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)])
    >>> nx.is_tree(G)  # n-1 edges
    True
    >>> G.add_edge(3, 4)
    >>> nx.is_tree(G)  # n edges
    False

    Notes
    -----
    In another convention, a directed tree is known as a *polytree* and then
    *tree* corresponds to an *arborescence*.

    See Also
    --------
    is_arborescence

    """
    if len(G) == 0:
        raise nx.exception.NetworkXPointlessConcept("G has no nodes.")

    # Select the connectivity notion appropriate for the graph's directedness.
    if G.is_directed():
        connected = nx.is_weakly_connected
    else:
        connected = nx.is_connected

    # A connected graph is acyclic iff it has exactly n - 1 edges.
    return G.number_of_edges() == len(G) - 1 and connected(G)
|
pllava/lib/python3.10/site-packages/numpy/core/__init__.pyi
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NOTE: The `np.core` namespace is deliberately kept empty due to it
|
| 2 |
+
# being private (despite the lack of leading underscore)
|
pllava/lib/python3.10/site-packages/numpy/core/_add_newdocs.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pllava/lib/python3.10/site-packages/numpy/core/_asarray.pyi
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Iterable
|
| 2 |
+
from typing import Any, TypeVar, Union, overload, Literal
|
| 3 |
+
|
| 4 |
+
from numpy import ndarray
|
| 5 |
+
from numpy._typing import DTypeLike, _SupportsArrayFunc
|
| 6 |
+
|
| 7 |
+
# Array subtype preserved by the first `require` overload below.
_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])

# Flags accepted by `np.require` (short and long spellings), excluding the
# ensure-array flag, which changes the return type.
_Requirements = Literal[
    "C", "C_CONTIGUOUS", "CONTIGUOUS",
    "F", "F_CONTIGUOUS", "FORTRAN",
    "A", "ALIGNED",
    "W", "WRITEABLE",
    "O", "OWNDATA"
]
# "E"/"ENSUREARRAY" forces the result to be a base-class ndarray.
_E = Literal["E", "ENSUREARRAY"]
_RequirementsWithE = Union[_Requirements, _E]

# No dtype and no "E" flag: the input's ndarray subtype is preserved.
@overload
def require(
    a: _ArrayType,
    dtype: None = ...,
    requirements: None | _Requirements | Iterable[_Requirements] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> _ArrayType: ...
# "E" present: the result is always a plain ndarray.
@overload
def require(
    a: object,
    dtype: DTypeLike = ...,
    requirements: _E | Iterable[_RequirementsWithE] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> ndarray[Any, Any]: ...
# Fallback for arbitrary array-likes without the "E" flag.
@overload
def require(
    a: object,
    dtype: DTypeLike = ...,
    requirements: None | _Requirements | Iterable[_Requirements] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> ndarray[Any, Any]: ...
|
pllava/lib/python3.10/site-packages/numpy/core/_dtype_ctypes.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Conversion from ctypes to dtype.
|
| 3 |
+
|
| 4 |
+
In an ideal world, we could achieve this through the PEP3118 buffer protocol,
|
| 5 |
+
something like::
|
| 6 |
+
|
| 7 |
+
def dtype_from_ctypes_type(t):
|
| 8 |
+
# needed to ensure that the shape of `t` is within memoryview.format
|
| 9 |
+
class DummyStruct(ctypes.Structure):
|
| 10 |
+
_fields_ = [('a', t)]
|
| 11 |
+
|
| 12 |
+
# empty to avoid memory allocation
|
| 13 |
+
ctype_0 = (DummyStruct * 0)()
|
| 14 |
+
mv = memoryview(ctype_0)
|
| 15 |
+
|
| 16 |
+
# convert the struct, and slice back out the field
|
| 17 |
+
return _dtype_from_pep3118(mv.format)['a']
|
| 18 |
+
|
| 19 |
+
Unfortunately, this fails because:
|
| 20 |
+
|
| 21 |
+
* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
|
| 22 |
+
* PEP3118 cannot represent unions, but both numpy and ctypes can
|
| 23 |
+
* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
# We delay-import ctypes for distributions that do not include it.
|
| 27 |
+
# While this module is not used unless the user passes in ctypes
|
| 28 |
+
# members, it is eagerly imported from numpy/core/__init__.py.
|
| 29 |
+
import numpy as np
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _from_ctypes_array(t):
    # A ctypes array maps onto a subarray dtype: (element dtype, (length,)).
    element_dtype = dtype_from_ctypes_type(t._type_)
    return np.dtype((element_dtype, (t._length_,)))
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def _from_ctypes_structure(t):
    # Bitfields appear as 3-tuples in _fields_; dtypes cannot express them.
    for field in t._fields_:
        if len(field) > 2:
            raise TypeError(
                "ctypes bitfields have no dtype equivalent")

    if hasattr(t, "_pack_"):
        import ctypes
        names = []
        formats = []
        offsets = []
        current_offset = 0
        for field_name, field_type in t._fields_:
            names.append(field_name)
            formats.append(dtype_from_ctypes_type(field_type))
            # Packing can only reduce a field's natural alignment, which is
            # platform dependent for some types.
            effective_pack = min(t._pack_, ctypes.alignment(field_type))
            # Round the offset up to the next multiple of the alignment.
            remainder = current_offset % effective_pack
            if remainder:
                current_offset += effective_pack - remainder
            offsets.append(current_offset)
            current_offset += ctypes.sizeof(field_type)

        return np.dtype(dict(
            formats=formats,
            offsets=offsets,
            names=names,
            itemsize=ctypes.sizeof(t)))
    else:
        # Without _pack_, let numpy compute aligned offsets itself;
        # ctypes structs are aligned by default.
        field_dtypes = [
            (field_name, dtype_from_ctypes_type(field_type))
            for field_name, field_type in t._fields_
        ]
        return np.dtype(field_dtypes, align=True)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _from_ctypes_scalar(t):
|
| 72 |
+
"""
|
| 73 |
+
Return the dtype type with endianness included if it's the case
|
| 74 |
+
"""
|
| 75 |
+
if getattr(t, '__ctype_be__', None) is t:
|
| 76 |
+
return np.dtype('>' + t._type_)
|
| 77 |
+
elif getattr(t, '__ctype_le__', None) is t:
|
| 78 |
+
return np.dtype('<' + t._type_)
|
| 79 |
+
else:
|
| 80 |
+
return np.dtype(t._type_)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def _from_ctypes_union(t):
    import ctypes
    # All union members overlap, so every field is offset to 0.
    names = [field_name for field_name, _ in t._fields_]
    formats = [dtype_from_ctypes_type(field_type)
               for _, field_type in t._fields_]
    return np.dtype(dict(
        formats=formats,
        offsets=[0] * len(names),
        names=names,
        itemsize=ctypes.sizeof(t)))
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def dtype_from_ctypes_type(t):
    """
    Construct a dtype object from a ctypes type
    """
    import _ctypes
    # Dispatch on the kind of ctypes type, using guard clauses.
    if issubclass(t, _ctypes.Array):
        return _from_ctypes_array(t)
    if issubclass(t, _ctypes._Pointer):
        raise TypeError("ctypes pointers have no dtype equivalent")
    if issubclass(t, _ctypes.Structure):
        return _from_ctypes_structure(t)
    if issubclass(t, _ctypes.Union):
        return _from_ctypes_union(t)
    # Scalar ctypes types carry a single-character type code in _type_.
    if isinstance(getattr(t, '_type_', None), str):
        return _from_ctypes_scalar(t)
    raise NotImplementedError(
        "Unknown ctypes type {}".format(t.__name__))
|
pllava/lib/python3.10/site-packages/numpy/core/_exceptions.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Various richly-typed exceptions, that also help us deal with string formatting
|
| 3 |
+
in python where it's easier.
|
| 4 |
+
|
| 5 |
+
By putting the formatting in `__str__`, we also avoid paying the cost for
|
| 6 |
+
users who silence the exceptions.
|
| 7 |
+
"""
|
| 8 |
+
from .._utils import set_module
|
| 9 |
+
|
| 10 |
+
def _unpack_tuple(tup):
|
| 11 |
+
if len(tup) == 1:
|
| 12 |
+
return tup[0]
|
| 13 |
+
else:
|
| 14 |
+
return tup
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _display_as_base(cls):
|
| 18 |
+
"""
|
| 19 |
+
A decorator that makes an exception class look like its base.
|
| 20 |
+
|
| 21 |
+
We use this to hide subclasses that are implementation details - the user
|
| 22 |
+
should catch the base type, which is what the traceback will show them.
|
| 23 |
+
|
| 24 |
+
Classes decorated with this decorator are subject to removal without a
|
| 25 |
+
deprecation warning.
|
| 26 |
+
"""
|
| 27 |
+
assert issubclass(cls, Exception)
|
| 28 |
+
cls.__name__ = cls.__base__.__name__
|
| 29 |
+
return cls
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class UFuncTypeError(TypeError):
    """ Base class for all ufunc exceptions """

    def __init__(self, ufunc):
        # The ufunc the error relates to; subclasses add further context.
        self.ufunc = ufunc
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@_display_as_base
class _UFuncNoLoopError(UFuncTypeError):
    """ Thrown when a ufunc loop cannot be found """

    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc)
        self.dtypes = tuple(dtypes)

    def __str__(self):
        # Split the dtype tuple into input and output halves for display.
        nin = self.ufunc.nin
        inputs = _unpack_tuple(self.dtypes[:nin])
        outputs = _unpack_tuple(self.dtypes[nin:])
        return (
            "ufunc {!r} did not contain a loop with signature matching types "
            "{!r} -> {!r}"
        ).format(self.ufunc.__name__, inputs, outputs)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@_display_as_base
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    """ Thrown when a binary resolution fails """

    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc, dtypes)
        # Binary resolution always involves exactly two operand dtypes.
        assert len(self.dtypes) == 2

    def __str__(self):
        return "ufunc {!r} cannot use operands with types {!r} and {!r}".format(
            self.ufunc.__name__, *self.dtypes
        )
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@_display_as_base
class _UFuncCastingError(UFuncTypeError):
    # Shared state for input/output casting failures.
    def __init__(self, ufunc, casting, from_, to):
        super().__init__(ufunc)
        # Casting rule in effect ('no', 'safe', 'same_kind', ...) and the
        # source/destination dtypes of the failed cast.
        self.casting = casting
        self.from_ = from_
        self.to = to
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@_display_as_base
class _UFuncInputCastingError(_UFuncCastingError):
    """ Thrown when a ufunc input cannot be casted """

    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        self.in_i = i

    def __str__(self):
        # only show the number if more than one input exists
        index_part = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
        return (
            "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
            "rule {!r}"
        ).format(
            self.ufunc.__name__, index_part, self.from_, self.to, self.casting
        )
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@_display_as_base
class _UFuncOutputCastingError(_UFuncCastingError):
    """ Thrown when a ufunc output cannot be casted """

    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        self.out_i = i

    def __str__(self):
        # only show the number if more than one output exists
        index_part = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
        return (
            "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
            "rule {!r}"
        ).format(
            self.ufunc.__name__, index_part, self.from_, self.to, self.casting
        )
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@_display_as_base
class _ArrayMemoryError(MemoryError):
    """ Thrown when an array cannot be allocated"""

    def __init__(self, shape, dtype):
        self.shape = shape
        self.dtype = dtype

    @property
    def _total_size(self):
        # Total allocation size in bytes: itemsize times every dimension.
        total = self.dtype.itemsize
        for extent in self.shape:
            total *= extent
        return total

    @staticmethod
    def _size_to_string(num_bytes):
        """ Convert a number of bytes into a binary size string """
        # https://en.wikipedia.org/wiki/Binary_prefix
        LOG2_STEP = 10
        STEP = 1024
        units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']

        # Pick the largest unit that keeps the value >= 1.
        unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
        n_units = num_bytes / (1 << (unit_i * LOG2_STEP))

        # ensure we pick a unit that is correct after rounding
        if round(n_units) == STEP:
            unit_i += 1
            n_units /= STEP

        # deal with sizes so large that we don't have units for them
        if unit_i >= len(units):
            largest_i = len(units) - 1
            n_units *= 1 << ((unit_i - largest_i) * LOG2_STEP)
            unit_i = largest_i

        unit_name = units[unit_i]
        # format with a sensible number of digits
        if unit_i == 0:
            # no decimal point on bytes
            return '{:.0f} {}'.format(n_units, unit_name)
        if round(n_units) < 1000:
            # 3 significant figures, if none are dropped to the left of the .
            return '{:#.3g} {}'.format(n_units, unit_name)
        # just give all the digits otherwise
        return '{:#.0f} {}'.format(n_units, unit_name)

    def __str__(self):
        size_str = self._size_to_string(self._total_size)
        return (
            "Unable to allocate {} for an array with shape {} and data type {}"
            .format(size_str, self.shape, self.dtype)
        )
|
pllava/lib/python3.10/site-packages/numpy/core/_operand_flag_tests.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (16.9 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b2c96bec20e3c7a59f8f78b30e7fd5142d015e42f2cbd27223c3e862c53e4113
|
| 3 |
+
size 3527040
|
pllava/lib/python3.10/site-packages/numpy/core/_type_aliases.pyi
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, TypedDict
|
| 2 |
+
|
| 3 |
+
from numpy import generic, signedinteger, unsignedinteger, floating, complexfloating
|
| 4 |
+
|
| 5 |
+
# Shape of `np.core.sctypes`: maps each abstract kind name to the list of
# concrete scalar types it covers.
class _SCTypes(TypedDict):
    int: list[type[signedinteger[Any]]]
    uint: list[type[unsignedinteger[Any]]]
    float: list[type[floating[Any]]]
    complex: list[type[complexfloating[Any, Any]]]
    others: list[type]

# Mapping from type codes / chars / names to numpy scalar types.
sctypeDict: dict[int | str, type[generic]]
sctypes: _SCTypes
|
pllava/lib/python3.10/site-packages/numpy/core/_ufunc_config.py
ADDED
|
@@ -0,0 +1,466 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Functions for changing global ufunc configuration
|
| 3 |
+
|
| 4 |
+
This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj`
|
| 5 |
+
"""
|
| 6 |
+
import collections.abc
|
| 7 |
+
import contextlib
|
| 8 |
+
import contextvars
|
| 9 |
+
|
| 10 |
+
from .._utils import set_module
|
| 11 |
+
from .umath import (
|
| 12 |
+
UFUNC_BUFSIZE_DEFAULT,
|
| 13 |
+
ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT,
|
| 14 |
+
SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID,
|
| 15 |
+
)
|
| 16 |
+
from . import umath
|
| 17 |
+
|
| 18 |
+
__all__ = [
|
| 19 |
+
"seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
|
| 20 |
+
"errstate", '_no_nep50_warning'
|
| 21 |
+
]
|
| 22 |
+
|
| 23 |
+
_errdict = {"ignore": ERR_IGNORE,
|
| 24 |
+
"warn": ERR_WARN,
|
| 25 |
+
"raise": ERR_RAISE,
|
| 26 |
+
"call": ERR_CALL,
|
| 27 |
+
"print": ERR_PRINT,
|
| 28 |
+
"log": ERR_LOG}
|
| 29 |
+
|
| 30 |
+
_errdict_rev = {value: key for key, value in _errdict.items()}
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@set_module('numpy')
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
    """
    Set how floating-point errors are handled.

    Note that operations on integer scalar types (such as `int16`) are
    handled like floating point, and are affected by these settings.

    Parameters
    ----------
    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment applied to every error category at once:

        - ignore: Take no action when the exception occurs.
        - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
        - raise: Raise a `FloatingPointError`.
        - call: Call a function specified using the `seterrcall` function.
        - print: Print a warning directly to ``stdout``.
        - log: Record error in a Log object specified by `seterrcall`.

        The default is not to change the current behavior.
    divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for division by zero.
    over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for floating-point overflow.
    under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for floating-point underflow.
    invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for invalid floating-point operation.

    Returns
    -------
    old_settings : dict
        Dictionary containing the old settings; pass it back to `seterr`
        (``np.seterr(**old_settings)``) to restore them.

    See Also
    --------
    seterrcall : Set a callback function for the 'call' mode.
    geterr, geterrcall, errstate

    Notes
    -----
    The floating-point exceptions are defined in the IEEE 754 standard:
    division by zero, overflow, underflow, and invalid operation (NaN
    produced).
    """
    errobj = umath.geterrobj()
    old = geterr()

    # For each category, a missing keyword falls back to ``all`` (when
    # given) and otherwise to the previous setting, so categories not
    # mentioned are left untouched.
    divide = divide if divide is not None else (all or old['divide'])
    over = over if over is not None else (all or old['over'])
    under = under if under is not None else (all or old['under'])
    invalid = invalid if invalid is not None else (all or old['invalid'])

    # Pack the four treatment codes into a single integer mask, each at its
    # category's bit offset.
    maskvalue = (
        (_errdict[divide] << SHIFT_DIVIDEBYZERO)
        + (_errdict[over] << SHIFT_OVERFLOW)
        + (_errdict[under] << SHIFT_UNDERFLOW)
        + (_errdict[invalid] << SHIFT_INVALID)
    )

    # Slot 1 of the shared error-state object holds the mask.
    errobj[1] = maskvalue
    umath.seterrobj(errobj)
    return old
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
@set_module('numpy')
def geterr():
    """
    Get the current way of handling floating-point errors.

    Returns
    -------
    res : dict
        A dictionary with keys "divide", "over", "under", and "invalid",
        whose values are from the strings "ignore", "print", "log", "warn",
        "raise", and "call". The keys represent possible floating-point
        exceptions, and the values define how these exceptions are handled.

    See Also
    --------
    geterrcall, seterr, seterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.
    """
    maskvalue = umath.geterrobj()[1]
    mask = 7  # each category's treatment code occupies three bits
    # Extract each category's code from its bit offset and translate it
    # back to the treatment name.  Insertion order matches `seterr`.
    return {
        'divide': _errdict_rev[(maskvalue >> SHIFT_DIVIDEBYZERO) & mask],
        'over': _errdict_rev[(maskvalue >> SHIFT_OVERFLOW) & mask],
        'under': _errdict_rev[(maskvalue >> SHIFT_UNDERFLOW) & mask],
        'invalid': _errdict_rev[(maskvalue >> SHIFT_INVALID) & mask],
    }
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
@set_module('numpy')
def setbufsize(size):
    """
    Set the size of the buffer used in ufuncs.

    Parameters
    ----------
    size : int
        Size of buffer.  Must be a multiple of 16 in the range [16, 1e7].

    Returns
    -------
    int
        The previous buffer size.
    """
    # Validate before touching the shared error-state object; check order
    # (too big, too small, not a multiple) determines which error wins.
    if size > 10e6:
        raise ValueError("Buffer size, %s, is too big." % size)
    if size < 5:
        raise ValueError("Buffer size, %s, is too small." % size)
    if size % 16 != 0:
        raise ValueError("Buffer size, %s, is not a multiple of 16." % size)

    errobj = umath.geterrobj()
    previous = getbufsize()
    # Slot 0 of the error-state object holds the buffer size.
    errobj[0] = size
    umath.seterrobj(errobj)
    return previous
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
@set_module('numpy')
def getbufsize():
    """
    Return the size of the buffer used in ufuncs.

    Returns
    -------
    getbufsize : int
        Size of ufunc buffer in bytes.
    """
    # The buffer size lives in slot 0 of the shared error-state object.
    errobj = umath.geterrobj()
    return errobj[0]
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
@set_module('numpy')
def seterrcall(func):
    """
    Set the floating-point error callback function or log object.

    There are two ways to capture floating-point error messages.  With the
    error-handler set to 'call' (via `seterr`), *func* is invoked as
    ``func(err, flag)`` where ``err`` is a string describing the error type
    and ``flag`` is a status byte whose four least-significant bits encode
    ``divide + 2*over + 4*under + 8*invalid``.  With the handler set to
    'log', the message is passed to ``func.write``.

    Parameters
    ----------
    func : callable f(err, flag) or object with write method
        Function to call upon floating-point errors ('call'-mode) or
        object whose 'write' method is used to log such message
        ('log'-mode).  ``None`` clears the handler.

    Returns
    -------
    h : callable, log instance or None
        The old error handler.

    See Also
    --------
    seterr, geterr, geterrcall
    """
    # Accept None (clears the handler), any callable, or a 'log'-mode
    # object exposing a callable ``write`` method; reject everything else.
    if func is not None and not callable(func):
        write = getattr(func, 'write', None)
        if not callable(write):
            raise ValueError("Only callable can be used as callback")
    errobj = umath.geterrobj()
    old = geterrcall()
    # Slot 2 of the error-state object holds the callback/log object.
    errobj[2] = func
    umath.seterrobj(errobj)
    return old
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
@set_module('numpy')
def geterrcall():
    """
    Return the current callback function used on floating-point errors.

    When the error handling for a floating-point error (one of "divide",
    "over", "under", or "invalid") is set to 'call' or 'log', this returns
    the function that is called or the log instance that is written to, as
    previously installed with `seterrcall`.

    Returns
    -------
    errobj : callable, log instance or None
        The current error handler. If no handler was set through
        `seterrcall`, ``None`` is returned.

    See Also
    --------
    seterrcall, seterr, geterr

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.
    """
    # Slot 2 of the shared error-state object holds the callback.
    errobj = umath.geterrobj()
    return errobj[2]
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
class _unspecified:
    # Private marker type; its single instance below is used only for
    # identity comparisons.
    pass


# Sentinel distinguishing "no ``call`` argument supplied" from an explicit
# ``None`` (``None`` is itself a valid callback value for `seterrcall`).
_Unspecified = _unspecified()
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
@set_module('numpy')
class errstate(contextlib.ContextDecorator):
    """
    errstate(**kwargs)

    Context manager for floating-point error handling.

    Using an instance of `errstate` as a context manager allows statements in
    that context to execute with a known error handling behavior. Upon entering
    the context the error handling is set with `seterr` and `seterrcall`, and
    upon exiting it is reset to what it was before.

    .. versionchanged:: 1.17.0
        `errstate` is also usable as a function decorator, saving
        a level of indentation if an entire function is wrapped.
        See :py:class:`contextlib.ContextDecorator` for more information.

    Parameters
    ----------
    kwargs : {divide, over, under, invalid}
        Keyword arguments. The valid keywords are the possible floating-point
        exceptions. Each keyword should have a string value that defines the
        treatment for the particular error. Possible values are
        {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.

    See Also
    --------
    seterr, geterr, seterrcall, geterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> olderr = np.seterr(all='ignore')  # Set error handling to known state.

    >>> np.arange(3) / 0.
    array([nan, inf, inf])
    >>> with np.errstate(divide='warn'):
    ...     np.arange(3) / 0.
    array([nan, inf, inf])

    >>> np.sqrt(-1)
    nan
    >>> with np.errstate(invalid='raise'):
    ...     np.sqrt(-1)
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
    FloatingPointError: invalid value encountered in sqrt

    Outside the context the error handling behavior has not changed:

    >>> np.geterr()
    {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}

    """

    def __init__(self, *, call=_Unspecified, **kwargs):
        # ``call`` defaults to the module sentinel so that an explicit
        # ``call=None`` (clear the handler) can be told apart from
        # "not given".  Remaining kwargs are forwarded to `seterr`.
        self.call = call
        self.kwargs = kwargs

    def __enter__(self):
        # Install the requested treatments, remembering the previous state
        # so __exit__ can restore it.
        self.oldstate = seterr(**self.kwargs)
        if self.call is not _Unspecified:
            # Only touch (and later restore) the callback when one was
            # actually passed.
            self.oldcall = seterrcall(self.call)

    def __exit__(self, *exc_info):
        # Restore the pre-context state unconditionally (even on error).
        seterr(**self.oldstate)
        if self.call is not _Unspecified:
            seterrcall(self.oldcall)
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def _setdef():
    """Install the default ufunc error state.

    Default buffer size, default error mask, and no callback.
    """
    umath.seterrobj([UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None])
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
# set the default values
_setdef()


# Context-local flag consumed to suppress NEP 50 promotion warnings while
# True (set/reset by `_no_nep50_warning` below); context-local so nested
# and concurrent contexts don't interfere.
NO_NEP50_WARNING = contextvars.ContextVar("_no_nep50_warning", default=False)
|
| 451 |
+
|
| 452 |
+
@set_module('numpy')
@contextlib.contextmanager
def _no_nep50_warning():
    """
    Context manager to disable NEP 50 warnings. This context manager is
    only relevant if the NEP 50 warnings are enabled globally (which is not
    thread/context safe).

    This warning context manager itself is fully safe, however.
    """
    # ContextVar.set returns a token capturing the previous value, so
    # reset() restores whatever was in effect before — this makes the
    # manager safe to nest.
    token = NO_NEP50_WARNING.set(True)
    try:
        yield
    finally:
        # Always restore, even if the body raised.
        NO_NEP50_WARNING.reset(token)
|
pllava/lib/python3.10/site-packages/numpy/core/_umath_tests.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (42.3 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/numpy/core/defchararray.py
ADDED
|
@@ -0,0 +1,2914 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module contains a set of functions for vectorized string
|
| 3 |
+
operations and methods.
|
| 4 |
+
|
| 5 |
+
.. note::
|
| 6 |
+
The `chararray` class exists for backwards compatibility with
|
| 7 |
+
Numarray, it is not recommended for new development. Starting from numpy
|
| 8 |
+
1.4, if one needs arrays of strings, it is recommended to use arrays of
|
| 9 |
+
`dtype` `object_`, `bytes_` or `str_`, and use the free functions
|
| 10 |
+
in the `numpy.char` module for fast vectorized string operations.
|
| 11 |
+
|
| 12 |
+
Some methods will only be available if the corresponding string method is
|
| 13 |
+
available in your version of Python.
|
| 14 |
+
|
| 15 |
+
The preferred alias for `defchararray` is `numpy.char`.
|
| 16 |
+
|
| 17 |
+
"""
|
| 18 |
+
import functools
|
| 19 |
+
|
| 20 |
+
from .._utils import set_module
|
| 21 |
+
from .numerictypes import (
|
| 22 |
+
bytes_, str_, integer, int_, object_, bool_, character)
|
| 23 |
+
from .numeric import ndarray, compare_chararrays
|
| 24 |
+
from .numeric import array as narray
|
| 25 |
+
from numpy.core.multiarray import _vec_string
|
| 26 |
+
from numpy.core import overrides
|
| 27 |
+
from numpy.compat import asbytes
|
| 28 |
+
import numpy
|
| 29 |
+
|
| 30 |
+
__all__ = [
|
| 31 |
+
'equal', 'not_equal', 'greater_equal', 'less_equal',
|
| 32 |
+
'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
|
| 33 |
+
'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
|
| 34 |
+
'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
|
| 35 |
+
'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
|
| 36 |
+
'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
|
| 37 |
+
'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
|
| 38 |
+
'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
|
| 39 |
+
'array', 'asarray'
|
| 40 |
+
]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
_globalvar = 0
|
| 44 |
+
|
| 45 |
+
array_function_dispatch = functools.partial(
|
| 46 |
+
overrides.array_function_dispatch, module='numpy.char')
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _is_unicode(arr):
|
| 50 |
+
"""Returns True if arr is a string or a string array with a dtype that
|
| 51 |
+
represents a unicode string, otherwise returns False.
|
| 52 |
+
|
| 53 |
+
"""
|
| 54 |
+
if (isinstance(arr, str) or
|
| 55 |
+
issubclass(numpy.asarray(arr).dtype.type, str)):
|
| 56 |
+
return True
|
| 57 |
+
return False
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _to_bytes_or_str_array(result, output_dtype_like=None):
|
| 61 |
+
"""
|
| 62 |
+
Helper function to cast a result back into an array
|
| 63 |
+
with the appropriate dtype if an object array must be used
|
| 64 |
+
as an intermediary.
|
| 65 |
+
"""
|
| 66 |
+
ret = numpy.asarray(result.tolist())
|
| 67 |
+
dtype = getattr(output_dtype_like, 'dtype', None)
|
| 68 |
+
if dtype is not None:
|
| 69 |
+
return ret.astype(type(dtype)(_get_num_chars(ret)), copy=False)
|
| 70 |
+
return ret
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _clean_args(*args):
|
| 74 |
+
"""
|
| 75 |
+
Helper function for delegating arguments to Python string
|
| 76 |
+
functions.
|
| 77 |
+
|
| 78 |
+
Many of the Python string operations that have optional arguments
|
| 79 |
+
do not use 'None' to indicate a default value. In these cases,
|
| 80 |
+
we need to remove all None arguments, and those following them.
|
| 81 |
+
"""
|
| 82 |
+
newargs = []
|
| 83 |
+
for chk in args:
|
| 84 |
+
if chk is None:
|
| 85 |
+
break
|
| 86 |
+
newargs.append(chk)
|
| 87 |
+
return newargs
|
| 88 |
+
|
| 89 |
+
def _get_num_chars(a):
|
| 90 |
+
"""
|
| 91 |
+
Helper function that returns the number of characters per field in
|
| 92 |
+
a string or unicode array. This is to abstract out the fact that
|
| 93 |
+
for a unicode array this is itemsize / 4.
|
| 94 |
+
"""
|
| 95 |
+
if issubclass(a.dtype.type, str_):
|
| 96 |
+
return a.itemsize // 4
|
| 97 |
+
return a.itemsize
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _binary_op_dispatcher(x1, x2):
|
| 101 |
+
return (x1, x2)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 105 |
+
def equal(x1, x2):
|
| 106 |
+
"""
|
| 107 |
+
Return (x1 == x2) element-wise.
|
| 108 |
+
|
| 109 |
+
Unlike `numpy.equal`, this comparison is performed by first
|
| 110 |
+
stripping whitespace characters from the end of the string. This
|
| 111 |
+
behavior is provided for backward-compatibility with numarray.
|
| 112 |
+
|
| 113 |
+
Parameters
|
| 114 |
+
----------
|
| 115 |
+
x1, x2 : array_like of str or unicode
|
| 116 |
+
Input arrays of the same shape.
|
| 117 |
+
|
| 118 |
+
Returns
|
| 119 |
+
-------
|
| 120 |
+
out : ndarray
|
| 121 |
+
Output array of bools.
|
| 122 |
+
|
| 123 |
+
See Also
|
| 124 |
+
--------
|
| 125 |
+
not_equal, greater_equal, less_equal, greater, less
|
| 126 |
+
"""
|
| 127 |
+
return compare_chararrays(x1, x2, '==', True)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 131 |
+
def not_equal(x1, x2):
|
| 132 |
+
"""
|
| 133 |
+
Return (x1 != x2) element-wise.
|
| 134 |
+
|
| 135 |
+
Unlike `numpy.not_equal`, this comparison is performed by first
|
| 136 |
+
stripping whitespace characters from the end of the string. This
|
| 137 |
+
behavior is provided for backward-compatibility with numarray.
|
| 138 |
+
|
| 139 |
+
Parameters
|
| 140 |
+
----------
|
| 141 |
+
x1, x2 : array_like of str or unicode
|
| 142 |
+
Input arrays of the same shape.
|
| 143 |
+
|
| 144 |
+
Returns
|
| 145 |
+
-------
|
| 146 |
+
out : ndarray
|
| 147 |
+
Output array of bools.
|
| 148 |
+
|
| 149 |
+
See Also
|
| 150 |
+
--------
|
| 151 |
+
equal, greater_equal, less_equal, greater, less
|
| 152 |
+
"""
|
| 153 |
+
return compare_chararrays(x1, x2, '!=', True)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 157 |
+
def greater_equal(x1, x2):
|
| 158 |
+
"""
|
| 159 |
+
Return (x1 >= x2) element-wise.
|
| 160 |
+
|
| 161 |
+
Unlike `numpy.greater_equal`, this comparison is performed by
|
| 162 |
+
first stripping whitespace characters from the end of the string.
|
| 163 |
+
This behavior is provided for backward-compatibility with
|
| 164 |
+
numarray.
|
| 165 |
+
|
| 166 |
+
Parameters
|
| 167 |
+
----------
|
| 168 |
+
x1, x2 : array_like of str or unicode
|
| 169 |
+
Input arrays of the same shape.
|
| 170 |
+
|
| 171 |
+
Returns
|
| 172 |
+
-------
|
| 173 |
+
out : ndarray
|
| 174 |
+
Output array of bools.
|
| 175 |
+
|
| 176 |
+
See Also
|
| 177 |
+
--------
|
| 178 |
+
equal, not_equal, less_equal, greater, less
|
| 179 |
+
"""
|
| 180 |
+
return compare_chararrays(x1, x2, '>=', True)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 184 |
+
def less_equal(x1, x2):
|
| 185 |
+
"""
|
| 186 |
+
Return (x1 <= x2) element-wise.
|
| 187 |
+
|
| 188 |
+
Unlike `numpy.less_equal`, this comparison is performed by first
|
| 189 |
+
stripping whitespace characters from the end of the string. This
|
| 190 |
+
behavior is provided for backward-compatibility with numarray.
|
| 191 |
+
|
| 192 |
+
Parameters
|
| 193 |
+
----------
|
| 194 |
+
x1, x2 : array_like of str or unicode
|
| 195 |
+
Input arrays of the same shape.
|
| 196 |
+
|
| 197 |
+
Returns
|
| 198 |
+
-------
|
| 199 |
+
out : ndarray
|
| 200 |
+
Output array of bools.
|
| 201 |
+
|
| 202 |
+
See Also
|
| 203 |
+
--------
|
| 204 |
+
equal, not_equal, greater_equal, greater, less
|
| 205 |
+
"""
|
| 206 |
+
return compare_chararrays(x1, x2, '<=', True)
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 210 |
+
def greater(x1, x2):
|
| 211 |
+
"""
|
| 212 |
+
Return (x1 > x2) element-wise.
|
| 213 |
+
|
| 214 |
+
Unlike `numpy.greater`, this comparison is performed by first
|
| 215 |
+
stripping whitespace characters from the end of the string. This
|
| 216 |
+
behavior is provided for backward-compatibility with numarray.
|
| 217 |
+
|
| 218 |
+
Parameters
|
| 219 |
+
----------
|
| 220 |
+
x1, x2 : array_like of str or unicode
|
| 221 |
+
Input arrays of the same shape.
|
| 222 |
+
|
| 223 |
+
Returns
|
| 224 |
+
-------
|
| 225 |
+
out : ndarray
|
| 226 |
+
Output array of bools.
|
| 227 |
+
|
| 228 |
+
See Also
|
| 229 |
+
--------
|
| 230 |
+
equal, not_equal, greater_equal, less_equal, less
|
| 231 |
+
"""
|
| 232 |
+
return compare_chararrays(x1, x2, '>', True)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 236 |
+
def less(x1, x2):
|
| 237 |
+
"""
|
| 238 |
+
Return (x1 < x2) element-wise.
|
| 239 |
+
|
| 240 |
+
Unlike `numpy.greater`, this comparison is performed by first
|
| 241 |
+
stripping whitespace characters from the end of the string. This
|
| 242 |
+
behavior is provided for backward-compatibility with numarray.
|
| 243 |
+
|
| 244 |
+
Parameters
|
| 245 |
+
----------
|
| 246 |
+
x1, x2 : array_like of str or unicode
|
| 247 |
+
Input arrays of the same shape.
|
| 248 |
+
|
| 249 |
+
Returns
|
| 250 |
+
-------
|
| 251 |
+
out : ndarray
|
| 252 |
+
Output array of bools.
|
| 253 |
+
|
| 254 |
+
See Also
|
| 255 |
+
--------
|
| 256 |
+
equal, not_equal, greater_equal, less_equal, greater
|
| 257 |
+
"""
|
| 258 |
+
return compare_chararrays(x1, x2, '<', True)
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def _unary_op_dispatcher(a):
|
| 262 |
+
return (a,)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
@array_function_dispatch(_unary_op_dispatcher)
|
| 266 |
+
def str_len(a):
|
| 267 |
+
"""
|
| 268 |
+
Return len(a) element-wise.
|
| 269 |
+
|
| 270 |
+
Parameters
|
| 271 |
+
----------
|
| 272 |
+
a : array_like of str or unicode
|
| 273 |
+
|
| 274 |
+
Returns
|
| 275 |
+
-------
|
| 276 |
+
out : ndarray
|
| 277 |
+
Output array of integers
|
| 278 |
+
|
| 279 |
+
See Also
|
| 280 |
+
--------
|
| 281 |
+
len
|
| 282 |
+
|
| 283 |
+
Examples
|
| 284 |
+
--------
|
| 285 |
+
>>> a = np.array(['Grace Hopper Conference', 'Open Source Day'])
|
| 286 |
+
>>> np.char.str_len(a)
|
| 287 |
+
array([23, 15])
|
| 288 |
+
>>> a = np.array([u'\u0420', u'\u043e'])
|
| 289 |
+
>>> np.char.str_len(a)
|
| 290 |
+
array([1, 1])
|
| 291 |
+
>>> a = np.array([['hello', 'world'], [u'\u0420', u'\u043e']])
|
| 292 |
+
>>> np.char.str_len(a)
|
| 293 |
+
array([[5, 5], [1, 1]])
|
| 294 |
+
"""
|
| 295 |
+
# Note: __len__, etc. currently return ints, which are not C-integers.
|
| 296 |
+
# Generally intp would be expected for lengths, although int is sufficient
|
| 297 |
+
# due to the dtype itemsize limitation.
|
| 298 |
+
return _vec_string(a, int_, '__len__')
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 302 |
+
def add(x1, x2):
|
| 303 |
+
"""
|
| 304 |
+
Return element-wise string concatenation for two arrays of str or unicode.
|
| 305 |
+
|
| 306 |
+
Arrays `x1` and `x2` must have the same shape.
|
| 307 |
+
|
| 308 |
+
Parameters
|
| 309 |
+
----------
|
| 310 |
+
x1 : array_like of str or unicode
|
| 311 |
+
Input array.
|
| 312 |
+
x2 : array_like of str or unicode
|
| 313 |
+
Input array.
|
| 314 |
+
|
| 315 |
+
Returns
|
| 316 |
+
-------
|
| 317 |
+
add : ndarray
|
| 318 |
+
Output array of `bytes_` or `str_`, depending on input types
|
| 319 |
+
of the same shape as `x1` and `x2`.
|
| 320 |
+
|
| 321 |
+
"""
|
| 322 |
+
arr1 = numpy.asarray(x1)
|
| 323 |
+
arr2 = numpy.asarray(x2)
|
| 324 |
+
out_size = _get_num_chars(arr1) + _get_num_chars(arr2)
|
| 325 |
+
|
| 326 |
+
if type(arr1.dtype) != type(arr2.dtype):
|
| 327 |
+
# Enforce this for now. The solution to it will be implement add
|
| 328 |
+
# as a ufunc. It never worked right on Python 3: bytes + unicode gave
|
| 329 |
+
# nonsense unicode + bytes errored, and unicode + object used the
|
| 330 |
+
# object dtype itemsize as num chars (worked on short strings).
|
| 331 |
+
# bytes + void worked but promoting void->bytes is dubious also.
|
| 332 |
+
raise TypeError(
|
| 333 |
+
"np.char.add() requires both arrays of the same dtype kind, but "
|
| 334 |
+
f"got dtypes: '{arr1.dtype}' and '{arr2.dtype}' (the few cases "
|
| 335 |
+
"where this used to work often lead to incorrect results).")
|
| 336 |
+
|
| 337 |
+
return _vec_string(arr1, type(arr1.dtype)(out_size), '__add__', (arr2,))
|
| 338 |
+
|
| 339 |
+
def _multiply_dispatcher(a, i):
|
| 340 |
+
return (a,)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
@array_function_dispatch(_multiply_dispatcher)
|
| 344 |
+
def multiply(a, i):
|
| 345 |
+
"""
|
| 346 |
+
Return (a * i), that is string multiple concatenation,
|
| 347 |
+
element-wise.
|
| 348 |
+
|
| 349 |
+
Values in `i` of less than 0 are treated as 0 (which yields an
|
| 350 |
+
empty string).
|
| 351 |
+
|
| 352 |
+
Parameters
|
| 353 |
+
----------
|
| 354 |
+
a : array_like of str or unicode
|
| 355 |
+
|
| 356 |
+
i : array_like of ints
|
| 357 |
+
|
| 358 |
+
Returns
|
| 359 |
+
-------
|
| 360 |
+
out : ndarray
|
| 361 |
+
Output array of str or unicode, depending on input types
|
| 362 |
+
|
| 363 |
+
Examples
|
| 364 |
+
--------
|
| 365 |
+
>>> a = np.array(["a", "b", "c"])
|
| 366 |
+
>>> np.char.multiply(x, 3)
|
| 367 |
+
array(['aaa', 'bbb', 'ccc'], dtype='<U3')
|
| 368 |
+
>>> i = np.array([1, 2, 3])
|
| 369 |
+
>>> np.char.multiply(a, i)
|
| 370 |
+
array(['a', 'bb', 'ccc'], dtype='<U3')
|
| 371 |
+
>>> np.char.multiply(np.array(['a']), i)
|
| 372 |
+
array(['a', 'aa', 'aaa'], dtype='<U3')
|
| 373 |
+
>>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
|
| 374 |
+
>>> np.char.multiply(a, 3)
|
| 375 |
+
array([['aaa', 'bbb', 'ccc'],
|
| 376 |
+
['ddd', 'eee', 'fff']], dtype='<U3')
|
| 377 |
+
>>> np.char.multiply(a, i)
|
| 378 |
+
array([['a', 'bb', 'ccc'],
|
| 379 |
+
['d', 'ee', 'fff']], dtype='<U3')
|
| 380 |
+
"""
|
| 381 |
+
a_arr = numpy.asarray(a)
|
| 382 |
+
i_arr = numpy.asarray(i)
|
| 383 |
+
if not issubclass(i_arr.dtype.type, integer):
|
| 384 |
+
raise ValueError("Can only multiply by integers")
|
| 385 |
+
out_size = _get_num_chars(a_arr) * max(int(i_arr.max()), 0)
|
| 386 |
+
return _vec_string(
|
| 387 |
+
a_arr, type(a_arr.dtype)(out_size), '__mul__', (i_arr,))
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def _mod_dispatcher(a, values):
|
| 391 |
+
return (a, values)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
@array_function_dispatch(_mod_dispatcher)
|
| 395 |
+
def mod(a, values):
|
| 396 |
+
"""
|
| 397 |
+
Return (a % i), that is pre-Python 2.6 string formatting
|
| 398 |
+
(interpolation), element-wise for a pair of array_likes of str
|
| 399 |
+
or unicode.
|
| 400 |
+
|
| 401 |
+
Parameters
|
| 402 |
+
----------
|
| 403 |
+
a : array_like of str or unicode
|
| 404 |
+
|
| 405 |
+
values : array_like of values
|
| 406 |
+
These values will be element-wise interpolated into the string.
|
| 407 |
+
|
| 408 |
+
Returns
|
| 409 |
+
-------
|
| 410 |
+
out : ndarray
|
| 411 |
+
Output array of str or unicode, depending on input types
|
| 412 |
+
|
| 413 |
+
See Also
|
| 414 |
+
--------
|
| 415 |
+
str.__mod__
|
| 416 |
+
|
| 417 |
+
"""
|
| 418 |
+
return _to_bytes_or_str_array(
|
| 419 |
+
_vec_string(a, object_, '__mod__', (values,)), a)
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
@array_function_dispatch(_unary_op_dispatcher)
|
| 423 |
+
def capitalize(a):
|
| 424 |
+
"""
|
| 425 |
+
Return a copy of `a` with only the first character of each element
|
| 426 |
+
capitalized.
|
| 427 |
+
|
| 428 |
+
Calls `str.capitalize` element-wise.
|
| 429 |
+
|
| 430 |
+
For 8-bit strings, this method is locale-dependent.
|
| 431 |
+
|
| 432 |
+
Parameters
|
| 433 |
+
----------
|
| 434 |
+
a : array_like of str or unicode
|
| 435 |
+
Input array of strings to capitalize.
|
| 436 |
+
|
| 437 |
+
Returns
|
| 438 |
+
-------
|
| 439 |
+
out : ndarray
|
| 440 |
+
Output array of str or unicode, depending on input
|
| 441 |
+
types
|
| 442 |
+
|
| 443 |
+
See Also
|
| 444 |
+
--------
|
| 445 |
+
str.capitalize
|
| 446 |
+
|
| 447 |
+
Examples
|
| 448 |
+
--------
|
| 449 |
+
>>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
|
| 450 |
+
array(['a1b2', '1b2a', 'b2a1', '2a1b'],
|
| 451 |
+
dtype='|S4')
|
| 452 |
+
>>> np.char.capitalize(c)
|
| 453 |
+
array(['A1b2', '1b2a', 'B2a1', '2a1b'],
|
| 454 |
+
dtype='|S4')
|
| 455 |
+
|
| 456 |
+
"""
|
| 457 |
+
a_arr = numpy.asarray(a)
|
| 458 |
+
return _vec_string(a_arr, a_arr.dtype, 'capitalize')
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
def _center_dispatcher(a, width, fillchar=None):
|
| 462 |
+
return (a,)
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
@array_function_dispatch(_center_dispatcher)
|
| 466 |
+
def center(a, width, fillchar=' '):
|
| 467 |
+
"""
|
| 468 |
+
Return a copy of `a` with its elements centered in a string of
|
| 469 |
+
length `width`.
|
| 470 |
+
|
| 471 |
+
Calls `str.center` element-wise.
|
| 472 |
+
|
| 473 |
+
Parameters
|
| 474 |
+
----------
|
| 475 |
+
a : array_like of str or unicode
|
| 476 |
+
|
| 477 |
+
width : int
|
| 478 |
+
The length of the resulting strings
|
| 479 |
+
fillchar : str or unicode, optional
|
| 480 |
+
The padding character to use (default is space).
|
| 481 |
+
|
| 482 |
+
Returns
|
| 483 |
+
-------
|
| 484 |
+
out : ndarray
|
| 485 |
+
Output array of str or unicode, depending on input
|
| 486 |
+
types
|
| 487 |
+
|
| 488 |
+
See Also
|
| 489 |
+
--------
|
| 490 |
+
str.center
|
| 491 |
+
|
| 492 |
+
Notes
|
| 493 |
+
-----
|
| 494 |
+
This function is intended to work with arrays of strings. The
|
| 495 |
+
fill character is not applied to numeric types.
|
| 496 |
+
|
| 497 |
+
Examples
|
| 498 |
+
--------
|
| 499 |
+
>>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c
|
| 500 |
+
array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='<U4')
|
| 501 |
+
>>> np.char.center(c, width=9)
|
| 502 |
+
array([' a1b2 ', ' 1b2a ', ' b2a1 ', ' 2a1b '], dtype='<U9')
|
| 503 |
+
>>> np.char.center(c, width=9, fillchar='*')
|
| 504 |
+
array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='<U9')
|
| 505 |
+
>>> np.char.center(c, width=1)
|
| 506 |
+
array(['a', '1', 'b', '2'], dtype='<U1')
|
| 507 |
+
|
| 508 |
+
"""
|
| 509 |
+
a_arr = numpy.asarray(a)
|
| 510 |
+
width_arr = numpy.asarray(width)
|
| 511 |
+
size = int(numpy.max(width_arr.flat))
|
| 512 |
+
if numpy.issubdtype(a_arr.dtype, numpy.bytes_):
|
| 513 |
+
fillchar = asbytes(fillchar)
|
| 514 |
+
return _vec_string(
|
| 515 |
+
a_arr, type(a_arr.dtype)(size), 'center', (width_arr, fillchar))
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
def _count_dispatcher(a, sub, start=None, end=None):
|
| 519 |
+
return (a,)
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
@array_function_dispatch(_count_dispatcher)
|
| 523 |
+
def count(a, sub, start=0, end=None):
|
| 524 |
+
"""
|
| 525 |
+
Returns an array with the number of non-overlapping occurrences of
|
| 526 |
+
substring `sub` in the range [`start`, `end`].
|
| 527 |
+
|
| 528 |
+
Calls `str.count` element-wise.
|
| 529 |
+
|
| 530 |
+
Parameters
|
| 531 |
+
----------
|
| 532 |
+
a : array_like of str or unicode
|
| 533 |
+
|
| 534 |
+
sub : str or unicode
|
| 535 |
+
The substring to search for.
|
| 536 |
+
|
| 537 |
+
start, end : int, optional
|
| 538 |
+
Optional arguments `start` and `end` are interpreted as slice
|
| 539 |
+
notation to specify the range in which to count.
|
| 540 |
+
|
| 541 |
+
Returns
|
| 542 |
+
-------
|
| 543 |
+
out : ndarray
|
| 544 |
+
Output array of ints.
|
| 545 |
+
|
| 546 |
+
See Also
|
| 547 |
+
--------
|
| 548 |
+
str.count
|
| 549 |
+
|
| 550 |
+
Examples
|
| 551 |
+
--------
|
| 552 |
+
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
|
| 553 |
+
>>> c
|
| 554 |
+
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
|
| 555 |
+
>>> np.char.count(c, 'A')
|
| 556 |
+
array([3, 1, 1])
|
| 557 |
+
>>> np.char.count(c, 'aA')
|
| 558 |
+
array([3, 1, 0])
|
| 559 |
+
>>> np.char.count(c, 'A', start=1, end=4)
|
| 560 |
+
array([2, 1, 1])
|
| 561 |
+
>>> np.char.count(c, 'A', start=1, end=3)
|
| 562 |
+
array([1, 0, 0])
|
| 563 |
+
|
| 564 |
+
"""
|
| 565 |
+
return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
def _code_dispatcher(a, encoding=None, errors=None):
|
| 569 |
+
return (a,)
|
| 570 |
+
|
| 571 |
+
|
| 572 |
+
@array_function_dispatch(_code_dispatcher)
|
| 573 |
+
def decode(a, encoding=None, errors=None):
|
| 574 |
+
r"""
|
| 575 |
+
Calls ``bytes.decode`` element-wise.
|
| 576 |
+
|
| 577 |
+
The set of available codecs comes from the Python standard library,
|
| 578 |
+
and may be extended at runtime. For more information, see the
|
| 579 |
+
:mod:`codecs` module.
|
| 580 |
+
|
| 581 |
+
Parameters
|
| 582 |
+
----------
|
| 583 |
+
a : array_like of str or unicode
|
| 584 |
+
|
| 585 |
+
encoding : str, optional
|
| 586 |
+
The name of an encoding
|
| 587 |
+
|
| 588 |
+
errors : str, optional
|
| 589 |
+
Specifies how to handle encoding errors
|
| 590 |
+
|
| 591 |
+
Returns
|
| 592 |
+
-------
|
| 593 |
+
out : ndarray
|
| 594 |
+
|
| 595 |
+
See Also
|
| 596 |
+
--------
|
| 597 |
+
:py:meth:`bytes.decode`
|
| 598 |
+
|
| 599 |
+
Notes
|
| 600 |
+
-----
|
| 601 |
+
The type of the result will depend on the encoding specified.
|
| 602 |
+
|
| 603 |
+
Examples
|
| 604 |
+
--------
|
| 605 |
+
>>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
|
| 606 |
+
... b'\x81\x82\xc2\xc1\xc2\x82\x81'])
|
| 607 |
+
>>> c
|
| 608 |
+
array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
|
| 609 |
+
... b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7')
|
| 610 |
+
>>> np.char.decode(c, encoding='cp037')
|
| 611 |
+
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
|
| 612 |
+
|
| 613 |
+
"""
|
| 614 |
+
return _to_bytes_or_str_array(
|
| 615 |
+
_vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
@array_function_dispatch(_code_dispatcher)
|
| 619 |
+
def encode(a, encoding=None, errors=None):
|
| 620 |
+
"""
|
| 621 |
+
Calls `str.encode` element-wise.
|
| 622 |
+
|
| 623 |
+
The set of available codecs comes from the Python standard library,
|
| 624 |
+
and may be extended at runtime. For more information, see the codecs
|
| 625 |
+
module.
|
| 626 |
+
|
| 627 |
+
Parameters
|
| 628 |
+
----------
|
| 629 |
+
a : array_like of str or unicode
|
| 630 |
+
|
| 631 |
+
encoding : str, optional
|
| 632 |
+
The name of an encoding
|
| 633 |
+
|
| 634 |
+
errors : str, optional
|
| 635 |
+
Specifies how to handle encoding errors
|
| 636 |
+
|
| 637 |
+
Returns
|
| 638 |
+
-------
|
| 639 |
+
out : ndarray
|
| 640 |
+
|
| 641 |
+
See Also
|
| 642 |
+
--------
|
| 643 |
+
str.encode
|
| 644 |
+
|
| 645 |
+
Notes
|
| 646 |
+
-----
|
| 647 |
+
The type of the result will depend on the encoding specified.
|
| 648 |
+
|
| 649 |
+
"""
|
| 650 |
+
return _to_bytes_or_str_array(
|
| 651 |
+
_vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
def _endswith_dispatcher(a, suffix, start=None, end=None):
|
| 655 |
+
return (a,)
|
| 656 |
+
|
| 657 |
+
|
| 658 |
+
@array_function_dispatch(_endswith_dispatcher)
|
| 659 |
+
def endswith(a, suffix, start=0, end=None):
|
| 660 |
+
"""
|
| 661 |
+
Returns a boolean array which is `True` where the string element
|
| 662 |
+
in `a` ends with `suffix`, otherwise `False`.
|
| 663 |
+
|
| 664 |
+
Calls `str.endswith` element-wise.
|
| 665 |
+
|
| 666 |
+
Parameters
|
| 667 |
+
----------
|
| 668 |
+
a : array_like of str or unicode
|
| 669 |
+
|
| 670 |
+
suffix : str
|
| 671 |
+
|
| 672 |
+
start, end : int, optional
|
| 673 |
+
With optional `start`, test beginning at that position. With
|
| 674 |
+
optional `end`, stop comparing at that position.
|
| 675 |
+
|
| 676 |
+
Returns
|
| 677 |
+
-------
|
| 678 |
+
out : ndarray
|
| 679 |
+
Outputs an array of bools.
|
| 680 |
+
|
| 681 |
+
See Also
|
| 682 |
+
--------
|
| 683 |
+
str.endswith
|
| 684 |
+
|
| 685 |
+
Examples
|
| 686 |
+
--------
|
| 687 |
+
>>> s = np.array(['foo', 'bar'])
|
| 688 |
+
>>> s[0] = 'foo'
|
| 689 |
+
>>> s[1] = 'bar'
|
| 690 |
+
>>> s
|
| 691 |
+
array(['foo', 'bar'], dtype='<U3')
|
| 692 |
+
>>> np.char.endswith(s, 'ar')
|
| 693 |
+
array([False, True])
|
| 694 |
+
>>> np.char.endswith(s, 'a', start=1, end=2)
|
| 695 |
+
array([False, True])
|
| 696 |
+
|
| 697 |
+
"""
|
| 698 |
+
return _vec_string(
|
| 699 |
+
a, bool_, 'endswith', [suffix, start] + _clean_args(end))
|
| 700 |
+
|
| 701 |
+
|
| 702 |
+
def _expandtabs_dispatcher(a, tabsize=None):
|
| 703 |
+
return (a,)
|
| 704 |
+
|
| 705 |
+
|
| 706 |
+
@array_function_dispatch(_expandtabs_dispatcher)
|
| 707 |
+
def expandtabs(a, tabsize=8):
|
| 708 |
+
"""
|
| 709 |
+
Return a copy of each string element where all tab characters are
|
| 710 |
+
replaced by one or more spaces.
|
| 711 |
+
|
| 712 |
+
Calls `str.expandtabs` element-wise.
|
| 713 |
+
|
| 714 |
+
Return a copy of each string element where all tab characters are
|
| 715 |
+
replaced by one or more spaces, depending on the current column
|
| 716 |
+
and the given `tabsize`. The column number is reset to zero after
|
| 717 |
+
each newline occurring in the string. This doesn't understand other
|
| 718 |
+
non-printing characters or escape sequences.
|
| 719 |
+
|
| 720 |
+
Parameters
|
| 721 |
+
----------
|
| 722 |
+
a : array_like of str or unicode
|
| 723 |
+
Input array
|
| 724 |
+
tabsize : int, optional
|
| 725 |
+
Replace tabs with `tabsize` number of spaces. If not given defaults
|
| 726 |
+
to 8 spaces.
|
| 727 |
+
|
| 728 |
+
Returns
|
| 729 |
+
-------
|
| 730 |
+
out : ndarray
|
| 731 |
+
Output array of str or unicode, depending on input type
|
| 732 |
+
|
| 733 |
+
See Also
|
| 734 |
+
--------
|
| 735 |
+
str.expandtabs
|
| 736 |
+
|
| 737 |
+
"""
|
| 738 |
+
return _to_bytes_or_str_array(
|
| 739 |
+
_vec_string(a, object_, 'expandtabs', (tabsize,)), a)
|
| 740 |
+
|
| 741 |
+
|
| 742 |
+
@array_function_dispatch(_count_dispatcher)
def find(a, sub, start=0, end=None):
    """
    For each element, return the lowest index in the string where
    substring `sub` is found, such that `sub` is contained in the
    range [`start`, `end`].

    Calls `str.find` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    sub : str or unicode

    start, end : int, optional
        Optional arguments `start` and `end` are interpreted as in
        slice notation.

    Returns
    -------
    out : ndarray or int
        Output array of ints.  Returns -1 if `sub` is not found.

    See Also
    --------
    str.find

    Examples
    --------
    >>> a = np.array(["NumPy is a Python library"])
    >>> np.char.find(a, "Python", start=0, end=None)
    array([11])

    """
    args = [sub, start] + _clean_args(end)
    return _vec_string(a, int_, 'find', args)
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
@array_function_dispatch(_count_dispatcher)
def index(a, sub, start=0, end=None):
    """
    Like `find`, but raises `ValueError` when the substring is not found.

    Calls `str.index` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    sub : str or unicode

    start, end : int, optional

    Returns
    -------
    out : ndarray
        Output array of ints.

    Raises
    ------
    ValueError
        If `sub` is not found in an element of `a` (this is what
        distinguishes `index` from `find`, which returns -1 instead).

    See Also
    --------
    find, str.index

    Examples
    --------
    >>> a = np.array(["Computer Science"])
    >>> np.char.index(a, "Science", start=0, end=None)
    array([9])

    """
    return _vec_string(
        a, int_, 'index', [sub, start] + _clean_args(end))
|
| 817 |
+
|
| 818 |
+
|
| 819 |
+
@array_function_dispatch(_unary_op_dispatcher)
def isalnum(a):
    """
    Returns true for each element if all characters in the string are
    alphanumeric and there is at least one character, false otherwise.

    Calls `str.isalnum` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.isalnum
    """
    return _vec_string(a, bool_, 'isalnum')
|
| 843 |
+
|
| 844 |
+
|
| 845 |
+
@array_function_dispatch(_unary_op_dispatcher)
def isalpha(a):
    """
    Returns true for each element if all characters in the string are
    alphabetic and there is at least one character, false otherwise.

    Calls `str.isalpha` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.isalpha
    """
    result = _vec_string(a, bool_, 'isalpha')
    return result
|
| 869 |
+
|
| 870 |
+
|
| 871 |
+
@array_function_dispatch(_unary_op_dispatcher)
def isdigit(a):
    """
    Returns true for each element if all characters in the string are
    digits and there is at least one character, false otherwise.

    Calls `str.isdigit` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.isdigit

    Examples
    --------
    >>> a = np.array(['a', 'b', '0'])
    >>> np.char.isdigit(a)
    array([False, False, True])
    >>> a = np.array([['a', 'b', '0'], ['c', '1', '2']])
    >>> np.char.isdigit(a)
    array([[False, False, True], [False, True, True]])
    """
    result = _vec_string(a, bool_, 'isdigit')
    return result
|
| 904 |
+
|
| 905 |
+
|
| 906 |
+
@array_function_dispatch(_unary_op_dispatcher)
def islower(a):
    """
    Returns true for each element if all cased characters in the
    string are lowercase and there is at least one cased character,
    false otherwise.

    Calls `str.islower` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.islower
    """
    result = _vec_string(a, bool_, 'islower')
    return result
|
| 931 |
+
|
| 932 |
+
|
| 933 |
+
@array_function_dispatch(_unary_op_dispatcher)
def isspace(a):
    """
    Returns true for each element if there are only whitespace
    characters in the string and there is at least one character,
    false otherwise.

    Calls `str.isspace` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.isspace
    """
    result = _vec_string(a, bool_, 'isspace')
    return result
|
| 958 |
+
|
| 959 |
+
|
| 960 |
+
@array_function_dispatch(_unary_op_dispatcher)
def istitle(a):
    """
    Returns true for each element if the element is a titlecased
    string and there is at least one character, false otherwise.

    Call `str.istitle` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.istitle
    """
    result = _vec_string(a, bool_, 'istitle')
    return result
|
| 984 |
+
|
| 985 |
+
|
| 986 |
+
@array_function_dispatch(_unary_op_dispatcher)
def isupper(a):
    """
    Return true for each element if all cased characters in the
    string are uppercase and there is at least one character, false
    otherwise.

    Call `str.isupper` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.isupper

    Examples
    --------
    >>> s = "GHC"
    >>> np.char.isupper(s)
    array(True)
    >>> a = np.array(["hello", "HELLO", "Hello"])
    >>> np.char.isupper(a)
    array([False, True, False])

    """
    result = _vec_string(a, bool_, 'isupper')
    return result
|
| 1021 |
+
|
| 1022 |
+
|
| 1023 |
+
def _join_dispatcher(sep, seq):
|
| 1024 |
+
return (sep, seq)
|
| 1025 |
+
|
| 1026 |
+
|
| 1027 |
+
@array_function_dispatch(_join_dispatcher)
def join(sep, seq):
    """
    Return a string which is the concatenation of the strings in the
    sequence `seq`.

    Calls `str.join` element-wise.

    Parameters
    ----------
    sep : array_like of str or unicode
    seq : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input types

    See Also
    --------
    str.join

    Examples
    --------
    >>> np.char.join('-', 'osd')
    array('o-s-d', dtype='<U5')

    >>> np.char.join(['-', '.'], ['ghc', 'osd'])
    array(['g-h-c', 'o.s.d'], dtype='<U5')

    """
    joined = _vec_string(sep, object_, 'join', (seq,))
    return _to_bytes_or_str_array(joined, seq)
|
| 1060 |
+
|
| 1061 |
+
|
| 1062 |
+
|
| 1063 |
+
def _just_dispatcher(a, width, fillchar=None):
|
| 1064 |
+
return (a,)
|
| 1065 |
+
|
| 1066 |
+
|
| 1067 |
+
@array_function_dispatch(_just_dispatcher)
def ljust(a, width, fillchar=' '):
    """
    Return an array with the elements of `a` left-justified in a
    string of length `width`.

    Calls `str.ljust` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    width : int
        The length of the resulting strings
    fillchar : str or unicode, optional
        The character to use for padding

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.ljust

    """
    arr = numpy.asarray(a)
    widths = numpy.asarray(width)
    # Output itemsize must accommodate the largest requested width.
    out_size = int(numpy.max(widths.flat))
    if numpy.issubdtype(arr.dtype, numpy.bytes_):
        fillchar = asbytes(fillchar)
    # Build an output dtype of the same kind as the input, sized out_size.
    out_dtype = type(arr.dtype)(out_size)
    return _vec_string(arr, out_dtype, 'ljust', (widths, fillchar))
|
| 1101 |
+
|
| 1102 |
+
|
| 1103 |
+
@array_function_dispatch(_unary_op_dispatcher)
def lower(a):
    """
    Return an array with the elements converted to lowercase.

    Call `str.lower` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.lower

    Examples
    --------
    >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
    >>> np.char.lower(c)
    array(['a1b c', '1bca', 'bca1'], dtype='<U5')

    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'lower')
|
| 1136 |
+
|
| 1137 |
+
|
| 1138 |
+
def _strip_dispatcher(a, chars=None):
|
| 1139 |
+
return (a,)
|
| 1140 |
+
|
| 1141 |
+
|
| 1142 |
+
@array_function_dispatch(_strip_dispatcher)
def lstrip(a, chars=None):
    """
    For each element in `a`, return a copy with the leading characters
    removed.

    Calls `str.lstrip` element-wise.

    Parameters
    ----------
    a : array-like, {str, unicode}
        Input array.

    chars : {str, unicode}, optional
        The `chars` argument is a string specifying the set of
        characters to be removed. If omitted or None, the `chars`
        argument defaults to removing whitespace. The `chars` argument
        is not a prefix; rather, all combinations of its values are
        stripped.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.lstrip

    Examples
    --------
    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> c
    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')

    'a' is not stripped from c[1] because it is preceded by whitespace.

    >>> np.char.lstrip(c, 'a')
    array(['AaAaA', '  aA  ', 'bBABba'], dtype='<U7')


    >>> np.char.lstrip(c, 'A') # leaves c unchanged
    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
    ... # XXX: is this a regression? This used to return True
    ... # np.char.lstrip(c,'') does not modify c at all.
    False
    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
    True

    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'lstrip', (chars,))
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
def _partition_dispatcher(a, sep):
|
| 1198 |
+
return (a,)
|
| 1199 |
+
|
| 1200 |
+
|
| 1201 |
+
@array_function_dispatch(_partition_dispatcher)
def partition(a, sep):
    """
    Partition each element in `a` around `sep`.

    Calls `str.partition` element-wise.

    For each element in `a`, split the element at the first
    occurrence of `sep`, and return 3 strings containing the part
    before the separator, the separator itself, and the part after
    the separator. If the separator is not found, return 3 strings
    containing the string itself, followed by two empty strings.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array
    sep : {str, unicode}
        Separator to split each string element in `a`.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type.
        The output array will have an extra dimension with 3
        elements per input element.

    See Also
    --------
    str.partition

    """
    parts = _vec_string(a, object_, 'partition', (sep,))
    return _to_bytes_or_str_array(parts, a)
|
| 1235 |
+
|
| 1236 |
+
|
| 1237 |
+
def _replace_dispatcher(a, old, new, count=None):
|
| 1238 |
+
return (a,)
|
| 1239 |
+
|
| 1240 |
+
|
| 1241 |
+
@array_function_dispatch(_replace_dispatcher)
def replace(a, old, new, count=None):
    """
    For each element in `a`, return a copy of the string with all
    occurrences of substring `old` replaced by `new`.

    Calls `str.replace` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    old, new : str or unicode

    count : int, optional
        If the optional argument `count` is given, only the first
        `count` occurrences are replaced.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.replace

    Examples
    --------
    >>> a = np.array(["That is a mango", "Monkeys eat mangos"])
    >>> np.char.replace(a, 'mango', 'banana')
    array(['That is a banana', 'Monkeys eat bananas'], dtype='<U19')

    >>> a = np.array(["The dish is fresh", "This is it"])
    >>> np.char.replace(a, 'is', 'was')
    array(['The dwash was fresh', 'Thwas was it'], dtype='<U19')
    """
    args = [old, new] + _clean_args(count)
    replaced = _vec_string(a, object_, 'replace', args)
    return _to_bytes_or_str_array(replaced, a)
|
| 1280 |
+
|
| 1281 |
+
|
| 1282 |
+
@array_function_dispatch(_count_dispatcher)
def rfind(a, sub, start=0, end=None):
    """
    For each element in `a`, return the highest index in the string
    where substring `sub` is found, such that `sub` is contained
    within [`start`, `end`].

    Calls `str.rfind` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    sub : str or unicode

    start, end : int, optional
        Optional arguments `start` and `end` are interpreted as in
        slice notation.

    Returns
    -------
    out : ndarray
        Output array of ints. Return -1 on failure.

    See Also
    --------
    str.rfind

    """
    args = [sub, start] + _clean_args(end)
    return _vec_string(a, int_, 'rfind', args)
|
| 1313 |
+
|
| 1314 |
+
|
| 1315 |
+
@array_function_dispatch(_count_dispatcher)
def rindex(a, sub, start=0, end=None):
    """
    Like `rfind`, but raises `ValueError` when the substring `sub` is
    not found.

    Calls `str.rindex` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    sub : str or unicode

    start, end : int, optional

    Returns
    -------
    out : ndarray
        Output array of ints.

    See Also
    --------
    rfind, str.rindex

    """
    args = [sub, start] + _clean_args(end)
    return _vec_string(a, int_, 'rindex', args)
|
| 1343 |
+
|
| 1344 |
+
|
| 1345 |
+
@array_function_dispatch(_just_dispatcher)
def rjust(a, width, fillchar=' '):
    """
    Return an array with the elements of `a` right-justified in a
    string of length `width`.

    Calls `str.rjust` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    width : int
        The length of the resulting strings
    fillchar : str or unicode, optional
        The character to use for padding

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.rjust

    """
    arr = numpy.asarray(a)
    widths = numpy.asarray(width)
    # Output itemsize must accommodate the largest requested width.
    out_size = int(numpy.max(widths.flat))
    if numpy.issubdtype(arr.dtype, numpy.bytes_):
        fillchar = asbytes(fillchar)
    # Build an output dtype of the same kind as the input, sized out_size.
    out_dtype = type(arr.dtype)(out_size)
    return _vec_string(arr, out_dtype, 'rjust', (widths, fillchar))
|
| 1379 |
+
|
| 1380 |
+
|
| 1381 |
+
@array_function_dispatch(_partition_dispatcher)
def rpartition(a, sep):
    """
    Partition (split) each element around the right-most separator.

    Calls `str.rpartition` element-wise.

    For each element in `a`, split the element at the last
    occurrence of `sep`, and return 3 strings containing the part
    before the separator, the separator itself, and the part after
    the separator. If the separator is not found, return 3 strings
    containing the string itself, followed by two empty strings.

    Parameters
    ----------
    a : array_like of str or unicode
        Input array
    sep : str or unicode
        Right-most separator to split each element in array.

    Returns
    -------
    out : ndarray
        Output array of string or unicode, depending on input
        type. The output array will have an extra dimension with
        3 elements per input element.

    See Also
    --------
    str.rpartition

    """
    parts = _vec_string(a, object_, 'rpartition', (sep,))
    return _to_bytes_or_str_array(parts, a)
|
| 1415 |
+
|
| 1416 |
+
|
| 1417 |
+
def _split_dispatcher(a, sep=None, maxsplit=None):
|
| 1418 |
+
return (a,)
|
| 1419 |
+
|
| 1420 |
+
|
| 1421 |
+
@array_function_dispatch(_split_dispatcher)
def rsplit(a, sep=None, maxsplit=None):
    """
    For each element in `a`, return a list of the words in the
    string, using `sep` as the delimiter string.

    Calls `str.rsplit` element-wise.

    Except for splitting from the right, `rsplit`
    behaves like `split`.

    Parameters
    ----------
    a : array_like of str or unicode

    sep : str or unicode, optional
        If `sep` is not specified or None, any whitespace string
        is a separator.
    maxsplit : int, optional
        If `maxsplit` is given, at most `maxsplit` splits are done,
        the rightmost ones.

    Returns
    -------
    out : ndarray
        Array of list objects

    See Also
    --------
    str.rsplit, split

    """
    # Element results are Python lists of varying length, so the output
    # stays an object array.
    args = [sep] + _clean_args(maxsplit)
    return _vec_string(a, object_, 'rsplit', args)
|
| 1457 |
+
|
| 1458 |
+
|
| 1459 |
+
# NOTE(review): exact duplicate of the `_strip_dispatcher` defined earlier in
# this file (above `lstrip`); rebinding it here is harmless but redundant.
def _strip_dispatcher(a, chars=None):
    return (a,)
|
| 1461 |
+
|
| 1462 |
+
|
| 1463 |
+
@array_function_dispatch(_strip_dispatcher)
def rstrip(a, chars=None):
    """
    For each element in `a`, return a copy with the trailing
    characters removed.

    Calls `str.rstrip` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    chars : str or unicode, optional
        The `chars` argument is a string specifying the set of
        characters to be removed. If omitted or None, the `chars`
        argument defaults to removing whitespace. The `chars` argument
        is not a suffix; rather, all combinations of its values are
        stripped.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.rstrip

    Examples
    --------
    >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
    array(['aAaAaA', 'abBABba'],
        dtype='|S7')
    >>> np.char.rstrip(c, b'a')
    array(['aAaAaA', 'abBABb'],
        dtype='|S7')
    >>> np.char.rstrip(c, b'A')
    array(['aAaAa', 'abBABba'],
        dtype='|S7')

    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'rstrip', (chars,))
|
| 1506 |
+
|
| 1507 |
+
|
| 1508 |
+
@array_function_dispatch(_split_dispatcher)
def split(a, sep=None, maxsplit=None):
    """
    For each element in `a`, return a list of the words in the
    string, using `sep` as the delimiter string.

    Calls `str.split` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    sep : str or unicode, optional
        If `sep` is not specified or None, any whitespace string is a
        separator.

    maxsplit : int, optional
        If `maxsplit` is given, at most `maxsplit` splits are done.

    Returns
    -------
    out : ndarray
        Array of list objects

    See Also
    --------
    str.split, rsplit

    """
    # Element results are Python lists of varying length, so the output
    # stays an object array.
    args = [sep] + _clean_args(maxsplit)
    return _vec_string(a, object_, 'split', args)
|
| 1541 |
+
|
| 1542 |
+
|
| 1543 |
+
def _splitlines_dispatcher(a, keepends=None):
|
| 1544 |
+
return (a,)
|
| 1545 |
+
|
| 1546 |
+
|
| 1547 |
+
@array_function_dispatch(_splitlines_dispatcher)
def splitlines(a, keepends=None):
    """
    For each element in `a`, return a list of the lines in the
    element, breaking at line boundaries.

    Calls `str.splitlines` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    keepends : bool, optional
        Line breaks are not included in the resulting list unless
        keepends is given and true.

    Returns
    -------
    out : ndarray
        Array of list objects

    See Also
    --------
    str.splitlines

    """
    args = _clean_args(keepends)
    return _vec_string(a, object_, 'splitlines', args)
|
| 1575 |
+
|
| 1576 |
+
|
| 1577 |
+
def _startswith_dispatcher(a, prefix, start=None, end=None):
|
| 1578 |
+
return (a,)
|
| 1579 |
+
|
| 1580 |
+
|
| 1581 |
+
@array_function_dispatch(_startswith_dispatcher)
def startswith(a, prefix, start=0, end=None):
    """
    Returns a boolean array which is `True` where the string element
    in `a` starts with `prefix`, otherwise `False`.

    Calls `str.startswith` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    prefix : str

    start, end : int, optional
        With optional `start`, test beginning at that position. With
        optional `end`, stop comparing at that position.

    Returns
    -------
    out : ndarray
        Array of booleans

    See Also
    --------
    str.startswith

    """
    args = [prefix, start] + _clean_args(end)
    return _vec_string(a, bool_, 'startswith', args)
|
| 1611 |
+
|
| 1612 |
+
|
| 1613 |
+
@array_function_dispatch(_strip_dispatcher)
def strip(a, chars=None):
    """
    For each element in `a`, return a copy with the leading and
    trailing characters removed.

    Calls `str.strip` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    chars : str or unicode, optional
        The `chars` argument is a string specifying the set of
        characters to be removed. If omitted or None, the `chars`
        argument defaults to removing whitespace. The `chars` argument
        is not a prefix or suffix; rather, all combinations of its
        values are stripped.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.strip

    Examples
    --------
    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> c
    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
    >>> np.char.strip(c)
    array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
    >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
    array(['AaAaA', '  aA  ', 'bBABb'], dtype='<U7')
    >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
    array(['aAaAa', '  aA  ', 'abBABba'], dtype='<U7')

    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'strip', _clean_args(chars))
|
| 1656 |
+
|
| 1657 |
+
|
| 1658 |
+
@array_function_dispatch(_unary_op_dispatcher)
def swapcase(a):
    """
    Return element-wise a copy of the string with
    uppercase characters converted to lowercase and vice versa.

    Calls `str.swapcase` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.swapcase

    Examples
    --------
    >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
    array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
        dtype='|S5')
    >>> np.char.swapcase(c)
    array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
        dtype='|S5')

    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'swapcase')
|
| 1694 |
+
|
| 1695 |
+
|
| 1696 |
+
@array_function_dispatch(_unary_op_dispatcher)
def title(a):
    """
    Return element-wise title cased version of string or unicode.

    Title case words start with uppercase characters, all remaining cased
    characters are lowercase.

    Calls `str.title` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.title

    Examples
    --------
    >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
    array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
        dtype='|S5')
    >>> np.char.title(c)
    array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
        dtype='|S5')

    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'title')
|
| 1734 |
+
|
| 1735 |
+
|
| 1736 |
+
def _translate_dispatcher(a, table, deletechars=None):
|
| 1737 |
+
return (a,)
|
| 1738 |
+
|
| 1739 |
+
|
| 1740 |
+
@array_function_dispatch(_translate_dispatcher)
|
| 1741 |
+
def translate(a, table, deletechars=None):
|
| 1742 |
+
"""
|
| 1743 |
+
For each element in `a`, return a copy of the string where all
|
| 1744 |
+
characters occurring in the optional argument `deletechars` are
|
| 1745 |
+
removed, and the remaining characters have been mapped through the
|
| 1746 |
+
given translation table.
|
| 1747 |
+
|
| 1748 |
+
Calls `str.translate` element-wise.
|
| 1749 |
+
|
| 1750 |
+
Parameters
|
| 1751 |
+
----------
|
| 1752 |
+
a : array-like of str or unicode
|
| 1753 |
+
|
| 1754 |
+
table : str of length 256
|
| 1755 |
+
|
| 1756 |
+
deletechars : str
|
| 1757 |
+
|
| 1758 |
+
Returns
|
| 1759 |
+
-------
|
| 1760 |
+
out : ndarray
|
| 1761 |
+
Output array of str or unicode, depending on input type
|
| 1762 |
+
|
| 1763 |
+
See Also
|
| 1764 |
+
--------
|
| 1765 |
+
str.translate
|
| 1766 |
+
|
| 1767 |
+
"""
|
| 1768 |
+
a_arr = numpy.asarray(a)
|
| 1769 |
+
if issubclass(a_arr.dtype.type, str_):
|
| 1770 |
+
return _vec_string(
|
| 1771 |
+
a_arr, a_arr.dtype, 'translate', (table,))
|
| 1772 |
+
else:
|
| 1773 |
+
return _vec_string(
|
| 1774 |
+
a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
|
| 1775 |
+
|
| 1776 |
+
|
| 1777 |
+
@array_function_dispatch(_unary_op_dispatcher)
|
| 1778 |
+
def upper(a):
|
| 1779 |
+
"""
|
| 1780 |
+
Return an array with the elements converted to uppercase.
|
| 1781 |
+
|
| 1782 |
+
Calls `str.upper` element-wise.
|
| 1783 |
+
|
| 1784 |
+
For 8-bit strings, this method is locale-dependent.
|
| 1785 |
+
|
| 1786 |
+
Parameters
|
| 1787 |
+
----------
|
| 1788 |
+
a : array_like, {str, unicode}
|
| 1789 |
+
Input array.
|
| 1790 |
+
|
| 1791 |
+
Returns
|
| 1792 |
+
-------
|
| 1793 |
+
out : ndarray, {str, unicode}
|
| 1794 |
+
Output array of str or unicode, depending on input type
|
| 1795 |
+
|
| 1796 |
+
See Also
|
| 1797 |
+
--------
|
| 1798 |
+
str.upper
|
| 1799 |
+
|
| 1800 |
+
Examples
|
| 1801 |
+
--------
|
| 1802 |
+
>>> c = np.array(['a1b c', '1bca', 'bca1']); c
|
| 1803 |
+
array(['a1b c', '1bca', 'bca1'], dtype='<U5')
|
| 1804 |
+
>>> np.char.upper(c)
|
| 1805 |
+
array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
|
| 1806 |
+
|
| 1807 |
+
"""
|
| 1808 |
+
a_arr = numpy.asarray(a)
|
| 1809 |
+
return _vec_string(a_arr, a_arr.dtype, 'upper')
|
| 1810 |
+
|
| 1811 |
+
|
| 1812 |
+
def _zfill_dispatcher(a, width):
|
| 1813 |
+
return (a,)
|
| 1814 |
+
|
| 1815 |
+
|
| 1816 |
+
@array_function_dispatch(_zfill_dispatcher)
|
| 1817 |
+
def zfill(a, width):
|
| 1818 |
+
"""
|
| 1819 |
+
Return the numeric string left-filled with zeros
|
| 1820 |
+
|
| 1821 |
+
Calls `str.zfill` element-wise.
|
| 1822 |
+
|
| 1823 |
+
Parameters
|
| 1824 |
+
----------
|
| 1825 |
+
a : array_like, {str, unicode}
|
| 1826 |
+
Input array.
|
| 1827 |
+
width : int
|
| 1828 |
+
Width of string to left-fill elements in `a`.
|
| 1829 |
+
|
| 1830 |
+
Returns
|
| 1831 |
+
-------
|
| 1832 |
+
out : ndarray, {str, unicode}
|
| 1833 |
+
Output array of str or unicode, depending on input type
|
| 1834 |
+
|
| 1835 |
+
See Also
|
| 1836 |
+
--------
|
| 1837 |
+
str.zfill
|
| 1838 |
+
|
| 1839 |
+
"""
|
| 1840 |
+
a_arr = numpy.asarray(a)
|
| 1841 |
+
width_arr = numpy.asarray(width)
|
| 1842 |
+
size = int(numpy.max(width_arr.flat))
|
| 1843 |
+
return _vec_string(
|
| 1844 |
+
a_arr, type(a_arr.dtype)(size), 'zfill', (width_arr,))
|
| 1845 |
+
|
| 1846 |
+
|
| 1847 |
+
@array_function_dispatch(_unary_op_dispatcher)
|
| 1848 |
+
def isnumeric(a):
|
| 1849 |
+
"""
|
| 1850 |
+
For each element, return True if there are only numeric
|
| 1851 |
+
characters in the element.
|
| 1852 |
+
|
| 1853 |
+
Calls `str.isnumeric` element-wise.
|
| 1854 |
+
|
| 1855 |
+
Numeric characters include digit characters, and all characters
|
| 1856 |
+
that have the Unicode numeric value property, e.g. ``U+2155,
|
| 1857 |
+
VULGAR FRACTION ONE FIFTH``.
|
| 1858 |
+
|
| 1859 |
+
Parameters
|
| 1860 |
+
----------
|
| 1861 |
+
a : array_like, unicode
|
| 1862 |
+
Input array.
|
| 1863 |
+
|
| 1864 |
+
Returns
|
| 1865 |
+
-------
|
| 1866 |
+
out : ndarray, bool
|
| 1867 |
+
Array of booleans of same shape as `a`.
|
| 1868 |
+
|
| 1869 |
+
See Also
|
| 1870 |
+
--------
|
| 1871 |
+
str.isnumeric
|
| 1872 |
+
|
| 1873 |
+
Examples
|
| 1874 |
+
--------
|
| 1875 |
+
>>> np.char.isnumeric(['123', '123abc', '9.0', '1/4', 'VIII'])
|
| 1876 |
+
array([ True, False, False, False, False])
|
| 1877 |
+
|
| 1878 |
+
"""
|
| 1879 |
+
if not _is_unicode(a):
|
| 1880 |
+
raise TypeError("isnumeric is only available for Unicode strings and arrays")
|
| 1881 |
+
return _vec_string(a, bool_, 'isnumeric')
|
| 1882 |
+
|
| 1883 |
+
|
| 1884 |
+
@array_function_dispatch(_unary_op_dispatcher)
|
| 1885 |
+
def isdecimal(a):
|
| 1886 |
+
"""
|
| 1887 |
+
For each element, return True if there are only decimal
|
| 1888 |
+
characters in the element.
|
| 1889 |
+
|
| 1890 |
+
Calls `str.isdecimal` element-wise.
|
| 1891 |
+
|
| 1892 |
+
Decimal characters include digit characters, and all characters
|
| 1893 |
+
that can be used to form decimal-radix numbers,
|
| 1894 |
+
e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
|
| 1895 |
+
|
| 1896 |
+
Parameters
|
| 1897 |
+
----------
|
| 1898 |
+
a : array_like, unicode
|
| 1899 |
+
Input array.
|
| 1900 |
+
|
| 1901 |
+
Returns
|
| 1902 |
+
-------
|
| 1903 |
+
out : ndarray, bool
|
| 1904 |
+
Array of booleans identical in shape to `a`.
|
| 1905 |
+
|
| 1906 |
+
See Also
|
| 1907 |
+
--------
|
| 1908 |
+
str.isdecimal
|
| 1909 |
+
|
| 1910 |
+
Examples
|
| 1911 |
+
--------
|
| 1912 |
+
>>> np.char.isdecimal(['12345', '4.99', '123ABC', ''])
|
| 1913 |
+
array([ True, False, False, False])
|
| 1914 |
+
|
| 1915 |
+
"""
|
| 1916 |
+
if not _is_unicode(a):
|
| 1917 |
+
raise TypeError(
|
| 1918 |
+
"isdecimal is only available for Unicode strings and arrays")
|
| 1919 |
+
return _vec_string(a, bool_, 'isdecimal')
|
| 1920 |
+
|
| 1921 |
+
|
| 1922 |
+
@set_module('numpy')
|
| 1923 |
+
class chararray(ndarray):
|
| 1924 |
+
"""
|
| 1925 |
+
chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
|
| 1926 |
+
strides=None, order=None)
|
| 1927 |
+
|
| 1928 |
+
Provides a convenient view on arrays of string and unicode values.
|
| 1929 |
+
|
| 1930 |
+
.. note::
|
| 1931 |
+
The `chararray` class exists for backwards compatibility with
|
| 1932 |
+
Numarray, it is not recommended for new development. Starting from numpy
|
| 1933 |
+
1.4, if one needs arrays of strings, it is recommended to use arrays of
|
| 1934 |
+
`dtype` `object_`, `bytes_` or `str_`, and use the free functions
|
| 1935 |
+
in the `numpy.char` module for fast vectorized string operations.
|
| 1936 |
+
|
| 1937 |
+
Versus a regular NumPy array of type `str` or `unicode`, this
|
| 1938 |
+
class adds the following functionality:
|
| 1939 |
+
|
| 1940 |
+
1) values automatically have whitespace removed from the end
|
| 1941 |
+
when indexed
|
| 1942 |
+
|
| 1943 |
+
2) comparison operators automatically remove whitespace from the
|
| 1944 |
+
end when comparing values
|
| 1945 |
+
|
| 1946 |
+
3) vectorized string operations are provided as methods
|
| 1947 |
+
(e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
|
| 1948 |
+
|
| 1949 |
+
chararrays should be created using `numpy.char.array` or
|
| 1950 |
+
`numpy.char.asarray`, rather than this constructor directly.
|
| 1951 |
+
|
| 1952 |
+
This constructor creates the array, using `buffer` (with `offset`
|
| 1953 |
+
and `strides`) if it is not ``None``. If `buffer` is ``None``, then
|
| 1954 |
+
constructs a new array with `strides` in "C order", unless both
|
| 1955 |
+
``len(shape) >= 2`` and ``order='F'``, in which case `strides`
|
| 1956 |
+
is in "Fortran order".
|
| 1957 |
+
|
| 1958 |
+
Methods
|
| 1959 |
+
-------
|
| 1960 |
+
astype
|
| 1961 |
+
argsort
|
| 1962 |
+
copy
|
| 1963 |
+
count
|
| 1964 |
+
decode
|
| 1965 |
+
dump
|
| 1966 |
+
dumps
|
| 1967 |
+
encode
|
| 1968 |
+
endswith
|
| 1969 |
+
expandtabs
|
| 1970 |
+
fill
|
| 1971 |
+
find
|
| 1972 |
+
flatten
|
| 1973 |
+
getfield
|
| 1974 |
+
index
|
| 1975 |
+
isalnum
|
| 1976 |
+
isalpha
|
| 1977 |
+
isdecimal
|
| 1978 |
+
isdigit
|
| 1979 |
+
islower
|
| 1980 |
+
isnumeric
|
| 1981 |
+
isspace
|
| 1982 |
+
istitle
|
| 1983 |
+
isupper
|
| 1984 |
+
item
|
| 1985 |
+
join
|
| 1986 |
+
ljust
|
| 1987 |
+
lower
|
| 1988 |
+
lstrip
|
| 1989 |
+
nonzero
|
| 1990 |
+
put
|
| 1991 |
+
ravel
|
| 1992 |
+
repeat
|
| 1993 |
+
replace
|
| 1994 |
+
reshape
|
| 1995 |
+
resize
|
| 1996 |
+
rfind
|
| 1997 |
+
rindex
|
| 1998 |
+
rjust
|
| 1999 |
+
rsplit
|
| 2000 |
+
rstrip
|
| 2001 |
+
searchsorted
|
| 2002 |
+
setfield
|
| 2003 |
+
setflags
|
| 2004 |
+
sort
|
| 2005 |
+
split
|
| 2006 |
+
splitlines
|
| 2007 |
+
squeeze
|
| 2008 |
+
startswith
|
| 2009 |
+
strip
|
| 2010 |
+
swapaxes
|
| 2011 |
+
swapcase
|
| 2012 |
+
take
|
| 2013 |
+
title
|
| 2014 |
+
tofile
|
| 2015 |
+
tolist
|
| 2016 |
+
tostring
|
| 2017 |
+
translate
|
| 2018 |
+
transpose
|
| 2019 |
+
upper
|
| 2020 |
+
view
|
| 2021 |
+
zfill
|
| 2022 |
+
|
| 2023 |
+
Parameters
|
| 2024 |
+
----------
|
| 2025 |
+
shape : tuple
|
| 2026 |
+
Shape of the array.
|
| 2027 |
+
itemsize : int, optional
|
| 2028 |
+
Length of each array element, in number of characters. Default is 1.
|
| 2029 |
+
unicode : bool, optional
|
| 2030 |
+
Are the array elements of type unicode (True) or string (False).
|
| 2031 |
+
Default is False.
|
| 2032 |
+
buffer : object exposing the buffer interface or str, optional
|
| 2033 |
+
Memory address of the start of the array data. Default is None,
|
| 2034 |
+
in which case a new array is created.
|
| 2035 |
+
offset : int, optional
|
| 2036 |
+
Fixed stride displacement from the beginning of an axis?
|
| 2037 |
+
Default is 0. Needs to be >=0.
|
| 2038 |
+
strides : array_like of ints, optional
|
| 2039 |
+
Strides for the array (see `ndarray.strides` for full description).
|
| 2040 |
+
Default is None.
|
| 2041 |
+
order : {'C', 'F'}, optional
|
| 2042 |
+
The order in which the array data is stored in memory: 'C' ->
|
| 2043 |
+
"row major" order (the default), 'F' -> "column major"
|
| 2044 |
+
(Fortran) order.
|
| 2045 |
+
|
| 2046 |
+
Examples
|
| 2047 |
+
--------
|
| 2048 |
+
>>> charar = np.chararray((3, 3))
|
| 2049 |
+
>>> charar[:] = 'a'
|
| 2050 |
+
>>> charar
|
| 2051 |
+
chararray([[b'a', b'a', b'a'],
|
| 2052 |
+
[b'a', b'a', b'a'],
|
| 2053 |
+
[b'a', b'a', b'a']], dtype='|S1')
|
| 2054 |
+
|
| 2055 |
+
>>> charar = np.chararray(charar.shape, itemsize=5)
|
| 2056 |
+
>>> charar[:] = 'abc'
|
| 2057 |
+
>>> charar
|
| 2058 |
+
chararray([[b'abc', b'abc', b'abc'],
|
| 2059 |
+
[b'abc', b'abc', b'abc'],
|
| 2060 |
+
[b'abc', b'abc', b'abc']], dtype='|S5')
|
| 2061 |
+
|
| 2062 |
+
"""
|
| 2063 |
+
def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
|
| 2064 |
+
offset=0, strides=None, order='C'):
|
| 2065 |
+
global _globalvar
|
| 2066 |
+
|
| 2067 |
+
if unicode:
|
| 2068 |
+
dtype = str_
|
| 2069 |
+
else:
|
| 2070 |
+
dtype = bytes_
|
| 2071 |
+
|
| 2072 |
+
# force itemsize to be a Python int, since using NumPy integer
|
| 2073 |
+
# types results in itemsize.itemsize being used as the size of
|
| 2074 |
+
# strings in the new array.
|
| 2075 |
+
itemsize = int(itemsize)
|
| 2076 |
+
|
| 2077 |
+
if isinstance(buffer, str):
|
| 2078 |
+
# unicode objects do not have the buffer interface
|
| 2079 |
+
filler = buffer
|
| 2080 |
+
buffer = None
|
| 2081 |
+
else:
|
| 2082 |
+
filler = None
|
| 2083 |
+
|
| 2084 |
+
_globalvar = 1
|
| 2085 |
+
if buffer is None:
|
| 2086 |
+
self = ndarray.__new__(subtype, shape, (dtype, itemsize),
|
| 2087 |
+
order=order)
|
| 2088 |
+
else:
|
| 2089 |
+
self = ndarray.__new__(subtype, shape, (dtype, itemsize),
|
| 2090 |
+
buffer=buffer,
|
| 2091 |
+
offset=offset, strides=strides,
|
| 2092 |
+
order=order)
|
| 2093 |
+
if filler is not None:
|
| 2094 |
+
self[...] = filler
|
| 2095 |
+
_globalvar = 0
|
| 2096 |
+
return self
|
| 2097 |
+
|
| 2098 |
+
def __array_finalize__(self, obj):
|
| 2099 |
+
# The b is a special case because it is used for reconstructing.
|
| 2100 |
+
if not _globalvar and self.dtype.char not in 'SUbc':
|
| 2101 |
+
raise ValueError("Can only create a chararray from string data.")
|
| 2102 |
+
|
| 2103 |
+
def __getitem__(self, obj):
|
| 2104 |
+
val = ndarray.__getitem__(self, obj)
|
| 2105 |
+
|
| 2106 |
+
if isinstance(val, character):
|
| 2107 |
+
temp = val.rstrip()
|
| 2108 |
+
if len(temp) == 0:
|
| 2109 |
+
val = ''
|
| 2110 |
+
else:
|
| 2111 |
+
val = temp
|
| 2112 |
+
|
| 2113 |
+
return val
|
| 2114 |
+
|
| 2115 |
+
# IMPLEMENTATION NOTE: Most of the methods of this class are
|
| 2116 |
+
# direct delegations to the free functions in this module.
|
| 2117 |
+
# However, those that return an array of strings should instead
|
| 2118 |
+
# return a chararray, so some extra wrapping is required.
|
| 2119 |
+
|
| 2120 |
+
def __eq__(self, other):
|
| 2121 |
+
"""
|
| 2122 |
+
Return (self == other) element-wise.
|
| 2123 |
+
|
| 2124 |
+
See Also
|
| 2125 |
+
--------
|
| 2126 |
+
equal
|
| 2127 |
+
"""
|
| 2128 |
+
return equal(self, other)
|
| 2129 |
+
|
| 2130 |
+
def __ne__(self, other):
|
| 2131 |
+
"""
|
| 2132 |
+
Return (self != other) element-wise.
|
| 2133 |
+
|
| 2134 |
+
See Also
|
| 2135 |
+
--------
|
| 2136 |
+
not_equal
|
| 2137 |
+
"""
|
| 2138 |
+
return not_equal(self, other)
|
| 2139 |
+
|
| 2140 |
+
def __ge__(self, other):
|
| 2141 |
+
"""
|
| 2142 |
+
Return (self >= other) element-wise.
|
| 2143 |
+
|
| 2144 |
+
See Also
|
| 2145 |
+
--------
|
| 2146 |
+
greater_equal
|
| 2147 |
+
"""
|
| 2148 |
+
return greater_equal(self, other)
|
| 2149 |
+
|
| 2150 |
+
def __le__(self, other):
|
| 2151 |
+
"""
|
| 2152 |
+
Return (self <= other) element-wise.
|
| 2153 |
+
|
| 2154 |
+
See Also
|
| 2155 |
+
--------
|
| 2156 |
+
less_equal
|
| 2157 |
+
"""
|
| 2158 |
+
return less_equal(self, other)
|
| 2159 |
+
|
| 2160 |
+
def __gt__(self, other):
|
| 2161 |
+
"""
|
| 2162 |
+
Return (self > other) element-wise.
|
| 2163 |
+
|
| 2164 |
+
See Also
|
| 2165 |
+
--------
|
| 2166 |
+
greater
|
| 2167 |
+
"""
|
| 2168 |
+
return greater(self, other)
|
| 2169 |
+
|
| 2170 |
+
def __lt__(self, other):
|
| 2171 |
+
"""
|
| 2172 |
+
Return (self < other) element-wise.
|
| 2173 |
+
|
| 2174 |
+
See Also
|
| 2175 |
+
--------
|
| 2176 |
+
less
|
| 2177 |
+
"""
|
| 2178 |
+
return less(self, other)
|
| 2179 |
+
|
| 2180 |
+
def __add__(self, other):
|
| 2181 |
+
"""
|
| 2182 |
+
Return (self + other), that is string concatenation,
|
| 2183 |
+
element-wise for a pair of array_likes of str or unicode.
|
| 2184 |
+
|
| 2185 |
+
See Also
|
| 2186 |
+
--------
|
| 2187 |
+
add
|
| 2188 |
+
"""
|
| 2189 |
+
return asarray(add(self, other))
|
| 2190 |
+
|
| 2191 |
+
def __radd__(self, other):
|
| 2192 |
+
"""
|
| 2193 |
+
Return (other + self), that is string concatenation,
|
| 2194 |
+
element-wise for a pair of array_likes of `bytes_` or `str_`.
|
| 2195 |
+
|
| 2196 |
+
See Also
|
| 2197 |
+
--------
|
| 2198 |
+
add
|
| 2199 |
+
"""
|
| 2200 |
+
return asarray(add(numpy.asarray(other), self))
|
| 2201 |
+
|
| 2202 |
+
def __mul__(self, i):
|
| 2203 |
+
"""
|
| 2204 |
+
Return (self * i), that is string multiple concatenation,
|
| 2205 |
+
element-wise.
|
| 2206 |
+
|
| 2207 |
+
See Also
|
| 2208 |
+
--------
|
| 2209 |
+
multiply
|
| 2210 |
+
"""
|
| 2211 |
+
return asarray(multiply(self, i))
|
| 2212 |
+
|
| 2213 |
+
def __rmul__(self, i):
|
| 2214 |
+
"""
|
| 2215 |
+
Return (self * i), that is string multiple concatenation,
|
| 2216 |
+
element-wise.
|
| 2217 |
+
|
| 2218 |
+
See Also
|
| 2219 |
+
--------
|
| 2220 |
+
multiply
|
| 2221 |
+
"""
|
| 2222 |
+
return asarray(multiply(self, i))
|
| 2223 |
+
|
| 2224 |
+
def __mod__(self, i):
|
| 2225 |
+
"""
|
| 2226 |
+
Return (self % i), that is pre-Python 2.6 string formatting
|
| 2227 |
+
(interpolation), element-wise for a pair of array_likes of `bytes_`
|
| 2228 |
+
or `str_`.
|
| 2229 |
+
|
| 2230 |
+
See Also
|
| 2231 |
+
--------
|
| 2232 |
+
mod
|
| 2233 |
+
"""
|
| 2234 |
+
return asarray(mod(self, i))
|
| 2235 |
+
|
| 2236 |
+
def __rmod__(self, other):
|
| 2237 |
+
return NotImplemented
|
| 2238 |
+
|
| 2239 |
+
def argsort(self, axis=-1, kind=None, order=None):
|
| 2240 |
+
"""
|
| 2241 |
+
Return the indices that sort the array lexicographically.
|
| 2242 |
+
|
| 2243 |
+
For full documentation see `numpy.argsort`, for which this method is
|
| 2244 |
+
in fact merely a "thin wrapper."
|
| 2245 |
+
|
| 2246 |
+
Examples
|
| 2247 |
+
--------
|
| 2248 |
+
>>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
|
| 2249 |
+
>>> c = c.view(np.chararray); c
|
| 2250 |
+
chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
|
| 2251 |
+
dtype='|S5')
|
| 2252 |
+
>>> c[c.argsort()]
|
| 2253 |
+
chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
|
| 2254 |
+
dtype='|S5')
|
| 2255 |
+
|
| 2256 |
+
"""
|
| 2257 |
+
return self.__array__().argsort(axis, kind, order)
|
| 2258 |
+
argsort.__doc__ = ndarray.argsort.__doc__
|
| 2259 |
+
|
| 2260 |
+
def capitalize(self):
|
| 2261 |
+
"""
|
| 2262 |
+
Return a copy of `self` with only the first character of each element
|
| 2263 |
+
capitalized.
|
| 2264 |
+
|
| 2265 |
+
See Also
|
| 2266 |
+
--------
|
| 2267 |
+
char.capitalize
|
| 2268 |
+
|
| 2269 |
+
"""
|
| 2270 |
+
return asarray(capitalize(self))
|
| 2271 |
+
|
| 2272 |
+
def center(self, width, fillchar=' '):
|
| 2273 |
+
"""
|
| 2274 |
+
Return a copy of `self` with its elements centered in a
|
| 2275 |
+
string of length `width`.
|
| 2276 |
+
|
| 2277 |
+
See Also
|
| 2278 |
+
--------
|
| 2279 |
+
center
|
| 2280 |
+
"""
|
| 2281 |
+
return asarray(center(self, width, fillchar))
|
| 2282 |
+
|
| 2283 |
+
def count(self, sub, start=0, end=None):
|
| 2284 |
+
"""
|
| 2285 |
+
Returns an array with the number of non-overlapping occurrences of
|
| 2286 |
+
substring `sub` in the range [`start`, `end`].
|
| 2287 |
+
|
| 2288 |
+
See Also
|
| 2289 |
+
--------
|
| 2290 |
+
char.count
|
| 2291 |
+
|
| 2292 |
+
"""
|
| 2293 |
+
return count(self, sub, start, end)
|
| 2294 |
+
|
| 2295 |
+
def decode(self, encoding=None, errors=None):
|
| 2296 |
+
"""
|
| 2297 |
+
Calls ``bytes.decode`` element-wise.
|
| 2298 |
+
|
| 2299 |
+
See Also
|
| 2300 |
+
--------
|
| 2301 |
+
char.decode
|
| 2302 |
+
|
| 2303 |
+
"""
|
| 2304 |
+
return decode(self, encoding, errors)
|
| 2305 |
+
|
| 2306 |
+
def encode(self, encoding=None, errors=None):
|
| 2307 |
+
"""
|
| 2308 |
+
Calls `str.encode` element-wise.
|
| 2309 |
+
|
| 2310 |
+
See Also
|
| 2311 |
+
--------
|
| 2312 |
+
char.encode
|
| 2313 |
+
|
| 2314 |
+
"""
|
| 2315 |
+
return encode(self, encoding, errors)
|
| 2316 |
+
|
| 2317 |
+
def endswith(self, suffix, start=0, end=None):
|
| 2318 |
+
"""
|
| 2319 |
+
Returns a boolean array which is `True` where the string element
|
| 2320 |
+
in `self` ends with `suffix`, otherwise `False`.
|
| 2321 |
+
|
| 2322 |
+
See Also
|
| 2323 |
+
--------
|
| 2324 |
+
char.endswith
|
| 2325 |
+
|
| 2326 |
+
"""
|
| 2327 |
+
return endswith(self, suffix, start, end)
|
| 2328 |
+
|
| 2329 |
+
def expandtabs(self, tabsize=8):
|
| 2330 |
+
"""
|
| 2331 |
+
Return a copy of each string element where all tab characters are
|
| 2332 |
+
replaced by one or more spaces.
|
| 2333 |
+
|
| 2334 |
+
See Also
|
| 2335 |
+
--------
|
| 2336 |
+
char.expandtabs
|
| 2337 |
+
|
| 2338 |
+
"""
|
| 2339 |
+
return asarray(expandtabs(self, tabsize))
|
| 2340 |
+
|
| 2341 |
+
def find(self, sub, start=0, end=None):
|
| 2342 |
+
"""
|
| 2343 |
+
For each element, return the lowest index in the string where
|
| 2344 |
+
substring `sub` is found.
|
| 2345 |
+
|
| 2346 |
+
See Also
|
| 2347 |
+
--------
|
| 2348 |
+
char.find
|
| 2349 |
+
|
| 2350 |
+
"""
|
| 2351 |
+
return find(self, sub, start, end)
|
| 2352 |
+
|
| 2353 |
+
def index(self, sub, start=0, end=None):
|
| 2354 |
+
"""
|
| 2355 |
+
Like `find`, but raises `ValueError` when the substring is not found.
|
| 2356 |
+
|
| 2357 |
+
See Also
|
| 2358 |
+
--------
|
| 2359 |
+
char.index
|
| 2360 |
+
|
| 2361 |
+
"""
|
| 2362 |
+
return index(self, sub, start, end)
|
| 2363 |
+
|
| 2364 |
+
def isalnum(self):
|
| 2365 |
+
"""
|
| 2366 |
+
Returns true for each element if all characters in the string
|
| 2367 |
+
are alphanumeric and there is at least one character, false
|
| 2368 |
+
otherwise.
|
| 2369 |
+
|
| 2370 |
+
See Also
|
| 2371 |
+
--------
|
| 2372 |
+
char.isalnum
|
| 2373 |
+
|
| 2374 |
+
"""
|
| 2375 |
+
return isalnum(self)
|
| 2376 |
+
|
| 2377 |
+
def isalpha(self):
|
| 2378 |
+
"""
|
| 2379 |
+
Returns true for each element if all characters in the string
|
| 2380 |
+
are alphabetic and there is at least one character, false
|
| 2381 |
+
otherwise.
|
| 2382 |
+
|
| 2383 |
+
See Also
|
| 2384 |
+
--------
|
| 2385 |
+
char.isalpha
|
| 2386 |
+
|
| 2387 |
+
"""
|
| 2388 |
+
return isalpha(self)
|
| 2389 |
+
|
| 2390 |
+
def isdigit(self):
|
| 2391 |
+
"""
|
| 2392 |
+
Returns true for each element if all characters in the string are
|
| 2393 |
+
digits and there is at least one character, false otherwise.
|
| 2394 |
+
|
| 2395 |
+
See Also
|
| 2396 |
+
--------
|
| 2397 |
+
char.isdigit
|
| 2398 |
+
|
| 2399 |
+
"""
|
| 2400 |
+
return isdigit(self)
|
| 2401 |
+
|
| 2402 |
+
def islower(self):
|
| 2403 |
+
"""
|
| 2404 |
+
Returns true for each element if all cased characters in the
|
| 2405 |
+
string are lowercase and there is at least one cased character,
|
| 2406 |
+
false otherwise.
|
| 2407 |
+
|
| 2408 |
+
See Also
|
| 2409 |
+
--------
|
| 2410 |
+
char.islower
|
| 2411 |
+
|
| 2412 |
+
"""
|
| 2413 |
+
return islower(self)
|
| 2414 |
+
|
| 2415 |
+
def isspace(self):
|
| 2416 |
+
"""
|
| 2417 |
+
Returns true for each element if there are only whitespace
|
| 2418 |
+
characters in the string and there is at least one character,
|
| 2419 |
+
false otherwise.
|
| 2420 |
+
|
| 2421 |
+
See Also
|
| 2422 |
+
--------
|
| 2423 |
+
char.isspace
|
| 2424 |
+
|
| 2425 |
+
"""
|
| 2426 |
+
return isspace(self)
|
| 2427 |
+
|
| 2428 |
+
def istitle(self):
|
| 2429 |
+
"""
|
| 2430 |
+
Returns true for each element if the element is a titlecased
|
| 2431 |
+
string and there is at least one character, false otherwise.
|
| 2432 |
+
|
| 2433 |
+
See Also
|
| 2434 |
+
--------
|
| 2435 |
+
char.istitle
|
| 2436 |
+
|
| 2437 |
+
"""
|
| 2438 |
+
return istitle(self)
|
| 2439 |
+
|
| 2440 |
+
def isupper(self):
|
| 2441 |
+
"""
|
| 2442 |
+
Returns true for each element if all cased characters in the
|
| 2443 |
+
string are uppercase and there is at least one character, false
|
| 2444 |
+
otherwise.
|
| 2445 |
+
|
| 2446 |
+
See Also
|
| 2447 |
+
--------
|
| 2448 |
+
char.isupper
|
| 2449 |
+
|
| 2450 |
+
"""
|
| 2451 |
+
return isupper(self)
|
| 2452 |
+
|
| 2453 |
+
def join(self, seq):
|
| 2454 |
+
"""
|
| 2455 |
+
Return a string which is the concatenation of the strings in the
|
| 2456 |
+
sequence `seq`.
|
| 2457 |
+
|
| 2458 |
+
See Also
|
| 2459 |
+
--------
|
| 2460 |
+
char.join
|
| 2461 |
+
|
| 2462 |
+
"""
|
| 2463 |
+
return join(self, seq)
|
| 2464 |
+
|
| 2465 |
+
def ljust(self, width, fillchar=' '):
|
| 2466 |
+
"""
|
| 2467 |
+
Return an array with the elements of `self` left-justified in a
|
| 2468 |
+
string of length `width`.
|
| 2469 |
+
|
| 2470 |
+
See Also
|
| 2471 |
+
--------
|
| 2472 |
+
char.ljust
|
| 2473 |
+
|
| 2474 |
+
"""
|
| 2475 |
+
return asarray(ljust(self, width, fillchar))
|
| 2476 |
+
|
| 2477 |
+
def lower(self):
|
| 2478 |
+
"""
|
| 2479 |
+
Return an array with the elements of `self` converted to
|
| 2480 |
+
lowercase.
|
| 2481 |
+
|
| 2482 |
+
See Also
|
| 2483 |
+
--------
|
| 2484 |
+
char.lower
|
| 2485 |
+
|
| 2486 |
+
"""
|
| 2487 |
+
return asarray(lower(self))
|
| 2488 |
+
|
| 2489 |
+
def lstrip(self, chars=None):
|
| 2490 |
+
"""
|
| 2491 |
+
For each element in `self`, return a copy with the leading characters
|
| 2492 |
+
removed.
|
| 2493 |
+
|
| 2494 |
+
See Also
|
| 2495 |
+
--------
|
| 2496 |
+
char.lstrip
|
| 2497 |
+
|
| 2498 |
+
"""
|
| 2499 |
+
return asarray(lstrip(self, chars))
|
| 2500 |
+
|
| 2501 |
+
def partition(self, sep):
|
| 2502 |
+
"""
|
| 2503 |
+
Partition each element in `self` around `sep`.
|
| 2504 |
+
|
| 2505 |
+
See Also
|
| 2506 |
+
--------
|
| 2507 |
+
partition
|
| 2508 |
+
"""
|
| 2509 |
+
return asarray(partition(self, sep))
|
| 2510 |
+
|
| 2511 |
+
def replace(self, old, new, count=None):
|
| 2512 |
+
"""
|
| 2513 |
+
For each element in `self`, return a copy of the string with all
|
| 2514 |
+
occurrences of substring `old` replaced by `new`.
|
| 2515 |
+
|
| 2516 |
+
See Also
|
| 2517 |
+
--------
|
| 2518 |
+
char.replace
|
| 2519 |
+
|
| 2520 |
+
"""
|
| 2521 |
+
return asarray(replace(self, old, new, count))
|
| 2522 |
+
|
| 2523 |
+
def rfind(self, sub, start=0, end=None):
|
| 2524 |
+
"""
|
| 2525 |
+
For each element in `self`, return the highest index in the string
|
| 2526 |
+
where substring `sub` is found, such that `sub` is contained
|
| 2527 |
+
within [`start`, `end`].
|
| 2528 |
+
|
| 2529 |
+
See Also
|
| 2530 |
+
--------
|
| 2531 |
+
char.rfind
|
| 2532 |
+
|
| 2533 |
+
"""
|
| 2534 |
+
return rfind(self, sub, start, end)
|
| 2535 |
+
|
| 2536 |
+
def rindex(self, sub, start=0, end=None):
|
| 2537 |
+
"""
|
| 2538 |
+
Like `rfind`, but raises `ValueError` when the substring `sub` is
|
| 2539 |
+
not found.
|
| 2540 |
+
|
| 2541 |
+
See Also
|
| 2542 |
+
--------
|
| 2543 |
+
char.rindex
|
| 2544 |
+
|
| 2545 |
+
"""
|
| 2546 |
+
return rindex(self, sub, start, end)
|
| 2547 |
+
|
| 2548 |
+
def rjust(self, width, fillchar=' '):
|
| 2549 |
+
"""
|
| 2550 |
+
Return an array with the elements of `self`
|
| 2551 |
+
right-justified in a string of length `width`.
|
| 2552 |
+
|
| 2553 |
+
See Also
|
| 2554 |
+
--------
|
| 2555 |
+
char.rjust
|
| 2556 |
+
|
| 2557 |
+
"""
|
| 2558 |
+
return asarray(rjust(self, width, fillchar))
|
| 2559 |
+
|
| 2560 |
+
def rpartition(self, sep):
|
| 2561 |
+
"""
|
| 2562 |
+
Partition each element in `self` around `sep`.
|
| 2563 |
+
|
| 2564 |
+
See Also
|
| 2565 |
+
--------
|
| 2566 |
+
rpartition
|
| 2567 |
+
"""
|
| 2568 |
+
return asarray(rpartition(self, sep))
|
| 2569 |
+
|
| 2570 |
+
def rsplit(self, sep=None, maxsplit=None):
|
| 2571 |
+
"""
|
| 2572 |
+
For each element in `self`, return a list of the words in
|
| 2573 |
+
the string, using `sep` as the delimiter string.
|
| 2574 |
+
|
| 2575 |
+
See Also
|
| 2576 |
+
--------
|
| 2577 |
+
char.rsplit
|
| 2578 |
+
|
| 2579 |
+
"""
|
| 2580 |
+
return rsplit(self, sep, maxsplit)
|
| 2581 |
+
|
| 2582 |
+
def rstrip(self, chars=None):
|
| 2583 |
+
"""
|
| 2584 |
+
For each element in `self`, return a copy with the trailing
|
| 2585 |
+
characters removed.
|
| 2586 |
+
|
| 2587 |
+
See Also
|
| 2588 |
+
--------
|
| 2589 |
+
char.rstrip
|
| 2590 |
+
|
| 2591 |
+
"""
|
| 2592 |
+
return asarray(rstrip(self, chars))
|
| 2593 |
+
|
| 2594 |
+
def split(self, sep=None, maxsplit=None):
|
| 2595 |
+
"""
|
| 2596 |
+
For each element in `self`, return a list of the words in the
|
| 2597 |
+
string, using `sep` as the delimiter string.
|
| 2598 |
+
|
| 2599 |
+
See Also
|
| 2600 |
+
--------
|
| 2601 |
+
char.split
|
| 2602 |
+
|
| 2603 |
+
"""
|
| 2604 |
+
return split(self, sep, maxsplit)
|
| 2605 |
+
|
| 2606 |
+
def splitlines(self, keepends=None):
|
| 2607 |
+
"""
|
| 2608 |
+
For each element in `self`, return a list of the lines in the
|
| 2609 |
+
element, breaking at line boundaries.
|
| 2610 |
+
|
| 2611 |
+
See Also
|
| 2612 |
+
--------
|
| 2613 |
+
char.splitlines
|
| 2614 |
+
|
| 2615 |
+
"""
|
| 2616 |
+
return splitlines(self, keepends)
|
| 2617 |
+
|
| 2618 |
+
def startswith(self, prefix, start=0, end=None):
|
| 2619 |
+
"""
|
| 2620 |
+
Returns a boolean array which is `True` where the string element
|
| 2621 |
+
in `self` starts with `prefix`, otherwise `False`.
|
| 2622 |
+
|
| 2623 |
+
See Also
|
| 2624 |
+
--------
|
| 2625 |
+
char.startswith
|
| 2626 |
+
|
| 2627 |
+
"""
|
| 2628 |
+
return startswith(self, prefix, start, end)
|
| 2629 |
+
|
| 2630 |
+
def strip(self, chars=None):
|
| 2631 |
+
"""
|
| 2632 |
+
For each element in `self`, return a copy with the leading and
|
| 2633 |
+
trailing characters removed.
|
| 2634 |
+
|
| 2635 |
+
See Also
|
| 2636 |
+
--------
|
| 2637 |
+
char.strip
|
| 2638 |
+
|
| 2639 |
+
"""
|
| 2640 |
+
return asarray(strip(self, chars))
|
| 2641 |
+
|
| 2642 |
+
def swapcase(self):
|
| 2643 |
+
"""
|
| 2644 |
+
For each element in `self`, return a copy of the string with
|
| 2645 |
+
uppercase characters converted to lowercase and vice versa.
|
| 2646 |
+
|
| 2647 |
+
See Also
|
| 2648 |
+
--------
|
| 2649 |
+
char.swapcase
|
| 2650 |
+
|
| 2651 |
+
"""
|
| 2652 |
+
return asarray(swapcase(self))
|
| 2653 |
+
|
| 2654 |
+
def title(self):
|
| 2655 |
+
"""
|
| 2656 |
+
For each element in `self`, return a titlecased version of the
|
| 2657 |
+
string: words start with uppercase characters, all remaining cased
|
| 2658 |
+
characters are lowercase.
|
| 2659 |
+
|
| 2660 |
+
See Also
|
| 2661 |
+
--------
|
| 2662 |
+
char.title
|
| 2663 |
+
|
| 2664 |
+
"""
|
| 2665 |
+
return asarray(title(self))
|
| 2666 |
+
|
| 2667 |
+
def translate(self, table, deletechars=None):
|
| 2668 |
+
"""
|
| 2669 |
+
For each element in `self`, return a copy of the string where
|
| 2670 |
+
all characters occurring in the optional argument
|
| 2671 |
+
`deletechars` are removed, and the remaining characters have
|
| 2672 |
+
been mapped through the given translation table.
|
| 2673 |
+
|
| 2674 |
+
See Also
|
| 2675 |
+
--------
|
| 2676 |
+
char.translate
|
| 2677 |
+
|
| 2678 |
+
"""
|
| 2679 |
+
return asarray(translate(self, table, deletechars))
|
| 2680 |
+
|
| 2681 |
+
def upper(self):
|
| 2682 |
+
"""
|
| 2683 |
+
Return an array with the elements of `self` converted to
|
| 2684 |
+
uppercase.
|
| 2685 |
+
|
| 2686 |
+
See Also
|
| 2687 |
+
--------
|
| 2688 |
+
char.upper
|
| 2689 |
+
|
| 2690 |
+
"""
|
| 2691 |
+
return asarray(upper(self))
|
| 2692 |
+
|
| 2693 |
+
def zfill(self, width):
|
| 2694 |
+
"""
|
| 2695 |
+
Return the numeric string left-filled with zeros in a string of
|
| 2696 |
+
length `width`.
|
| 2697 |
+
|
| 2698 |
+
See Also
|
| 2699 |
+
--------
|
| 2700 |
+
char.zfill
|
| 2701 |
+
|
| 2702 |
+
"""
|
| 2703 |
+
return asarray(zfill(self, width))
|
| 2704 |
+
|
| 2705 |
+
def isnumeric(self):
|
| 2706 |
+
"""
|
| 2707 |
+
For each element in `self`, return True if there are only
|
| 2708 |
+
numeric characters in the element.
|
| 2709 |
+
|
| 2710 |
+
See Also
|
| 2711 |
+
--------
|
| 2712 |
+
char.isnumeric
|
| 2713 |
+
|
| 2714 |
+
"""
|
| 2715 |
+
return isnumeric(self)
|
| 2716 |
+
|
| 2717 |
+
def isdecimal(self):
|
| 2718 |
+
"""
|
| 2719 |
+
For each element in `self`, return True if there are only
|
| 2720 |
+
decimal characters in the element.
|
| 2721 |
+
|
| 2722 |
+
See Also
|
| 2723 |
+
--------
|
| 2724 |
+
char.isdecimal
|
| 2725 |
+
|
| 2726 |
+
"""
|
| 2727 |
+
return isdecimal(self)
|
| 2728 |
+
|
| 2729 |
+
|
| 2730 |
+
@set_module("numpy.char")
def array(obj, itemsize=None, copy=True, unicode=None, order=None):
    """
    Create a `chararray`.

    .. note::
       This class is provided for numarray backward-compatibility.
       New code (not concerned with numarray compatibility) should use
       arrays of type `bytes_` or `str_` and use the free functions
       in :mod:`numpy.char <numpy.core.defchararray>` for fast
       vectorized string operations instead.

    Versus a regular NumPy array of type `str` or `unicode`, this
    class adds the following functionality:

    1) values automatically have whitespace removed from the end
       when indexed

    2) comparison operators automatically remove whitespace from the
       end when comparing values

    3) vectorized string operations are provided as methods
       (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)

    Parameters
    ----------
    obj : array of str or unicode-like

    itemsize : int, optional
        `itemsize` is the number of characters per scalar in the
        resulting array. If `itemsize` is None, and `obj` is an
        object array or a Python list, the `itemsize` will be
        automatically determined. If `itemsize` is provided and `obj`
        is of type str or unicode, then the `obj` string will be
        chunked into `itemsize` pieces.

    copy : bool, optional
        If true (default), then the object is copied. Otherwise, a copy
        will only be made if __array__ returns a copy, if obj is a
        nested sequence, or if a copy is needed to satisfy any of the other
        requirements (`itemsize`, unicode, `order`, etc.).

    unicode : bool, optional
        When true, the resulting `chararray` can contain Unicode
        characters, when false only 8-bit characters. If unicode is
        None and `obj` is one of the following:

        - a `chararray`,
        - an ndarray of type `str` or `unicode`
        - a Python str or unicode object,

        then the unicode setting of the output array will be
        automatically determined.

    order : {'C', 'F', 'A'}, optional
        Specify the order of the array. If order is 'C' (default), then the
        array will be in C-contiguous order (last-index varies the
        fastest). If order is 'F', then the returned array
        will be in Fortran-contiguous order (first-index varies the
        fastest). If order is 'A', then the returned array may
        be in any order (either C-, Fortran-contiguous, or even
        discontiguous).
    """
    # Case 1: a single Python scalar string — treat it as a buffer and
    # chunk it into ``itemsize``-character elements.
    if isinstance(obj, (bytes, str)):
        if unicode is None:
            # Infer the unicode flag from the scalar's type.
            if isinstance(obj, str):
                unicode = True
            else:
                unicode = False

        if itemsize is None:
            # Default: the whole string becomes a single element.
            itemsize = len(obj)
        # Floor division: any trailing characters that do not fill a
        # complete chunk are dropped.
        shape = len(obj) // itemsize

        return chararray(shape, itemsize=itemsize, unicode=unicode,
                         buffer=obj, order=order)

    # Normalize Python sequences to an ndarray so the branches below
    # can dispatch on dtype.
    if isinstance(obj, (list, tuple)):
        obj = numpy.asarray(obj)

    # Case 2: an ndarray that already holds string data.
    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
        # If we just have a vanilla chararray, create a chararray
        # view around it.
        if not isinstance(obj, chararray):
            obj = obj.view(chararray)

        if itemsize is None:
            itemsize = obj.itemsize
            # itemsize is in 8-bit chars, so for Unicode, we need
            # to divide by the size of a single Unicode character,
            # which for NumPy is always 4
            if issubclass(obj.dtype.type, str_):
                itemsize //= 4

        if unicode is None:
            # Infer the unicode flag from the array's dtype.
            if issubclass(obj.dtype.type, str_):
                unicode = True
            else:
                unicode = False

        if unicode:
            dtype = str_
        else:
            dtype = bytes_

        if order is not None:
            obj = numpy.asarray(obj, order=order)
        # astype both copies and converts; it is triggered by an explicit
        # copy request, an itemsize change, or a bytes<->unicode change.
        # NOTE(review): the isinstance checks below test ``obj`` against
        # scalar types (str_/bytes_) rather than comparing dtypes — for an
        # ndarray ``obj`` these are normally False; verify against upstream.
        if (copy or
                (itemsize != obj.itemsize) or
                (not unicode and isinstance(obj, str_)) or
                (unicode and isinstance(obj, bytes_))):
            obj = obj.astype((dtype, int(itemsize)))
        return obj

    # Case 3: an object-dtype ndarray — fall back to list conversion so
    # the constructor can determine the itemsize.
    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
        if itemsize is None:
            # Since no itemsize was specified, convert the input array to
            # a list so the ndarray constructor will automatically
            # determine the itemsize for us.
            obj = obj.tolist()
            # Fall through to the default case

    # Default case: build a fresh array of the requested string dtype and
    # view it as a chararray.
    if unicode:
        dtype = str_
    else:
        dtype = bytes_

    if itemsize is None:
        val = narray(obj, dtype=dtype, order=order, subok=True)
    else:
        val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
    return val.view(chararray)
|
| 2862 |
+
|
| 2863 |
+
|
| 2864 |
+
@set_module("numpy.char")
def asarray(obj, itemsize=None, unicode=None, order=None):
    """
    Convert the input to a `chararray`, copying the data only if
    necessary.

    Versus a regular NumPy array of type `str` or `unicode`, this
    class adds the following functionality:

    1) values automatically have whitespace removed from the end
       when indexed

    2) comparison operators automatically remove whitespace from the
       end when comparing values

    3) vectorized string operations are provided as methods
       (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)

    Parameters
    ----------
    obj : array of str or unicode-like

    itemsize : int, optional
        `itemsize` is the number of characters per scalar in the
        resulting array. If `itemsize` is None, and `obj` is an
        object array or a Python list, the `itemsize` will be
        automatically determined. If `itemsize` is provided and `obj`
        is of type str or unicode, then the `obj` string will be
        chunked into `itemsize` pieces.

    unicode : bool, optional
        When true, the resulting `chararray` can contain Unicode
        characters, when false only 8-bit characters. If unicode is
        None and `obj` is one of the following:

        - a `chararray`,
        - an ndarray of type `str` or `unicode`
        - a Python str or unicode object,

        then the unicode setting of the output array will be
        automatically determined.

    order : {'C', 'F'}, optional
        Specify the order of the array. If order is 'C' (default), then the
        array will be in C-contiguous order (last-index varies the
        fastest). If order is 'F', then the returned array
        will be in Fortran-contiguous order (first-index varies the
        fastest).
    """
    # Same machinery as `array`, but with copying disabled.
    return array(obj, itemsize, copy=False, unicode=unicode, order=order)
|
pllava/lib/python3.10/site-packages/numpy/core/einsumfunc.py
ADDED
|
@@ -0,0 +1,1443 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Implementation of optimized einsum.
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
import itertools
|
| 6 |
+
import operator
|
| 7 |
+
|
| 8 |
+
from numpy.core.multiarray import c_einsum
|
| 9 |
+
from numpy.core.numeric import asanyarray, tensordot
|
| 10 |
+
from numpy.core.overrides import array_function_dispatch
|
| 11 |
+
|
| 12 |
+
__all__ = ['einsum', 'einsum_path']

# The 52 single-letter subscript labels accepted in an einsum expression.
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Set form of the same labels, for O(1) membership tests while parsing.
einsum_symbols_set = set(einsum_symbols)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
    """
    Computes the number of FLOPS in the contraction.

    Parameters
    ----------
    idx_contraction : iterable
        The indices involved in the contraction
    inner : bool
        Does this contraction require an inner product?
    num_terms : int
        The number of terms in a contraction
    size_dictionary : dict
        The size of each of the indices in idx_contraction

    Returns
    -------
    flop_count : int
        The total number of FLOPS required for the contraction.

    Examples
    --------

    >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
    30

    >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
    60

    """
    # One multiply per term beyond the first, plus one add when an inner
    # (summed) product is required; scaled by the full iteration space.
    factor = max(1, num_terms - 1) + (1 if inner else 0)
    return _compute_size_by_dict(idx_contraction, size_dictionary) * factor
|
| 55 |
+
|
| 56 |
+
def _compute_size_by_dict(indices, idx_dict):
|
| 57 |
+
"""
|
| 58 |
+
Computes the product of the elements in indices based on the dictionary
|
| 59 |
+
idx_dict.
|
| 60 |
+
|
| 61 |
+
Parameters
|
| 62 |
+
----------
|
| 63 |
+
indices : iterable
|
| 64 |
+
Indices to base the product on.
|
| 65 |
+
idx_dict : dictionary
|
| 66 |
+
Dictionary of index sizes
|
| 67 |
+
|
| 68 |
+
Returns
|
| 69 |
+
-------
|
| 70 |
+
ret : int
|
| 71 |
+
The resulting product.
|
| 72 |
+
|
| 73 |
+
Examples
|
| 74 |
+
--------
|
| 75 |
+
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
|
| 76 |
+
90
|
| 77 |
+
|
| 78 |
+
"""
|
| 79 |
+
ret = 1
|
| 80 |
+
for i in indices:
|
| 81 |
+
ret *= idx_dict[i]
|
| 82 |
+
return ret
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _find_contraction(positions, input_sets, output_set):
|
| 86 |
+
"""
|
| 87 |
+
Finds the contraction for a given set of input and output sets.
|
| 88 |
+
|
| 89 |
+
Parameters
|
| 90 |
+
----------
|
| 91 |
+
positions : iterable
|
| 92 |
+
Integer positions of terms used in the contraction.
|
| 93 |
+
input_sets : list
|
| 94 |
+
List of sets that represent the lhs side of the einsum subscript
|
| 95 |
+
output_set : set
|
| 96 |
+
Set that represents the rhs side of the overall einsum subscript
|
| 97 |
+
|
| 98 |
+
Returns
|
| 99 |
+
-------
|
| 100 |
+
new_result : set
|
| 101 |
+
The indices of the resulting contraction
|
| 102 |
+
remaining : list
|
| 103 |
+
List of sets that have not been contracted, the new set is appended to
|
| 104 |
+
the end of this list
|
| 105 |
+
idx_removed : set
|
| 106 |
+
Indices removed from the entire contraction
|
| 107 |
+
idx_contraction : set
|
| 108 |
+
The indices used in the current contraction
|
| 109 |
+
|
| 110 |
+
Examples
|
| 111 |
+
--------
|
| 112 |
+
|
| 113 |
+
# A simple dot product test case
|
| 114 |
+
>>> pos = (0, 1)
|
| 115 |
+
>>> isets = [set('ab'), set('bc')]
|
| 116 |
+
>>> oset = set('ac')
|
| 117 |
+
>>> _find_contraction(pos, isets, oset)
|
| 118 |
+
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
|
| 119 |
+
|
| 120 |
+
# A more complex case with additional terms in the contraction
|
| 121 |
+
>>> pos = (0, 2)
|
| 122 |
+
>>> isets = [set('abd'), set('ac'), set('bdc')]
|
| 123 |
+
>>> oset = set('ac')
|
| 124 |
+
>>> _find_contraction(pos, isets, oset)
|
| 125 |
+
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
|
| 126 |
+
"""
|
| 127 |
+
|
| 128 |
+
idx_contract = set()
|
| 129 |
+
idx_remain = output_set.copy()
|
| 130 |
+
remaining = []
|
| 131 |
+
for ind, value in enumerate(input_sets):
|
| 132 |
+
if ind in positions:
|
| 133 |
+
idx_contract |= value
|
| 134 |
+
else:
|
| 135 |
+
remaining.append(value)
|
| 136 |
+
idx_remain |= value
|
| 137 |
+
|
| 138 |
+
new_result = idx_remain & idx_contract
|
| 139 |
+
idx_removed = (idx_contract - new_result)
|
| 140 |
+
remaining.append(new_result)
|
| 141 |
+
|
| 142 |
+
return (new_result, remaining, idx_removed, idx_contract)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Computes all possible pair contractions, sieves the results based
    on ``memory_limit`` and returns the lowest cost path. This algorithm
    scales factorial with respect to the elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The optimal contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set()
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _optimal_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """

    # Each entry is (total flop cost so far, pair positions chosen so far,
    # list of term index-sets still to be contracted).
    full_results = [(0, [], input_sets)]
    # Each pairwise contraction reduces the term count by one, so at most
    # len(input_sets) - 1 steps are needed.
    for iteration in range(len(input_sets) - 1):
        iter_results = []

        # Compute all unique pairs
        for curr in full_results:
            cost, positions, remaining = curr
            # After `iteration` merges, only len(input_sets) - iteration
            # terms remain, hence the shrinking combination range.
            for con in itertools.combinations(range(len(input_sets) - iteration), 2):

                # Find the contraction
                cont = _find_contraction(con, remaining, output_set)
                new_result, new_input_sets, idx_removed, idx_contract = cont

                # Sieve the results based on memory_limit
                new_size = _compute_size_by_dict(new_result, idx_dict)
                if new_size > memory_limit:
                    continue

                # Build (total_cost, positions, indices_remaining)
                total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
                new_pos = positions + [con]
                iter_results.append((total_cost, new_pos, new_input_sets))

        # Update combinatorial list, if we did not find anything return best
        # path + remaining contractions
        if iter_results:
            full_results = iter_results
        else:
            # Every candidate blew the memory limit: take the cheapest path
            # found so far and finish with one big einsum over the rest.
            path = min(full_results, key=lambda x: x[0])[1]
            path += [tuple(range(len(input_sets) - iteration))]
            return path

    # If we have not found anything return single einsum contraction
    if len(full_results) == 0:
        return [tuple(range(len(input_sets)))]

    # Return the positions list of the minimum-cost complete path.
    path = min(full_results, key=lambda x: x[0])[1]
    return path
|
| 214 |
+
|
| 215 |
+
def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
    """Compute the cost (removed size + flops) and resultant indices for
    performing the contraction specified by ``positions``.

    Parameters
    ----------
    positions : tuple of int
        The locations of the proposed tensors to contract.
    input_sets : list of sets
        The indices found on each tensors.
    output_set : set
        The output indices of the expression.
    idx_dict : dict
        Mapping of each index to its size.
    memory_limit : int
        The total allowed size for an intermediary tensor.
    path_cost : int
        The contraction cost so far.
    naive_cost : int
        The cost of the unoptimized expression.

    Returns
    -------
    cost : (int, int)
        A tuple containing the size of any indices removed, and the flop cost.
    positions : tuple of int
        The locations of the proposed tensors to contract.
    new_input_sets : list of sets
        The resulting new list of indices if this proposed contraction is performed.

    Returns None when the proposal is rejected by either sieve.
    """

    # Work out what the proposed contraction would produce.
    idx_result, new_input_sets, idx_removed, idx_contract = _find_contraction(
        positions, input_sets, output_set)

    # Reject intermediates that exceed the memory budget.
    new_size = _compute_size_by_dict(idx_result, idx_dict)
    if new_size > memory_limit:
        return None

    # Net reduction in stored elements relative to the contracted operands.
    # NB: this used to be just the size of the removed indices, i.e.
    # helpers.compute_size_by_dict(idx_removed, idx_dict)
    removed_size = sum(_compute_size_by_dict(input_sets[p], idx_dict)
                       for p in positions) - new_size

    flops = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
    # Sort key: prefer large reductions first, then cheap contractions.
    sort_key = (-removed_size, flops)

    # Reject proposals that already cost more than the unoptimized einsum.
    if (path_cost + flops) > naive_cost:
        return None

    return [sort_key, positions, new_input_sets]
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def _update_other_results(results, best):
    """Update the positions and provisional input_sets of ``results`` based on
    performing the contraction result ``best``. Remove any involving the tensors
    contracted.

    Parameters
    ----------
    results : list
        List of contraction results produced by ``_parse_possible_contraction``.
    best : list
        The best contraction of ``results`` i.e. the one that will be performed.

    Returns
    -------
    mod_results : list
        The list of modified results, updated with outcome of ``best`` contraction.
    """

    # ``best`` is [sort_key, (bx, by), new_input_sets]; the pair (bx, by)
    # are the tensor positions being merged.
    best_con = best[1]
    bx, by = best_con
    mod_results = []

    for cost, (x, y), con_sets in results:

        # Ignore results involving tensors just contracted
        if x in best_con or y in best_con:
            continue

        # Update the input_sets
        # ``con_sets`` was built without tensors x and y, so positions bx/by
        # must first be shifted down by how many of x, y precede them.
        # by is deleted before bx (by > bx by construction of combinations),
        # so the bx deletion index is still valid afterwards.
        del con_sets[by - int(by > x) - int(by > y)]
        del con_sets[bx - int(bx > x) - int(bx > y)]
        # The merged tensor goes just before this result's own provisional
        # output (the last element), mirroring append order.
        con_sets.insert(-1, best[2][-1])

        # Update the position indices
        # Shift x and y down for each of bx, by that precedes them, since
        # those tensors are removed from the global list.
        mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
        mod_results.append((cost, mod_con, con_sets))

    return mod_results
|
| 311 |
+
|
| 312 |
+
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Finds the path by contracting the best pair until the input list is
    exhausted. The best pair is found by minimizing the tuple
    ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
    matrix multiplication or inner product operations, then Hadamard like
    operations, and finally outer operations. Outer products are limited by
    ``memory_limit``. This algorithm scales cubically with respect to the
    number of elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The greedy contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set()
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _greedy_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """

    # Handle trivial cases that leaked through
    if len(input_sets) == 1:
        return [(0,)]
    elif len(input_sets) == 2:
        return [(0, 1)]

    # Build up a naive cost
    # (cost of contracting everything in one shot; used as an upper bound
    # to sieve out proposals that cannot beat plain einsum).
    contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
    idx_result, new_input_sets, idx_removed, idx_contract = contract
    naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)

    # Initially iterate over all pairs
    comb_iter = itertools.combinations(range(len(input_sets)), 2)
    known_contractions = []

    path_cost = 0
    path = []

    for iteration in range(len(input_sets) - 1):

        # Iterate over all pairs on first step, only previously found pairs on subsequent steps
        for positions in comb_iter:

            # Always initially ignore outer products
            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
                continue

            result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
                                                 naive_cost)
            if result is not None:
                known_contractions.append(result)

        # If we do not have a inner contraction, rescan pairs including outer products
        if len(known_contractions) == 0:

            # Then check the outer products
            for positions in itertools.combinations(range(len(input_sets)), 2):
                result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
                                                     path_cost, naive_cost)
                if result is not None:
                    known_contractions.append(result)

            # If we still did not find any remaining contractions, default back to einsum like behavior
            if len(known_contractions) == 0:
                path.append(tuple(range(len(input_sets))))
                break

        # Sort based on first index
        # (min over the (-removed_size, flops) sort key picks the pair that
        # frees the most memory, breaking ties by flop cost).
        best = min(known_contractions, key=lambda x: x[0])

        # Now propagate as many unused contractions as possible to next iteration
        known_contractions = _update_other_results(known_contractions, best)

        # Next iteration only compute contractions with the new tensor
        # All other contractions have been accounted for
        input_sets = best[2]
        new_tensor_pos = len(input_sets) - 1
        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))

        # Update path and total cost
        path.append(best[1])
        path_cost += best[0][1]

    return path
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def _can_dot(inputs, result, idx_removed):
|
| 414 |
+
"""
|
| 415 |
+
Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.
|
| 416 |
+
|
| 417 |
+
Parameters
|
| 418 |
+
----------
|
| 419 |
+
inputs : list of str
|
| 420 |
+
Specifies the subscripts for summation.
|
| 421 |
+
result : str
|
| 422 |
+
Resulting summation.
|
| 423 |
+
idx_removed : set
|
| 424 |
+
Indices that are removed in the summation
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
Returns
|
| 428 |
+
-------
|
| 429 |
+
type : bool
|
| 430 |
+
Returns true if BLAS should and can be used, else False
|
| 431 |
+
|
| 432 |
+
Notes
|
| 433 |
+
-----
|
| 434 |
+
If the operations is BLAS level 1 or 2 and is not already aligned
|
| 435 |
+
we default back to einsum as the memory movement to copy is more
|
| 436 |
+
costly than the operation itself.
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
Examples
|
| 440 |
+
--------
|
| 441 |
+
|
| 442 |
+
# Standard GEMM operation
|
| 443 |
+
>>> _can_dot(['ij', 'jk'], 'ik', set('j'))
|
| 444 |
+
True
|
| 445 |
+
|
| 446 |
+
# Can use the standard BLAS, but requires odd data movement
|
| 447 |
+
>>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
|
| 448 |
+
False
|
| 449 |
+
|
| 450 |
+
# DDOT where the memory is not aligned
|
| 451 |
+
>>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
|
| 452 |
+
False
|
| 453 |
+
|
| 454 |
+
"""
|
| 455 |
+
|
| 456 |
+
# All `dot` calls remove indices
|
| 457 |
+
if len(idx_removed) == 0:
|
| 458 |
+
return False
|
| 459 |
+
|
| 460 |
+
# BLAS can only handle two operands
|
| 461 |
+
if len(inputs) != 2:
|
| 462 |
+
return False
|
| 463 |
+
|
| 464 |
+
input_left, input_right = inputs
|
| 465 |
+
|
| 466 |
+
for c in set(input_left + input_right):
|
| 467 |
+
# can't deal with repeated indices on same input or more than 2 total
|
| 468 |
+
nl, nr = input_left.count(c), input_right.count(c)
|
| 469 |
+
if (nl > 1) or (nr > 1) or (nl + nr > 2):
|
| 470 |
+
return False
|
| 471 |
+
|
| 472 |
+
# can't do implicit summation or dimension collapse e.g.
|
| 473 |
+
# "ab,bc->c" (implicitly sum over 'a')
|
| 474 |
+
# "ab,ca->ca" (take diagonal of 'a')
|
| 475 |
+
if nl + nr - 1 == int(c in result):
|
| 476 |
+
return False
|
| 477 |
+
|
| 478 |
+
# Build a few temporaries
|
| 479 |
+
set_left = set(input_left)
|
| 480 |
+
set_right = set(input_right)
|
| 481 |
+
keep_left = set_left - idx_removed
|
| 482 |
+
keep_right = set_right - idx_removed
|
| 483 |
+
rs = len(idx_removed)
|
| 484 |
+
|
| 485 |
+
# At this point we are a DOT, GEMV, or GEMM operation
|
| 486 |
+
|
| 487 |
+
# Handle inner products
|
| 488 |
+
|
| 489 |
+
# DDOT with aligned data
|
| 490 |
+
if input_left == input_right:
|
| 491 |
+
return True
|
| 492 |
+
|
| 493 |
+
# DDOT without aligned data (better to use einsum)
|
| 494 |
+
if set_left == set_right:
|
| 495 |
+
return False
|
| 496 |
+
|
| 497 |
+
# Handle the 4 possible (aligned) GEMV or GEMM cases
|
| 498 |
+
|
| 499 |
+
# GEMM or GEMV no transpose
|
| 500 |
+
if input_left[-rs:] == input_right[:rs]:
|
| 501 |
+
return True
|
| 502 |
+
|
| 503 |
+
# GEMM or GEMV transpose both
|
| 504 |
+
if input_left[:rs] == input_right[-rs:]:
|
| 505 |
+
return True
|
| 506 |
+
|
| 507 |
+
# GEMM or GEMV transpose right
|
| 508 |
+
if input_left[-rs:] == input_right[-rs:]:
|
| 509 |
+
return True
|
| 510 |
+
|
| 511 |
+
# GEMM or GEMV transpose left
|
| 512 |
+
if input_left[:rs] == input_right[:rs]:
|
| 513 |
+
return True
|
| 514 |
+
|
| 515 |
+
# Einsum is faster than GEMV if we have to copy data
|
| 516 |
+
if not keep_left or not keep_right:
|
| 517 |
+
return False
|
| 518 |
+
|
| 519 |
+
# We are a matrix-matrix product, but we need to copy data
|
| 520 |
+
return True
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
def _parse_einsum_input(operands):
    """
    A reproduction of einsum c side einsum parsing in python.

    Parameters
    ----------
    operands : sequence
        Either ``("subscripts", arr0, arr1, ...)`` or the interleaved form
        ``(arr0, sublist0, arr1, sublist1, ..., [output_sublist])``.

    Returns
    -------
    input_strings : str
        Parsed input strings
    output_string : str
        Parsed output string
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified to reduce printing:

    >>> np.random.seed(123)
    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))
    ('za,xza', 'xz', [a, b]) # may vary

    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
    ('za,xza', 'xz', [a, b]) # may vary
    """

    if len(operands) == 0:
        raise ValueError("No input operands")

    if isinstance(operands[0], str):
        # Subscript-string form: the first element is the subscript string,
        # the remaining elements are the arrays.
        subscripts = operands[0].replace(" ", "")
        operands = [asanyarray(v) for v in operands[1:]]

        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)

    else:
        # Interleaved form: (array, sublist, array, sublist, ...); translate
        # the sublists into an equivalent subscript string.
        tmp_operands = list(operands)
        operand_list = []
        subscript_list = []
        for p in range(len(operands) // 2):
            operand_list.append(tmp_operands.pop(0))
            subscript_list.append(tmp_operands.pop(0))

        # An odd trailing element, if present, is the output sublist.
        output_list = tmp_operands[-1] if len(tmp_operands) else None
        operands = [asanyarray(v) for v in operand_list]
        subscripts = ""
        last = len(subscript_list) - 1
        for num, sub in enumerate(subscript_list):
            for s in sub:
                if s is Ellipsis:
                    subscripts += "..."
                else:
                    try:
                        # Sublist entries must be ints (mapped onto subscript
                        # letters) or Ellipsis.
                        s = operator.index(s)
                    except TypeError as e:
                        raise TypeError("For this input type lists must contain "
                                        "either int or Ellipsis") from e
                    subscripts += einsum_symbols[s]
            if num != last:
                subscripts += ","

        if output_list is not None:
            subscripts += "->"
            for s in output_list:
                if s is Ellipsis:
                    subscripts += "..."
                else:
                    try:
                        s = operator.index(s)
                    except TypeError as e:
                        raise TypeError("For this input type lists must contain "
                                        "either int or Ellipsis") from e
                    subscripts += einsum_symbols[s]
    # Check for proper "->"
    if ("-" in subscripts) or (">" in subscripts):
        invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
        if invalid or (subscripts.count("->") != 1):
            raise ValueError("Subscripts can only contain one '->'.")

    # Parse ellipses
    if "." in subscripts:
        # Replace each "..." with fresh symbols drawn from the pool of
        # letters not already used, so downstream code only ever sees
        # explicit index labels.
        used = subscripts.replace(".", "").replace(",", "").replace("->", "")
        unused = list(einsum_symbols_set - set(used))
        ellipse_inds = "".join(unused)
        longest = 0

        if "->" in subscripts:
            input_tmp, output_sub = subscripts.split("->")
            split_subscripts = input_tmp.split(",")
            out_sub = True
        else:
            split_subscripts = subscripts.split(',')
            out_sub = False

        for num, sub in enumerate(split_subscripts):
            if "." in sub:
                if (sub.count(".") != 3) or (sub.count("...") != 1):
                    raise ValueError("Invalid Ellipses.")

                # Take into account numerical values
                if operands[num].shape == ():
                    ellipse_count = 0
                else:
                    # Number of dimensions covered by this operand's "...".
                    ellipse_count = max(operands[num].ndim, 1)
                    ellipse_count -= (len(sub) - 3)

                if ellipse_count > longest:
                    longest = ellipse_count

                if ellipse_count < 0:
                    raise ValueError("Ellipses lengths do not match.")
                elif ellipse_count == 0:
                    split_subscripts[num] = sub.replace('...', '')
                else:
                    rep_inds = ellipse_inds[-ellipse_count:]
                    split_subscripts[num] = sub.replace('...', rep_inds)

        subscripts = ",".join(split_subscripts)
        if longest == 0:
            out_ellipse = ""
        else:
            out_ellipse = ellipse_inds[-longest:]

        if out_sub:
            subscripts += "->" + output_sub.replace("...", out_ellipse)
        else:
            # Special care for outputless ellipses
            output_subscript = ""
            tmp_subscripts = subscripts.replace(",", "")
            for s in sorted(set(tmp_subscripts)):
                if s not in (einsum_symbols):
                    raise ValueError("Character %s is not a valid symbol." % s)
                if tmp_subscripts.count(s) == 1:
                    # Indices appearing exactly once are kept in the output.
                    output_subscript += s
            normal_inds = ''.join(sorted(set(output_subscript) -
                                         set(out_ellipse)))

            subscripts += "->" + out_ellipse + normal_inds

    # Build output string if does not exist
    if "->" in subscripts:
        input_subscripts, output_subscript = subscripts.split("->")
    else:
        input_subscripts = subscripts
        # Build output subscripts
        tmp_subscripts = subscripts.replace(",", "")
        output_subscript = ""
        for s in sorted(set(tmp_subscripts)):
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)
            if tmp_subscripts.count(s) == 1:
                output_subscript += s

    # Make sure output subscripts are in the input
    for char in output_subscript:
        if char not in input_subscripts:
            raise ValueError("Output character %s did not appear in the input"
                             % char)

    # Make sure number operands is equivalent to the number of terms
    if len(input_subscripts.split(',')) != len(operands):
        raise ValueError("Number of einsum subscripts must be equal to the "
                         "number of operands.")

    return (input_subscripts, output_subscript, operands)


def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
|
| 697 |
+
# NOTE: technically, we should only dispatch on array-like arguments, not
|
| 698 |
+
# subscripts (given as strings). But separating operands into
|
| 699 |
+
# arrays/subscripts is a little tricky/slow (given einsum's two supported
|
| 700 |
+
# signatures), so as a practical shortcut we dispatch on everything.
|
| 701 |
+
# Strings will be ignored for dispatching since they don't define
|
| 702 |
+
# __array_function__.
|
| 703 |
+
return operands
|
| 704 |
+
|
| 705 |
+
|
| 706 |
+
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, optimize='greedy', einsum_call=False):
    """
    einsum_path(subscripts, *operands, optimize='greedy')

    Evaluates the lowest cost contraction order for an einsum expression by
    considering the creation of intermediate arrays.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation.
    *operands : list of array_like
        These are the arrays for the operation.
    optimize : {bool, list, tuple, 'greedy', 'optimal'}
        Choose the type of path. If a tuple is provided, the second argument is
        assumed to be the maximum intermediate size created. If only a single
        argument is provided the largest input or output array size is used
        as a maximum intermediate size.

        * if a list is given that starts with ``einsum_path``, uses this as the
          contraction path
        * if False no optimization is taken
        * if True defaults to the 'greedy' algorithm
        * 'optimal' An algorithm that combinatorially explores all possible
          ways of contracting the listed tensors and chooses the least costly
          path. Scales exponentially with the number of terms in the
          contraction.
        * 'greedy' An algorithm that chooses the best pair contraction
          at each step. Effectively, this algorithm searches the largest inner,
          Hadamard, and then outer products at each step. Scales cubically with
          the number of terms in the contraction. Equivalent to the 'optimal'
          path for most contractions.

        Default is 'greedy'.

    Returns
    -------
    path : list of tuples
        A list representation of the einsum path.
    string_repr : str
        A printable representation of the einsum path.

    Notes
    -----
    The resulting path indicates which terms of the input contraction should be
    contracted first, the result of this contraction is then appended to the
    end of the contraction list. This list can then be iterated over until all
    intermediate contractions are complete.

    See Also
    --------
    einsum, linalg.multi_dot

    Examples
    --------

    We can begin with a chain dot example. In this case, it is optimal to
    contract the ``b`` and ``c`` tensors first as represented by the first
    element of the path ``(1, 2)``. The resulting tensor is added to the end
    of the contraction and the remaining contraction ``(0, 1)`` is then
    completed.

    >>> np.random.seed(123)
    >>> a = np.random.rand(2, 2)
    >>> b = np.random.rand(2, 5)
    >>> c = np.random.rand(5, 2)
    >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
    >>> print(path_info[0])
    ['einsum_path', (1, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ij,jk,kl->il # may vary
             Naive scaling:  4
         Optimized scaling:  3
          Naive FLOP count:  1.600e+02
      Optimized FLOP count:  5.600e+01
       Theoretical speedup:  2.857
      Largest intermediate:  4.000e+00 elements
    -------------------------------------------------------------------------
    scaling                  current                                remaining
    -------------------------------------------------------------------------
       3                   kl,jk->jl                                ij,jl->il
       3                   jl,ij->il                                   il->il


    A more complex index transformation example.

    >>> I = np.random.rand(10, 10, 10, 10)
    >>> C = np.random.rand(10, 10)
    >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
    ...                            optimize='greedy')

    >>> print(path_info[0])
    ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ea,fb,abcd,gc,hd->efgh # may vary
             Naive scaling:  8
         Optimized scaling:  5
          Naive FLOP count:  8.000e+08
      Optimized FLOP count:  8.000e+05
       Theoretical speedup:  1000.000
      Largest intermediate:  1.000e+04 elements
    --------------------------------------------------------------------------
    scaling        current                                remaining
    --------------------------------------------------------------------------
       5           abcd,ea->bcde                      fb,gc,hd,bcde->efgh
       5           bcde,fb->cdef                         gc,hd,cdef->efgh
       5           cdef,gc->defg                            hd,defg->efgh
       5           defg,hd->efgh                               efgh->efgh
    """

    # Figure out what the path really is
    path_type = optimize
    if path_type is True:
        path_type = 'greedy'
    if path_type is None:
        path_type = False

    explicit_einsum_path = False
    memory_limit = None

    # No optimization or a named path algorithm
    if (path_type is False) or isinstance(path_type, str):
        pass

    # Given an explicit path
    elif len(path_type) and (path_type[0] == 'einsum_path'):
        explicit_einsum_path = True

    # Path tuple with memory limit
    elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
            isinstance(path_type[1], (int, float))):
        memory_limit = int(path_type[1])
        path_type = path_type[0]

    else:
        raise TypeError("Did not understand the path: %s" % str(path_type))

    # Hidden option, only einsum should call this
    einsum_call_arg = einsum_call

    # Python side parsing
    input_subscripts, output_subscript, operands = _parse_einsum_input(operands)

    # Build a few useful list and sets
    input_list = input_subscripts.split(',')
    input_sets = [set(x) for x in input_list]
    output_set = set(output_subscript)
    indices = set(input_subscripts.replace(',', ''))

    # Get length of each unique dimension and ensure all dimensions are correct
    dimension_dict = {}
    broadcast_indices = [[] for x in range(len(input_list))]
    for tnum, term in enumerate(input_list):
        sh = operands[tnum].shape
        if len(sh) != len(term):
            # Report the offending term itself; previously this indexed
            # input_subscripts[tnum], which is a single character of the
            # joined subscript string, not the operand's subscripts.
            raise ValueError("Einstein sum subscript %s does not contain the "
                             "correct number of indices for operand %d."
                             % (input_list[tnum], tnum))
        for cnum, char in enumerate(term):
            dim = sh[cnum]

            # Build out broadcast indices
            if dim == 1:
                broadcast_indices[tnum].append(char)

            if char in dimension_dict.keys():
                # For broadcasting cases we always want the largest dim size
                if dimension_dict[char] == 1:
                    dimension_dict[char] = dim
                elif dim not in (1, dimension_dict[char]):
                    raise ValueError("Size of label '%s' for operand %d (%d) "
                                     "does not match previous terms (%d)."
                                     % (char, tnum, dimension_dict[char], dim))
            else:
                dimension_dict[char] = dim

    # Convert broadcast inds to sets
    broadcast_indices = [set(x) for x in broadcast_indices]

    # Compute size of each input array plus the output array
    size_list = [_compute_size_by_dict(term, dimension_dict)
                 for term in input_list + [output_subscript]]
    max_size = max(size_list)

    if memory_limit is None:
        memory_arg = max_size
    else:
        memory_arg = memory_limit

    # Compute naive cost
    # This isn't quite right, need to look into exactly how einsum does this
    inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
    naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)

    # Compute the path
    if explicit_einsum_path:
        path = path_type[1:]
    elif (
        (path_type is False)
        or (len(input_list) in [1, 2])
        or (indices == output_set)
    ):
        # Nothing to be optimized, leave it to einsum
        path = [tuple(range(len(input_list)))]
    elif path_type == "greedy":
        path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
    elif path_type == "optimal":
        path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
    else:
        # %-format the message here; previously the raw (format, arg) tuple
        # was passed to KeyError, so the placeholder was never substituted.
        raise KeyError("Path name %s not found" % path_type)

    cost_list, scale_list, size_list, contraction_list = [], [], [], []

    # Build contraction tuple (positions, gemm, einsum_str, remaining)
    for cnum, contract_inds in enumerate(path):
        # Make sure we remove inds from right to left
        contract_inds = tuple(sorted(list(contract_inds), reverse=True))

        contract = _find_contraction(contract_inds, input_sets, output_set)
        out_inds, input_sets, idx_removed, idx_contract = contract

        cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
        cost_list.append(cost)
        scale_list.append(len(idx_contract))
        size_list.append(_compute_size_by_dict(out_inds, dimension_dict))

        bcast = set()
        tmp_inputs = []
        for x in contract_inds:
            tmp_inputs.append(input_list.pop(x))
            bcast |= broadcast_indices.pop(x)

        new_bcast_inds = bcast - idx_removed

        # If we're broadcasting, nix blas
        if not len(idx_removed & bcast):
            do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
        else:
            do_blas = False

        # Last contraction
        if (cnum - len(path)) == -1:
            idx_result = output_subscript
        else:
            sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
            idx_result = "".join([x[1] for x in sorted(sort_result)])

        input_list.append(idx_result)
        broadcast_indices.append(new_bcast_inds)
        einsum_str = ",".join(tmp_inputs) + "->" + idx_result

        contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
        contraction_list.append(contraction)

    opt_cost = sum(cost_list) + 1

    if len(input_list) != 1:
        # Explicit "einsum_path" is usually trusted, but we detect this kind of
        # mistake in order to prevent from returning an intermediate value.
        raise RuntimeError(
            "Invalid einsum_path is specified: {} more operands has to be "
            "contracted.".format(len(input_list) - 1))

    if einsum_call_arg:
        return (operands, contraction_list)

    # Return the path along with a nice string representation
    overall_contraction = input_subscripts + "->" + output_subscript
    header = ("scaling", "current", "remaining")

    speedup = naive_cost / opt_cost
    max_i = max(size_list)

    path_print  = "  Complete contraction:  %s\n" % overall_contraction
    path_print += "         Naive scaling:  %d\n" % len(indices)
    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
    path_print += "      Naive FLOP count:  %.3e\n" % naive_cost
    path_print += "  Optimized FLOP count:  %.3e\n" % opt_cost
    path_print += "   Theoretical speedup:  %3.3f\n" % speedup
    path_print += "  Largest intermediate:  %.3e elements\n" % max_i
    path_print += "-" * 74 + "\n"
    path_print += "%6s %24s %40s\n" % header
    path_print += "-" * 74

    for n, contraction in enumerate(contraction_list):
        inds, idx_rm, einsum_str, remaining, blas = contraction
        remaining_str = ",".join(remaining) + "->" + output_subscript
        path_run = (scale_list[n], einsum_str, remaining_str)
        path_print += "\n%4d    %24s %40s" % path_run

    path = ['einsum_path'] + path
    return (path, path_print)


def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
|
| 1002 |
+
# Arguably we dispatch on more arguments than we really should; see note in
|
| 1003 |
+
# _einsum_path_dispatcher for why.
|
| 1004 |
+
yield from operands
|
| 1005 |
+
yield out
|
| 1006 |
+
|
| 1007 |
+
|
| 1008 |
+
# Rewrite einsum to handle different cases
|
| 1009 |
+
@array_function_dispatch(_einsum_dispatcher, module='numpy')
|
| 1010 |
+
def einsum(*operands, out=None, optimize=False, **kwargs):
|
| 1011 |
+
"""
|
| 1012 |
+
einsum(subscripts, *operands, out=None, dtype=None, order='K',
|
| 1013 |
+
casting='safe', optimize=False)
|
| 1014 |
+
|
| 1015 |
+
Evaluates the Einstein summation convention on the operands.
|
| 1016 |
+
|
| 1017 |
+
Using the Einstein summation convention, many common multi-dimensional,
|
| 1018 |
+
linear algebraic array operations can be represented in a simple fashion.
|
| 1019 |
+
In *implicit* mode `einsum` computes these values.
|
| 1020 |
+
|
| 1021 |
+
In *explicit* mode, `einsum` provides further flexibility to compute
|
| 1022 |
+
other array operations that might not be considered classical Einstein
|
| 1023 |
+
summation operations, by disabling, or forcing summation over specified
|
| 1024 |
+
subscript labels.
|
| 1025 |
+
|
| 1026 |
+
See the notes and examples for clarification.
|
| 1027 |
+
|
| 1028 |
+
Parameters
|
| 1029 |
+
----------
|
| 1030 |
+
subscripts : str
|
| 1031 |
+
Specifies the subscripts for summation as comma separated list of
|
| 1032 |
+
subscript labels. An implicit (classical Einstein summation)
|
| 1033 |
+
calculation is performed unless the explicit indicator '->' is
|
| 1034 |
+
included as well as subscript labels of the precise output form.
|
| 1035 |
+
operands : list of array_like
|
| 1036 |
+
These are the arrays for the operation.
|
| 1037 |
+
out : ndarray, optional
|
| 1038 |
+
If provided, the calculation is done into this array.
|
| 1039 |
+
dtype : {data-type, None}, optional
|
| 1040 |
+
If provided, forces the calculation to use the data type specified.
|
| 1041 |
+
Note that you may have to also give a more liberal `casting`
|
| 1042 |
+
parameter to allow the conversions. Default is None.
|
| 1043 |
+
order : {'C', 'F', 'A', 'K'}, optional
|
| 1044 |
+
Controls the memory layout of the output. 'C' means it should
|
| 1045 |
+
be C contiguous. 'F' means it should be Fortran contiguous,
|
| 1046 |
+
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
|
| 1047 |
+
'K' means it should be as close to the layout as the inputs as
|
| 1048 |
+
is possible, including arbitrarily permuted axes.
|
| 1049 |
+
Default is 'K'.
|
| 1050 |
+
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
|
| 1051 |
+
Controls what kind of data casting may occur. Setting this to
|
| 1052 |
+
'unsafe' is not recommended, as it can adversely affect accumulations.
|
| 1053 |
+
|
| 1054 |
+
* 'no' means the data types should not be cast at all.
|
| 1055 |
+
* 'equiv' means only byte-order changes are allowed.
|
| 1056 |
+
* 'safe' means only casts which can preserve values are allowed.
|
| 1057 |
+
* 'same_kind' means only safe casts or casts within a kind,
|
| 1058 |
+
like float64 to float32, are allowed.
|
| 1059 |
+
* 'unsafe' means any data conversions may be done.
|
| 1060 |
+
|
| 1061 |
+
Default is 'safe'.
|
| 1062 |
+
optimize : {False, True, 'greedy', 'optimal'}, optional
|
| 1063 |
+
Controls if intermediate optimization should occur. No optimization
|
| 1064 |
+
will occur if False and True will default to the 'greedy' algorithm.
|
| 1065 |
+
Also accepts an explicit contraction list from the ``np.einsum_path``
|
| 1066 |
+
function. See ``np.einsum_path`` for more details. Defaults to False.
|
| 1067 |
+
|
| 1068 |
+
Returns
|
| 1069 |
+
-------
|
| 1070 |
+
output : ndarray
|
| 1071 |
+
The calculation based on the Einstein summation convention.
|
| 1072 |
+
|
| 1073 |
+
See Also
|
| 1074 |
+
--------
|
| 1075 |
+
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
|
| 1076 |
+
einops :
|
| 1077 |
+
similar verbose interface is provided by
|
| 1078 |
+
`einops <https://github.com/arogozhnikov/einops>`_ package to cover
|
| 1079 |
+
additional operations: transpose, reshape/flatten, repeat/tile,
|
| 1080 |
+
squeeze/unsqueeze and reductions.
|
| 1081 |
+
opt_einsum :
|
| 1082 |
+
`opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
|
| 1083 |
+
optimizes contraction order for einsum-like expressions
|
| 1084 |
+
in backend-agnostic manner.
|
| 1085 |
+
|
| 1086 |
+
Notes
|
| 1087 |
+
-----
|
| 1088 |
+
.. versionadded:: 1.6.0
|
| 1089 |
+
|
| 1090 |
+
The Einstein summation convention can be used to compute
|
| 1091 |
+
many multi-dimensional, linear algebraic array operations. `einsum`
|
| 1092 |
+
provides a succinct way of representing these.
|
| 1093 |
+
|
| 1094 |
+
A non-exhaustive list of these operations,
|
| 1095 |
+
which can be computed by `einsum`, is shown below along with examples:
|
| 1096 |
+
|
| 1097 |
+
* Trace of an array, :py:func:`numpy.trace`.
|
| 1098 |
+
* Return a diagonal, :py:func:`numpy.diag`.
|
| 1099 |
+
* Array axis summations, :py:func:`numpy.sum`.
|
| 1100 |
+
* Transpositions and permutations, :py:func:`numpy.transpose`.
|
| 1101 |
+
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
|
| 1102 |
+
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
|
| 1103 |
+
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
|
| 1104 |
+
* Tensor contractions, :py:func:`numpy.tensordot`.
|
| 1105 |
+
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
|
| 1106 |
+
|
| 1107 |
+
The subscripts string is a comma-separated list of subscript labels,
|
| 1108 |
+
where each label refers to a dimension of the corresponding operand.
|
| 1109 |
+
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
|
| 1110 |
+
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
|
| 1111 |
+
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
|
| 1112 |
+
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
|
| 1113 |
+
describes traditional matrix multiplication and is equivalent to
|
| 1114 |
+
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
|
| 1115 |
+
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
|
| 1116 |
+
to :py:func:`np.trace(a) <numpy.trace>`.
|
| 1117 |
+
|
| 1118 |
+
In *implicit mode*, the chosen subscripts are important
|
| 1119 |
+
since the axes of the output are reordered alphabetically. This
|
| 1120 |
+
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
|
| 1121 |
+
``np.einsum('ji', a)`` takes its transpose. Additionally,
|
| 1122 |
+
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
|
| 1123 |
+
``np.einsum('ij,jh', a, b)`` returns the transpose of the
|
| 1124 |
+
multiplication since subscript 'h' precedes subscript 'i'.
|
| 1125 |
+
|
| 1126 |
+
In *explicit mode* the output can be directly controlled by
|
| 1127 |
+
specifying output subscript labels. This requires the
|
| 1128 |
+
identifier '->' as well as the list of output subscript labels.
|
| 1129 |
+
This feature increases the flexibility of the function since
|
| 1130 |
+
summing can be disabled or forced when required. The call
|
| 1131 |
+
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
|
| 1132 |
+
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
|
| 1133 |
+
The difference is that `einsum` does not allow broadcasting by default.
|
| 1134 |
+
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
|
| 1135 |
+
order of the output subscript labels and therefore returns matrix
|
| 1136 |
+
multiplication, unlike the example above in implicit mode.
|
| 1137 |
+
|
| 1138 |
+
To enable and control broadcasting, use an ellipsis. Default
|
| 1139 |
+
NumPy-style broadcasting is done by adding an ellipsis
|
| 1140 |
+
to the left of each term, like ``np.einsum('...ii->...i', a)``.
|
| 1141 |
+
To take the trace along the first and last axes,
|
| 1142 |
+
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
|
| 1143 |
+
product with the left-most indices instead of rightmost, one can do
|
| 1144 |
+
``np.einsum('ij...,jk...->ik...', a, b)``.
|
| 1145 |
+
|
| 1146 |
+
When there is only one operand, no axes are summed, and no output
|
| 1147 |
+
parameter is provided, a view into the operand is returned instead
|
| 1148 |
+
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
|
| 1149 |
+
produces a view (changed in version 1.10.0).
|
| 1150 |
+
|
| 1151 |
+
`einsum` also provides an alternative way to provide the subscripts
|
| 1152 |
+
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
|
| 1153 |
+
If the output shape is not provided in this format `einsum` will be
|
| 1154 |
+
calculated in implicit mode, otherwise it will be performed explicitly.
|
| 1155 |
+
The examples below have corresponding `einsum` calls with the two
|
| 1156 |
+
parameter methods.
|
| 1157 |
+
|
| 1158 |
+
.. versionadded:: 1.10.0
|
| 1159 |
+
|
| 1160 |
+
Views returned from einsum are now writeable whenever the input array
|
| 1161 |
+
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
|
| 1162 |
+
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
|
| 1163 |
+
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
|
| 1164 |
+
of a 2D array.
|
| 1165 |
+
|
| 1166 |
+
.. versionadded:: 1.12.0
|
| 1167 |
+
|
| 1168 |
+
Added the ``optimize`` argument which will optimize the contraction order
|
| 1169 |
+
of an einsum expression. For a contraction with three or more operands this
|
| 1170 |
+
can greatly increase the computational efficiency at the cost of a larger
|
| 1171 |
+
memory footprint during computation.
|
| 1172 |
+
|
| 1173 |
+
Typically a 'greedy' algorithm is applied which empirical tests have shown
|
| 1174 |
+
returns the optimal path in the majority of cases. In some cases 'optimal'
|
| 1175 |
+
will return the superlative path through a more expensive, exhaustive search.
|
| 1176 |
+
For iterative calculations it may be advisable to calculate the optimal path
|
| 1177 |
+
once and reuse that path by supplying it as an argument. An example is given
|
| 1178 |
+
below.
|
| 1179 |
+
|
| 1180 |
+
See :py:func:`numpy.einsum_path` for more details.
|
| 1181 |
+
|
| 1182 |
+
Examples
|
| 1183 |
+
--------
|
| 1184 |
+
>>> a = np.arange(25).reshape(5,5)
|
| 1185 |
+
>>> b = np.arange(5)
|
| 1186 |
+
>>> c = np.arange(6).reshape(2,3)
|
| 1187 |
+
|
| 1188 |
+
Trace of a matrix:
|
| 1189 |
+
|
| 1190 |
+
>>> np.einsum('ii', a)
|
| 1191 |
+
60
|
| 1192 |
+
>>> np.einsum(a, [0,0])
|
| 1193 |
+
60
|
| 1194 |
+
>>> np.trace(a)
|
| 1195 |
+
60
|
| 1196 |
+
|
| 1197 |
+
Extract the diagonal (requires explicit form):
|
| 1198 |
+
|
| 1199 |
+
>>> np.einsum('ii->i', a)
|
| 1200 |
+
array([ 0, 6, 12, 18, 24])
|
| 1201 |
+
>>> np.einsum(a, [0,0], [0])
|
| 1202 |
+
array([ 0, 6, 12, 18, 24])
|
| 1203 |
+
>>> np.diag(a)
|
| 1204 |
+
array([ 0, 6, 12, 18, 24])
|
| 1205 |
+
|
| 1206 |
+
Sum over an axis (requires explicit form):
|
| 1207 |
+
|
| 1208 |
+
>>> np.einsum('ij->i', a)
|
| 1209 |
+
array([ 10, 35, 60, 85, 110])
|
| 1210 |
+
>>> np.einsum(a, [0,1], [0])
|
| 1211 |
+
array([ 10, 35, 60, 85, 110])
|
| 1212 |
+
>>> np.sum(a, axis=1)
|
| 1213 |
+
array([ 10, 35, 60, 85, 110])
|
| 1214 |
+
|
| 1215 |
+
For higher dimensional arrays summing a single axis can be done with ellipsis:
|
| 1216 |
+
|
| 1217 |
+
>>> np.einsum('...j->...', a)
|
| 1218 |
+
array([ 10, 35, 60, 85, 110])
|
| 1219 |
+
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
|
| 1220 |
+
array([ 10, 35, 60, 85, 110])
|
| 1221 |
+
|
| 1222 |
+
Compute a matrix transpose, or reorder any number of axes:
|
| 1223 |
+
|
| 1224 |
+
>>> np.einsum('ji', c)
|
| 1225 |
+
array([[0, 3],
|
| 1226 |
+
[1, 4],
|
| 1227 |
+
[2, 5]])
|
| 1228 |
+
>>> np.einsum('ij->ji', c)
|
| 1229 |
+
array([[0, 3],
|
| 1230 |
+
[1, 4],
|
| 1231 |
+
[2, 5]])
|
| 1232 |
+
>>> np.einsum(c, [1,0])
|
| 1233 |
+
array([[0, 3],
|
| 1234 |
+
[1, 4],
|
| 1235 |
+
[2, 5]])
|
| 1236 |
+
>>> np.transpose(c)
|
| 1237 |
+
array([[0, 3],
|
| 1238 |
+
[1, 4],
|
| 1239 |
+
[2, 5]])
|
| 1240 |
+
|
| 1241 |
+
Vector inner products:
|
| 1242 |
+
|
| 1243 |
+
>>> np.einsum('i,i', b, b)
|
| 1244 |
+
30
|
| 1245 |
+
>>> np.einsum(b, [0], b, [0])
|
| 1246 |
+
30
|
| 1247 |
+
>>> np.inner(b,b)
|
| 1248 |
+
30
|
| 1249 |
+
|
| 1250 |
+
Matrix vector multiplication:
|
| 1251 |
+
|
| 1252 |
+
>>> np.einsum('ij,j', a, b)
|
| 1253 |
+
array([ 30, 80, 130, 180, 230])
|
| 1254 |
+
>>> np.einsum(a, [0,1], b, [1])
|
| 1255 |
+
array([ 30, 80, 130, 180, 230])
|
| 1256 |
+
>>> np.dot(a, b)
|
| 1257 |
+
array([ 30, 80, 130, 180, 230])
|
| 1258 |
+
>>> np.einsum('...j,j', a, b)
|
| 1259 |
+
array([ 30, 80, 130, 180, 230])
|
| 1260 |
+
|
| 1261 |
+
Broadcasting and scalar multiplication:
|
| 1262 |
+
|
| 1263 |
+
>>> np.einsum('..., ...', 3, c)
|
| 1264 |
+
array([[ 0, 3, 6],
|
| 1265 |
+
[ 9, 12, 15]])
|
| 1266 |
+
>>> np.einsum(',ij', 3, c)
|
| 1267 |
+
array([[ 0, 3, 6],
|
| 1268 |
+
[ 9, 12, 15]])
|
| 1269 |
+
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
|
| 1270 |
+
array([[ 0, 3, 6],
|
| 1271 |
+
[ 9, 12, 15]])
|
| 1272 |
+
>>> np.multiply(3, c)
|
| 1273 |
+
array([[ 0, 3, 6],
|
| 1274 |
+
[ 9, 12, 15]])
|
| 1275 |
+
|
| 1276 |
+
Vector outer product:
|
| 1277 |
+
|
| 1278 |
+
>>> np.einsum('i,j', np.arange(2)+1, b)
|
| 1279 |
+
array([[0, 1, 2, 3, 4],
|
| 1280 |
+
[0, 2, 4, 6, 8]])
|
| 1281 |
+
>>> np.einsum(np.arange(2)+1, [0], b, [1])
|
| 1282 |
+
array([[0, 1, 2, 3, 4],
|
| 1283 |
+
[0, 2, 4, 6, 8]])
|
| 1284 |
+
>>> np.outer(np.arange(2)+1, b)
|
| 1285 |
+
array([[0, 1, 2, 3, 4],
|
| 1286 |
+
[0, 2, 4, 6, 8]])
|
| 1287 |
+
|
| 1288 |
+
Tensor contraction:
|
| 1289 |
+
|
| 1290 |
+
>>> a = np.arange(60.).reshape(3,4,5)
|
| 1291 |
+
>>> b = np.arange(24.).reshape(4,3,2)
|
| 1292 |
+
>>> np.einsum('ijk,jil->kl', a, b)
|
| 1293 |
+
array([[4400., 4730.],
|
| 1294 |
+
[4532., 4874.],
|
| 1295 |
+
[4664., 5018.],
|
| 1296 |
+
[4796., 5162.],
|
| 1297 |
+
[4928., 5306.]])
|
| 1298 |
+
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
|
| 1299 |
+
array([[4400., 4730.],
|
| 1300 |
+
[4532., 4874.],
|
| 1301 |
+
[4664., 5018.],
|
| 1302 |
+
[4796., 5162.],
|
| 1303 |
+
[4928., 5306.]])
|
| 1304 |
+
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
|
| 1305 |
+
array([[4400., 4730.],
|
| 1306 |
+
[4532., 4874.],
|
| 1307 |
+
[4664., 5018.],
|
| 1308 |
+
[4796., 5162.],
|
| 1309 |
+
[4928., 5306.]])
|
| 1310 |
+
|
| 1311 |
+
Writeable returned arrays (since version 1.10.0):
|
| 1312 |
+
|
| 1313 |
+
>>> a = np.zeros((3, 3))
|
| 1314 |
+
>>> np.einsum('ii->i', a)[:] = 1
|
| 1315 |
+
>>> a
|
| 1316 |
+
array([[1., 0., 0.],
|
| 1317 |
+
[0., 1., 0.],
|
| 1318 |
+
[0., 0., 1.]])
|
| 1319 |
+
|
| 1320 |
+
Example of ellipsis use:
|
| 1321 |
+
|
| 1322 |
+
>>> a = np.arange(6).reshape((3,2))
|
| 1323 |
+
>>> b = np.arange(12).reshape((4,3))
|
| 1324 |
+
>>> np.einsum('ki,jk->ij', a, b)
|
| 1325 |
+
array([[10, 28, 46, 64],
|
| 1326 |
+
[13, 40, 67, 94]])
|
| 1327 |
+
>>> np.einsum('ki,...k->i...', a, b)
|
| 1328 |
+
array([[10, 28, 46, 64],
|
| 1329 |
+
[13, 40, 67, 94]])
|
| 1330 |
+
>>> np.einsum('k...,jk', a, b)
|
| 1331 |
+
array([[10, 28, 46, 64],
|
| 1332 |
+
[13, 40, 67, 94]])
|
| 1333 |
+
|
| 1334 |
+
Chained array operations. For more complicated contractions, speed ups
|
| 1335 |
+
might be achieved by repeatedly computing a 'greedy' path or pre-computing the
|
| 1336 |
+
'optimal' path and repeatedly applying it, using an
|
| 1337 |
+
`einsum_path` insertion (since version 1.12.0). Performance improvements can be
|
| 1338 |
+
particularly significant with larger arrays:
|
| 1339 |
+
|
| 1340 |
+
>>> a = np.ones(64).reshape(2,4,8)
|
| 1341 |
+
|
| 1342 |
+
Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
|
| 1343 |
+
|
| 1344 |
+
>>> for iteration in range(500):
|
| 1345 |
+
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
|
| 1346 |
+
|
| 1347 |
+
Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
|
| 1348 |
+
|
| 1349 |
+
>>> for iteration in range(500):
|
| 1350 |
+
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
|
| 1351 |
+
|
| 1352 |
+
Greedy `einsum` (faster optimal path approximation): ~160ms
|
| 1353 |
+
|
| 1354 |
+
>>> for iteration in range(500):
|
| 1355 |
+
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
|
| 1356 |
+
|
| 1357 |
+
Optimal `einsum` (best usage pattern in some use cases): ~110ms
|
| 1358 |
+
|
| 1359 |
+
>>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
|
| 1360 |
+
>>> for iteration in range(500):
|
| 1361 |
+
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
|
| 1362 |
+
|
| 1363 |
+
"""
|
| 1364 |
+
# Special handling if out is specified
|
| 1365 |
+
specified_out = out is not None
|
| 1366 |
+
|
| 1367 |
+
# If no optimization, run pure einsum
|
| 1368 |
+
if optimize is False:
|
| 1369 |
+
if specified_out:
|
| 1370 |
+
kwargs['out'] = out
|
| 1371 |
+
return c_einsum(*operands, **kwargs)
|
| 1372 |
+
|
| 1373 |
+
# Check the kwargs to avoid a more cryptic error later, without having to
|
| 1374 |
+
# repeat default values here
|
| 1375 |
+
valid_einsum_kwargs = ['dtype', 'order', 'casting']
|
| 1376 |
+
unknown_kwargs = [k for (k, v) in kwargs.items() if
|
| 1377 |
+
k not in valid_einsum_kwargs]
|
| 1378 |
+
if len(unknown_kwargs):
|
| 1379 |
+
raise TypeError("Did not understand the following kwargs: %s"
|
| 1380 |
+
% unknown_kwargs)
|
| 1381 |
+
|
| 1382 |
+
# Build the contraction list and operand
|
| 1383 |
+
operands, contraction_list = einsum_path(*operands, optimize=optimize,
|
| 1384 |
+
einsum_call=True)
|
| 1385 |
+
|
| 1386 |
+
# Handle order kwarg for output array, c_einsum allows mixed case
|
| 1387 |
+
output_order = kwargs.pop('order', 'K')
|
| 1388 |
+
if output_order.upper() == 'A':
|
| 1389 |
+
if all(arr.flags.f_contiguous for arr in operands):
|
| 1390 |
+
output_order = 'F'
|
| 1391 |
+
else:
|
| 1392 |
+
output_order = 'C'
|
| 1393 |
+
|
| 1394 |
+
# Start contraction loop
|
| 1395 |
+
for num, contraction in enumerate(contraction_list):
|
| 1396 |
+
inds, idx_rm, einsum_str, remaining, blas = contraction
|
| 1397 |
+
tmp_operands = [operands.pop(x) for x in inds]
|
| 1398 |
+
|
| 1399 |
+
# Do we need to deal with the output?
|
| 1400 |
+
handle_out = specified_out and ((num + 1) == len(contraction_list))
|
| 1401 |
+
|
| 1402 |
+
# Call tensordot if still possible
|
| 1403 |
+
if blas:
|
| 1404 |
+
# Checks have already been handled
|
| 1405 |
+
input_str, results_index = einsum_str.split('->')
|
| 1406 |
+
input_left, input_right = input_str.split(',')
|
| 1407 |
+
|
| 1408 |
+
tensor_result = input_left + input_right
|
| 1409 |
+
for s in idx_rm:
|
| 1410 |
+
tensor_result = tensor_result.replace(s, "")
|
| 1411 |
+
|
| 1412 |
+
# Find indices to contract over
|
| 1413 |
+
left_pos, right_pos = [], []
|
| 1414 |
+
for s in sorted(idx_rm):
|
| 1415 |
+
left_pos.append(input_left.find(s))
|
| 1416 |
+
right_pos.append(input_right.find(s))
|
| 1417 |
+
|
| 1418 |
+
# Contract!
|
| 1419 |
+
new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
|
| 1420 |
+
|
| 1421 |
+
# Build a new view if needed
|
| 1422 |
+
if (tensor_result != results_index) or handle_out:
|
| 1423 |
+
if handle_out:
|
| 1424 |
+
kwargs["out"] = out
|
| 1425 |
+
new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs)
|
| 1426 |
+
|
| 1427 |
+
# Call einsum
|
| 1428 |
+
else:
|
| 1429 |
+
# If out was specified
|
| 1430 |
+
if handle_out:
|
| 1431 |
+
kwargs["out"] = out
|
| 1432 |
+
|
| 1433 |
+
# Do the contraction
|
| 1434 |
+
new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
|
| 1435 |
+
|
| 1436 |
+
# Append new items and dereference what we can
|
| 1437 |
+
operands.append(new_view)
|
| 1438 |
+
del tmp_operands, new_view
|
| 1439 |
+
|
| 1440 |
+
if specified_out:
|
| 1441 |
+
return out
|
| 1442 |
+
else:
|
| 1443 |
+
return asanyarray(operands[0], order=output_order)
|
pllava/lib/python3.10/site-packages/numpy/core/einsumfunc.pyi
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Sequence
|
| 2 |
+
from typing import TypeVar, Any, overload, Union, Literal
|
| 3 |
+
|
| 4 |
+
from numpy import (
|
| 5 |
+
ndarray,
|
| 6 |
+
dtype,
|
| 7 |
+
bool_,
|
| 8 |
+
number,
|
| 9 |
+
_OrderKACF,
|
| 10 |
+
)
|
| 11 |
+
from numpy._typing import (
|
| 12 |
+
_ArrayLikeBool_co,
|
| 13 |
+
_ArrayLikeUInt_co,
|
| 14 |
+
_ArrayLikeInt_co,
|
| 15 |
+
_ArrayLikeFloat_co,
|
| 16 |
+
_ArrayLikeComplex_co,
|
| 17 |
+
_ArrayLikeObject_co,
|
| 18 |
+
_DTypeLikeBool,
|
| 19 |
+
_DTypeLikeUInt,
|
| 20 |
+
_DTypeLikeInt,
|
| 21 |
+
_DTypeLikeFloat,
|
| 22 |
+
_DTypeLikeComplex,
|
| 23 |
+
_DTypeLikeComplex_co,
|
| 24 |
+
_DTypeLikeObject,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
_ArrayType = TypeVar(
|
| 28 |
+
"_ArrayType",
|
| 29 |
+
bound=ndarray[Any, dtype[Union[bool_, number[Any]]]],
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
_OptimizeKind = None | bool | Literal["greedy", "optimal"] | Sequence[Any]
|
| 33 |
+
_CastingSafe = Literal["no", "equiv", "safe", "same_kind"]
|
| 34 |
+
_CastingUnsafe = Literal["unsafe"]
|
| 35 |
+
|
| 36 |
+
__all__: list[str]
|
| 37 |
+
|
| 38 |
+
# TODO: Properly handle the `casting`-based combinatorics
|
| 39 |
+
# TODO: We need to evaluate the content `__subscripts` in order
|
| 40 |
+
# to identify whether or an array or scalar is returned. At a cursory
|
| 41 |
+
# glance this seems like something that can quite easily be done with
|
| 42 |
+
# a mypy plugin.
|
| 43 |
+
# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
|
| 44 |
+
@overload
|
| 45 |
+
def einsum(
|
| 46 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 47 |
+
/,
|
| 48 |
+
*operands: _ArrayLikeBool_co,
|
| 49 |
+
out: None = ...,
|
| 50 |
+
dtype: None | _DTypeLikeBool = ...,
|
| 51 |
+
order: _OrderKACF = ...,
|
| 52 |
+
casting: _CastingSafe = ...,
|
| 53 |
+
optimize: _OptimizeKind = ...,
|
| 54 |
+
) -> Any: ...
|
| 55 |
+
@overload
|
| 56 |
+
def einsum(
|
| 57 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 58 |
+
/,
|
| 59 |
+
*operands: _ArrayLikeUInt_co,
|
| 60 |
+
out: None = ...,
|
| 61 |
+
dtype: None | _DTypeLikeUInt = ...,
|
| 62 |
+
order: _OrderKACF = ...,
|
| 63 |
+
casting: _CastingSafe = ...,
|
| 64 |
+
optimize: _OptimizeKind = ...,
|
| 65 |
+
) -> Any: ...
|
| 66 |
+
@overload
|
| 67 |
+
def einsum(
|
| 68 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 69 |
+
/,
|
| 70 |
+
*operands: _ArrayLikeInt_co,
|
| 71 |
+
out: None = ...,
|
| 72 |
+
dtype: None | _DTypeLikeInt = ...,
|
| 73 |
+
order: _OrderKACF = ...,
|
| 74 |
+
casting: _CastingSafe = ...,
|
| 75 |
+
optimize: _OptimizeKind = ...,
|
| 76 |
+
) -> Any: ...
|
| 77 |
+
@overload
|
| 78 |
+
def einsum(
|
| 79 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 80 |
+
/,
|
| 81 |
+
*operands: _ArrayLikeFloat_co,
|
| 82 |
+
out: None = ...,
|
| 83 |
+
dtype: None | _DTypeLikeFloat = ...,
|
| 84 |
+
order: _OrderKACF = ...,
|
| 85 |
+
casting: _CastingSafe = ...,
|
| 86 |
+
optimize: _OptimizeKind = ...,
|
| 87 |
+
) -> Any: ...
|
| 88 |
+
@overload
|
| 89 |
+
def einsum(
|
| 90 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 91 |
+
/,
|
| 92 |
+
*operands: _ArrayLikeComplex_co,
|
| 93 |
+
out: None = ...,
|
| 94 |
+
dtype: None | _DTypeLikeComplex = ...,
|
| 95 |
+
order: _OrderKACF = ...,
|
| 96 |
+
casting: _CastingSafe = ...,
|
| 97 |
+
optimize: _OptimizeKind = ...,
|
| 98 |
+
) -> Any: ...
|
| 99 |
+
@overload
|
| 100 |
+
def einsum(
|
| 101 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 102 |
+
/,
|
| 103 |
+
*operands: Any,
|
| 104 |
+
casting: _CastingUnsafe,
|
| 105 |
+
dtype: None | _DTypeLikeComplex_co = ...,
|
| 106 |
+
out: None = ...,
|
| 107 |
+
order: _OrderKACF = ...,
|
| 108 |
+
optimize: _OptimizeKind = ...,
|
| 109 |
+
) -> Any: ...
|
| 110 |
+
@overload
|
| 111 |
+
def einsum(
|
| 112 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 113 |
+
/,
|
| 114 |
+
*operands: _ArrayLikeComplex_co,
|
| 115 |
+
out: _ArrayType,
|
| 116 |
+
dtype: None | _DTypeLikeComplex_co = ...,
|
| 117 |
+
order: _OrderKACF = ...,
|
| 118 |
+
casting: _CastingSafe = ...,
|
| 119 |
+
optimize: _OptimizeKind = ...,
|
| 120 |
+
) -> _ArrayType: ...
|
| 121 |
+
@overload
|
| 122 |
+
def einsum(
|
| 123 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 124 |
+
/,
|
| 125 |
+
*operands: Any,
|
| 126 |
+
out: _ArrayType,
|
| 127 |
+
casting: _CastingUnsafe,
|
| 128 |
+
dtype: None | _DTypeLikeComplex_co = ...,
|
| 129 |
+
order: _OrderKACF = ...,
|
| 130 |
+
optimize: _OptimizeKind = ...,
|
| 131 |
+
) -> _ArrayType: ...
|
| 132 |
+
|
| 133 |
+
@overload
|
| 134 |
+
def einsum(
|
| 135 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 136 |
+
/,
|
| 137 |
+
*operands: _ArrayLikeObject_co,
|
| 138 |
+
out: None = ...,
|
| 139 |
+
dtype: None | _DTypeLikeObject = ...,
|
| 140 |
+
order: _OrderKACF = ...,
|
| 141 |
+
casting: _CastingSafe = ...,
|
| 142 |
+
optimize: _OptimizeKind = ...,
|
| 143 |
+
) -> Any: ...
|
| 144 |
+
@overload
|
| 145 |
+
def einsum(
|
| 146 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 147 |
+
/,
|
| 148 |
+
*operands: Any,
|
| 149 |
+
casting: _CastingUnsafe,
|
| 150 |
+
dtype: None | _DTypeLikeObject = ...,
|
| 151 |
+
out: None = ...,
|
| 152 |
+
order: _OrderKACF = ...,
|
| 153 |
+
optimize: _OptimizeKind = ...,
|
| 154 |
+
) -> Any: ...
|
| 155 |
+
@overload
|
| 156 |
+
def einsum(
|
| 157 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 158 |
+
/,
|
| 159 |
+
*operands: _ArrayLikeObject_co,
|
| 160 |
+
out: _ArrayType,
|
| 161 |
+
dtype: None | _DTypeLikeObject = ...,
|
| 162 |
+
order: _OrderKACF = ...,
|
| 163 |
+
casting: _CastingSafe = ...,
|
| 164 |
+
optimize: _OptimizeKind = ...,
|
| 165 |
+
) -> _ArrayType: ...
|
| 166 |
+
@overload
|
| 167 |
+
def einsum(
|
| 168 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 169 |
+
/,
|
| 170 |
+
*operands: Any,
|
| 171 |
+
out: _ArrayType,
|
| 172 |
+
casting: _CastingUnsafe,
|
| 173 |
+
dtype: None | _DTypeLikeObject = ...,
|
| 174 |
+
order: _OrderKACF = ...,
|
| 175 |
+
optimize: _OptimizeKind = ...,
|
| 176 |
+
) -> _ArrayType: ...
|
| 177 |
+
|
| 178 |
+
# NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
|
| 179 |
+
# It is therefore excluded from the signatures below.
|
| 180 |
+
# NOTE: In practice the list consists of a `str` (first element)
|
| 181 |
+
# and a variable number of integer tuples.
|
| 182 |
+
def einsum_path(
|
| 183 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 184 |
+
/,
|
| 185 |
+
*operands: _ArrayLikeComplex_co | _DTypeLikeObject,
|
| 186 |
+
optimize: _OptimizeKind = ...,
|
| 187 |
+
) -> tuple[list[Any], str]: ...
|
pllava/lib/python3.10/site-packages/numpy/core/fromnumeric.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pllava/lib/python3.10/site-packages/numpy/core/fromnumeric.pyi
ADDED
|
@@ -0,0 +1,1060 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datetime as dt
|
| 2 |
+
from collections.abc import Sequence
|
| 3 |
+
from typing import Union, Any, overload, TypeVar, Literal, SupportsIndex
|
| 4 |
+
|
| 5 |
+
from numpy import (
|
| 6 |
+
ndarray,
|
| 7 |
+
number,
|
| 8 |
+
uint64,
|
| 9 |
+
int_,
|
| 10 |
+
int64,
|
| 11 |
+
intp,
|
| 12 |
+
float16,
|
| 13 |
+
bool_,
|
| 14 |
+
floating,
|
| 15 |
+
complexfloating,
|
| 16 |
+
object_,
|
| 17 |
+
generic,
|
| 18 |
+
_OrderKACF,
|
| 19 |
+
_OrderACF,
|
| 20 |
+
_ModeKind,
|
| 21 |
+
_PartitionKind,
|
| 22 |
+
_SortKind,
|
| 23 |
+
_SortSide,
|
| 24 |
+
_CastingKind,
|
| 25 |
+
)
|
| 26 |
+
from numpy._typing import (
|
| 27 |
+
DTypeLike,
|
| 28 |
+
_DTypeLike,
|
| 29 |
+
ArrayLike,
|
| 30 |
+
_ArrayLike,
|
| 31 |
+
NDArray,
|
| 32 |
+
_ShapeLike,
|
| 33 |
+
_Shape,
|
| 34 |
+
_ArrayLikeBool_co,
|
| 35 |
+
_ArrayLikeUInt_co,
|
| 36 |
+
_ArrayLikeInt_co,
|
| 37 |
+
_ArrayLikeFloat_co,
|
| 38 |
+
_ArrayLikeComplex_co,
|
| 39 |
+
_ArrayLikeObject_co,
|
| 40 |
+
_IntLike_co,
|
| 41 |
+
_BoolLike_co,
|
| 42 |
+
_ComplexLike_co,
|
| 43 |
+
_NumberLike_co,
|
| 44 |
+
_ScalarLike_co,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 48 |
+
_SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_)
|
| 49 |
+
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
|
| 50 |
+
|
| 51 |
+
__all__: list[str]
|
| 52 |
+
|
| 53 |
+
@overload
|
| 54 |
+
def take(
|
| 55 |
+
a: _ArrayLike[_SCT],
|
| 56 |
+
indices: _IntLike_co,
|
| 57 |
+
axis: None = ...,
|
| 58 |
+
out: None = ...,
|
| 59 |
+
mode: _ModeKind = ...,
|
| 60 |
+
) -> _SCT: ...
|
| 61 |
+
@overload
|
| 62 |
+
def take(
|
| 63 |
+
a: ArrayLike,
|
| 64 |
+
indices: _IntLike_co,
|
| 65 |
+
axis: None | SupportsIndex = ...,
|
| 66 |
+
out: None = ...,
|
| 67 |
+
mode: _ModeKind = ...,
|
| 68 |
+
) -> Any: ...
|
| 69 |
+
@overload
|
| 70 |
+
def take(
|
| 71 |
+
a: _ArrayLike[_SCT],
|
| 72 |
+
indices: _ArrayLikeInt_co,
|
| 73 |
+
axis: None | SupportsIndex = ...,
|
| 74 |
+
out: None = ...,
|
| 75 |
+
mode: _ModeKind = ...,
|
| 76 |
+
) -> NDArray[_SCT]: ...
|
| 77 |
+
@overload
|
| 78 |
+
def take(
|
| 79 |
+
a: ArrayLike,
|
| 80 |
+
indices: _ArrayLikeInt_co,
|
| 81 |
+
axis: None | SupportsIndex = ...,
|
| 82 |
+
out: None = ...,
|
| 83 |
+
mode: _ModeKind = ...,
|
| 84 |
+
) -> NDArray[Any]: ...
|
| 85 |
+
@overload
|
| 86 |
+
def take(
|
| 87 |
+
a: ArrayLike,
|
| 88 |
+
indices: _ArrayLikeInt_co,
|
| 89 |
+
axis: None | SupportsIndex = ...,
|
| 90 |
+
out: _ArrayType = ...,
|
| 91 |
+
mode: _ModeKind = ...,
|
| 92 |
+
) -> _ArrayType: ...
|
| 93 |
+
|
| 94 |
+
@overload
|
| 95 |
+
def reshape(
|
| 96 |
+
a: _ArrayLike[_SCT],
|
| 97 |
+
newshape: _ShapeLike,
|
| 98 |
+
order: _OrderACF = ...,
|
| 99 |
+
) -> NDArray[_SCT]: ...
|
| 100 |
+
@overload
|
| 101 |
+
def reshape(
|
| 102 |
+
a: ArrayLike,
|
| 103 |
+
newshape: _ShapeLike,
|
| 104 |
+
order: _OrderACF = ...,
|
| 105 |
+
) -> NDArray[Any]: ...
|
| 106 |
+
|
| 107 |
+
@overload
|
| 108 |
+
def choose(
|
| 109 |
+
a: _IntLike_co,
|
| 110 |
+
choices: ArrayLike,
|
| 111 |
+
out: None = ...,
|
| 112 |
+
mode: _ModeKind = ...,
|
| 113 |
+
) -> Any: ...
|
| 114 |
+
@overload
|
| 115 |
+
def choose(
|
| 116 |
+
a: _ArrayLikeInt_co,
|
| 117 |
+
choices: _ArrayLike[_SCT],
|
| 118 |
+
out: None = ...,
|
| 119 |
+
mode: _ModeKind = ...,
|
| 120 |
+
) -> NDArray[_SCT]: ...
|
| 121 |
+
@overload
|
| 122 |
+
def choose(
|
| 123 |
+
a: _ArrayLikeInt_co,
|
| 124 |
+
choices: ArrayLike,
|
| 125 |
+
out: None = ...,
|
| 126 |
+
mode: _ModeKind = ...,
|
| 127 |
+
) -> NDArray[Any]: ...
|
| 128 |
+
@overload
|
| 129 |
+
def choose(
|
| 130 |
+
a: _ArrayLikeInt_co,
|
| 131 |
+
choices: ArrayLike,
|
| 132 |
+
out: _ArrayType = ...,
|
| 133 |
+
mode: _ModeKind = ...,
|
| 134 |
+
) -> _ArrayType: ...
|
| 135 |
+
|
| 136 |
+
@overload
|
| 137 |
+
def repeat(
|
| 138 |
+
a: _ArrayLike[_SCT],
|
| 139 |
+
repeats: _ArrayLikeInt_co,
|
| 140 |
+
axis: None | SupportsIndex = ...,
|
| 141 |
+
) -> NDArray[_SCT]: ...
|
| 142 |
+
@overload
|
| 143 |
+
def repeat(
|
| 144 |
+
a: ArrayLike,
|
| 145 |
+
repeats: _ArrayLikeInt_co,
|
| 146 |
+
axis: None | SupportsIndex = ...,
|
| 147 |
+
) -> NDArray[Any]: ...
|
| 148 |
+
|
| 149 |
+
def put(
|
| 150 |
+
a: NDArray[Any],
|
| 151 |
+
ind: _ArrayLikeInt_co,
|
| 152 |
+
v: ArrayLike,
|
| 153 |
+
mode: _ModeKind = ...,
|
| 154 |
+
) -> None: ...
|
| 155 |
+
|
| 156 |
+
@overload
|
| 157 |
+
def swapaxes(
|
| 158 |
+
a: _ArrayLike[_SCT],
|
| 159 |
+
axis1: SupportsIndex,
|
| 160 |
+
axis2: SupportsIndex,
|
| 161 |
+
) -> NDArray[_SCT]: ...
|
| 162 |
+
@overload
|
| 163 |
+
def swapaxes(
|
| 164 |
+
a: ArrayLike,
|
| 165 |
+
axis1: SupportsIndex,
|
| 166 |
+
axis2: SupportsIndex,
|
| 167 |
+
) -> NDArray[Any]: ...
|
| 168 |
+
|
| 169 |
+
@overload
|
| 170 |
+
def transpose(
|
| 171 |
+
a: _ArrayLike[_SCT],
|
| 172 |
+
axes: None | _ShapeLike = ...
|
| 173 |
+
) -> NDArray[_SCT]: ...
|
| 174 |
+
@overload
|
| 175 |
+
def transpose(
|
| 176 |
+
a: ArrayLike,
|
| 177 |
+
axes: None | _ShapeLike = ...
|
| 178 |
+
) -> NDArray[Any]: ...
|
| 179 |
+
|
| 180 |
+
@overload
|
| 181 |
+
def partition(
|
| 182 |
+
a: _ArrayLike[_SCT],
|
| 183 |
+
kth: _ArrayLikeInt_co,
|
| 184 |
+
axis: None | SupportsIndex = ...,
|
| 185 |
+
kind: _PartitionKind = ...,
|
| 186 |
+
order: None | str | Sequence[str] = ...,
|
| 187 |
+
) -> NDArray[_SCT]: ...
|
| 188 |
+
@overload
|
| 189 |
+
def partition(
|
| 190 |
+
a: ArrayLike,
|
| 191 |
+
kth: _ArrayLikeInt_co,
|
| 192 |
+
axis: None | SupportsIndex = ...,
|
| 193 |
+
kind: _PartitionKind = ...,
|
| 194 |
+
order: None | str | Sequence[str] = ...,
|
| 195 |
+
) -> NDArray[Any]: ...
|
| 196 |
+
|
| 197 |
+
def argpartition(
|
| 198 |
+
a: ArrayLike,
|
| 199 |
+
kth: _ArrayLikeInt_co,
|
| 200 |
+
axis: None | SupportsIndex = ...,
|
| 201 |
+
kind: _PartitionKind = ...,
|
| 202 |
+
order: None | str | Sequence[str] = ...,
|
| 203 |
+
) -> NDArray[intp]: ...
|
| 204 |
+
|
| 205 |
+
@overload
|
| 206 |
+
def sort(
|
| 207 |
+
a: _ArrayLike[_SCT],
|
| 208 |
+
axis: None | SupportsIndex = ...,
|
| 209 |
+
kind: None | _SortKind = ...,
|
| 210 |
+
order: None | str | Sequence[str] = ...,
|
| 211 |
+
) -> NDArray[_SCT]: ...
|
| 212 |
+
@overload
|
| 213 |
+
def sort(
|
| 214 |
+
a: ArrayLike,
|
| 215 |
+
axis: None | SupportsIndex = ...,
|
| 216 |
+
kind: None | _SortKind = ...,
|
| 217 |
+
order: None | str | Sequence[str] = ...,
|
| 218 |
+
) -> NDArray[Any]: ...
|
| 219 |
+
|
| 220 |
+
def argsort(
|
| 221 |
+
a: ArrayLike,
|
| 222 |
+
axis: None | SupportsIndex = ...,
|
| 223 |
+
kind: None | _SortKind = ...,
|
| 224 |
+
order: None | str | Sequence[str] = ...,
|
| 225 |
+
) -> NDArray[intp]: ...
|
| 226 |
+
|
| 227 |
+
@overload
|
| 228 |
+
def argmax(
|
| 229 |
+
a: ArrayLike,
|
| 230 |
+
axis: None = ...,
|
| 231 |
+
out: None = ...,
|
| 232 |
+
*,
|
| 233 |
+
keepdims: Literal[False] = ...,
|
| 234 |
+
) -> intp: ...
|
| 235 |
+
@overload
|
| 236 |
+
def argmax(
|
| 237 |
+
a: ArrayLike,
|
| 238 |
+
axis: None | SupportsIndex = ...,
|
| 239 |
+
out: None = ...,
|
| 240 |
+
*,
|
| 241 |
+
keepdims: bool = ...,
|
| 242 |
+
) -> Any: ...
|
| 243 |
+
@overload
|
| 244 |
+
def argmax(
|
| 245 |
+
a: ArrayLike,
|
| 246 |
+
axis: None | SupportsIndex = ...,
|
| 247 |
+
out: _ArrayType = ...,
|
| 248 |
+
*,
|
| 249 |
+
keepdims: bool = ...,
|
| 250 |
+
) -> _ArrayType: ...
|
| 251 |
+
|
| 252 |
+
@overload
|
| 253 |
+
def argmin(
|
| 254 |
+
a: ArrayLike,
|
| 255 |
+
axis: None = ...,
|
| 256 |
+
out: None = ...,
|
| 257 |
+
*,
|
| 258 |
+
keepdims: Literal[False] = ...,
|
| 259 |
+
) -> intp: ...
|
| 260 |
+
@overload
|
| 261 |
+
def argmin(
|
| 262 |
+
a: ArrayLike,
|
| 263 |
+
axis: None | SupportsIndex = ...,
|
| 264 |
+
out: None = ...,
|
| 265 |
+
*,
|
| 266 |
+
keepdims: bool = ...,
|
| 267 |
+
) -> Any: ...
|
| 268 |
+
@overload
|
| 269 |
+
def argmin(
|
| 270 |
+
a: ArrayLike,
|
| 271 |
+
axis: None | SupportsIndex = ...,
|
| 272 |
+
out: _ArrayType = ...,
|
| 273 |
+
*,
|
| 274 |
+
keepdims: bool = ...,
|
| 275 |
+
) -> _ArrayType: ...
|
| 276 |
+
|
| 277 |
+
@overload
|
| 278 |
+
def searchsorted(
|
| 279 |
+
a: ArrayLike,
|
| 280 |
+
v: _ScalarLike_co,
|
| 281 |
+
side: _SortSide = ...,
|
| 282 |
+
sorter: None | _ArrayLikeInt_co = ..., # 1D int array
|
| 283 |
+
) -> intp: ...
|
| 284 |
+
@overload
|
| 285 |
+
def searchsorted(
|
| 286 |
+
a: ArrayLike,
|
| 287 |
+
v: ArrayLike,
|
| 288 |
+
side: _SortSide = ...,
|
| 289 |
+
sorter: None | _ArrayLikeInt_co = ..., # 1D int array
|
| 290 |
+
) -> NDArray[intp]: ...
|
| 291 |
+
|
| 292 |
+
@overload
|
| 293 |
+
def resize(
|
| 294 |
+
a: _ArrayLike[_SCT],
|
| 295 |
+
new_shape: _ShapeLike,
|
| 296 |
+
) -> NDArray[_SCT]: ...
|
| 297 |
+
@overload
|
| 298 |
+
def resize(
|
| 299 |
+
a: ArrayLike,
|
| 300 |
+
new_shape: _ShapeLike,
|
| 301 |
+
) -> NDArray[Any]: ...
|
| 302 |
+
|
| 303 |
+
@overload
|
| 304 |
+
def squeeze(
|
| 305 |
+
a: _SCT,
|
| 306 |
+
axis: None | _ShapeLike = ...,
|
| 307 |
+
) -> _SCT: ...
|
| 308 |
+
@overload
|
| 309 |
+
def squeeze(
|
| 310 |
+
a: _ArrayLike[_SCT],
|
| 311 |
+
axis: None | _ShapeLike = ...,
|
| 312 |
+
) -> NDArray[_SCT]: ...
|
| 313 |
+
@overload
|
| 314 |
+
def squeeze(
|
| 315 |
+
a: ArrayLike,
|
| 316 |
+
axis: None | _ShapeLike = ...,
|
| 317 |
+
) -> NDArray[Any]: ...
|
| 318 |
+
|
| 319 |
+
@overload
|
| 320 |
+
def diagonal(
|
| 321 |
+
a: _ArrayLike[_SCT],
|
| 322 |
+
offset: SupportsIndex = ...,
|
| 323 |
+
axis1: SupportsIndex = ...,
|
| 324 |
+
axis2: SupportsIndex = ..., # >= 2D array
|
| 325 |
+
) -> NDArray[_SCT]: ...
|
| 326 |
+
@overload
|
| 327 |
+
def diagonal(
|
| 328 |
+
a: ArrayLike,
|
| 329 |
+
offset: SupportsIndex = ...,
|
| 330 |
+
axis1: SupportsIndex = ...,
|
| 331 |
+
axis2: SupportsIndex = ..., # >= 2D array
|
| 332 |
+
) -> NDArray[Any]: ...
|
| 333 |
+
|
| 334 |
+
@overload
|
| 335 |
+
def trace(
|
| 336 |
+
a: ArrayLike, # >= 2D array
|
| 337 |
+
offset: SupportsIndex = ...,
|
| 338 |
+
axis1: SupportsIndex = ...,
|
| 339 |
+
axis2: SupportsIndex = ...,
|
| 340 |
+
dtype: DTypeLike = ...,
|
| 341 |
+
out: None = ...,
|
| 342 |
+
) -> Any: ...
|
| 343 |
+
@overload
|
| 344 |
+
def trace(
|
| 345 |
+
a: ArrayLike, # >= 2D array
|
| 346 |
+
offset: SupportsIndex = ...,
|
| 347 |
+
axis1: SupportsIndex = ...,
|
| 348 |
+
axis2: SupportsIndex = ...,
|
| 349 |
+
dtype: DTypeLike = ...,
|
| 350 |
+
out: _ArrayType = ...,
|
| 351 |
+
) -> _ArrayType: ...
|
| 352 |
+
|
| 353 |
+
@overload
|
| 354 |
+
def ravel(a: _ArrayLike[_SCT], order: _OrderKACF = ...) -> NDArray[_SCT]: ...
|
| 355 |
+
@overload
|
| 356 |
+
def ravel(a: ArrayLike, order: _OrderKACF = ...) -> NDArray[Any]: ...
|
| 357 |
+
|
| 358 |
+
def nonzero(a: ArrayLike) -> tuple[NDArray[intp], ...]: ...
|
| 359 |
+
|
| 360 |
+
def shape(a: ArrayLike) -> _Shape: ...
|
| 361 |
+
|
| 362 |
+
@overload
|
| 363 |
+
def compress(
|
| 364 |
+
condition: _ArrayLikeBool_co, # 1D bool array
|
| 365 |
+
a: _ArrayLike[_SCT],
|
| 366 |
+
axis: None | SupportsIndex = ...,
|
| 367 |
+
out: None = ...,
|
| 368 |
+
) -> NDArray[_SCT]: ...
|
| 369 |
+
@overload
|
| 370 |
+
def compress(
|
| 371 |
+
condition: _ArrayLikeBool_co, # 1D bool array
|
| 372 |
+
a: ArrayLike,
|
| 373 |
+
axis: None | SupportsIndex = ...,
|
| 374 |
+
out: None = ...,
|
| 375 |
+
) -> NDArray[Any]: ...
|
| 376 |
+
@overload
|
| 377 |
+
def compress(
|
| 378 |
+
condition: _ArrayLikeBool_co, # 1D bool array
|
| 379 |
+
a: ArrayLike,
|
| 380 |
+
axis: None | SupportsIndex = ...,
|
| 381 |
+
out: _ArrayType = ...,
|
| 382 |
+
) -> _ArrayType: ...
|
| 383 |
+
|
| 384 |
+
@overload
|
| 385 |
+
def clip(
|
| 386 |
+
a: _SCT,
|
| 387 |
+
a_min: None | ArrayLike,
|
| 388 |
+
a_max: None | ArrayLike,
|
| 389 |
+
out: None = ...,
|
| 390 |
+
*,
|
| 391 |
+
dtype: None = ...,
|
| 392 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 393 |
+
order: _OrderKACF = ...,
|
| 394 |
+
subok: bool = ...,
|
| 395 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 396 |
+
extobj: list[Any] = ...,
|
| 397 |
+
casting: _CastingKind = ...,
|
| 398 |
+
) -> _SCT: ...
|
| 399 |
+
@overload
|
| 400 |
+
def clip(
|
| 401 |
+
a: _ScalarLike_co,
|
| 402 |
+
a_min: None | ArrayLike,
|
| 403 |
+
a_max: None | ArrayLike,
|
| 404 |
+
out: None = ...,
|
| 405 |
+
*,
|
| 406 |
+
dtype: None = ...,
|
| 407 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 408 |
+
order: _OrderKACF = ...,
|
| 409 |
+
subok: bool = ...,
|
| 410 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 411 |
+
extobj: list[Any] = ...,
|
| 412 |
+
casting: _CastingKind = ...,
|
| 413 |
+
) -> Any: ...
|
| 414 |
+
@overload
|
| 415 |
+
def clip(
|
| 416 |
+
a: _ArrayLike[_SCT],
|
| 417 |
+
a_min: None | ArrayLike,
|
| 418 |
+
a_max: None | ArrayLike,
|
| 419 |
+
out: None = ...,
|
| 420 |
+
*,
|
| 421 |
+
dtype: None = ...,
|
| 422 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 423 |
+
order: _OrderKACF = ...,
|
| 424 |
+
subok: bool = ...,
|
| 425 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 426 |
+
extobj: list[Any] = ...,
|
| 427 |
+
casting: _CastingKind = ...,
|
| 428 |
+
) -> NDArray[_SCT]: ...
|
| 429 |
+
@overload
|
| 430 |
+
def clip(
|
| 431 |
+
a: ArrayLike,
|
| 432 |
+
a_min: None | ArrayLike,
|
| 433 |
+
a_max: None | ArrayLike,
|
| 434 |
+
out: None = ...,
|
| 435 |
+
*,
|
| 436 |
+
dtype: None = ...,
|
| 437 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 438 |
+
order: _OrderKACF = ...,
|
| 439 |
+
subok: bool = ...,
|
| 440 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 441 |
+
extobj: list[Any] = ...,
|
| 442 |
+
casting: _CastingKind = ...,
|
| 443 |
+
) -> NDArray[Any]: ...
|
| 444 |
+
@overload
|
| 445 |
+
def clip(
|
| 446 |
+
a: ArrayLike,
|
| 447 |
+
a_min: None | ArrayLike,
|
| 448 |
+
a_max: None | ArrayLike,
|
| 449 |
+
out: _ArrayType = ...,
|
| 450 |
+
*,
|
| 451 |
+
dtype: DTypeLike,
|
| 452 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 453 |
+
order: _OrderKACF = ...,
|
| 454 |
+
subok: bool = ...,
|
| 455 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 456 |
+
extobj: list[Any] = ...,
|
| 457 |
+
casting: _CastingKind = ...,
|
| 458 |
+
) -> Any: ...
|
| 459 |
+
@overload
|
| 460 |
+
def clip(
|
| 461 |
+
a: ArrayLike,
|
| 462 |
+
a_min: None | ArrayLike,
|
| 463 |
+
a_max: None | ArrayLike,
|
| 464 |
+
out: _ArrayType,
|
| 465 |
+
*,
|
| 466 |
+
dtype: DTypeLike = ...,
|
| 467 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 468 |
+
order: _OrderKACF = ...,
|
| 469 |
+
subok: bool = ...,
|
| 470 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 471 |
+
extobj: list[Any] = ...,
|
| 472 |
+
casting: _CastingKind = ...,
|
| 473 |
+
) -> _ArrayType: ...
|
| 474 |
+
|
| 475 |
+
@overload
|
| 476 |
+
def sum(
|
| 477 |
+
a: _ArrayLike[_SCT],
|
| 478 |
+
axis: None = ...,
|
| 479 |
+
dtype: None = ...,
|
| 480 |
+
out: None = ...,
|
| 481 |
+
keepdims: bool = ...,
|
| 482 |
+
initial: _NumberLike_co = ...,
|
| 483 |
+
where: _ArrayLikeBool_co = ...,
|
| 484 |
+
) -> _SCT: ...
|
| 485 |
+
@overload
|
| 486 |
+
def sum(
|
| 487 |
+
a: ArrayLike,
|
| 488 |
+
axis: None | _ShapeLike = ...,
|
| 489 |
+
dtype: DTypeLike = ...,
|
| 490 |
+
out: None = ...,
|
| 491 |
+
keepdims: bool = ...,
|
| 492 |
+
initial: _NumberLike_co = ...,
|
| 493 |
+
where: _ArrayLikeBool_co = ...,
|
| 494 |
+
) -> Any: ...
|
| 495 |
+
@overload
|
| 496 |
+
def sum(
|
| 497 |
+
a: ArrayLike,
|
| 498 |
+
axis: None | _ShapeLike = ...,
|
| 499 |
+
dtype: DTypeLike = ...,
|
| 500 |
+
out: _ArrayType = ...,
|
| 501 |
+
keepdims: bool = ...,
|
| 502 |
+
initial: _NumberLike_co = ...,
|
| 503 |
+
where: _ArrayLikeBool_co = ...,
|
| 504 |
+
) -> _ArrayType: ...
|
| 505 |
+
|
| 506 |
+
@overload
|
| 507 |
+
def all(
|
| 508 |
+
a: ArrayLike,
|
| 509 |
+
axis: None = ...,
|
| 510 |
+
out: None = ...,
|
| 511 |
+
keepdims: Literal[False] = ...,
|
| 512 |
+
*,
|
| 513 |
+
where: _ArrayLikeBool_co = ...,
|
| 514 |
+
) -> bool_: ...
|
| 515 |
+
@overload
|
| 516 |
+
def all(
|
| 517 |
+
a: ArrayLike,
|
| 518 |
+
axis: None | _ShapeLike = ...,
|
| 519 |
+
out: None = ...,
|
| 520 |
+
keepdims: bool = ...,
|
| 521 |
+
*,
|
| 522 |
+
where: _ArrayLikeBool_co = ...,
|
| 523 |
+
) -> Any: ...
|
| 524 |
+
@overload
|
| 525 |
+
def all(
|
| 526 |
+
a: ArrayLike,
|
| 527 |
+
axis: None | _ShapeLike = ...,
|
| 528 |
+
out: _ArrayType = ...,
|
| 529 |
+
keepdims: bool = ...,
|
| 530 |
+
*,
|
| 531 |
+
where: _ArrayLikeBool_co = ...,
|
| 532 |
+
) -> _ArrayType: ...
|
| 533 |
+
|
| 534 |
+
@overload
|
| 535 |
+
def any(
|
| 536 |
+
a: ArrayLike,
|
| 537 |
+
axis: None = ...,
|
| 538 |
+
out: None = ...,
|
| 539 |
+
keepdims: Literal[False] = ...,
|
| 540 |
+
*,
|
| 541 |
+
where: _ArrayLikeBool_co = ...,
|
| 542 |
+
) -> bool_: ...
|
| 543 |
+
@overload
|
| 544 |
+
def any(
|
| 545 |
+
a: ArrayLike,
|
| 546 |
+
axis: None | _ShapeLike = ...,
|
| 547 |
+
out: None = ...,
|
| 548 |
+
keepdims: bool = ...,
|
| 549 |
+
*,
|
| 550 |
+
where: _ArrayLikeBool_co = ...,
|
| 551 |
+
) -> Any: ...
|
| 552 |
+
@overload
|
| 553 |
+
def any(
|
| 554 |
+
a: ArrayLike,
|
| 555 |
+
axis: None | _ShapeLike = ...,
|
| 556 |
+
out: _ArrayType = ...,
|
| 557 |
+
keepdims: bool = ...,
|
| 558 |
+
*,
|
| 559 |
+
where: _ArrayLikeBool_co = ...,
|
| 560 |
+
) -> _ArrayType: ...
|
| 561 |
+
|
| 562 |
+
@overload
|
| 563 |
+
def cumsum(
|
| 564 |
+
a: _ArrayLike[_SCT],
|
| 565 |
+
axis: None | SupportsIndex = ...,
|
| 566 |
+
dtype: None = ...,
|
| 567 |
+
out: None = ...,
|
| 568 |
+
) -> NDArray[_SCT]: ...
|
| 569 |
+
@overload
|
| 570 |
+
def cumsum(
|
| 571 |
+
a: ArrayLike,
|
| 572 |
+
axis: None | SupportsIndex = ...,
|
| 573 |
+
dtype: None = ...,
|
| 574 |
+
out: None = ...,
|
| 575 |
+
) -> NDArray[Any]: ...
|
| 576 |
+
@overload
|
| 577 |
+
def cumsum(
|
| 578 |
+
a: ArrayLike,
|
| 579 |
+
axis: None | SupportsIndex = ...,
|
| 580 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 581 |
+
out: None = ...,
|
| 582 |
+
) -> NDArray[_SCT]: ...
|
| 583 |
+
@overload
|
| 584 |
+
def cumsum(
|
| 585 |
+
a: ArrayLike,
|
| 586 |
+
axis: None | SupportsIndex = ...,
|
| 587 |
+
dtype: DTypeLike = ...,
|
| 588 |
+
out: None = ...,
|
| 589 |
+
) -> NDArray[Any]: ...
|
| 590 |
+
@overload
|
| 591 |
+
def cumsum(
|
| 592 |
+
a: ArrayLike,
|
| 593 |
+
axis: None | SupportsIndex = ...,
|
| 594 |
+
dtype: DTypeLike = ...,
|
| 595 |
+
out: _ArrayType = ...,
|
| 596 |
+
) -> _ArrayType: ...
|
| 597 |
+
|
| 598 |
+
@overload
|
| 599 |
+
def ptp(
|
| 600 |
+
a: _ArrayLike[_SCT],
|
| 601 |
+
axis: None = ...,
|
| 602 |
+
out: None = ...,
|
| 603 |
+
keepdims: Literal[False] = ...,
|
| 604 |
+
) -> _SCT: ...
|
| 605 |
+
@overload
|
| 606 |
+
def ptp(
|
| 607 |
+
a: ArrayLike,
|
| 608 |
+
axis: None | _ShapeLike = ...,
|
| 609 |
+
out: None = ...,
|
| 610 |
+
keepdims: bool = ...,
|
| 611 |
+
) -> Any: ...
|
| 612 |
+
@overload
|
| 613 |
+
def ptp(
|
| 614 |
+
a: ArrayLike,
|
| 615 |
+
axis: None | _ShapeLike = ...,
|
| 616 |
+
out: _ArrayType = ...,
|
| 617 |
+
keepdims: bool = ...,
|
| 618 |
+
) -> _ArrayType: ...
|
| 619 |
+
|
| 620 |
+
@overload
|
| 621 |
+
def amax(
|
| 622 |
+
a: _ArrayLike[_SCT],
|
| 623 |
+
axis: None = ...,
|
| 624 |
+
out: None = ...,
|
| 625 |
+
keepdims: Literal[False] = ...,
|
| 626 |
+
initial: _NumberLike_co = ...,
|
| 627 |
+
where: _ArrayLikeBool_co = ...,
|
| 628 |
+
) -> _SCT: ...
|
| 629 |
+
@overload
|
| 630 |
+
def amax(
|
| 631 |
+
a: ArrayLike,
|
| 632 |
+
axis: None | _ShapeLike = ...,
|
| 633 |
+
out: None = ...,
|
| 634 |
+
keepdims: bool = ...,
|
| 635 |
+
initial: _NumberLike_co = ...,
|
| 636 |
+
where: _ArrayLikeBool_co = ...,
|
| 637 |
+
) -> Any: ...
|
| 638 |
+
@overload
|
| 639 |
+
def amax(
|
| 640 |
+
a: ArrayLike,
|
| 641 |
+
axis: None | _ShapeLike = ...,
|
| 642 |
+
out: _ArrayType = ...,
|
| 643 |
+
keepdims: bool = ...,
|
| 644 |
+
initial: _NumberLike_co = ...,
|
| 645 |
+
where: _ArrayLikeBool_co = ...,
|
| 646 |
+
) -> _ArrayType: ...
|
| 647 |
+
|
| 648 |
+
@overload
|
| 649 |
+
def amin(
|
| 650 |
+
a: _ArrayLike[_SCT],
|
| 651 |
+
axis: None = ...,
|
| 652 |
+
out: None = ...,
|
| 653 |
+
keepdims: Literal[False] = ...,
|
| 654 |
+
initial: _NumberLike_co = ...,
|
| 655 |
+
where: _ArrayLikeBool_co = ...,
|
| 656 |
+
) -> _SCT: ...
|
| 657 |
+
@overload
|
| 658 |
+
def amin(
|
| 659 |
+
a: ArrayLike,
|
| 660 |
+
axis: None | _ShapeLike = ...,
|
| 661 |
+
out: None = ...,
|
| 662 |
+
keepdims: bool = ...,
|
| 663 |
+
initial: _NumberLike_co = ...,
|
| 664 |
+
where: _ArrayLikeBool_co = ...,
|
| 665 |
+
) -> Any: ...
|
| 666 |
+
@overload
|
| 667 |
+
def amin(
|
| 668 |
+
a: ArrayLike,
|
| 669 |
+
axis: None | _ShapeLike = ...,
|
| 670 |
+
out: _ArrayType = ...,
|
| 671 |
+
keepdims: bool = ...,
|
| 672 |
+
initial: _NumberLike_co = ...,
|
| 673 |
+
where: _ArrayLikeBool_co = ...,
|
| 674 |
+
) -> _ArrayType: ...
|
| 675 |
+
|
| 676 |
+
# TODO: `np.prod()``: For object arrays `initial` does not necessarily
|
| 677 |
+
# have to be a numerical scalar.
|
| 678 |
+
# The only requirement is that it is compatible
|
| 679 |
+
# with the `.__mul__()` method(s) of the passed array's elements.
|
| 680 |
+
|
| 681 |
+
# Note that the same situation holds for all wrappers around
|
| 682 |
+
# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`).
|
| 683 |
+
@overload
|
| 684 |
+
def prod(
|
| 685 |
+
a: _ArrayLikeBool_co,
|
| 686 |
+
axis: None = ...,
|
| 687 |
+
dtype: None = ...,
|
| 688 |
+
out: None = ...,
|
| 689 |
+
keepdims: Literal[False] = ...,
|
| 690 |
+
initial: _NumberLike_co = ...,
|
| 691 |
+
where: _ArrayLikeBool_co = ...,
|
| 692 |
+
) -> int_: ...
|
| 693 |
+
@overload
|
| 694 |
+
def prod(
|
| 695 |
+
a: _ArrayLikeUInt_co,
|
| 696 |
+
axis: None = ...,
|
| 697 |
+
dtype: None = ...,
|
| 698 |
+
out: None = ...,
|
| 699 |
+
keepdims: Literal[False] = ...,
|
| 700 |
+
initial: _NumberLike_co = ...,
|
| 701 |
+
where: _ArrayLikeBool_co = ...,
|
| 702 |
+
) -> uint64: ...
|
| 703 |
+
@overload
|
| 704 |
+
def prod(
|
| 705 |
+
a: _ArrayLikeInt_co,
|
| 706 |
+
axis: None = ...,
|
| 707 |
+
dtype: None = ...,
|
| 708 |
+
out: None = ...,
|
| 709 |
+
keepdims: Literal[False] = ...,
|
| 710 |
+
initial: _NumberLike_co = ...,
|
| 711 |
+
where: _ArrayLikeBool_co = ...,
|
| 712 |
+
) -> int64: ...
|
| 713 |
+
@overload
|
| 714 |
+
def prod(
|
| 715 |
+
a: _ArrayLikeFloat_co,
|
| 716 |
+
axis: None = ...,
|
| 717 |
+
dtype: None = ...,
|
| 718 |
+
out: None = ...,
|
| 719 |
+
keepdims: Literal[False] = ...,
|
| 720 |
+
initial: _NumberLike_co = ...,
|
| 721 |
+
where: _ArrayLikeBool_co = ...,
|
| 722 |
+
) -> floating[Any]: ...
|
| 723 |
+
@overload
|
| 724 |
+
def prod(
|
| 725 |
+
a: _ArrayLikeComplex_co,
|
| 726 |
+
axis: None = ...,
|
| 727 |
+
dtype: None = ...,
|
| 728 |
+
out: None = ...,
|
| 729 |
+
keepdims: Literal[False] = ...,
|
| 730 |
+
initial: _NumberLike_co = ...,
|
| 731 |
+
where: _ArrayLikeBool_co = ...,
|
| 732 |
+
) -> complexfloating[Any, Any]: ...
|
| 733 |
+
@overload
|
| 734 |
+
def prod(
|
| 735 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 736 |
+
axis: None | _ShapeLike = ...,
|
| 737 |
+
dtype: None = ...,
|
| 738 |
+
out: None = ...,
|
| 739 |
+
keepdims: bool = ...,
|
| 740 |
+
initial: _NumberLike_co = ...,
|
| 741 |
+
where: _ArrayLikeBool_co = ...,
|
| 742 |
+
) -> Any: ...
|
| 743 |
+
@overload
|
| 744 |
+
def prod(
|
| 745 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 746 |
+
axis: None = ...,
|
| 747 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 748 |
+
out: None = ...,
|
| 749 |
+
keepdims: Literal[False] = ...,
|
| 750 |
+
initial: _NumberLike_co = ...,
|
| 751 |
+
where: _ArrayLikeBool_co = ...,
|
| 752 |
+
) -> _SCT: ...
|
| 753 |
+
@overload
|
| 754 |
+
def prod(
|
| 755 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 756 |
+
axis: None | _ShapeLike = ...,
|
| 757 |
+
dtype: None | DTypeLike = ...,
|
| 758 |
+
out: None = ...,
|
| 759 |
+
keepdims: bool = ...,
|
| 760 |
+
initial: _NumberLike_co = ...,
|
| 761 |
+
where: _ArrayLikeBool_co = ...,
|
| 762 |
+
) -> Any: ...
|
| 763 |
+
@overload
|
| 764 |
+
def prod(
|
| 765 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 766 |
+
axis: None | _ShapeLike = ...,
|
| 767 |
+
dtype: None | DTypeLike = ...,
|
| 768 |
+
out: _ArrayType = ...,
|
| 769 |
+
keepdims: bool = ...,
|
| 770 |
+
initial: _NumberLike_co = ...,
|
| 771 |
+
where: _ArrayLikeBool_co = ...,
|
| 772 |
+
) -> _ArrayType: ...
|
| 773 |
+
|
| 774 |
+
@overload
|
| 775 |
+
def cumprod(
|
| 776 |
+
a: _ArrayLikeBool_co,
|
| 777 |
+
axis: None | SupportsIndex = ...,
|
| 778 |
+
dtype: None = ...,
|
| 779 |
+
out: None = ...,
|
| 780 |
+
) -> NDArray[int_]: ...
|
| 781 |
+
@overload
|
| 782 |
+
def cumprod(
|
| 783 |
+
a: _ArrayLikeUInt_co,
|
| 784 |
+
axis: None | SupportsIndex = ...,
|
| 785 |
+
dtype: None = ...,
|
| 786 |
+
out: None = ...,
|
| 787 |
+
) -> NDArray[uint64]: ...
|
| 788 |
+
@overload
|
| 789 |
+
def cumprod(
|
| 790 |
+
a: _ArrayLikeInt_co,
|
| 791 |
+
axis: None | SupportsIndex = ...,
|
| 792 |
+
dtype: None = ...,
|
| 793 |
+
out: None = ...,
|
| 794 |
+
) -> NDArray[int64]: ...
|
| 795 |
+
@overload
|
| 796 |
+
def cumprod(
|
| 797 |
+
a: _ArrayLikeFloat_co,
|
| 798 |
+
axis: None | SupportsIndex = ...,
|
| 799 |
+
dtype: None = ...,
|
| 800 |
+
out: None = ...,
|
| 801 |
+
) -> NDArray[floating[Any]]: ...
|
| 802 |
+
@overload
|
| 803 |
+
def cumprod(
|
| 804 |
+
a: _ArrayLikeComplex_co,
|
| 805 |
+
axis: None | SupportsIndex = ...,
|
| 806 |
+
dtype: None = ...,
|
| 807 |
+
out: None = ...,
|
| 808 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 809 |
+
@overload
|
| 810 |
+
def cumprod(
|
| 811 |
+
a: _ArrayLikeObject_co,
|
| 812 |
+
axis: None | SupportsIndex = ...,
|
| 813 |
+
dtype: None = ...,
|
| 814 |
+
out: None = ...,
|
| 815 |
+
) -> NDArray[object_]: ...
|
| 816 |
+
@overload
|
| 817 |
+
def cumprod(
|
| 818 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 819 |
+
axis: None | SupportsIndex = ...,
|
| 820 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 821 |
+
out: None = ...,
|
| 822 |
+
) -> NDArray[_SCT]: ...
|
| 823 |
+
@overload
|
| 824 |
+
def cumprod(
|
| 825 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 826 |
+
axis: None | SupportsIndex = ...,
|
| 827 |
+
dtype: DTypeLike = ...,
|
| 828 |
+
out: None = ...,
|
| 829 |
+
) -> NDArray[Any]: ...
|
| 830 |
+
@overload
|
| 831 |
+
def cumprod(
|
| 832 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 833 |
+
axis: None | SupportsIndex = ...,
|
| 834 |
+
dtype: DTypeLike = ...,
|
| 835 |
+
out: _ArrayType = ...,
|
| 836 |
+
) -> _ArrayType: ...
|
| 837 |
+
|
| 838 |
+
def ndim(a: ArrayLike) -> int: ...
|
| 839 |
+
|
| 840 |
+
def size(a: ArrayLike, axis: None | int = ...) -> int: ...
|
| 841 |
+
|
| 842 |
+
@overload
|
| 843 |
+
def around(
|
| 844 |
+
a: _BoolLike_co,
|
| 845 |
+
decimals: SupportsIndex = ...,
|
| 846 |
+
out: None = ...,
|
| 847 |
+
) -> float16: ...
|
| 848 |
+
@overload
|
| 849 |
+
def around(
|
| 850 |
+
a: _SCT_uifcO,
|
| 851 |
+
decimals: SupportsIndex = ...,
|
| 852 |
+
out: None = ...,
|
| 853 |
+
) -> _SCT_uifcO: ...
|
| 854 |
+
@overload
|
| 855 |
+
def around(
|
| 856 |
+
a: _ComplexLike_co | object_,
|
| 857 |
+
decimals: SupportsIndex = ...,
|
| 858 |
+
out: None = ...,
|
| 859 |
+
) -> Any: ...
|
| 860 |
+
@overload
|
| 861 |
+
def around(
|
| 862 |
+
a: _ArrayLikeBool_co,
|
| 863 |
+
decimals: SupportsIndex = ...,
|
| 864 |
+
out: None = ...,
|
| 865 |
+
) -> NDArray[float16]: ...
|
| 866 |
+
@overload
|
| 867 |
+
def around(
|
| 868 |
+
a: _ArrayLike[_SCT_uifcO],
|
| 869 |
+
decimals: SupportsIndex = ...,
|
| 870 |
+
out: None = ...,
|
| 871 |
+
) -> NDArray[_SCT_uifcO]: ...
|
| 872 |
+
@overload
|
| 873 |
+
def around(
|
| 874 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 875 |
+
decimals: SupportsIndex = ...,
|
| 876 |
+
out: None = ...,
|
| 877 |
+
) -> NDArray[Any]: ...
|
| 878 |
+
@overload
|
| 879 |
+
def around(
|
| 880 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 881 |
+
decimals: SupportsIndex = ...,
|
| 882 |
+
out: _ArrayType = ...,
|
| 883 |
+
) -> _ArrayType: ...
|
| 884 |
+
|
| 885 |
+
# `mean` overloads: with no `dtype`/`out` and a full (axis=None) reduction,
# real input yields a `floating` scalar and complex input a `complexfloating`
# scalar; an explicit scalar-type `dtype` fixes the result type; any
# `axis`/`keepdims` combination that may preserve dimensions falls back to
# `Any`; an explicit `out` array is returned as-is.
@overload
def mean(
    a: _ArrayLikeFloat_co,
    axis: None = ...,
    dtype: None = ...,
    out: None = ...,
    keepdims: Literal[False] = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> floating[Any]: ...
@overload
def mean(
    a: _ArrayLikeComplex_co,
    axis: None = ...,
    dtype: None = ...,
    out: None = ...,
    keepdims: Literal[False] = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> complexfloating[Any, Any]: ...
@overload
def mean(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    dtype: None = ...,
    out: None = ...,
    keepdims: bool = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
def mean(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None = ...,
    dtype: _DTypeLike[_SCT] = ...,
    out: None = ...,
    keepdims: Literal[False] = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> _SCT: ...
@overload
def mean(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    dtype: DTypeLike = ...,
    out: None = ...,
    keepdims: bool = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
def mean(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    dtype: DTypeLike = ...,
    out: _ArrayType = ...,
    keepdims: bool = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> _ArrayType: ...
|
| 945 |
+
|
| 946 |
+
# `std` overloads: with no `dtype`/`out`, a full reduction returns a real
# `floating` scalar even for complex input (the standard deviation of a
# complex array is real); an explicit scalar-type `dtype` fixes the result
# type; shape-preserving `axis`/`keepdims` combinations fall back to `Any`;
# an explicit `out` array is returned as-is.
@overload
def std(
    a: _ArrayLikeComplex_co,
    axis: None = ...,
    dtype: None = ...,
    out: None = ...,
    ddof: float = ...,
    keepdims: Literal[False] = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> floating[Any]: ...
@overload
def std(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    dtype: None = ...,
    out: None = ...,
    ddof: float = ...,
    keepdims: bool = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
def std(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None = ...,
    dtype: _DTypeLike[_SCT] = ...,
    out: None = ...,
    ddof: float = ...,
    keepdims: Literal[False] = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> _SCT: ...
@overload
def std(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    dtype: DTypeLike = ...,
    out: None = ...,
    ddof: float = ...,
    keepdims: bool = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
def std(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    dtype: DTypeLike = ...,
    out: _ArrayType = ...,
    ddof: float = ...,
    keepdims: bool = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> _ArrayType: ...
|
| 1001 |
+
|
| 1002 |
+
# `var` overloads: mirror the `std` overload set — a full reduction of
# real/complex input returns a real `floating` scalar, an explicit
# scalar-type `dtype` fixes the result type, shape-preserving combinations
# fall back to `Any`, and an explicit `out` array is returned as-is.
@overload
def var(
    a: _ArrayLikeComplex_co,
    axis: None = ...,
    dtype: None = ...,
    out: None = ...,
    ddof: float = ...,
    keepdims: Literal[False] = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> floating[Any]: ...
@overload
def var(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    dtype: None = ...,
    out: None = ...,
    ddof: float = ...,
    keepdims: bool = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
def var(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None = ...,
    dtype: _DTypeLike[_SCT] = ...,
    out: None = ...,
    ddof: float = ...,
    keepdims: Literal[False] = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> _SCT: ...
@overload
def var(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    dtype: DTypeLike = ...,
    out: None = ...,
    ddof: float = ...,
    keepdims: bool = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
def var(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    dtype: DTypeLike = ...,
    out: _ArrayType = ...,
    ddof: float = ...,
    keepdims: bool = ...,
    *,
    where: _ArrayLikeBool_co = ...,
) -> _ArrayType: ...
|
| 1057 |
+
|
| 1058 |
+
# Builtin-shadowing aliases: `np.max`, `np.min` and `np.round` are simply
# `amax`, `amin` and `around` under their familiar names.
max = amax
min = amin
round = around
|
pllava/lib/python3.10/site-packages/numpy/core/function_base.py
ADDED
|
@@ -0,0 +1,551 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import warnings
|
| 3 |
+
import operator
|
| 4 |
+
import types
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from . import numeric as _nx
|
| 8 |
+
from .numeric import result_type, NaN, asanyarray, ndim
|
| 9 |
+
from numpy.core.multiarray import add_docstring
|
| 10 |
+
from numpy.core import overrides
|
| 11 |
+
|
| 12 |
+
# Public API of this module.
__all__ = ['logspace', 'linspace', 'geomspace']


# Partial application of the dispatch decorator with ``module='numpy'``, so
# the decorated functions below report the top-level ``numpy`` namespace as
# their public location during __array_function__ dispatch.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
|
| 20 |
+
dtype=None, axis=None):
|
| 21 |
+
return (start, stop)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@array_function_dispatch(_linspace_dispatcher)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
             axis=0):
    """
    Return evenly spaced numbers over a specified interval.

    Returns `num` evenly spaced samples, calculated over the
    interval [`start`, `stop`].

    The endpoint of the interval can optionally be excluded.

    .. versionchanged:: 1.16.0
        Non-scalar `start` and `stop` are now supported.

    .. versionchanged:: 1.20.0
        Values are rounded towards ``-inf`` instead of ``0`` when an
        integer ``dtype`` is specified. The old behavior can
        still be obtained with ``np.linspace(start, stop, num).astype(int)``

    Parameters
    ----------
    start : array_like
        The starting value of the sequence.
    stop : array_like
        The end value of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced samples, so that `stop` is excluded.  Note that the step
        size changes when `endpoint` is False.
    num : int, optional
        Number of samples to generate. Default is 50. Must be non-negative.
    endpoint : bool, optional
        If True, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    retstep : bool, optional
        If True, return (`samples`, `step`), where `step` is the spacing
        between samples.
    dtype : dtype, optional
        The type of the output array.  If `dtype` is not given, the data type
        is inferred from `start` and `stop`. The inferred dtype will never be
        an integer; `float` is chosen even if the arguments would produce an
        array of integers.

        .. versionadded:: 1.9.0

    axis : int, optional
        The axis in the result to store the samples.  Relevant only if start
        or stop are array-like.  By default (0), the samples will be along a
        new axis inserted at the beginning. Use -1 to get an axis at the end.

        .. versionadded:: 1.16.0

    Returns
    -------
    samples : ndarray
        There are `num` equally spaced samples in the closed interval
        ``[start, stop]`` or the half-open interval ``[start, stop)``
        (depending on whether `endpoint` is True or False).
    step : float, optional
        Only returned if `retstep` is True

        Size of spacing between samples.


    See Also
    --------
    arange : Similar to `linspace`, but uses a step size (instead of the
             number of samples).
    geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
                scale (a geometric progression).
    logspace : Similar to `geomspace`, but with the end points specified as
               logarithms.
    :ref:`how-to-partition`

    Examples
    --------
    >>> np.linspace(2.0, 3.0, num=5)
    array([2.  , 2.25, 2.5 , 2.75, 3.  ])
    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
    array([2. ,  2.2,  2.4,  2.6,  2.8])
    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
    (array([2.  ,  2.25,  2.5 ,  2.75,  3.  ]), 0.25)

    Graphical illustration:

    >>> import matplotlib.pyplot as plt
    >>> N = 8
    >>> y = np.zeros(N)
    >>> x1 = np.linspace(0, 10, N, endpoint=True)
    >>> x2 = np.linspace(0, 10, N, endpoint=False)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()

    """
    # `num` must be a true integer; operator.index raises for floats etc.
    num = operator.index(num)
    if num < 0:
        raise ValueError("Number of samples, %s, must be non-negative." % num)
    # Number of intervals between samples (one fewer than samples when the
    # endpoint is included).
    div = (num - 1) if endpoint else num

    # Convert float/complex array scalars to float, gh-3504
    # and make sure one can use variables that have an __array_interface__, gh-6634
    start = asanyarray(start) * 1.0
    stop = asanyarray(stop) * 1.0

    dt = result_type(start, stop, float(num))
    if dtype is None:
        dtype = dt
        integer_dtype = False
    else:
        integer_dtype = _nx.issubdtype(dtype, _nx.integer)

    delta = stop - start
    # Samples are laid out along a new leading axis, broadcast against the
    # (possibly array-valued) delta; the axis is moved into place below.
    y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta))
    # In-place multiplication y *= delta/div is faster, but prevents the multiplicant
    # from overriding what class is produced, and thus prevents, e.g. use of Quantities,
    # see gh-7142. Hence, we multiply in place only for standard scalar types.
    if div > 0:
        _mult_inplace = _nx.isscalar(delta)
        step = delta / div
        any_step_zero = (
            step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any())
        if any_step_zero:
            # Special handling for denormal numbers, gh-5437
            y /= div
            if _mult_inplace:
                y *= delta
            else:
                y = y * delta
        else:
            if _mult_inplace:
                y *= step
            else:
                y = y * step
    else:
        # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
        # have an undefined step
        step = NaN
        # Multiply with delta to allow possible override of output class.
        y = y * delta

    # Shift the scaled ramp from [0, delta] onto [start, stop].
    y += start

    # Overwrite the last sample so the endpoint is exactly `stop`, rather
    # than the value accumulated through the arithmetic above.
    if endpoint and num > 1:
        y[-1, ...] = stop

    if axis != 0:
        y = _nx.moveaxis(y, 0, axis)

    # Integer dtypes round towards -inf (floor), per the 1.20.0 change.
    if integer_dtype:
        _nx.floor(y, out=y)

    if retstep:
        return y.astype(dtype, copy=False), step
    else:
        return y.astype(dtype, copy=False)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
|
| 186 |
+
dtype=None, axis=None):
|
| 187 |
+
return (start, stop, base)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
@array_function_dispatch(_logspace_dispatcher)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
             axis=0):
    """
    Return numbers spaced evenly on a log scale.

    In linear space, the sequence starts at ``base ** start``
    (`base` to the power of `start`) and ends with ``base ** stop``
    (see `endpoint` below).

    .. versionchanged:: 1.16.0
        Non-scalar `start` and `stop` are now supported.

    .. versionchanged:: 1.25.0
        Non-scalar 'base` is now supported

    Parameters
    ----------
    start : array_like
        ``base ** start`` is the starting value of the sequence.
    stop : array_like
        ``base ** stop`` is the final value of the sequence, unless `endpoint`
        is False.  In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length `num`) are returned.
    num : integer, optional
        Number of samples to generate.  Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    base : array_like, optional
        The base of the log space. The step size between the elements in
        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
        Default is 10.0.
    dtype : dtype
        The type of the output array.  If `dtype` is not given, the data type
        is inferred from `start` and `stop`. The inferred type will never be
        an integer; `float` is chosen even if the arguments would produce an
        array of integers.
    axis : int, optional
        The axis in the result to store the samples.  Relevant only if start,
        stop, or base are array-like.  By default (0), the samples will be
        along a new axis inserted at the beginning. Use -1 to get an axis at
        the end.

        .. versionadded:: 1.16.0


    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    arange : Similar to linspace, with the step size specified instead of the
             number of samples. Note that, when used with a float endpoint, the
             endpoint may or may not be included.
    linspace : Similar to logspace, but with the samples uniformly distributed
               in linear space, instead of log space.
    geomspace : Similar to logspace, but with endpoints specified directly.
    :ref:`how-to-partition`

    Notes
    -----
    If base is a scalar, logspace is equivalent to the code

    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
    ... # doctest: +SKIP
    >>> power(base, y).astype(dtype)
    ... # doctest: +SKIP

    Examples
    --------
    >>> np.logspace(2.0, 3.0, num=4)
    array([ 100.        ,  215.443469  ,  464.15888336, 1000.        ])
    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
    array([100.        ,  177.827941  ,  316.22776602,  562.34132519])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
    array([4.        ,  5.0396842 ,  6.34960421,  8.        ])
    >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1)
    array([[ 4.        ,  5.0396842 ,  6.34960421,  8.        ],
           [ 9.        , 12.98024613, 18.72075441, 27.        ]])

    Graphical illustration:

    >>> import matplotlib.pyplot as plt
    >>> N = 10
    >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
    >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
    >>> y = np.zeros(N)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()

    """
    # Promote all three inputs to a common rank so that an array-valued
    # `base` broadcasts against the linspace result below.
    ndmax = np.broadcast(start, stop, base).ndim
    start, stop, base = (
        np.array(a, copy=False, subok=True, ndmin=ndmax)
        for a in (start, stop, base)
    )
    # Exponents are linearly spaced; the samples are then base ** exponent.
    y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
    # Insert the sample axis into `base` so it lines up with `y`'s layout.
    base = np.expand_dims(base, axis=axis)
    if dtype is None:
        return _nx.power(base, y)
    return _nx.power(base, y).astype(dtype, copy=False)
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
|
| 303 |
+
axis=None):
|
| 304 |
+
return (start, stop)
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
@array_function_dispatch(_geomspace_dispatcher)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
    """
    Return numbers spaced evenly on a log scale (a geometric progression).

    This is similar to `logspace`, but with endpoints specified directly.
    Each output sample is a constant multiple of the previous.

    .. versionchanged:: 1.16.0
        Non-scalar `start` and `stop` are now supported.

    Parameters
    ----------
    start : array_like
        The starting value of the sequence.
    stop : array_like
        The final value of the sequence, unless `endpoint` is False.
        In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length `num`) are returned.
    num : integer, optional
        Number of samples to generate.  Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    dtype : dtype
        The type of the output array.  If `dtype` is not given, the data type
        is inferred from `start` and `stop`. The inferred dtype will never be
        an integer; `float` is chosen even if the arguments would produce an
        array of integers.
    axis : int, optional
        The axis in the result to store the samples.  Relevant only if start
        or stop are array-like.  By default (0), the samples will be along a
        new axis inserted at the beginning. Use -1 to get an axis at the end.

        .. versionadded:: 1.16.0

    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    logspace : Similar to geomspace, but with endpoints specified using log
               and base.
    linspace : Similar to geomspace, but with arithmetic instead of geometric
               progression.
    arange : Similar to linspace, with the step size specified instead of the
             number of samples.
    :ref:`how-to-partition`

    Notes
    -----
    If the inputs or dtype are complex, the output will follow a logarithmic
    spiral in the complex plane.  (There are an infinite number of spirals
    passing through two points; the output will follow the shortest such path.)

    Examples
    --------
    >>> np.geomspace(1, 1000, num=4)
    array([    1.,    10.,   100.,  1000.])
    >>> np.geomspace(1, 1000, num=3, endpoint=False)
    array([  1.,  10., 100.])
    >>> np.geomspace(1, 1000, num=4, endpoint=False)
    array([  1.        ,   5.62341325,  31.6227766 , 177.827941  ])
    >>> np.geomspace(1, 256, num=9)
    array([  1.,   2.,   4.,   8.,  16.,  32.,  64., 128., 256.])

    Note that the above may not produce exact integers:

    >>> np.geomspace(1, 256, num=9, dtype=int)
    array([  1,   2,   4,   7,  16,  32,  63, 127, 256])
    >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
    array([  1,   2,   4,   8,  16,  32,  64, 128, 256])

    Negative, decreasing, and complex inputs are allowed:

    >>> np.geomspace(1000, 1, num=4)
    array([1000.,  100.,   10.,    1.])
    >>> np.geomspace(-1000, -1, num=4)
    array([-1000.,  -100.,   -10.,    -1.])
    >>> np.geomspace(1j, 1000j, num=4)  # Straight line
    array([0.   +1.j, 0.  +10.j, 0. +100.j, 0.+1000.j])
    >>> np.geomspace(-1+0j, 1+0j, num=5)  # Circle
    array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
            6.12323400e-17+1.00000000e+00j,  7.07106781e-01+7.07106781e-01j,
            1.00000000e+00+0.00000000e+00j])

    Graphical illustration of `endpoint` parameter:

    >>> import matplotlib.pyplot as plt
    >>> N = 10
    >>> y = np.zeros(N)
    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.axis([0.5, 2000, 0, 3])
    [0.5, 2000, 0, 3]
    >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
    >>> plt.show()

    """
    start = asanyarray(start)
    stop = asanyarray(stop)
    # A geometric progression through zero has no finite ratio.
    if _nx.any(start == 0) or _nx.any(stop == 0):
        raise ValueError('Geometric sequence cannot include zero')

    dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
    if dtype is None:
        dtype = dt
    else:
        # complex to dtype('complex128'), for instance
        dtype = _nx.dtype(dtype)

    # Promote both arguments to the same dtype in case, for instance, one is
    # complex and another is negative and log would produce NaN otherwise.
    # Copy since we may change things in-place further down.
    start = start.astype(dt, copy=True)
    stop = stop.astype(dt, copy=True)

    # Per-element sign/rotation factor re-applied after computing in the
    # positive-real domain.
    out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
    # Avoid negligible real or imaginary parts in output by rotating to
    # positive real, calculating, then undoing rotation
    if _nx.issubdtype(dt, _nx.complexfloating):
        all_imag = (start.real == 0.) & (stop.real == 0.)
        if _nx.any(all_imag):
            start[all_imag] = start[all_imag].imag
            stop[all_imag] = stop[all_imag].imag
            out_sign[all_imag] = 1j

    # Where both endpoints are negative, compute with their magnitudes and
    # flip the sign of the corresponding outputs afterwards.
    both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
    if _nx.any(both_negative):
        _nx.negative(start, out=start, where=both_negative)
        _nx.negative(stop, out=stop, where=both_negative)
        _nx.negative(out_sign, out=out_sign, where=both_negative)

    log_start = _nx.log10(start)
    log_stop = _nx.log10(stop)
    result = logspace(log_start, log_stop, num=num,
                      endpoint=endpoint, base=10.0, dtype=dtype)

    # Make sure the endpoints match the start and stop arguments. This is
    # necessary because np.exp(np.log(x)) is not necessarily equal to x.
    if num > 0:
        result[0] = start
        if num > 1 and endpoint:
            result[-1] = stop

    result = out_sign * result

    if axis != 0:
        result = _nx.moveaxis(result, 0, axis)

    return result.astype(dtype, copy=False)
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
def _needs_add_docstring(obj):
|
| 466 |
+
"""
|
| 467 |
+
Returns true if the only way to set the docstring of `obj` from python is
|
| 468 |
+
via add_docstring.
|
| 469 |
+
|
| 470 |
+
This function errs on the side of being overly conservative.
|
| 471 |
+
"""
|
| 472 |
+
Py_TPFLAGS_HEAPTYPE = 1 << 9
|
| 473 |
+
|
| 474 |
+
if isinstance(obj, (types.FunctionType, types.MethodType, property)):
|
| 475 |
+
return False
|
| 476 |
+
|
| 477 |
+
if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
|
| 478 |
+
return False
|
| 479 |
+
|
| 480 |
+
return True
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
def _add_docstring(obj, doc, warn_on_python):
    """
    Best-effort attachment of `doc` to `obj` via `add_docstring`.

    When `warn_on_python` is set and `obj` is a pure-python object (whose
    docstring could have been written at the definition site), a
    `UserWarning` is emitted first.  Failures from `add_docstring` are
    swallowed.
    """
    if warn_on_python and not _needs_add_docstring(obj):
        message = (
            "add_newdoc was used on a pure-python object {}. "
            "Prefer to attach it directly to the source."
        ).format(obj)
        warnings.warn(message, UserWarning, stacklevel=3)
    try:
        add_docstring(obj, doc)
    except Exception:
        # Some docstrings are read-only; this is deliberately best-effort.
        pass
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
def add_newdoc(place, obj, doc, warn_on_python=True):
    """
    Add documentation to an existing object, typically one defined in C

    The purpose is to allow easier editing of the docstrings without
    requiring a re-compile.  This exists primarily for internal use within
    numpy itself.

    Parameters
    ----------
    place : str
        The absolute name of the module to import from
    obj : str
        The name of the object to add documentation to, typically a class or
        function name
    doc : {str, Tuple[str, str], List[Tuple[str, str]]}
        A plain docstring for `obj`, an ``(attribute, docstring)`` pair
        applied to that attribute of `obj`, or a list of such pairs.
    warn_on_python : bool
        If True, the default, emit `UserWarning` if this is used to attach
        documentation to a pure-python object.

    Notes
    -----
    This routine never raises an error if the docstring can't be written, but
    will raise an error if the object being documented does not exist.

    It cannot modify read-only docstrings (new-style classes, built-in
    functions), so callers must check manually that the docstrings were
    changed.  Because the underlying `add_docstring` writes into the type's
    ``tp_doc`` slot after ``PyType_Ready`` and leaks a reference to the
    string, it should be avoided when possible.
    """
    # Raises if the module or the named object does not exist.
    target = getattr(__import__(place, globals(), {}, [obj]), obj)

    # Normalise the three accepted `doc` forms into (attr, text) pairs,
    # where attr=None means "document `target` itself".
    if isinstance(doc, str):
        entries = [(None, doc)]
    elif isinstance(doc, tuple):
        entries = [doc]
    elif isinstance(doc, list):
        entries = list(doc)
    else:
        entries = []

    for attr, docstring in entries:
        destination = target if attr is None else getattr(target, attr)
        _add_docstring(destination, docstring.strip(), warn_on_python)
|
pllava/lib/python3.10/site-packages/numpy/core/function_base.pyi
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import (
|
| 2 |
+
Literal as L,
|
| 3 |
+
overload,
|
| 4 |
+
Any,
|
| 5 |
+
SupportsIndex,
|
| 6 |
+
TypeVar,
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
from numpy import floating, complexfloating, generic
|
| 10 |
+
from numpy._typing import (
|
| 11 |
+
NDArray,
|
| 12 |
+
DTypeLike,
|
| 13 |
+
_DTypeLike,
|
| 14 |
+
_ArrayLikeFloat_co,
|
| 15 |
+
_ArrayLikeComplex_co,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 19 |
+
|
| 20 |
+
__all__: list[str]
|
| 21 |
+
|
| 22 |
+
@overload
|
| 23 |
+
def linspace(
|
| 24 |
+
start: _ArrayLikeFloat_co,
|
| 25 |
+
stop: _ArrayLikeFloat_co,
|
| 26 |
+
num: SupportsIndex = ...,
|
| 27 |
+
endpoint: bool = ...,
|
| 28 |
+
retstep: L[False] = ...,
|
| 29 |
+
dtype: None = ...,
|
| 30 |
+
axis: SupportsIndex = ...,
|
| 31 |
+
) -> NDArray[floating[Any]]: ...
|
| 32 |
+
@overload
|
| 33 |
+
def linspace(
|
| 34 |
+
start: _ArrayLikeComplex_co,
|
| 35 |
+
stop: _ArrayLikeComplex_co,
|
| 36 |
+
num: SupportsIndex = ...,
|
| 37 |
+
endpoint: bool = ...,
|
| 38 |
+
retstep: L[False] = ...,
|
| 39 |
+
dtype: None = ...,
|
| 40 |
+
axis: SupportsIndex = ...,
|
| 41 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 42 |
+
@overload
|
| 43 |
+
def linspace(
|
| 44 |
+
start: _ArrayLikeComplex_co,
|
| 45 |
+
stop: _ArrayLikeComplex_co,
|
| 46 |
+
num: SupportsIndex = ...,
|
| 47 |
+
endpoint: bool = ...,
|
| 48 |
+
retstep: L[False] = ...,
|
| 49 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 50 |
+
axis: SupportsIndex = ...,
|
| 51 |
+
) -> NDArray[_SCT]: ...
|
| 52 |
+
@overload
|
| 53 |
+
def linspace(
|
| 54 |
+
start: _ArrayLikeComplex_co,
|
| 55 |
+
stop: _ArrayLikeComplex_co,
|
| 56 |
+
num: SupportsIndex = ...,
|
| 57 |
+
endpoint: bool = ...,
|
| 58 |
+
retstep: L[False] = ...,
|
| 59 |
+
dtype: DTypeLike = ...,
|
| 60 |
+
axis: SupportsIndex = ...,
|
| 61 |
+
) -> NDArray[Any]: ...
|
| 62 |
+
@overload
|
| 63 |
+
def linspace(
|
| 64 |
+
start: _ArrayLikeFloat_co,
|
| 65 |
+
stop: _ArrayLikeFloat_co,
|
| 66 |
+
num: SupportsIndex = ...,
|
| 67 |
+
endpoint: bool = ...,
|
| 68 |
+
retstep: L[True] = ...,
|
| 69 |
+
dtype: None = ...,
|
| 70 |
+
axis: SupportsIndex = ...,
|
| 71 |
+
) -> tuple[NDArray[floating[Any]], floating[Any]]: ...
|
| 72 |
+
@overload
|
| 73 |
+
def linspace(
|
| 74 |
+
start: _ArrayLikeComplex_co,
|
| 75 |
+
stop: _ArrayLikeComplex_co,
|
| 76 |
+
num: SupportsIndex = ...,
|
| 77 |
+
endpoint: bool = ...,
|
| 78 |
+
retstep: L[True] = ...,
|
| 79 |
+
dtype: None = ...,
|
| 80 |
+
axis: SupportsIndex = ...,
|
| 81 |
+
) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ...
|
| 82 |
+
@overload
|
| 83 |
+
def linspace(
|
| 84 |
+
start: _ArrayLikeComplex_co,
|
| 85 |
+
stop: _ArrayLikeComplex_co,
|
| 86 |
+
num: SupportsIndex = ...,
|
| 87 |
+
endpoint: bool = ...,
|
| 88 |
+
retstep: L[True] = ...,
|
| 89 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 90 |
+
axis: SupportsIndex = ...,
|
| 91 |
+
) -> tuple[NDArray[_SCT], _SCT]: ...
|
| 92 |
+
@overload
|
| 93 |
+
def linspace(
|
| 94 |
+
start: _ArrayLikeComplex_co,
|
| 95 |
+
stop: _ArrayLikeComplex_co,
|
| 96 |
+
num: SupportsIndex = ...,
|
| 97 |
+
endpoint: bool = ...,
|
| 98 |
+
retstep: L[True] = ...,
|
| 99 |
+
dtype: DTypeLike = ...,
|
| 100 |
+
axis: SupportsIndex = ...,
|
| 101 |
+
) -> tuple[NDArray[Any], Any]: ...
|
| 102 |
+
|
| 103 |
+
@overload
|
| 104 |
+
def logspace(
|
| 105 |
+
start: _ArrayLikeFloat_co,
|
| 106 |
+
stop: _ArrayLikeFloat_co,
|
| 107 |
+
num: SupportsIndex = ...,
|
| 108 |
+
endpoint: bool = ...,
|
| 109 |
+
base: _ArrayLikeFloat_co = ...,
|
| 110 |
+
dtype: None = ...,
|
| 111 |
+
axis: SupportsIndex = ...,
|
| 112 |
+
) -> NDArray[floating[Any]]: ...
|
| 113 |
+
@overload
|
| 114 |
+
def logspace(
|
| 115 |
+
start: _ArrayLikeComplex_co,
|
| 116 |
+
stop: _ArrayLikeComplex_co,
|
| 117 |
+
num: SupportsIndex = ...,
|
| 118 |
+
endpoint: bool = ...,
|
| 119 |
+
base: _ArrayLikeComplex_co = ...,
|
| 120 |
+
dtype: None = ...,
|
| 121 |
+
axis: SupportsIndex = ...,
|
| 122 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 123 |
+
@overload
|
| 124 |
+
def logspace(
|
| 125 |
+
start: _ArrayLikeComplex_co,
|
| 126 |
+
stop: _ArrayLikeComplex_co,
|
| 127 |
+
num: SupportsIndex = ...,
|
| 128 |
+
endpoint: bool = ...,
|
| 129 |
+
base: _ArrayLikeComplex_co = ...,
|
| 130 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 131 |
+
axis: SupportsIndex = ...,
|
| 132 |
+
) -> NDArray[_SCT]: ...
|
| 133 |
+
@overload
|
| 134 |
+
def logspace(
|
| 135 |
+
start: _ArrayLikeComplex_co,
|
| 136 |
+
stop: _ArrayLikeComplex_co,
|
| 137 |
+
num: SupportsIndex = ...,
|
| 138 |
+
endpoint: bool = ...,
|
| 139 |
+
base: _ArrayLikeComplex_co = ...,
|
| 140 |
+
dtype: DTypeLike = ...,
|
| 141 |
+
axis: SupportsIndex = ...,
|
| 142 |
+
) -> NDArray[Any]: ...
|
| 143 |
+
|
| 144 |
+
@overload
|
| 145 |
+
def geomspace(
|
| 146 |
+
start: _ArrayLikeFloat_co,
|
| 147 |
+
stop: _ArrayLikeFloat_co,
|
| 148 |
+
num: SupportsIndex = ...,
|
| 149 |
+
endpoint: bool = ...,
|
| 150 |
+
dtype: None = ...,
|
| 151 |
+
axis: SupportsIndex = ...,
|
| 152 |
+
) -> NDArray[floating[Any]]: ...
|
| 153 |
+
@overload
|
| 154 |
+
def geomspace(
|
| 155 |
+
start: _ArrayLikeComplex_co,
|
| 156 |
+
stop: _ArrayLikeComplex_co,
|
| 157 |
+
num: SupportsIndex = ...,
|
| 158 |
+
endpoint: bool = ...,
|
| 159 |
+
dtype: None = ...,
|
| 160 |
+
axis: SupportsIndex = ...,
|
| 161 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 162 |
+
@overload
|
| 163 |
+
def geomspace(
|
| 164 |
+
start: _ArrayLikeComplex_co,
|
| 165 |
+
stop: _ArrayLikeComplex_co,
|
| 166 |
+
num: SupportsIndex = ...,
|
| 167 |
+
endpoint: bool = ...,
|
| 168 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 169 |
+
axis: SupportsIndex = ...,
|
| 170 |
+
) -> NDArray[_SCT]: ...
|
| 171 |
+
@overload
|
| 172 |
+
def geomspace(
|
| 173 |
+
start: _ArrayLikeComplex_co,
|
| 174 |
+
stop: _ArrayLikeComplex_co,
|
| 175 |
+
num: SupportsIndex = ...,
|
| 176 |
+
endpoint: bool = ...,
|
| 177 |
+
dtype: DTypeLike = ...,
|
| 178 |
+
axis: SupportsIndex = ...,
|
| 179 |
+
) -> NDArray[Any]: ...
|
| 180 |
+
|
| 181 |
+
# Re-exported to `np.lib.function_base`
|
| 182 |
+
def add_newdoc(
|
| 183 |
+
place: str,
|
| 184 |
+
obj: str,
|
| 185 |
+
doc: str | tuple[str, str] | list[tuple[str, str]],
|
| 186 |
+
warn_on_python: bool = ...,
|
| 187 |
+
) -> None: ...
|
pllava/lib/python3.10/site-packages/numpy/core/getlimits.pyi
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numpy import (
|
| 2 |
+
finfo as finfo,
|
| 3 |
+
iinfo as iinfo,
|
| 4 |
+
)
|
| 5 |
+
|
| 6 |
+
__all__: list[str]
|
pllava/lib/python3.10/site-packages/numpy/core/numeric.pyi
ADDED
|
@@ -0,0 +1,660 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Callable, Sequence
|
| 2 |
+
from typing import (
|
| 3 |
+
Any,
|
| 4 |
+
overload,
|
| 5 |
+
TypeVar,
|
| 6 |
+
Literal,
|
| 7 |
+
SupportsAbs,
|
| 8 |
+
SupportsIndex,
|
| 9 |
+
NoReturn,
|
| 10 |
+
)
|
| 11 |
+
if sys.version_info >= (3, 10):
|
| 12 |
+
from typing import TypeGuard
|
| 13 |
+
else:
|
| 14 |
+
from typing_extensions import TypeGuard
|
| 15 |
+
|
| 16 |
+
from numpy import (
|
| 17 |
+
ComplexWarning as ComplexWarning,
|
| 18 |
+
generic,
|
| 19 |
+
unsignedinteger,
|
| 20 |
+
signedinteger,
|
| 21 |
+
floating,
|
| 22 |
+
complexfloating,
|
| 23 |
+
bool_,
|
| 24 |
+
int_,
|
| 25 |
+
intp,
|
| 26 |
+
float64,
|
| 27 |
+
timedelta64,
|
| 28 |
+
object_,
|
| 29 |
+
_OrderKACF,
|
| 30 |
+
_OrderCF,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
from numpy._typing import (
|
| 34 |
+
ArrayLike,
|
| 35 |
+
NDArray,
|
| 36 |
+
DTypeLike,
|
| 37 |
+
_ShapeLike,
|
| 38 |
+
_DTypeLike,
|
| 39 |
+
_ArrayLike,
|
| 40 |
+
_SupportsArrayFunc,
|
| 41 |
+
_ScalarLike_co,
|
| 42 |
+
_ArrayLikeBool_co,
|
| 43 |
+
_ArrayLikeUInt_co,
|
| 44 |
+
_ArrayLikeInt_co,
|
| 45 |
+
_ArrayLikeFloat_co,
|
| 46 |
+
_ArrayLikeComplex_co,
|
| 47 |
+
_ArrayLikeTD64_co,
|
| 48 |
+
_ArrayLikeObject_co,
|
| 49 |
+
_ArrayLikeUnknown,
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
_T = TypeVar("_T")
|
| 53 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 54 |
+
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
|
| 55 |
+
|
| 56 |
+
_CorrelateMode = Literal["valid", "same", "full"]
|
| 57 |
+
|
| 58 |
+
__all__: list[str]
|
| 59 |
+
|
| 60 |
+
@overload
|
| 61 |
+
def zeros_like(
|
| 62 |
+
a: _ArrayType,
|
| 63 |
+
dtype: None = ...,
|
| 64 |
+
order: _OrderKACF = ...,
|
| 65 |
+
subok: Literal[True] = ...,
|
| 66 |
+
shape: None = ...,
|
| 67 |
+
) -> _ArrayType: ...
|
| 68 |
+
@overload
|
| 69 |
+
def zeros_like(
|
| 70 |
+
a: _ArrayLike[_SCT],
|
| 71 |
+
dtype: None = ...,
|
| 72 |
+
order: _OrderKACF = ...,
|
| 73 |
+
subok: bool = ...,
|
| 74 |
+
shape: None | _ShapeLike = ...,
|
| 75 |
+
) -> NDArray[_SCT]: ...
|
| 76 |
+
@overload
|
| 77 |
+
def zeros_like(
|
| 78 |
+
a: object,
|
| 79 |
+
dtype: None = ...,
|
| 80 |
+
order: _OrderKACF = ...,
|
| 81 |
+
subok: bool = ...,
|
| 82 |
+
shape: None | _ShapeLike= ...,
|
| 83 |
+
) -> NDArray[Any]: ...
|
| 84 |
+
@overload
|
| 85 |
+
def zeros_like(
|
| 86 |
+
a: Any,
|
| 87 |
+
dtype: _DTypeLike[_SCT],
|
| 88 |
+
order: _OrderKACF = ...,
|
| 89 |
+
subok: bool = ...,
|
| 90 |
+
shape: None | _ShapeLike= ...,
|
| 91 |
+
) -> NDArray[_SCT]: ...
|
| 92 |
+
@overload
|
| 93 |
+
def zeros_like(
|
| 94 |
+
a: Any,
|
| 95 |
+
dtype: DTypeLike,
|
| 96 |
+
order: _OrderKACF = ...,
|
| 97 |
+
subok: bool = ...,
|
| 98 |
+
shape: None | _ShapeLike= ...,
|
| 99 |
+
) -> NDArray[Any]: ...
|
| 100 |
+
|
| 101 |
+
@overload
|
| 102 |
+
def ones(
|
| 103 |
+
shape: _ShapeLike,
|
| 104 |
+
dtype: None = ...,
|
| 105 |
+
order: _OrderCF = ...,
|
| 106 |
+
*,
|
| 107 |
+
like: _SupportsArrayFunc = ...,
|
| 108 |
+
) -> NDArray[float64]: ...
|
| 109 |
+
@overload
|
| 110 |
+
def ones(
|
| 111 |
+
shape: _ShapeLike,
|
| 112 |
+
dtype: _DTypeLike[_SCT],
|
| 113 |
+
order: _OrderCF = ...,
|
| 114 |
+
*,
|
| 115 |
+
like: _SupportsArrayFunc = ...,
|
| 116 |
+
) -> NDArray[_SCT]: ...
|
| 117 |
+
@overload
|
| 118 |
+
def ones(
|
| 119 |
+
shape: _ShapeLike,
|
| 120 |
+
dtype: DTypeLike,
|
| 121 |
+
order: _OrderCF = ...,
|
| 122 |
+
*,
|
| 123 |
+
like: _SupportsArrayFunc = ...,
|
| 124 |
+
) -> NDArray[Any]: ...
|
| 125 |
+
|
| 126 |
+
@overload
|
| 127 |
+
def ones_like(
|
| 128 |
+
a: _ArrayType,
|
| 129 |
+
dtype: None = ...,
|
| 130 |
+
order: _OrderKACF = ...,
|
| 131 |
+
subok: Literal[True] = ...,
|
| 132 |
+
shape: None = ...,
|
| 133 |
+
) -> _ArrayType: ...
|
| 134 |
+
@overload
|
| 135 |
+
def ones_like(
|
| 136 |
+
a: _ArrayLike[_SCT],
|
| 137 |
+
dtype: None = ...,
|
| 138 |
+
order: _OrderKACF = ...,
|
| 139 |
+
subok: bool = ...,
|
| 140 |
+
shape: None | _ShapeLike = ...,
|
| 141 |
+
) -> NDArray[_SCT]: ...
|
| 142 |
+
@overload
|
| 143 |
+
def ones_like(
|
| 144 |
+
a: object,
|
| 145 |
+
dtype: None = ...,
|
| 146 |
+
order: _OrderKACF = ...,
|
| 147 |
+
subok: bool = ...,
|
| 148 |
+
shape: None | _ShapeLike= ...,
|
| 149 |
+
) -> NDArray[Any]: ...
|
| 150 |
+
@overload
|
| 151 |
+
def ones_like(
|
| 152 |
+
a: Any,
|
| 153 |
+
dtype: _DTypeLike[_SCT],
|
| 154 |
+
order: _OrderKACF = ...,
|
| 155 |
+
subok: bool = ...,
|
| 156 |
+
shape: None | _ShapeLike= ...,
|
| 157 |
+
) -> NDArray[_SCT]: ...
|
| 158 |
+
@overload
|
| 159 |
+
def ones_like(
|
| 160 |
+
a: Any,
|
| 161 |
+
dtype: DTypeLike,
|
| 162 |
+
order: _OrderKACF = ...,
|
| 163 |
+
subok: bool = ...,
|
| 164 |
+
shape: None | _ShapeLike= ...,
|
| 165 |
+
) -> NDArray[Any]: ...
|
| 166 |
+
|
| 167 |
+
@overload
|
| 168 |
+
def full(
|
| 169 |
+
shape: _ShapeLike,
|
| 170 |
+
fill_value: Any,
|
| 171 |
+
dtype: None = ...,
|
| 172 |
+
order: _OrderCF = ...,
|
| 173 |
+
*,
|
| 174 |
+
like: _SupportsArrayFunc = ...,
|
| 175 |
+
) -> NDArray[Any]: ...
|
| 176 |
+
@overload
|
| 177 |
+
def full(
|
| 178 |
+
shape: _ShapeLike,
|
| 179 |
+
fill_value: Any,
|
| 180 |
+
dtype: _DTypeLike[_SCT],
|
| 181 |
+
order: _OrderCF = ...,
|
| 182 |
+
*,
|
| 183 |
+
like: _SupportsArrayFunc = ...,
|
| 184 |
+
) -> NDArray[_SCT]: ...
|
| 185 |
+
@overload
|
| 186 |
+
def full(
|
| 187 |
+
shape: _ShapeLike,
|
| 188 |
+
fill_value: Any,
|
| 189 |
+
dtype: DTypeLike,
|
| 190 |
+
order: _OrderCF = ...,
|
| 191 |
+
*,
|
| 192 |
+
like: _SupportsArrayFunc = ...,
|
| 193 |
+
) -> NDArray[Any]: ...
|
| 194 |
+
|
| 195 |
+
@overload
|
| 196 |
+
def full_like(
|
| 197 |
+
a: _ArrayType,
|
| 198 |
+
fill_value: Any,
|
| 199 |
+
dtype: None = ...,
|
| 200 |
+
order: _OrderKACF = ...,
|
| 201 |
+
subok: Literal[True] = ...,
|
| 202 |
+
shape: None = ...,
|
| 203 |
+
) -> _ArrayType: ...
|
| 204 |
+
@overload
|
| 205 |
+
def full_like(
|
| 206 |
+
a: _ArrayLike[_SCT],
|
| 207 |
+
fill_value: Any,
|
| 208 |
+
dtype: None = ...,
|
| 209 |
+
order: _OrderKACF = ...,
|
| 210 |
+
subok: bool = ...,
|
| 211 |
+
shape: None | _ShapeLike = ...,
|
| 212 |
+
) -> NDArray[_SCT]: ...
|
| 213 |
+
@overload
|
| 214 |
+
def full_like(
|
| 215 |
+
a: object,
|
| 216 |
+
fill_value: Any,
|
| 217 |
+
dtype: None = ...,
|
| 218 |
+
order: _OrderKACF = ...,
|
| 219 |
+
subok: bool = ...,
|
| 220 |
+
shape: None | _ShapeLike= ...,
|
| 221 |
+
) -> NDArray[Any]: ...
|
| 222 |
+
@overload
|
| 223 |
+
def full_like(
|
| 224 |
+
a: Any,
|
| 225 |
+
fill_value: Any,
|
| 226 |
+
dtype: _DTypeLike[_SCT],
|
| 227 |
+
order: _OrderKACF = ...,
|
| 228 |
+
subok: bool = ...,
|
| 229 |
+
shape: None | _ShapeLike= ...,
|
| 230 |
+
) -> NDArray[_SCT]: ...
|
| 231 |
+
@overload
|
| 232 |
+
def full_like(
|
| 233 |
+
a: Any,
|
| 234 |
+
fill_value: Any,
|
| 235 |
+
dtype: DTypeLike,
|
| 236 |
+
order: _OrderKACF = ...,
|
| 237 |
+
subok: bool = ...,
|
| 238 |
+
shape: None | _ShapeLike= ...,
|
| 239 |
+
) -> NDArray[Any]: ...
|
| 240 |
+
|
| 241 |
+
@overload
|
| 242 |
+
def count_nonzero(
|
| 243 |
+
a: ArrayLike,
|
| 244 |
+
axis: None = ...,
|
| 245 |
+
*,
|
| 246 |
+
keepdims: Literal[False] = ...,
|
| 247 |
+
) -> int: ...
|
| 248 |
+
@overload
|
| 249 |
+
def count_nonzero(
|
| 250 |
+
a: ArrayLike,
|
| 251 |
+
axis: _ShapeLike = ...,
|
| 252 |
+
*,
|
| 253 |
+
keepdims: bool = ...,
|
| 254 |
+
) -> Any: ... # TODO: np.intp or ndarray[np.intp]
|
| 255 |
+
|
| 256 |
+
def isfortran(a: NDArray[Any] | generic) -> bool: ...
|
| 257 |
+
|
| 258 |
+
def argwhere(a: ArrayLike) -> NDArray[intp]: ...
|
| 259 |
+
|
| 260 |
+
def flatnonzero(a: ArrayLike) -> NDArray[intp]: ...
|
| 261 |
+
|
| 262 |
+
@overload
|
| 263 |
+
def correlate(
|
| 264 |
+
a: _ArrayLikeUnknown,
|
| 265 |
+
v: _ArrayLikeUnknown,
|
| 266 |
+
mode: _CorrelateMode = ...,
|
| 267 |
+
) -> NDArray[Any]: ...
|
| 268 |
+
@overload
|
| 269 |
+
def correlate(
|
| 270 |
+
a: _ArrayLikeBool_co,
|
| 271 |
+
v: _ArrayLikeBool_co,
|
| 272 |
+
mode: _CorrelateMode = ...,
|
| 273 |
+
) -> NDArray[bool_]: ...
|
| 274 |
+
@overload
|
| 275 |
+
def correlate(
|
| 276 |
+
a: _ArrayLikeUInt_co,
|
| 277 |
+
v: _ArrayLikeUInt_co,
|
| 278 |
+
mode: _CorrelateMode = ...,
|
| 279 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
| 280 |
+
@overload
|
| 281 |
+
def correlate(
|
| 282 |
+
a: _ArrayLikeInt_co,
|
| 283 |
+
v: _ArrayLikeInt_co,
|
| 284 |
+
mode: _CorrelateMode = ...,
|
| 285 |
+
) -> NDArray[signedinteger[Any]]: ...
|
| 286 |
+
@overload
|
| 287 |
+
def correlate(
|
| 288 |
+
a: _ArrayLikeFloat_co,
|
| 289 |
+
v: _ArrayLikeFloat_co,
|
| 290 |
+
mode: _CorrelateMode = ...,
|
| 291 |
+
) -> NDArray[floating[Any]]: ...
|
| 292 |
+
@overload
|
| 293 |
+
def correlate(
|
| 294 |
+
a: _ArrayLikeComplex_co,
|
| 295 |
+
v: _ArrayLikeComplex_co,
|
| 296 |
+
mode: _CorrelateMode = ...,
|
| 297 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 298 |
+
@overload
|
| 299 |
+
def correlate(
|
| 300 |
+
a: _ArrayLikeTD64_co,
|
| 301 |
+
v: _ArrayLikeTD64_co,
|
| 302 |
+
mode: _CorrelateMode = ...,
|
| 303 |
+
) -> NDArray[timedelta64]: ...
|
| 304 |
+
@overload
|
| 305 |
+
def correlate(
|
| 306 |
+
a: _ArrayLikeObject_co,
|
| 307 |
+
v: _ArrayLikeObject_co,
|
| 308 |
+
mode: _CorrelateMode = ...,
|
| 309 |
+
) -> NDArray[object_]: ...
|
| 310 |
+
|
| 311 |
+
@overload
|
| 312 |
+
def convolve(
|
| 313 |
+
a: _ArrayLikeUnknown,
|
| 314 |
+
v: _ArrayLikeUnknown,
|
| 315 |
+
mode: _CorrelateMode = ...,
|
| 316 |
+
) -> NDArray[Any]: ...
|
| 317 |
+
@overload
|
| 318 |
+
def convolve(
|
| 319 |
+
a: _ArrayLikeBool_co,
|
| 320 |
+
v: _ArrayLikeBool_co,
|
| 321 |
+
mode: _CorrelateMode = ...,
|
| 322 |
+
) -> NDArray[bool_]: ...
|
| 323 |
+
@overload
|
| 324 |
+
def convolve(
|
| 325 |
+
a: _ArrayLikeUInt_co,
|
| 326 |
+
v: _ArrayLikeUInt_co,
|
| 327 |
+
mode: _CorrelateMode = ...,
|
| 328 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
| 329 |
+
@overload
|
| 330 |
+
def convolve(
|
| 331 |
+
a: _ArrayLikeInt_co,
|
| 332 |
+
v: _ArrayLikeInt_co,
|
| 333 |
+
mode: _CorrelateMode = ...,
|
| 334 |
+
) -> NDArray[signedinteger[Any]]: ...
|
| 335 |
+
@overload
|
| 336 |
+
def convolve(
|
| 337 |
+
a: _ArrayLikeFloat_co,
|
| 338 |
+
v: _ArrayLikeFloat_co,
|
| 339 |
+
mode: _CorrelateMode = ...,
|
| 340 |
+
) -> NDArray[floating[Any]]: ...
|
| 341 |
+
@overload
|
| 342 |
+
def convolve(
|
| 343 |
+
a: _ArrayLikeComplex_co,
|
| 344 |
+
v: _ArrayLikeComplex_co,
|
| 345 |
+
mode: _CorrelateMode = ...,
|
| 346 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 347 |
+
@overload
|
| 348 |
+
def convolve(
|
| 349 |
+
a: _ArrayLikeTD64_co,
|
| 350 |
+
v: _ArrayLikeTD64_co,
|
| 351 |
+
mode: _CorrelateMode = ...,
|
| 352 |
+
) -> NDArray[timedelta64]: ...
|
| 353 |
+
@overload
|
| 354 |
+
def convolve(
|
| 355 |
+
a: _ArrayLikeObject_co,
|
| 356 |
+
v: _ArrayLikeObject_co,
|
| 357 |
+
mode: _CorrelateMode = ...,
|
| 358 |
+
) -> NDArray[object_]: ...
|
| 359 |
+
|
| 360 |
+
@overload
|
| 361 |
+
def outer(
|
| 362 |
+
a: _ArrayLikeUnknown,
|
| 363 |
+
b: _ArrayLikeUnknown,
|
| 364 |
+
out: None = ...,
|
| 365 |
+
) -> NDArray[Any]: ...
|
| 366 |
+
@overload
|
| 367 |
+
def outer(
|
| 368 |
+
a: _ArrayLikeBool_co,
|
| 369 |
+
b: _ArrayLikeBool_co,
|
| 370 |
+
out: None = ...,
|
| 371 |
+
) -> NDArray[bool_]: ...
|
| 372 |
+
@overload
|
| 373 |
+
def outer(
|
| 374 |
+
a: _ArrayLikeUInt_co,
|
| 375 |
+
b: _ArrayLikeUInt_co,
|
| 376 |
+
out: None = ...,
|
| 377 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
| 378 |
+
@overload
|
| 379 |
+
def outer(
|
| 380 |
+
a: _ArrayLikeInt_co,
|
| 381 |
+
b: _ArrayLikeInt_co,
|
| 382 |
+
out: None = ...,
|
| 383 |
+
) -> NDArray[signedinteger[Any]]: ...
|
| 384 |
+
@overload
|
| 385 |
+
def outer(
|
| 386 |
+
a: _ArrayLikeFloat_co,
|
| 387 |
+
b: _ArrayLikeFloat_co,
|
| 388 |
+
out: None = ...,
|
| 389 |
+
) -> NDArray[floating[Any]]: ...
|
| 390 |
+
@overload
|
| 391 |
+
def outer(
|
| 392 |
+
a: _ArrayLikeComplex_co,
|
| 393 |
+
b: _ArrayLikeComplex_co,
|
| 394 |
+
out: None = ...,
|
| 395 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 396 |
+
@overload
|
| 397 |
+
def outer(
|
| 398 |
+
a: _ArrayLikeTD64_co,
|
| 399 |
+
b: _ArrayLikeTD64_co,
|
| 400 |
+
out: None = ...,
|
| 401 |
+
) -> NDArray[timedelta64]: ...
|
| 402 |
+
@overload
|
| 403 |
+
def outer(
|
| 404 |
+
a: _ArrayLikeObject_co,
|
| 405 |
+
b: _ArrayLikeObject_co,
|
| 406 |
+
out: None = ...,
|
| 407 |
+
) -> NDArray[object_]: ...
|
| 408 |
+
@overload
|
| 409 |
+
def outer(
|
| 410 |
+
a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
|
| 411 |
+
b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
|
| 412 |
+
out: _ArrayType,
|
| 413 |
+
) -> _ArrayType: ...
|
| 414 |
+
|
| 415 |
+
@overload
|
| 416 |
+
def tensordot(
|
| 417 |
+
a: _ArrayLikeUnknown,
|
| 418 |
+
b: _ArrayLikeUnknown,
|
| 419 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 420 |
+
) -> NDArray[Any]: ...
|
| 421 |
+
@overload
|
| 422 |
+
def tensordot(
|
| 423 |
+
a: _ArrayLikeBool_co,
|
| 424 |
+
b: _ArrayLikeBool_co,
|
| 425 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 426 |
+
) -> NDArray[bool_]: ...
|
| 427 |
+
@overload
|
| 428 |
+
def tensordot(
|
| 429 |
+
a: _ArrayLikeUInt_co,
|
| 430 |
+
b: _ArrayLikeUInt_co,
|
| 431 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 432 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
| 433 |
+
@overload
|
| 434 |
+
def tensordot(
|
| 435 |
+
a: _ArrayLikeInt_co,
|
| 436 |
+
b: _ArrayLikeInt_co,
|
| 437 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 438 |
+
) -> NDArray[signedinteger[Any]]: ...
|
| 439 |
+
@overload
|
| 440 |
+
def tensordot(
|
| 441 |
+
a: _ArrayLikeFloat_co,
|
| 442 |
+
b: _ArrayLikeFloat_co,
|
| 443 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 444 |
+
) -> NDArray[floating[Any]]: ...
|
| 445 |
+
@overload
|
| 446 |
+
def tensordot(
|
| 447 |
+
a: _ArrayLikeComplex_co,
|
| 448 |
+
b: _ArrayLikeComplex_co,
|
| 449 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 450 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 451 |
+
@overload
|
| 452 |
+
def tensordot(
|
| 453 |
+
a: _ArrayLikeTD64_co,
|
| 454 |
+
b: _ArrayLikeTD64_co,
|
| 455 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 456 |
+
) -> NDArray[timedelta64]: ...
|
| 457 |
+
@overload
|
| 458 |
+
def tensordot(
|
| 459 |
+
a: _ArrayLikeObject_co,
|
| 460 |
+
b: _ArrayLikeObject_co,
|
| 461 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 462 |
+
) -> NDArray[object_]: ...
|
| 463 |
+
|
| 464 |
+
@overload
|
| 465 |
+
def roll(
|
| 466 |
+
a: _ArrayLike[_SCT],
|
| 467 |
+
shift: _ShapeLike,
|
| 468 |
+
axis: None | _ShapeLike = ...,
|
| 469 |
+
) -> NDArray[_SCT]: ...
|
| 470 |
+
@overload
|
| 471 |
+
def roll(
|
| 472 |
+
a: ArrayLike,
|
| 473 |
+
shift: _ShapeLike,
|
| 474 |
+
axis: None | _ShapeLike = ...,
|
| 475 |
+
) -> NDArray[Any]: ...
|
| 476 |
+
|
| 477 |
+
def rollaxis(
|
| 478 |
+
a: NDArray[_SCT],
|
| 479 |
+
axis: int,
|
| 480 |
+
start: int = ...,
|
| 481 |
+
) -> NDArray[_SCT]: ...
|
| 482 |
+
|
| 483 |
+
def moveaxis(
|
| 484 |
+
a: NDArray[_SCT],
|
| 485 |
+
source: _ShapeLike,
|
| 486 |
+
destination: _ShapeLike,
|
| 487 |
+
) -> NDArray[_SCT]: ...
|
| 488 |
+
|
| 489 |
+
@overload
|
| 490 |
+
def cross(
|
| 491 |
+
a: _ArrayLikeUnknown,
|
| 492 |
+
b: _ArrayLikeUnknown,
|
| 493 |
+
axisa: int = ...,
|
| 494 |
+
axisb: int = ...,
|
| 495 |
+
axisc: int = ...,
|
| 496 |
+
axis: None | int = ...,
|
| 497 |
+
) -> NDArray[Any]: ...
|
| 498 |
+
@overload
|
| 499 |
+
def cross(
|
| 500 |
+
a: _ArrayLikeBool_co,
|
| 501 |
+
b: _ArrayLikeBool_co,
|
| 502 |
+
axisa: int = ...,
|
| 503 |
+
axisb: int = ...,
|
| 504 |
+
axisc: int = ...,
|
| 505 |
+
axis: None | int = ...,
|
| 506 |
+
) -> NoReturn: ...
|
| 507 |
+
@overload
|
| 508 |
+
def cross(
|
| 509 |
+
a: _ArrayLikeUInt_co,
|
| 510 |
+
b: _ArrayLikeUInt_co,
|
| 511 |
+
axisa: int = ...,
|
| 512 |
+
axisb: int = ...,
|
| 513 |
+
axisc: int = ...,
|
| 514 |
+
axis: None | int = ...,
|
| 515 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
| 516 |
+
@overload
|
| 517 |
+
def cross(
|
| 518 |
+
a: _ArrayLikeInt_co,
|
| 519 |
+
b: _ArrayLikeInt_co,
|
| 520 |
+
axisa: int = ...,
|
| 521 |
+
axisb: int = ...,
|
| 522 |
+
axisc: int = ...,
|
| 523 |
+
axis: None | int = ...,
|
| 524 |
+
) -> NDArray[signedinteger[Any]]: ...
|
| 525 |
+
@overload
|
| 526 |
+
def cross(
|
| 527 |
+
a: _ArrayLikeFloat_co,
|
| 528 |
+
b: _ArrayLikeFloat_co,
|
| 529 |
+
axisa: int = ...,
|
| 530 |
+
axisb: int = ...,
|
| 531 |
+
axisc: int = ...,
|
| 532 |
+
axis: None | int = ...,
|
| 533 |
+
) -> NDArray[floating[Any]]: ...
|
| 534 |
+
@overload
|
| 535 |
+
def cross(
|
| 536 |
+
a: _ArrayLikeComplex_co,
|
| 537 |
+
b: _ArrayLikeComplex_co,
|
| 538 |
+
axisa: int = ...,
|
| 539 |
+
axisb: int = ...,
|
| 540 |
+
axisc: int = ...,
|
| 541 |
+
axis: None | int = ...,
|
| 542 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 543 |
+
@overload
|
| 544 |
+
def cross(
|
| 545 |
+
a: _ArrayLikeObject_co,
|
| 546 |
+
b: _ArrayLikeObject_co,
|
| 547 |
+
axisa: int = ...,
|
| 548 |
+
axisb: int = ...,
|
| 549 |
+
axisc: int = ...,
|
| 550 |
+
axis: None | int = ...,
|
| 551 |
+
) -> NDArray[object_]: ...
|
| 552 |
+
|
| 553 |
+
@overload
|
| 554 |
+
def indices(
|
| 555 |
+
dimensions: Sequence[int],
|
| 556 |
+
dtype: type[int] = ...,
|
| 557 |
+
sparse: Literal[False] = ...,
|
| 558 |
+
) -> NDArray[int_]: ...
|
| 559 |
+
@overload
|
| 560 |
+
def indices(
|
| 561 |
+
dimensions: Sequence[int],
|
| 562 |
+
dtype: type[int] = ...,
|
| 563 |
+
sparse: Literal[True] = ...,
|
| 564 |
+
) -> tuple[NDArray[int_], ...]: ...
|
| 565 |
+
@overload
|
| 566 |
+
def indices(
|
| 567 |
+
dimensions: Sequence[int],
|
| 568 |
+
dtype: _DTypeLike[_SCT],
|
| 569 |
+
sparse: Literal[False] = ...,
|
| 570 |
+
) -> NDArray[_SCT]: ...
|
| 571 |
+
@overload
|
| 572 |
+
def indices(
|
| 573 |
+
dimensions: Sequence[int],
|
| 574 |
+
dtype: _DTypeLike[_SCT],
|
| 575 |
+
sparse: Literal[True],
|
| 576 |
+
) -> tuple[NDArray[_SCT], ...]: ...
|
| 577 |
+
@overload
|
| 578 |
+
def indices(
|
| 579 |
+
dimensions: Sequence[int],
|
| 580 |
+
dtype: DTypeLike,
|
| 581 |
+
sparse: Literal[False] = ...,
|
| 582 |
+
) -> NDArray[Any]: ...
|
| 583 |
+
@overload
|
| 584 |
+
def indices(
|
| 585 |
+
dimensions: Sequence[int],
|
| 586 |
+
dtype: DTypeLike,
|
| 587 |
+
sparse: Literal[True],
|
| 588 |
+
) -> tuple[NDArray[Any], ...]: ...
|
| 589 |
+
|
| 590 |
+
def fromfunction(
|
| 591 |
+
function: Callable[..., _T],
|
| 592 |
+
shape: Sequence[int],
|
| 593 |
+
*,
|
| 594 |
+
dtype: DTypeLike = ...,
|
| 595 |
+
like: _SupportsArrayFunc = ...,
|
| 596 |
+
**kwargs: Any,
|
| 597 |
+
) -> _T: ...
|
| 598 |
+
|
| 599 |
+
def isscalar(element: object) -> TypeGuard[
|
| 600 |
+
generic | bool | int | float | complex | str | bytes | memoryview
|
| 601 |
+
]: ...
|
| 602 |
+
|
| 603 |
+
def binary_repr(num: SupportsIndex, width: None | int = ...) -> str: ...
|
| 604 |
+
|
| 605 |
+
def base_repr(
|
| 606 |
+
number: SupportsAbs[float],
|
| 607 |
+
base: float = ...,
|
| 608 |
+
padding: SupportsIndex = ...,
|
| 609 |
+
) -> str: ...
|
| 610 |
+
|
| 611 |
+
@overload
|
| 612 |
+
def identity(
|
| 613 |
+
n: int,
|
| 614 |
+
dtype: None = ...,
|
| 615 |
+
*,
|
| 616 |
+
like: _SupportsArrayFunc = ...,
|
| 617 |
+
) -> NDArray[float64]: ...
|
| 618 |
+
@overload
|
| 619 |
+
def identity(
|
| 620 |
+
n: int,
|
| 621 |
+
dtype: _DTypeLike[_SCT],
|
| 622 |
+
*,
|
| 623 |
+
like: _SupportsArrayFunc = ...,
|
| 624 |
+
) -> NDArray[_SCT]: ...
|
| 625 |
+
@overload
|
| 626 |
+
def identity(
|
| 627 |
+
n: int,
|
| 628 |
+
dtype: DTypeLike,
|
| 629 |
+
*,
|
| 630 |
+
like: _SupportsArrayFunc = ...,
|
| 631 |
+
) -> NDArray[Any]: ...
|
| 632 |
+
|
| 633 |
+
def allclose(
|
| 634 |
+
a: ArrayLike,
|
| 635 |
+
b: ArrayLike,
|
| 636 |
+
rtol: float = ...,
|
| 637 |
+
atol: float = ...,
|
| 638 |
+
equal_nan: bool = ...,
|
| 639 |
+
) -> bool: ...
|
| 640 |
+
|
| 641 |
+
@overload
|
| 642 |
+
def isclose(
|
| 643 |
+
a: _ScalarLike_co,
|
| 644 |
+
b: _ScalarLike_co,
|
| 645 |
+
rtol: float = ...,
|
| 646 |
+
atol: float = ...,
|
| 647 |
+
equal_nan: bool = ...,
|
| 648 |
+
) -> bool_: ...
|
| 649 |
+
@overload
|
| 650 |
+
def isclose(
|
| 651 |
+
a: ArrayLike,
|
| 652 |
+
b: ArrayLike,
|
| 653 |
+
rtol: float = ...,
|
| 654 |
+
atol: float = ...,
|
| 655 |
+
equal_nan: bool = ...,
|
| 656 |
+
) -> NDArray[bool_]: ...
|
| 657 |
+
|
| 658 |
+
def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ...
|
| 659 |
+
|
| 660 |
+
def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ...
|
pllava/lib/python3.10/site-packages/numpy/core/numerictypes.pyi
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import types
|
| 3 |
+
from collections.abc import Iterable
|
| 4 |
+
from typing import (
|
| 5 |
+
Literal as L,
|
| 6 |
+
Union,
|
| 7 |
+
overload,
|
| 8 |
+
Any,
|
| 9 |
+
TypeVar,
|
| 10 |
+
Protocol,
|
| 11 |
+
TypedDict,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
from numpy import (
|
| 15 |
+
ndarray,
|
| 16 |
+
dtype,
|
| 17 |
+
generic,
|
| 18 |
+
bool_,
|
| 19 |
+
ubyte,
|
| 20 |
+
ushort,
|
| 21 |
+
uintc,
|
| 22 |
+
uint,
|
| 23 |
+
ulonglong,
|
| 24 |
+
byte,
|
| 25 |
+
short,
|
| 26 |
+
intc,
|
| 27 |
+
int_,
|
| 28 |
+
longlong,
|
| 29 |
+
half,
|
| 30 |
+
single,
|
| 31 |
+
double,
|
| 32 |
+
longdouble,
|
| 33 |
+
csingle,
|
| 34 |
+
cdouble,
|
| 35 |
+
clongdouble,
|
| 36 |
+
datetime64,
|
| 37 |
+
timedelta64,
|
| 38 |
+
object_,
|
| 39 |
+
str_,
|
| 40 |
+
bytes_,
|
| 41 |
+
void,
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
from numpy.core._type_aliases import (
|
| 45 |
+
sctypeDict as sctypeDict,
|
| 46 |
+
sctypes as sctypes,
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
from numpy._typing import DTypeLike, ArrayLike, _DTypeLike
|
| 50 |
+
|
| 51 |
+
_T = TypeVar("_T")
|
| 52 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 53 |
+
|
| 54 |
+
class _CastFunc(Protocol):
|
| 55 |
+
def __call__(
|
| 56 |
+
self, x: ArrayLike, k: DTypeLike = ...
|
| 57 |
+
) -> ndarray[Any, dtype[Any]]: ...
|
| 58 |
+
|
| 59 |
+
class _TypeCodes(TypedDict):
|
| 60 |
+
Character: L['c']
|
| 61 |
+
Integer: L['bhilqp']
|
| 62 |
+
UnsignedInteger: L['BHILQP']
|
| 63 |
+
Float: L['efdg']
|
| 64 |
+
Complex: L['FDG']
|
| 65 |
+
AllInteger: L['bBhHiIlLqQpP']
|
| 66 |
+
AllFloat: L['efdgFDG']
|
| 67 |
+
Datetime: L['Mm']
|
| 68 |
+
All: L['?bhilqpBHILQPefdgFDGSUVOMm']
|
| 69 |
+
|
| 70 |
+
class _typedict(dict[type[generic], _T]):
|
| 71 |
+
def __getitem__(self, key: DTypeLike) -> _T: ...
|
| 72 |
+
|
| 73 |
+
if sys.version_info >= (3, 10):
|
| 74 |
+
_TypeTuple = Union[
|
| 75 |
+
type[Any],
|
| 76 |
+
types.UnionType,
|
| 77 |
+
tuple[Union[type[Any], types.UnionType, tuple[Any, ...]], ...],
|
| 78 |
+
]
|
| 79 |
+
else:
|
| 80 |
+
_TypeTuple = Union[
|
| 81 |
+
type[Any],
|
| 82 |
+
tuple[Union[type[Any], tuple[Any, ...]], ...],
|
| 83 |
+
]
|
| 84 |
+
|
| 85 |
+
__all__: list[str]
|
| 86 |
+
|
| 87 |
+
@overload
|
| 88 |
+
def maximum_sctype(t: _DTypeLike[_SCT]) -> type[_SCT]: ...
|
| 89 |
+
@overload
|
| 90 |
+
def maximum_sctype(t: DTypeLike) -> type[Any]: ...
|
| 91 |
+
|
| 92 |
+
@overload
|
| 93 |
+
def issctype(rep: dtype[Any] | type[Any]) -> bool: ...
|
| 94 |
+
@overload
|
| 95 |
+
def issctype(rep: object) -> L[False]: ...
|
| 96 |
+
|
| 97 |
+
@overload
|
| 98 |
+
def obj2sctype(rep: _DTypeLike[_SCT], default: None = ...) -> None | type[_SCT]: ...
|
| 99 |
+
@overload
|
| 100 |
+
def obj2sctype(rep: _DTypeLike[_SCT], default: _T) -> _T | type[_SCT]: ...
|
| 101 |
+
@overload
|
| 102 |
+
def obj2sctype(rep: DTypeLike, default: None = ...) -> None | type[Any]: ...
|
| 103 |
+
@overload
|
| 104 |
+
def obj2sctype(rep: DTypeLike, default: _T) -> _T | type[Any]: ...
|
| 105 |
+
@overload
|
| 106 |
+
def obj2sctype(rep: object, default: None = ...) -> None: ...
|
| 107 |
+
@overload
|
| 108 |
+
def obj2sctype(rep: object, default: _T) -> _T: ...
|
| 109 |
+
|
| 110 |
+
@overload
|
| 111 |
+
def issubclass_(arg1: type[Any], arg2: _TypeTuple) -> bool: ...
|
| 112 |
+
@overload
|
| 113 |
+
def issubclass_(arg1: object, arg2: object) -> L[False]: ...
|
| 114 |
+
|
| 115 |
+
def issubsctype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ...
|
| 116 |
+
|
| 117 |
+
def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ...
|
| 118 |
+
|
| 119 |
+
def sctype2char(sctype: DTypeLike) -> str: ...
|
| 120 |
+
|
| 121 |
+
cast: _typedict[_CastFunc]
|
| 122 |
+
nbytes: _typedict[int]
|
| 123 |
+
typecodes: _TypeCodes
|
| 124 |
+
ScalarType: tuple[
|
| 125 |
+
type[int],
|
| 126 |
+
type[float],
|
| 127 |
+
type[complex],
|
| 128 |
+
type[bool],
|
| 129 |
+
type[bytes],
|
| 130 |
+
type[str],
|
| 131 |
+
type[memoryview],
|
| 132 |
+
type[bool_],
|
| 133 |
+
type[csingle],
|
| 134 |
+
type[cdouble],
|
| 135 |
+
type[clongdouble],
|
| 136 |
+
type[half],
|
| 137 |
+
type[single],
|
| 138 |
+
type[double],
|
| 139 |
+
type[longdouble],
|
| 140 |
+
type[byte],
|
| 141 |
+
type[short],
|
| 142 |
+
type[intc],
|
| 143 |
+
type[int_],
|
| 144 |
+
type[longlong],
|
| 145 |
+
type[timedelta64],
|
| 146 |
+
type[datetime64],
|
| 147 |
+
type[object_],
|
| 148 |
+
type[bytes_],
|
| 149 |
+
type[str_],
|
| 150 |
+
type[ubyte],
|
| 151 |
+
type[ushort],
|
| 152 |
+
type[uintc],
|
| 153 |
+
type[uint],
|
| 154 |
+
type[ulonglong],
|
| 155 |
+
type[void],
|
| 156 |
+
]
|
pllava/lib/python3.10/site-packages/numpy/core/records.pyi
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from collections.abc import Sequence, Iterable
|
| 3 |
+
from typing import (
|
| 4 |
+
Any,
|
| 5 |
+
TypeVar,
|
| 6 |
+
overload,
|
| 7 |
+
Protocol,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
from numpy import (
|
| 11 |
+
format_parser as format_parser,
|
| 12 |
+
record as record,
|
| 13 |
+
recarray as recarray,
|
| 14 |
+
dtype,
|
| 15 |
+
generic,
|
| 16 |
+
void,
|
| 17 |
+
_ByteOrder,
|
| 18 |
+
_SupportsBuffer,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
from numpy._typing import (
|
| 22 |
+
ArrayLike,
|
| 23 |
+
DTypeLike,
|
| 24 |
+
NDArray,
|
| 25 |
+
_ShapeLike,
|
| 26 |
+
_ArrayLikeVoid_co,
|
| 27 |
+
_NestedSequence,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 31 |
+
|
| 32 |
+
_RecArray = recarray[Any, dtype[_SCT]]
|
| 33 |
+
|
| 34 |
+
class _SupportsReadInto(Protocol):
|
| 35 |
+
def seek(self, offset: int, whence: int, /) -> object: ...
|
| 36 |
+
def tell(self, /) -> int: ...
|
| 37 |
+
def readinto(self, buffer: memoryview, /) -> int: ...
|
| 38 |
+
|
| 39 |
+
__all__: list[str]
|
| 40 |
+
|
| 41 |
+
@overload
|
| 42 |
+
def fromarrays(
|
| 43 |
+
arrayList: Iterable[ArrayLike],
|
| 44 |
+
dtype: DTypeLike = ...,
|
| 45 |
+
shape: None | _ShapeLike = ...,
|
| 46 |
+
formats: None = ...,
|
| 47 |
+
names: None = ...,
|
| 48 |
+
titles: None = ...,
|
| 49 |
+
aligned: bool = ...,
|
| 50 |
+
byteorder: None = ...,
|
| 51 |
+
) -> _RecArray[Any]: ...
|
| 52 |
+
@overload
|
| 53 |
+
def fromarrays(
|
| 54 |
+
arrayList: Iterable[ArrayLike],
|
| 55 |
+
dtype: None = ...,
|
| 56 |
+
shape: None | _ShapeLike = ...,
|
| 57 |
+
*,
|
| 58 |
+
formats: DTypeLike,
|
| 59 |
+
names: None | str | Sequence[str] = ...,
|
| 60 |
+
titles: None | str | Sequence[str] = ...,
|
| 61 |
+
aligned: bool = ...,
|
| 62 |
+
byteorder: None | _ByteOrder = ...,
|
| 63 |
+
) -> _RecArray[record]: ...
|
| 64 |
+
|
| 65 |
+
@overload
|
| 66 |
+
def fromrecords(
|
| 67 |
+
recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]],
|
| 68 |
+
dtype: DTypeLike = ...,
|
| 69 |
+
shape: None | _ShapeLike = ...,
|
| 70 |
+
formats: None = ...,
|
| 71 |
+
names: None = ...,
|
| 72 |
+
titles: None = ...,
|
| 73 |
+
aligned: bool = ...,
|
| 74 |
+
byteorder: None = ...,
|
| 75 |
+
) -> _RecArray[record]: ...
|
| 76 |
+
@overload
|
| 77 |
+
def fromrecords(
|
| 78 |
+
recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]],
|
| 79 |
+
dtype: None = ...,
|
| 80 |
+
shape: None | _ShapeLike = ...,
|
| 81 |
+
*,
|
| 82 |
+
formats: DTypeLike,
|
| 83 |
+
names: None | str | Sequence[str] = ...,
|
| 84 |
+
titles: None | str | Sequence[str] = ...,
|
| 85 |
+
aligned: bool = ...,
|
| 86 |
+
byteorder: None | _ByteOrder = ...,
|
| 87 |
+
) -> _RecArray[record]: ...
|
| 88 |
+
|
| 89 |
+
@overload
|
| 90 |
+
def fromstring(
|
| 91 |
+
datastring: _SupportsBuffer,
|
| 92 |
+
dtype: DTypeLike,
|
| 93 |
+
shape: None | _ShapeLike = ...,
|
| 94 |
+
offset: int = ...,
|
| 95 |
+
formats: None = ...,
|
| 96 |
+
names: None = ...,
|
| 97 |
+
titles: None = ...,
|
| 98 |
+
aligned: bool = ...,
|
| 99 |
+
byteorder: None = ...,
|
| 100 |
+
) -> _RecArray[record]: ...
|
| 101 |
+
@overload
|
| 102 |
+
def fromstring(
|
| 103 |
+
datastring: _SupportsBuffer,
|
| 104 |
+
dtype: None = ...,
|
| 105 |
+
shape: None | _ShapeLike = ...,
|
| 106 |
+
offset: int = ...,
|
| 107 |
+
*,
|
| 108 |
+
formats: DTypeLike,
|
| 109 |
+
names: None | str | Sequence[str] = ...,
|
| 110 |
+
titles: None | str | Sequence[str] = ...,
|
| 111 |
+
aligned: bool = ...,
|
| 112 |
+
byteorder: None | _ByteOrder = ...,
|
| 113 |
+
) -> _RecArray[record]: ...
|
| 114 |
+
|
| 115 |
+
@overload
|
| 116 |
+
def fromfile(
|
| 117 |
+
fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto,
|
| 118 |
+
dtype: DTypeLike,
|
| 119 |
+
shape: None | _ShapeLike = ...,
|
| 120 |
+
offset: int = ...,
|
| 121 |
+
formats: None = ...,
|
| 122 |
+
names: None = ...,
|
| 123 |
+
titles: None = ...,
|
| 124 |
+
aligned: bool = ...,
|
| 125 |
+
byteorder: None = ...,
|
| 126 |
+
) -> _RecArray[Any]: ...
|
| 127 |
+
@overload
|
| 128 |
+
def fromfile(
|
| 129 |
+
fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto,
|
| 130 |
+
dtype: None = ...,
|
| 131 |
+
shape: None | _ShapeLike = ...,
|
| 132 |
+
offset: int = ...,
|
| 133 |
+
*,
|
| 134 |
+
formats: DTypeLike,
|
| 135 |
+
names: None | str | Sequence[str] = ...,
|
| 136 |
+
titles: None | str | Sequence[str] = ...,
|
| 137 |
+
aligned: bool = ...,
|
| 138 |
+
byteorder: None | _ByteOrder = ...,
|
| 139 |
+
) -> _RecArray[record]: ...
|
| 140 |
+
|
| 141 |
+
@overload
|
| 142 |
+
def array(
|
| 143 |
+
obj: _SCT | NDArray[_SCT],
|
| 144 |
+
dtype: None = ...,
|
| 145 |
+
shape: None | _ShapeLike = ...,
|
| 146 |
+
offset: int = ...,
|
| 147 |
+
formats: None = ...,
|
| 148 |
+
names: None = ...,
|
| 149 |
+
titles: None = ...,
|
| 150 |
+
aligned: bool = ...,
|
| 151 |
+
byteorder: None = ...,
|
| 152 |
+
copy: bool = ...,
|
| 153 |
+
) -> _RecArray[_SCT]: ...
|
| 154 |
+
@overload
|
| 155 |
+
def array(
|
| 156 |
+
obj: ArrayLike,
|
| 157 |
+
dtype: DTypeLike,
|
| 158 |
+
shape: None | _ShapeLike = ...,
|
| 159 |
+
offset: int = ...,
|
| 160 |
+
formats: None = ...,
|
| 161 |
+
names: None = ...,
|
| 162 |
+
titles: None = ...,
|
| 163 |
+
aligned: bool = ...,
|
| 164 |
+
byteorder: None = ...,
|
| 165 |
+
copy: bool = ...,
|
| 166 |
+
) -> _RecArray[Any]: ...
|
| 167 |
+
@overload
|
| 168 |
+
def array(
|
| 169 |
+
obj: ArrayLike,
|
| 170 |
+
dtype: None = ...,
|
| 171 |
+
shape: None | _ShapeLike = ...,
|
| 172 |
+
offset: int = ...,
|
| 173 |
+
*,
|
| 174 |
+
formats: DTypeLike,
|
| 175 |
+
names: None | str | Sequence[str] = ...,
|
| 176 |
+
titles: None | str | Sequence[str] = ...,
|
| 177 |
+
aligned: bool = ...,
|
| 178 |
+
byteorder: None | _ByteOrder = ...,
|
| 179 |
+
copy: bool = ...,
|
| 180 |
+
) -> _RecArray[record]: ...
|
| 181 |
+
@overload
|
| 182 |
+
def array(
|
| 183 |
+
obj: None,
|
| 184 |
+
dtype: DTypeLike,
|
| 185 |
+
shape: _ShapeLike,
|
| 186 |
+
offset: int = ...,
|
| 187 |
+
formats: None = ...,
|
| 188 |
+
names: None = ...,
|
| 189 |
+
titles: None = ...,
|
| 190 |
+
aligned: bool = ...,
|
| 191 |
+
byteorder: None = ...,
|
| 192 |
+
copy: bool = ...,
|
| 193 |
+
) -> _RecArray[Any]: ...
|
| 194 |
+
@overload
|
| 195 |
+
def array(
|
| 196 |
+
obj: None,
|
| 197 |
+
dtype: None = ...,
|
| 198 |
+
*,
|
| 199 |
+
shape: _ShapeLike,
|
| 200 |
+
offset: int = ...,
|
| 201 |
+
formats: DTypeLike,
|
| 202 |
+
names: None | str | Sequence[str] = ...,
|
| 203 |
+
titles: None | str | Sequence[str] = ...,
|
| 204 |
+
aligned: bool = ...,
|
| 205 |
+
byteorder: None | _ByteOrder = ...,
|
| 206 |
+
copy: bool = ...,
|
| 207 |
+
) -> _RecArray[record]: ...
|
| 208 |
+
@overload
|
| 209 |
+
def array(
|
| 210 |
+
obj: _SupportsReadInto,
|
| 211 |
+
dtype: DTypeLike,
|
| 212 |
+
shape: None | _ShapeLike = ...,
|
| 213 |
+
offset: int = ...,
|
| 214 |
+
formats: None = ...,
|
| 215 |
+
names: None = ...,
|
| 216 |
+
titles: None = ...,
|
| 217 |
+
aligned: bool = ...,
|
| 218 |
+
byteorder: None = ...,
|
| 219 |
+
copy: bool = ...,
|
| 220 |
+
) -> _RecArray[Any]: ...
|
| 221 |
+
@overload
|
| 222 |
+
def array(
|
| 223 |
+
obj: _SupportsReadInto,
|
| 224 |
+
dtype: None = ...,
|
| 225 |
+
shape: None | _ShapeLike = ...,
|
| 226 |
+
offset: int = ...,
|
| 227 |
+
*,
|
| 228 |
+
formats: DTypeLike,
|
| 229 |
+
names: None | str | Sequence[str] = ...,
|
| 230 |
+
titles: None | str | Sequence[str] = ...,
|
| 231 |
+
aligned: bool = ...,
|
| 232 |
+
byteorder: None | _ByteOrder = ...,
|
| 233 |
+
copy: bool = ...,
|
| 234 |
+
) -> _RecArray[record]: ...
|
pllava/lib/python3.10/site-packages/numpy/core/shape_base.pyi
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Sequence
|
| 2 |
+
from typing import TypeVar, overload, Any, SupportsIndex
|
| 3 |
+
|
| 4 |
+
from numpy import generic, _CastingKind
|
| 5 |
+
from numpy._typing import (
|
| 6 |
+
NDArray,
|
| 7 |
+
ArrayLike,
|
| 8 |
+
DTypeLike,
|
| 9 |
+
_ArrayLike,
|
| 10 |
+
_DTypeLike,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 14 |
+
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
|
| 15 |
+
|
| 16 |
+
__all__: list[str]
|
| 17 |
+
|
| 18 |
+
@overload
|
| 19 |
+
def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
|
| 20 |
+
@overload
|
| 21 |
+
def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ...
|
| 22 |
+
@overload
|
| 23 |
+
def atleast_1d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
|
| 24 |
+
|
| 25 |
+
@overload
|
| 26 |
+
def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
|
| 27 |
+
@overload
|
| 28 |
+
def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ...
|
| 29 |
+
@overload
|
| 30 |
+
def atleast_2d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
|
| 31 |
+
|
| 32 |
+
@overload
|
| 33 |
+
def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
|
| 34 |
+
@overload
|
| 35 |
+
def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ...
|
| 36 |
+
@overload
|
| 37 |
+
def atleast_3d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
|
| 38 |
+
|
| 39 |
+
@overload
|
| 40 |
+
def vstack(
|
| 41 |
+
tup: Sequence[_ArrayLike[_SCT]],
|
| 42 |
+
*,
|
| 43 |
+
dtype: None = ...,
|
| 44 |
+
casting: _CastingKind = ...
|
| 45 |
+
) -> NDArray[_SCT]: ...
|
| 46 |
+
@overload
|
| 47 |
+
def vstack(
|
| 48 |
+
tup: Sequence[ArrayLike],
|
| 49 |
+
*,
|
| 50 |
+
dtype: _DTypeLike[_SCT],
|
| 51 |
+
casting: _CastingKind = ...
|
| 52 |
+
) -> NDArray[_SCT]: ...
|
| 53 |
+
@overload
|
| 54 |
+
def vstack(
|
| 55 |
+
tup: Sequence[ArrayLike],
|
| 56 |
+
*,
|
| 57 |
+
dtype: DTypeLike = ...,
|
| 58 |
+
casting: _CastingKind = ...
|
| 59 |
+
) -> NDArray[Any]: ...
|
| 60 |
+
|
| 61 |
+
@overload
|
| 62 |
+
def hstack(
|
| 63 |
+
tup: Sequence[_ArrayLike[_SCT]],
|
| 64 |
+
*,
|
| 65 |
+
dtype: None = ...,
|
| 66 |
+
casting: _CastingKind = ...
|
| 67 |
+
) -> NDArray[_SCT]: ...
|
| 68 |
+
@overload
|
| 69 |
+
def hstack(
|
| 70 |
+
tup: Sequence[ArrayLike],
|
| 71 |
+
*,
|
| 72 |
+
dtype: _DTypeLike[_SCT],
|
| 73 |
+
casting: _CastingKind = ...
|
| 74 |
+
) -> NDArray[_SCT]: ...
|
| 75 |
+
@overload
|
| 76 |
+
def hstack(
|
| 77 |
+
tup: Sequence[ArrayLike],
|
| 78 |
+
*,
|
| 79 |
+
dtype: DTypeLike = ...,
|
| 80 |
+
casting: _CastingKind = ...
|
| 81 |
+
) -> NDArray[Any]: ...
|
| 82 |
+
|
| 83 |
+
@overload
|
| 84 |
+
def stack(
|
| 85 |
+
arrays: Sequence[_ArrayLike[_SCT]],
|
| 86 |
+
axis: SupportsIndex = ...,
|
| 87 |
+
out: None = ...,
|
| 88 |
+
*,
|
| 89 |
+
dtype: None = ...,
|
| 90 |
+
casting: _CastingKind = ...
|
| 91 |
+
) -> NDArray[_SCT]: ...
|
| 92 |
+
@overload
|
| 93 |
+
def stack(
|
| 94 |
+
arrays: Sequence[ArrayLike],
|
| 95 |
+
axis: SupportsIndex = ...,
|
| 96 |
+
out: None = ...,
|
| 97 |
+
*,
|
| 98 |
+
dtype: _DTypeLike[_SCT],
|
| 99 |
+
casting: _CastingKind = ...
|
| 100 |
+
) -> NDArray[_SCT]: ...
|
| 101 |
+
@overload
|
| 102 |
+
def stack(
|
| 103 |
+
arrays: Sequence[ArrayLike],
|
| 104 |
+
axis: SupportsIndex = ...,
|
| 105 |
+
out: None = ...,
|
| 106 |
+
*,
|
| 107 |
+
dtype: DTypeLike = ...,
|
| 108 |
+
casting: _CastingKind = ...
|
| 109 |
+
) -> NDArray[Any]: ...
|
| 110 |
+
@overload
|
| 111 |
+
def stack(
|
| 112 |
+
arrays: Sequence[ArrayLike],
|
| 113 |
+
axis: SupportsIndex = ...,
|
| 114 |
+
out: _ArrayType = ...,
|
| 115 |
+
*,
|
| 116 |
+
dtype: DTypeLike = ...,
|
| 117 |
+
casting: _CastingKind = ...
|
| 118 |
+
) -> _ArrayType: ...
|
| 119 |
+
|
| 120 |
+
@overload
|
| 121 |
+
def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
|
| 122 |
+
@overload
|
| 123 |
+
def block(arrays: ArrayLike) -> NDArray[Any]: ...
|
pllava/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_scalar_ctors.cpython-310.pyc
ADDED
|
Binary file (6.79 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_simd_module.cpython-310.pyc
ADDED
|
Binary file (4.02 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/numpy/core/tests/_locales.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Provide class for testing in French locale
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
import sys
|
| 5 |
+
import locale
|
| 6 |
+
|
| 7 |
+
import pytest
|
| 8 |
+
|
| 9 |
+
__ALL__ = ['CommaDecimalPointLocale']
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def find_comma_decimal_point_locale():
|
| 13 |
+
"""See if platform has a decimal point as comma locale.
|
| 14 |
+
|
| 15 |
+
Find a locale that uses a comma instead of a period as the
|
| 16 |
+
decimal point.
|
| 17 |
+
|
| 18 |
+
Returns
|
| 19 |
+
-------
|
| 20 |
+
old_locale: str
|
| 21 |
+
Locale when the function was called.
|
| 22 |
+
new_locale: {str, None)
|
| 23 |
+
First French locale found, None if none found.
|
| 24 |
+
|
| 25 |
+
"""
|
| 26 |
+
if sys.platform == 'win32':
|
| 27 |
+
locales = ['FRENCH']
|
| 28 |
+
else:
|
| 29 |
+
locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
|
| 30 |
+
|
| 31 |
+
old_locale = locale.getlocale(locale.LC_NUMERIC)
|
| 32 |
+
new_locale = None
|
| 33 |
+
try:
|
| 34 |
+
for loc in locales:
|
| 35 |
+
try:
|
| 36 |
+
locale.setlocale(locale.LC_NUMERIC, loc)
|
| 37 |
+
new_locale = loc
|
| 38 |
+
break
|
| 39 |
+
except locale.Error:
|
| 40 |
+
pass
|
| 41 |
+
finally:
|
| 42 |
+
locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
|
| 43 |
+
return old_locale, new_locale
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class CommaDecimalPointLocale:
|
| 47 |
+
"""Sets LC_NUMERIC to a locale with comma as decimal point.
|
| 48 |
+
|
| 49 |
+
Classes derived from this class have setup and teardown methods that run
|
| 50 |
+
tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
|
| 51 |
+
the decimal point instead of periods ('.'). On exit the locale is restored
|
| 52 |
+
to the initial locale. It also serves as context manager with the same
|
| 53 |
+
effect. If no such locale is available, the test is skipped.
|
| 54 |
+
|
| 55 |
+
.. versionadded:: 1.15.0
|
| 56 |
+
|
| 57 |
+
"""
|
| 58 |
+
(cur_locale, tst_locale) = find_comma_decimal_point_locale()
|
| 59 |
+
|
| 60 |
+
def setup_method(self):
|
| 61 |
+
if self.tst_locale is None:
|
| 62 |
+
pytest.skip("No French locale available")
|
| 63 |
+
locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
|
| 64 |
+
|
| 65 |
+
def teardown_method(self):
|
| 66 |
+
locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
|
| 67 |
+
|
| 68 |
+
def __enter__(self):
|
| 69 |
+
if self.tst_locale is None:
|
| 70 |
+
pytest.skip("No French locale available")
|
| 71 |
+
locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
|
| 72 |
+
|
| 73 |
+
def __exit__(self, type, value, traceback):
|
| 74 |
+
locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
|
pllava/lib/python3.10/site-packages/numpy/core/tests/test_api.py
ADDED
|
@@ -0,0 +1,615 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.core._rational_tests import rational
|
| 5 |
+
import pytest
|
| 6 |
+
from numpy.testing import (
|
| 7 |
+
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
|
| 8 |
+
HAS_REFCOUNT
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def test_array_array():
|
| 13 |
+
tobj = type(object)
|
| 14 |
+
ones11 = np.ones((1, 1), np.float64)
|
| 15 |
+
tndarray = type(ones11)
|
| 16 |
+
# Test is_ndarray
|
| 17 |
+
assert_equal(np.array(ones11, dtype=np.float64), ones11)
|
| 18 |
+
if HAS_REFCOUNT:
|
| 19 |
+
old_refcount = sys.getrefcount(tndarray)
|
| 20 |
+
np.array(ones11)
|
| 21 |
+
assert_equal(old_refcount, sys.getrefcount(tndarray))
|
| 22 |
+
|
| 23 |
+
# test None
|
| 24 |
+
assert_equal(np.array(None, dtype=np.float64),
|
| 25 |
+
np.array(np.nan, dtype=np.float64))
|
| 26 |
+
if HAS_REFCOUNT:
|
| 27 |
+
old_refcount = sys.getrefcount(tobj)
|
| 28 |
+
np.array(None, dtype=np.float64)
|
| 29 |
+
assert_equal(old_refcount, sys.getrefcount(tobj))
|
| 30 |
+
|
| 31 |
+
# test scalar
|
| 32 |
+
assert_equal(np.array(1.0, dtype=np.float64),
|
| 33 |
+
np.ones((), dtype=np.float64))
|
| 34 |
+
if HAS_REFCOUNT:
|
| 35 |
+
old_refcount = sys.getrefcount(np.float64)
|
| 36 |
+
np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
|
| 37 |
+
assert_equal(old_refcount, sys.getrefcount(np.float64))
|
| 38 |
+
|
| 39 |
+
# test string
|
| 40 |
+
S2 = np.dtype((bytes, 2))
|
| 41 |
+
S3 = np.dtype((bytes, 3))
|
| 42 |
+
S5 = np.dtype((bytes, 5))
|
| 43 |
+
assert_equal(np.array(b"1.0", dtype=np.float64),
|
| 44 |
+
np.ones((), dtype=np.float64))
|
| 45 |
+
assert_equal(np.array(b"1.0").dtype, S3)
|
| 46 |
+
assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
|
| 47 |
+
assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
|
| 48 |
+
assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))
|
| 49 |
+
|
| 50 |
+
# test string
|
| 51 |
+
U2 = np.dtype((str, 2))
|
| 52 |
+
U3 = np.dtype((str, 3))
|
| 53 |
+
U5 = np.dtype((str, 5))
|
| 54 |
+
assert_equal(np.array("1.0", dtype=np.float64),
|
| 55 |
+
np.ones((), dtype=np.float64))
|
| 56 |
+
assert_equal(np.array("1.0").dtype, U3)
|
| 57 |
+
assert_equal(np.array("1.0", dtype=str).dtype, U3)
|
| 58 |
+
assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
|
| 59 |
+
assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))
|
| 60 |
+
|
| 61 |
+
builtins = getattr(__builtins__, '__dict__', __builtins__)
|
| 62 |
+
assert_(hasattr(builtins, 'get'))
|
| 63 |
+
|
| 64 |
+
# test memoryview
|
| 65 |
+
dat = np.array(memoryview(b'1.0'), dtype=np.float64)
|
| 66 |
+
assert_equal(dat, [49.0, 46.0, 48.0])
|
| 67 |
+
assert_(dat.dtype.type is np.float64)
|
| 68 |
+
|
| 69 |
+
dat = np.array(memoryview(b'1.0'))
|
| 70 |
+
assert_equal(dat, [49, 46, 48])
|
| 71 |
+
assert_(dat.dtype.type is np.uint8)
|
| 72 |
+
|
| 73 |
+
# test array interface
|
| 74 |
+
a = np.array(100.0, dtype=np.float64)
|
| 75 |
+
o = type("o", (object,),
|
| 76 |
+
dict(__array_interface__=a.__array_interface__))
|
| 77 |
+
assert_equal(np.array(o, dtype=np.float64), a)
|
| 78 |
+
|
| 79 |
+
# test array_struct interface
|
| 80 |
+
a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
|
| 81 |
+
dtype=[('f0', int), ('f1', float), ('f2', str)])
|
| 82 |
+
o = type("o", (object,),
|
| 83 |
+
dict(__array_struct__=a.__array_struct__))
|
| 84 |
+
## wasn't what I expected... is np.array(o) supposed to equal a ?
|
| 85 |
+
## instead we get a array([...], dtype=">V18")
|
| 86 |
+
assert_equal(bytes(np.array(o).data), bytes(a.data))
|
| 87 |
+
|
| 88 |
+
# test array
|
| 89 |
+
o = type("o", (object,),
|
| 90 |
+
dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
|
| 91 |
+
assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))
|
| 92 |
+
|
| 93 |
+
# test recursion
|
| 94 |
+
nested = 1.5
|
| 95 |
+
for i in range(np.MAXDIMS):
|
| 96 |
+
nested = [nested]
|
| 97 |
+
|
| 98 |
+
# no error
|
| 99 |
+
np.array(nested)
|
| 100 |
+
|
| 101 |
+
# Exceeds recursion limit
|
| 102 |
+
assert_raises(ValueError, np.array, [nested], dtype=np.float64)
|
| 103 |
+
|
| 104 |
+
# Try with lists...
|
| 105 |
+
# float32
|
| 106 |
+
assert_equal(np.array([None] * 10, dtype=np.float32),
|
| 107 |
+
np.full((10,), np.nan, dtype=np.float32))
|
| 108 |
+
assert_equal(np.array([[None]] * 10, dtype=np.float32),
|
| 109 |
+
np.full((10, 1), np.nan, dtype=np.float32))
|
| 110 |
+
assert_equal(np.array([[None] * 10], dtype=np.float32),
|
| 111 |
+
np.full((1, 10), np.nan, dtype=np.float32))
|
| 112 |
+
assert_equal(np.array([[None] * 10] * 10, dtype=np.float32),
|
| 113 |
+
np.full((10, 10), np.nan, dtype=np.float32))
|
| 114 |
+
# float64
|
| 115 |
+
assert_equal(np.array([None] * 10, dtype=np.float64),
|
| 116 |
+
np.full((10,), np.nan, dtype=np.float64))
|
| 117 |
+
assert_equal(np.array([[None]] * 10, dtype=np.float64),
|
| 118 |
+
np.full((10, 1), np.nan, dtype=np.float64))
|
| 119 |
+
assert_equal(np.array([[None] * 10], dtype=np.float64),
|
| 120 |
+
np.full((1, 10), np.nan, dtype=np.float64))
|
| 121 |
+
assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
|
| 122 |
+
np.full((10, 10), np.nan, dtype=np.float64))
|
| 123 |
+
|
| 124 |
+
assert_equal(np.array([1.0] * 10, dtype=np.float64),
|
| 125 |
+
np.ones((10,), dtype=np.float64))
|
| 126 |
+
assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
|
| 127 |
+
np.ones((10, 1), dtype=np.float64))
|
| 128 |
+
assert_equal(np.array([[1.0] * 10], dtype=np.float64),
|
| 129 |
+
np.ones((1, 10), dtype=np.float64))
|
| 130 |
+
assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
|
| 131 |
+
np.ones((10, 10), dtype=np.float64))
|
| 132 |
+
|
| 133 |
+
# Try with tuples
|
| 134 |
+
assert_equal(np.array((None,) * 10, dtype=np.float64),
|
| 135 |
+
np.full((10,), np.nan, dtype=np.float64))
|
| 136 |
+
assert_equal(np.array([(None,)] * 10, dtype=np.float64),
|
| 137 |
+
np.full((10, 1), np.nan, dtype=np.float64))
|
| 138 |
+
assert_equal(np.array([(None,) * 10], dtype=np.float64),
|
| 139 |
+
np.full((1, 10), np.nan, dtype=np.float64))
|
| 140 |
+
assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
|
| 141 |
+
np.full((10, 10), np.nan, dtype=np.float64))
|
| 142 |
+
|
| 143 |
+
assert_equal(np.array((1.0,) * 10, dtype=np.float64),
|
| 144 |
+
np.ones((10,), dtype=np.float64))
|
| 145 |
+
assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
|
| 146 |
+
np.ones((10, 1), dtype=np.float64))
|
| 147 |
+
assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
|
| 148 |
+
np.ones((1, 10), dtype=np.float64))
|
| 149 |
+
assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
|
| 150 |
+
np.ones((10, 10), dtype=np.float64))
|
| 151 |
+
|
| 152 |
+
@pytest.mark.parametrize("array", [True, False])
def test_array_impossible_casts(array):
    """Casting a user-defined scalar (rational) to datetime64 must fail.

    All builtin types can be forcibly cast, at least in principle, but
    user-defined dtypes need not support every cast.  The `array` flag
    exercises the same cast through a 0-d array wrapper as well.
    """
    value = rational(1, 2)
    if array:
        value = np.array(value)
    with assert_raises(TypeError):
        np.array(value, dtype="M8")
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
# TODO: remove when fastCopyAndTranspose deprecation expires
@pytest.mark.parametrize("a",
        (
            np.array(2),  # 0D array
            np.array([3, 2, 7, 0]),  # 1D array
            np.arange(6).reshape(2, 3)  # 2D array
        ),
)
def test_fastCopyAndTranspose(a):
    """The deprecated np.fastCopyAndTranspose still works but warns.

    The result must equal ``a.T`` and own its data (i.e. be a real copy).
    """
    with pytest.deprecated_call():
        transposed = np.fastCopyAndTranspose(a)
        assert_equal(transposed, a.T)
        assert transposed.flags.owndata
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def test_array_astype():
    """End-to-end checks of ndarray.astype: defaults, order, casting,
    subok, and object-to-string discovery."""
    a = np.arange(6, dtype='f4').reshape(2, 3)
    # Default behavior: allows unsafe casts, keeps memory layout,
    # always copies.
    b = a.astype('i4')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('i4'))
    assert_equal(a.strides, b.strides)
    b = a.T.astype('i4')
    assert_equal(a.T, b)
    assert_equal(b.dtype, np.dtype('i4'))
    assert_equal(a.T.strides, b.strides)
    b = a.astype('f4')
    assert_equal(a, b)
    assert_(not (a is b))

    # copy=False can skip the copy when dtype and layout already match.
    b = a.astype('f4', copy=False)
    assert_(a is b)

    # The order parameter overrides the memory layout, forcing a copy
    # whenever the current layout is wrong.
    b = a.astype('f4', order='F', copy=False)
    assert_equal(a, b)
    assert_(not (a is b))
    assert_(b.flags.f_contiguous)

    b = a.astype('f4', order='C', copy=False)
    assert_equal(a, b)
    assert_(a is b)
    assert_(b.flags.c_contiguous)

    # The casting parameter catches bad casts.
    b = a.astype('c8', casting='safe')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('c8'))

    assert_raises(TypeError, a.astype, 'i4', casting='safe')

    # subok=False passes through a non-subclassed array untouched.
    b = a.astype('f4', subok=0, copy=False)
    assert_(a is b)

    class MyNDArray(np.ndarray):
        pass

    a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)

    # subok=True passes through a subclass.
    b = a.astype('f4', subok=True, copy=False)
    assert_(a is b)

    # subok=True is the default and keeps the subtype on a real cast.
    b = a.astype('i4', copy=False)
    assert_equal(a, b)
    assert_equal(type(b), MyNDArray)

    # subok=False never returns a subclass.
    b = a.astype('f4', subok=False, copy=False)
    assert_equal(a, b)
    assert_(not (a is b))
    assert_(type(b) is not MyNDArray)

    # Converting from string object to fixed length string must not
    # truncate.
    a = np.array([b'a'*100], dtype='O')
    b = a.astype('S')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('S100'))
    a = np.array(['a'*100], dtype='O')
    b = a.astype('U')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('U100'))

    # Same check for strings shorter than 64 characters.
    a = np.array([b'a'*10], dtype='O')
    b = a.astype('S')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('S10'))
    a = np.array(['a'*10], dtype='O')
    b = a.astype('U')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('U10'))

    # Large Python ints stringify to their full decimal form:
    a = np.array(123456789012345678901234567890, dtype='O').astype('S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array(123456789012345678901234567890, dtype='O').astype('U')
    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))

    a = np.array([123456789012345678901234567890], dtype='O').astype('S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array([123456789012345678901234567890], dtype='O').astype('U')
    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))

    a = np.array(123456789012345678901234567890, dtype='S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array(123456789012345678901234567890, dtype='U')
    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))

    # Two UCS4 code points occupy exactly two uint32 slots:
    a = np.array('a\u0140', dtype='U')
    b = np.ndarray(buffer=a, dtype='uint32', shape=2)
    assert_(b.size == 2)

    # int -> 1-char string is not a 'safe' cast:
    a = np.array([1000], dtype='i4')
    assert_raises(TypeError, a.astype, 'S1', casting='safe')

    a = np.array(1000, dtype='i4')
    assert_raises(TypeError, a.astype, 'U1', casting='safe')

    # gh-24023: astype with no dtype argument must raise, not crash.
    assert_raises(TypeError, a.astype)
|
| 289 |
+
|
| 290 |
+
@pytest.mark.parametrize("dt", ["S", "U"])
def test_array_astype_to_string_discovery_empty(dt):
    """Casting an object array of empty strings to "S"/"U" must not error.

    See also gh-19085.  The resulting itemsize follows the current
    ``0 -> 1`` discovery logic (which may change); the important part is
    that discovery succeeds at all.
    """
    arr = np.array([""], dtype=object)
    assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize

    # `np.can_cast` accepts arrays and must agree with astype:
    assert np.can_cast(arr, dt, casting="unsafe")
    assert not np.can_cast(arr, dt, casting="same_kind")
    # ... and the plain object descriptor behaves the same:
    assert np.can_cast("O", dt, casting="unsafe")
|
| 303 |
+
|
| 304 |
+
@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
def test_array_astype_to_void(dt):
    """Casting any dtype to unsized void ("V") preserves the itemsize."""
    dt = np.dtype(dt)
    arr = np.array([], dtype=dt)
    assert arr.astype("V").dtype.itemsize == dt.itemsize
|
| 309 |
+
|
| 310 |
+
def test_object_array_astype_to_void():
    """Object arrays are inspected when cast to unsized void.

    This differs from `test_array_astype_to_void` because object arrays
    must be inspected element-wise; the default result is "V8" (8 being
    the size of a double).
    """
    converted = np.array([], dtype="O").astype("V")
    assert converted.dtype == "V8"
|
| 315 |
+
|
| 316 |
+
@pytest.mark.parametrize("t",
        np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float']
)
def test_array_astype_warning(t):
    """Casting complex to float/int discards the imaginary part and must
    emit a ComplexWarning."""
    a = np.array(10, dtype=np.complex_)
    assert_warns(np.ComplexWarning, a.astype, t)
|
| 323 |
+
|
| 324 |
+
@pytest.mark.parametrize(["dtype", "out_dtype"],
        [(np.bytes_, np.bool_),
         (np.str_, np.bool_),
         (np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast(dtype, out_dtype):
    """
    Currently, for `astype` strings are cast to booleans effectively by
    calling `bool(int(string))`.  This is not consistent (see gh-9875)
    and will eventually be deprecated.
    """
    arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype)
    expected = np.array([True, True, False, False], dtype=out_dtype)
    assert_array_equal(arr.astype(out_dtype), expected)
|
| 337 |
+
|
| 338 |
+
@pytest.mark.parametrize(["dtype", "out_dtype"],
        [(np.bytes_, np.bool_),
         (np.str_, np.bool_),
         (np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast_errors(dtype, out_dtype):
    """
    These inputs currently error out, because the intermediate cast to
    integer fails; they should not error in the future.
    """
    for invalid in ["False", "True", "", "\0", "non-empty"]:
        arr = np.array([invalid], dtype=dtype)
        with assert_raises(ValueError):
            arr.astype(out_dtype)
|
| 351 |
+
|
| 352 |
+
@pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_])
@pytest.mark.parametrize("scalar_type",
        [np.complex64, np.complex128, np.clongdouble])
def test_string_to_complex_cast(str_type, scalar_type):
    """Complex scalar types parse the string "1+3j" consistently through
    scalar construction, astype, and item assignment.

    NOTE(review): the `str_type` parameter appears unused in the body —
    verify whether `value` was meant to be built from it.
    """
    value = scalar_type(b"1+3j")
    assert scalar_type(value) == 1+3j
    assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
    assert np.array(value).astype(scalar_type)[()] == 1+3j
    arr = np.zeros(1, dtype=scalar_type)
    arr[0] = value
    assert arr[0] == 1+3j
|
| 363 |
+
|
| 364 |
+
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
def test_none_to_nan_cast(dtype):
    """Assigning or casting None into floating dtypes produces NaN.

    Note: at the time of writing, the scalar constructors themselves
    reject None.
    """
    arr = np.zeros(1, dtype=dtype)
    arr[0] = None
    assert np.isnan(arr)[0]
    assert np.isnan(np.array(None, dtype=dtype))[()]
    assert np.isnan(np.array([None], dtype=dtype))[0]
    assert np.isnan(np.array(None).astype(dtype))[()]
|
| 374 |
+
|
| 375 |
+
def test_copyto_fromscalar():
    """np.copyto broadcasting a scalar into an array, plain and masked."""
    a = np.arange(6, dtype='f4').reshape(2, 3)

    # Plain scalar broadcast, also through a transposed view.
    np.copyto(a, 1.5)
    assert_equal(a, 1.5)
    np.copyto(a.T, 2.5)
    assert_equal(a, 2.5)

    # Where-masked scalar broadcast.
    mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
    np.copyto(a, 3.5, where=mask)
    assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
    mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
    np.copyto(a.T, 4.5, where=mask)
    assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
|
| 391 |
+
|
| 392 |
+
def test_copyto():
    """np.copyto with array sources: overlap, casting rules, and masks."""
    a = np.arange(6, dtype='i4').reshape(2, 3)

    # Simple copy.
    np.copyto(a, [[3, 1, 5], [6, 2, 1]])
    assert_equal(a, [[3, 1, 5], [6, 2, 1]])

    # Overlapping copy should work.
    np.copyto(a[:, :2], a[::-1, 1::-1])
    assert_equal(a, [[2, 6, 5], [1, 3, 1]])

    # Defaults to 'same_kind' casting, so float -> int is rejected.
    assert_raises(TypeError, np.copyto, a, 1.5)

    # 'unsafe' casting forces it through, truncating 1.5 to 1.
    np.copyto(a, 1.5, casting='unsafe')
    assert_equal(a, 1)

    # Copying with a boolean mask.
    np.copyto(a, 3, where=[True, False, True])
    assert_equal(a, [[3, 1, 3], [3, 1, 3]])

    # The casting rule still applies with a mask.
    assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])

    # Lists of integer 0's and 1's are accepted as masks too.
    np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
    assert_equal(a, [[3, 4, 4], [4, 1, 3]])

    # Overlapping copy with a mask should work.
    np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
    assert_equal(a, [[3, 4, 4], [4, 3, 3]])

    # 'dst' must be an actual ndarray.
    assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
|
| 427 |
+
|
| 428 |
+
def test_copyto_permut():
    """Masked np.copyto over every mask bit pattern and several strides.

    Exercises the unrolled masked-copy inner loop: every 9-bit mask
    pattern, reversed and strided variants, and masks whose "true" bytes
    carry arbitrary nonzero values.
    """
    # Explicit overflow case: the mask buffer is preceded by a large run
    # of True bytes so reversed iteration starts deep inside the buffer.
    pad = 500
    l = [True] * pad + [True, True, True, True]
    r = np.zeros(len(l) - pad)
    d = np.ones(len(l) - pad)
    mask = np.array(l)[pad:]
    np.copyto(r, d, where=mask[::-1])

    # Test all permutations of possible masks; 9 elements is sufficient
    # for the current 4-byte unrolled code.
    power = 9
    d = np.ones(power)
    for i in range(2**power):
        r = np.zeros(power)
        # Bug fix: was `(i & x)`, which does not enumerate all masks as
        # the comment promises (bit x of i is tested by 2**x, not x).
        l = [(i & 2**x) != 0 for x in range(power)]
        mask = np.array(l)
        np.copyto(r, d, where=mask)
        assert_array_equal(r == 1, l)
        assert_equal(r.sum(), sum(l))

        r = np.zeros(power)
        np.copyto(r, d, where=mask[::-1])
        assert_array_equal(r == 1, l[::-1])
        assert_equal(r.sum(), sum(l))

        r = np.zeros(power)
        np.copyto(r[::2], d[::2], where=mask[::2])
        assert_array_equal(r[::2] == 1, l[::2])
        assert_equal(r[::2].sum(), sum(l[::2]))

        r = np.zeros(power)
        np.copyto(r[::2], d[::2], where=mask[::-2])
        assert_array_equal(r[::2] == 1, l[::-2])
        assert_equal(r[::2].sum(), sum(l[::-2]))

        # Masks are byte arrays: any nonzero byte value must count as True.
        for c in [0xFF, 0x7F, 0x02, 0x10]:
            r = np.zeros(power)
            mask = np.array(l)
            imask = np.array(l).view(np.uint8)
            imask[mask != 0] = c
            np.copyto(r, d, where=mask)
            assert_array_equal(r == 1, l)
            assert_equal(r.sum(), sum(l))

    # Scalar where= values broadcast: True copies everything ...
    r = np.zeros(power)
    np.copyto(r, d, where=True)
    assert_equal(r.sum(), r.size)
    # ... and False copies nothing.
    r = np.ones(power)
    d = np.zeros(power)
    np.copyto(r, d, where=False)
    assert_equal(r.sum(), r.size)
|
| 480 |
+
|
| 481 |
+
def test_copy_order():
    """Check ndarray.copy(order=...) and np.copy(order=...) layouts.

    `a` is C-contiguous, `b` is F-contiguous, and `c` is neither (a
    swapped-axes view); each is copied with order 'C', 'F' and 'K' and
    the resulting contiguity flags are verified.
    """
    a = np.arange(24).reshape(2, 1, 3, 4)
    b = a.copy(order='F')
    c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)

    def check_copy_result(x, y, ccontig, fcontig, strides=False):
        # Bug fix: the flag assertions previously read the enclosing
        # scope's `res` instead of the `x` parameter.  That only worked
        # because every call site happened to pass `res` as `x`; the
        # helper now depends solely on its arguments.
        assert_(not (x is y))
        assert_equal(x, y)
        assert_equal(x.flags.c_contiguous, ccontig)
        assert_equal(x.flags.f_contiguous, fcontig)
        # NOTE(review): `strides` is accepted but not asserted on; it is
        # kept for call-site compatibility.

    # Validate the initial state of a, b, and c.
    assert_(a.flags.c_contiguous)
    assert_(not a.flags.f_contiguous)
    assert_(not b.flags.c_contiguous)
    assert_(b.flags.f_contiguous)
    assert_(not c.flags.c_contiguous)
    assert_(not c.flags.f_contiguous)

    # Copy with order='C'
    res = a.copy(order='C')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = b.copy(order='C')
    check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
    res = c.copy(order='C')
    check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
    res = np.copy(a, order='C')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = np.copy(b, order='C')
    check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
    res = np.copy(c, order='C')
    check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)

    # Copy with order='F'
    res = a.copy(order='F')
    check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
    res = b.copy(order='F')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = c.copy(order='F')
    check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
    res = np.copy(a, order='F')
    check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
    res = np.copy(b, order='F')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = np.copy(c, order='F')
    check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)

    # Copy with order='K' (keep the source's memory layout)
    res = a.copy(order='K')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = b.copy(order='K')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = c.copy(order='K')
    check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
    res = np.copy(a, order='K')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = np.copy(b, order='K')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = np.copy(c, order='K')
    check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
|
| 541 |
+
|
| 542 |
+
def test_contiguous_flags():
    """Contiguity flags after creation, np.array coercion, slicing,
    ravel and squeeze."""
    a = np.ones((4, 4, 1))[::2, :, :]
    # A length-1 axis may carry an arbitrary stride without affecting
    # contiguity semantics.
    a.strides = a.strides[:2] + (-123,)
    b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)

    def check_contig(a, ccontig, fcontig):
        assert_(a.flags.c_contiguous == ccontig)
        assert_(a.flags.f_contiguous == fcontig)

    # Newly created arrays:
    check_contig(a, False, False)
    check_contig(b, False, False)
    check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
    check_contig(np.array([[[1], [2]]], order='F'), True, True)
    check_contig(np.empty((2, 2)), True, False)
    check_contig(np.empty((2, 2), order='F'), False, True)

    # np.array coercion must produce correct flags as well:
    check_contig(np.array(a, copy=False), False, False)
    check_contig(np.array(a, copy=False, order='C'), True, False)
    check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)

    # Slicing updates the flags:
    check_contig(a[0], True, True)
    check_contig(a[None, ::4, ..., None], True, True)
    check_contig(b[0, 0, ...], False, True)
    check_contig(b[:, :, 0:0, :, :], True, True)

    # ravel and squeeze results:
    check_contig(a.ravel(), True, True)
    check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
|
| 573 |
+
|
| 574 |
+
def test_broadcast_arrays():
    """Broadcasting must also work for structured (compound) dtypes."""
    single = np.array([(1, 2, 3)], dtype='u4,u4,u4')
    triple = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
    result = np.broadcast_arrays(single, triple)
    # The single row is repeated to match; the full array is unchanged.
    assert_equal(result[0],
                 np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
    assert_equal(result[1],
                 np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
|
| 581 |
+
|
| 582 |
+
@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
        [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
         ((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
def test_full_from_list(shape, fill_value, expected_output):
    """np.full broadcasts a list fill_value across the requested shape."""
    output = np.full(shape, fill_value)
    assert_equal(output, expected_output)
|
| 588 |
+
|
| 589 |
+
def test_astype_copyflag():
    """Exercise astype's copy options, including np._CopyMode values."""
    arr = np.arange(10, dtype=np.intp)

    # ALWAYS / True: a fresh buffer even for a no-op cast.
    res_true = arr.astype(np.intp, copy=True)
    assert not np.may_share_memory(arr, res_true)
    res_always = arr.astype(np.intp, copy=np._CopyMode.ALWAYS)
    assert not np.may_share_memory(arr, res_always)

    # IF_NEEDED / False with a matching dtype may skip the copy.
    # `res is arr` currently, but only `may_share_memory` is promised.
    res_false = arr.astype(np.intp, copy=False)
    assert np.may_share_memory(arr, res_false)
    res_if_needed = arr.astype(np.intp, copy=np._CopyMode.IF_NEEDED)
    assert np.may_share_memory(arr, res_if_needed)

    # NEVER is fine when no copy is required.
    res_never = arr.astype(np.intp, copy=np._CopyMode.NEVER)
    assert np.may_share_memory(arr, res_never)

    # When a copy is actually necessary:
    res_false = arr.astype(np.float64, copy=False)
    assert_array_equal(res_false, arr)
    res_if_needed = arr.astype(np.float64,
                               copy=np._CopyMode.IF_NEEDED)
    assert_array_equal(res_if_needed, arr)
    # ... NEVER must then refuse with a ValueError.
    assert_raises(ValueError, arr.astype, np.float64,
                  copy=np._CopyMode.NEVER)
|
pllava/lib/python3.10/site-packages/numpy/core/tests/test_array_coercion.py
ADDED
|
@@ -0,0 +1,898 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tests for array coercion, mainly through testing `np.array` results directly.
|
| 3 |
+
Note that other such tests exist, e.g., in `test_api.py` and many corner-cases
|
| 4 |
+
are tested (sometimes indirectly) elsewhere.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from itertools import permutations, product
|
| 8 |
+
|
| 9 |
+
import pytest
|
| 10 |
+
from pytest import param
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
from numpy.core._rational_tests import rational
|
| 14 |
+
from numpy.core._multiarray_umath import _discover_array_parameters
|
| 15 |
+
|
| 16 |
+
from numpy.testing import (
|
| 17 |
+
assert_array_equal, assert_warns, IS_PYPY)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def arraylikes():
    """
    Yield functions that wrap an ndarray into various array-like objects.

    Each yielded callable takes an array and returns something np.array
    can coerce: the array itself, a subclass view, an ``__array__``
    wrapper, a memoryview, an ``__array_interface__`` holder and an
    ``__array_struct__`` holder.
    """
    # base array:
    def ndarray(a):
        return a

    yield param(ndarray, id="ndarray")

    # subclass:
    class MyArr(np.ndarray):
        pass

    def subclass(a):
        return a.view(MyArr)

    yield subclass

    class _SequenceLike():
        # Older NumPy versions sometimes cared whether a protocol array
        # was also sequence-like.  This shouldn't matter, but keep it for
        # now for __array__ and not the others.
        def __len__(self):
            raise TypeError

        def __getitem__(self):
            raise TypeError

    # __array__ protocol:
    class ArrayDunder(_SequenceLike):
        def __init__(self, a):
            self.a = a

        def __array__(self, dtype=None):
            return self.a

    yield param(ArrayDunder, id="__array__")

    # memory-view:
    yield param(memoryview, id="memoryview")

    # __array_interface__ protocol:
    class ArrayInterface:
        def __init__(self, a):
            self.a = a  # hold a reference to keep the interface valid
            self.__array_interface__ = a.__array_interface__

    yield param(ArrayInterface, id="__array_interface__")

    # __array_struct__ protocol:
    class ArrayStruct:
        def __init__(self, a):
            self.a = a  # hold a reference to keep the struct valid
            self.__array_struct__ = a.__array_struct__

    yield param(ArrayStruct, id="__array_struct__")
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def scalar_instances(times=True, extended_precision=True, user_dtype=True):
    """Yield a hard-coded collection of scalar instances of many dtypes.

    The flags allow callers to exclude datetimes/timedeltas (`times`),
    long double / long complex (`extended_precision`) and the rational
    user dtype (`user_dtype`).
    """
    # Floats (sqrt(5) gives an irrational value exercising full precision):
    yield param(np.sqrt(np.float16(5)), id="float16")
    yield param(np.sqrt(np.float32(5)), id="float32")
    yield param(np.sqrt(np.float64(5)), id="float64")
    if extended_precision:
        yield param(np.sqrt(np.longdouble(5)), id="longdouble")

    # Complex:
    yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
    yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
    if extended_precision:
        yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")

    # Bool:
    # XFAIL: Bool should be added, but has some bad properties when it
    # comes to strings, see also gh-9875
    # yield param(np.bool_(0), id="bool")

    # Integers:
    yield param(np.int8(2), id="int8")
    yield param(np.int16(2), id="int16")
    yield param(np.int32(2), id="int32")
    yield param(np.int64(2), id="int64")

    yield param(np.uint8(2), id="uint8")
    yield param(np.uint16(2), id="uint16")
    yield param(np.uint32(2), id="uint32")
    yield param(np.uint64(2), id="uint64")

    # Rational:
    if user_dtype:
        yield param(rational(1, 2), id="rational")

    # A structured void scalar cannot be created directly, so extract one:
    structured = np.array([(1, 3)], "i,i")[0]
    assert isinstance(structured, np.void)
    assert structured.dtype == np.dtype("i,i")
    yield param(structured, id="structured")

    if times:
        # Datetimes and timedelta:
        yield param(np.timedelta64(2), id="timedelta64[generic]")
        yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
        yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")

        yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
        yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]")

    # Strings and unstructured void:
    yield param(np.bytes_(b"1234"), id="bytes")
    yield param(np.str_("2345"), id="unicode")
    yield param(np.void(b"4321"), id="unstructured_void")
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def is_parametric_dtype(dtype):
    """Return True for parametric legacy dtypes.

    A dtype counts as parametric when its itemsize is 0 (flexible
    strings/void) or when it is a datetime64/timedelta64 without a unit
    (whose name then ends in the bare "64" rather than e.g. "[s]").
    """
    if dtype.itemsize == 0:
        return True
    is_time = issubclass(dtype.type, (np.datetime64, np.timedelta64))
    return is_time and dtype.name.endswith("64")
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
class TestStringDiscovery:
    @pytest.mark.parametrize("obj",
            [object(), 1.2, 10**43, None, "string"],
            ids=["object", "1.2", "10**43", "None", "string"])
    def test_basic_stringlength(self, obj):
        # The discovered bytes-string length must equal len(str(obj)).
        expected = np.dtype(f"S{len(str(obj))}")

        assert np.array(obj, dtype="S").dtype == expected
        assert np.array([obj], dtype="S").dtype == expected

        # Discovery also works when the object is wrapped in an array first.
        wrapped = np.array(obj, dtype="O")
        assert np.array(wrapped, dtype="S").dtype == expected
        # Passing the DType class (rather than an instance) works as well.
        assert np.array(wrapped, dtype=type(expected)).dtype == expected
        # `.astype()` must agree with coercion ...
        assert wrapped.astype("S").dtype == expected
        # ... and it also accepts the DType class.
        assert wrapped.astype(type(np.dtype("S"))).dtype == expected

    @pytest.mark.parametrize("obj",
            [object(), 1.2, 10**43, None, "string"],
            ids=["object", "1.2", "10**43", "None", "string"])
    def test_nested_arrays_stringlength(self, obj):
        # Length discovery recurses into object arrays nested in a list.
        expected = np.dtype(f"S{len(str(obj))}")
        wrapped = np.array(obj, dtype="O")
        assert np.array([wrapped, wrapped], dtype="S").dtype == expected

    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_unpack_first_level(self, arraylike):
        # Exactly one level of array-likes is unpacked during discovery.
        inner = np.array([None])
        inner[0] = np.array(1.2)
        # The string length comes from str() of the stored item, not from
        # the float dtype itself.
        expected = np.dtype(f"S{len(str(inner[0]))}")

        wrapped = arraylike(inner)
        # Casting to string usually goes through str(obj).
        res = np.array([wrapped], dtype="S")
        assert res.shape == (1, 1)
        assert res.dtype == expected
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
class TestScalarDiscovery:
    def test_void_special_case(self):
        # For structured void dtypes a tuple counts as a single element:
        res = np.array((1, 2, 3), dtype="i,i,i")
        assert res.shape == ()
        res = np.array([(1, 2, 3)], dtype="i,i,i")
        assert res.shape == (1,)

    def test_char_special_case(self):
        # With the "c" dtype a string is split into its characters:
        res = np.array("string", dtype="c")
        assert res.shape == (6,)
        assert res.dtype.char == "c"
        res = np.array(["string"], dtype="c")
        assert res.shape == (1, 6)
        assert res.dtype.char == "c"

    def test_char_special_case_deep(self):
        # The character special case must still error out cleanly when the
        # extra string dimension exceeds the dimension limit.
        nested = ["string"]  # 2 dimensions (the string itself adds one)
        for _ in range(np.MAXDIMS - 2):
            nested = [nested]

        res = np.array(nested, dtype='c')
        assert res.shape == (1,) * (np.MAXDIMS - 1) + (6,)
        with pytest.raises(ValueError):
            # one dimension too many:
            np.array([nested], dtype="c")

    def test_unknown_object(self):
        # Unrecognized objects fall back to a 0-D object array.
        res = np.array(object())
        assert res.shape == ()
        assert res.dtype == np.dtype("O")

    @pytest.mark.parametrize("scalar", scalar_instances())
    def test_scalar(self, scalar):
        # A scalar coerces to a 0-D array of its own dtype ...
        res = np.array(scalar)
        assert res.shape == ()
        assert res.dtype == scalar.dtype

        # ... and the dtype survives nesting inside sequences.
        res = np.array([[scalar, scalar]])
        assert res.shape == (1, 2)
        assert res.dtype == scalar.dtype

    # Additionally to string this test also runs into a corner case
    # with datetime promotion (the difference is the promotion order).
    @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
    def test_scalar_promotion(self):
        # Exhaustively combine every pair of scalar kinds:
        for sc1, sc2 in product(scalar_instances(), scalar_instances()):
            sc1, sc2 = sc1.values[0], sc2.values[0]
            try:
                res = np.array([sc1, sc2])
            except (TypeError, ValueError):
                # The promotion between two times can fail.
                # XFAIL (ValueError): Some object casts are currently undefined
                continue
            assert res.shape == (2,)
            try:
                expected_dtype = np.promote_types(sc1.dtype, sc2.dtype)
                assert res.dtype == expected_dtype
            except TypeError:
                # Non-promotable combinations currently end up as object.
                assert res.dtype == np.dtype("O")

    @pytest.mark.parametrize("scalar", scalar_instances())
    def test_scalar_coercion(self, scalar):
        # Exercises several scalar coercion paths, mainly for the numerical
        # types; not all of them go through `np.array` directly.
        if isinstance(scalar, np.inexact):
            # Use a value that exercises the full precision if available.
            scalar = type(scalar)((scalar * 2)**0.5)

        if type(scalar) is rational:
            # Rational generally fails due to a missing cast. In the future
            # object casts should automatically be defined based on `setitem`.
            pytest.xfail("Rational to object cast is undefined currently.")

        # Round-trip through an object array:
        via_object = np.array(scalar, dtype=object).astype(scalar.dtype)

        # Several equivalent ways of building a 1-element array:
        from_reshape = np.array(scalar).reshape(1)
        from_list = np.array([scalar])
        from_setitem = np.empty(1, dtype=scalar.dtype)
        from_setitem[0] = scalar
        from_slice_assign = np.empty(1, dtype=scalar.dtype)
        from_slice_assign[:] = [scalar]
        # All of them must agree with the object round-trip:
        assert_array_equal(via_object, from_reshape)
        assert_array_equal(via_object, from_list)
        assert_array_equal(via_object, from_setitem)
        assert_array_equal(via_object, from_slice_assign)

    @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
    @pytest.mark.parametrize("cast_to", scalar_instances())
    def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
        """
        Test that in most cases:
        * `np.array(scalar, dtype=dtype)`
        * `np.empty((), dtype=dtype)[()] = scalar`
        * `np.array(scalar).astype(dtype)`
        should behave the same. The only exceptions are parametric dtypes
        (mainly datetime/timedelta without unit) and void without fields.
        """
        dtype = cast_to.dtype  # only the target dtype is parametrized

        for scalar in scalar_instances(times=False):
            scalar = scalar.values[0]

            if dtype.type == np.void:
                if scalar.dtype.fields is not None and dtype.fields is None:
                    # Coercion to an unstructured void (e.g. "V6") works,
                    # while the explicit cast fails.  Since the types are
                    # identical, SETITEM handles this case, and it follows
                    # different rules than the cast.
                    with pytest.raises(TypeError):
                        np.array(scalar).astype(dtype)
                    np.array(scalar, dtype=dtype)
                    np.array([scalar], dtype=dtype)
                    continue

            # First attempt the cast; on success verify below that the other
            # paths match it, on failure verify that they fail as well.
            try:
                cast = np.array(scalar).astype(dtype)
            except (TypeError, ValueError, RuntimeError):
                # direct coercion must raise too (the error type may differ)
                with pytest.raises(Exception):
                    np.array(scalar, dtype=dtype)

                if (isinstance(scalar, rational) and
                        np.issubdtype(dtype, np.signedinteger)):
                    return

                with pytest.raises(Exception):
                    np.array([scalar], dtype=dtype)
                # ... and so must assignment
                res = np.zeros((), dtype=dtype)
                with pytest.raises(Exception):
                    res[()] = scalar

                return

            # Success path: coercion and assignment agree with the cast.
            arr = np.array(scalar, dtype=dtype)
            assert_array_equal(arr, cast)
            ass = np.zeros((), dtype=dtype)
            ass[()] = scalar
            assert_array_equal(ass, cast)

    @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
    def test_pyscalar_subclasses(self, pyscalar):
        """NumPy arrays are read/write which means that anything but invariant
        behaviour is on thin ice. However, we currently are happy to discover
        subclasses of Python float, int, complex the same as the base classes.
        This should potentially be deprecated.
        """
        class MyScalar(type(pyscalar)):
            pass

        res = np.array(MyScalar(pyscalar))
        expected = np.array(pyscalar)
        assert_array_equal(res, expected)

    @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
    def test_default_dtype_instance(self, dtype_char):
        if dtype_char in "SU":
            dtype = np.dtype(dtype_char + "1")
        elif dtype_char == "V":
            # Legacy behaviour was to use V8. The reason was float64 being
            # the default dtype and that having 8 bytes.
            dtype = np.dtype("V8")
        else:
            dtype = np.dtype(dtype_char)

        discovered_dtype, _ = _discover_array_parameters([], type(dtype))

        assert discovered_dtype == dtype
        assert discovered_dtype.itemsize == dtype.itemsize

    @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
    @pytest.mark.parametrize(["scalar", "error"],
            [(np.float64(np.nan), ValueError),
             (np.array(-1).astype(np.ulonglong)[()], OverflowError)])
    def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error):
        """
        Signed integers are currently different in that they do not cast other
        NumPy scalar, but instead use scalar.__int__(). The hardcoded
        exception to this rule is `np.array(scalar, dtype=integer)`.
        """
        dtype = np.dtype(dtype)

        # Special case using casting logic: it warns for the NaN but allows
        # the cast (giving undefined behaviour).
        with np.errstate(invalid="ignore"):
            coerced = np.array(scalar, dtype=dtype)
            cast = np.array(scalar).astype(dtype)
        assert_array_equal(coerced, cast)

        # All other paths go through __int__ and must fail:
        with pytest.raises(error):
            np.array([scalar], dtype=dtype)
        with pytest.raises(error):
            cast[()] = scalar
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
class TestTimeScalars:
    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
    @pytest.mark.parametrize("scalar",
            [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
             param(np.timedelta64(123, "s"), id="timedelta64[s]"),
             param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
             param(np.datetime64(1, "D"), id="datetime64[D]")],)
    def test_coercion_basic(self, dtype, scalar):
        # Note the `[scalar]` is there because np.array(scalar) uses stricter
        # `scalar.__int__()` rules for backward compatibility right now.
        arr = np.array(scalar, dtype=dtype)
        cast = np.array(scalar).astype(dtype)
        assert_array_equal(arr, cast)

        ass = np.ones((), dtype=dtype)
        if issubclass(dtype, np.integer):
            with pytest.raises(TypeError):
                # raises, as would np.array([scalar], dtype=dtype), this is
                # conversion from times, but behaviour of integers.
                ass[()] = scalar
        else:
            ass[()] = scalar
            assert_array_equal(ass, cast)

    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
    @pytest.mark.parametrize("scalar",
            [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
             param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
    def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
        # Only "ns" and "generic" timedeltas can be converted to numbers
        # so these are slightly special.
        arr = np.array(scalar, dtype=dtype)
        cast = np.array(scalar).astype(dtype)
        ass = np.ones((), dtype=dtype)
        # Unlike other units, this assignment succeeds (the scalar is
        # convertible to a number), so it can be compared to the cast below.
        ass[()] = scalar

        assert_array_equal(arr, cast)
        # BUGFIX: this previously compared `cast` with itself, which is a
        # tautology; the intent (as in test_coercion_basic) is to verify
        # that assignment agrees with the explicit cast.
        assert_array_equal(ass, cast)

    @pytest.mark.parametrize("dtype", ["S6", "U6"])
    @pytest.mark.parametrize(["val", "unit"],
            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
    def test_coercion_assignment_datetime(self, val, unit, dtype):
        # String from datetime64 assignment is currently special cased to
        # never use casting. This is because casting will error in this
        # case, and traditionally in most cases the behaviour is maintained
        # like this. (`np.array(scalar, dtype="U6")` would have failed before)
        # TODO: This discrepancy _should_ be resolved, either by relaxing the
        # cast, or by deprecating the first part.
        scalar = np.datetime64(val, unit)
        dtype = np.dtype(dtype)
        cut_string = dtype.type(str(scalar)[:6])

        arr = np.array(scalar, dtype=dtype)
        assert arr[()] == cut_string
        ass = np.ones((), dtype=dtype)
        ass[()] = scalar
        assert ass[()] == cut_string

        with pytest.raises(RuntimeError):
            # However, unlike the above assignment using `str(scalar)[:6]`
            # due to being handled by the string DType and not be casting
            # the explicit cast fails:
            np.array(scalar).astype(dtype)

    @pytest.mark.parametrize(["val", "unit"],
            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
    def test_coercion_assignment_timedelta(self, val, unit):
        scalar = np.timedelta64(val, unit)

        # Unlike datetime64, timedelta allows the unsafe cast:
        np.array(scalar, dtype="S6")
        cast = np.array(scalar).astype("S6")
        ass = np.ones((), dtype="S6")
        ass[()] = scalar
        expected = scalar.astype("S")[:6]
        assert cast[()] == expected
        assert ass[()] == expected
|
| 484 |
+
|
| 485 |
+
class TestNested:
    def test_nested_simple(self):
        innermost = [1.2]
        nested = innermost
        for _ in range(np.MAXDIMS - 1):
            nested = [nested]

        # Exactly MAXDIMS dimensions is still fine:
        res = np.array(nested, dtype="float64")
        assert res.shape == (1,) * np.MAXDIMS
        with pytest.raises(ValueError):
            np.array([nested], dtype="float64")

        with pytest.raises(ValueError, match=".*would exceed the maximum"):
            np.array([nested])  # user must ask for `object` explicitly

        # With an explicit object dtype the innermost list is stored as-is:
        res = np.array([nested], dtype=object)
        assert res.dtype == np.dtype("O")
        assert res.shape == (1,) * np.MAXDIMS
        assert res.item() is innermost

    def test_pathological_self_containing(self):
        # A list containing itself must also terminate at MAXDIMS:
        l = []
        l.append(l)
        res = np.array([l, l, l], dtype=object)
        assert res.shape == (3,) + (1,) * (np.MAXDIMS - 1)

        # Also check a ragged case:
        res = np.array([l, [None], l], dtype=object)
        assert res.shape == (3, 1)

    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_nested_arraylikes(self, arraylike):
        # Store an array-like into an array where it has too many
        # dimensions.  Shape discovery then treats the array-like as an
        # object (a special case of ragged discovery): the result has one
        # dimension less than the maximum, and the array-like is assigned
        # into it (which works for object, or when `float(arraylike)` does).
        innermost = arraylike(np.ones((1, 1)))

        nested = innermost
        for _ in range(np.MAXDIMS - 1):
            nested = [nested]

        with pytest.raises(ValueError, match=".*would exceed the maximum"):
            # Refuses to assign the array into a non-object result:
            np.array(nested, dtype="float64")

        # With object we end up assigning a (1, 1) array into a (1,) slot
        # (having run out of dimensions); currently supported, though a
        # special case that is not ideal.
        res = np.array(nested, dtype=object)
        assert res.shape == (1,) * np.MAXDIMS
        assert res.item() == np.array(innermost).item()

    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_uneven_depth_ragged(self, arraylike):
        inner = np.arange(4).reshape((2, 2))
        inner = arraylike(inner)

        # Ragged already in the second dimension:
        out = np.array([inner, [inner]], dtype=object)
        assert out.shape == (2,)
        assert out[0] is inner
        assert type(out[1]) is list

        # Ragged in the third dimension:
        with pytest.raises(ValueError):
            # Broadcast error during assignment: the result shape would be
            # (2, 2, 2) but `arr[0, 0] = arr` cannot succeed.
            np.array([inner, [inner, inner]], dtype=object)

    def test_empty_sequence(self):
        res = np.array([[], [1], [[1]]], dtype=object)
        assert res.shape == (3,)

        # An empty sequence stops further dimension discovery, so the
        # discovered shape is (0,), which errors during assignment:
        with pytest.raises(ValueError):
            np.array([[], np.empty((0, 1))], dtype=object)

    def test_array_of_different_depths(self):
        # When multiple arrays (or array-likes) appear in a sequence with
        # different depths, only the shared dimensions are discovered.
        # (see also gh-17224)
        base = np.zeros((3, 2))
        mismatch_first_dim = np.zeros((1, 2))
        mismatch_second_dim = np.zeros((3, 3))

        dtype, shape = _discover_array_parameters(
            [base, mismatch_second_dim], dtype=np.dtype("O"))
        assert shape == (2, 3)

        dtype, shape = _discover_array_parameters(
            [base, mismatch_first_dim], dtype=np.dtype("O"))
        assert shape == (2,)
        # This second case works because both arrays can simply be stored
        # as objects:
        res = np.asarray([base, mismatch_first_dim], dtype=np.dtype("O"))
        assert res[0] is base
        assert res[1] is mismatch_first_dim
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
class TestBadSequences:
    # Tests for hostile objects passed into `np.array`; behaviour here is
    # largely undefined.  The old code partially tolerated them where the
    # new code fails.  We could (and maybe should) copy all sequences to
    # be safe against such bad actors.

    def test_growing_list(self):
        # `mylist` appends to the outer list every time its length is taken.
        obj = []
        class mylist(list):
            def __len__(self):
                obj.append([1, 2])
                return super().__len__()

        obj.append(mylist([1, 2]))

        with pytest.raises(RuntimeError):
            np.array(obj)

    # Note: We do not test a shrinking list. These do very evil things
    # and the only way to fix them would be to copy all sequences.
    # (which may be a real option in the future).

    def test_mutated_list(self):
        # `mylist` swaps out the first element whenever its length is taken.
        obj = []
        class mylist(list):
            def __len__(self):
                obj[0] = [2, 3]  # replace with a different list.
                return super().__len__()

        obj.append([2, 3])
        obj.append(mylist([1, 2]))
        # Must not crash:
        np.array(obj)

    def test_replace_0d_array(self):
        # `baditem` mutates the nested 0-D array while pretending (and then
        # refusing) to be a sequence.
        obj = []
        class baditem:
            def __len__(self):
                obj[0][0] = 2  # replace with a different list.
                raise ValueError("not actually a sequence!")

            def __getitem__(self):
                pass

        # Corner case in the new code: the `array(2)` result is cached, so
        # replacing it invalidates the cache.
        obj.append([np.array(2), baditem()])
        with pytest.raises(RuntimeError):
            np.array(obj)
|
| 642 |
+
|
| 643 |
+
|
| 644 |
+
class TestArrayLikes:
    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_0d_object_special_case(self, arraylike):
        base = np.array(0.)
        obj = arraylike(base)
        # A lone array-like is always converted ...
        res = np.array(obj, dtype=object)
        assert_array_equal(base, res)

        # ... but a 0-D array-like nested in a sequence never is:
        res = np.array([obj], dtype=object)
        assert res[0] is obj

    @pytest.mark.parametrize("arraylike", arraylikes())
    @pytest.mark.parametrize("arr", [np.array(0.), np.arange(4)])
    def test_object_assignment_special_case(self, arraylike, arr):
        obj = arraylike(arr)
        target = np.arange(1, dtype=object)
        target[:] = [obj]
        assert target[0] is obj

    def test_0d_generic_special_case(self):
        class ArraySubclass(np.ndarray):
            def __float__(self):
                raise TypeError("e.g. quantities raise on this")

        base = np.array(0.)
        obj = base.view(ArraySubclass)
        res = np.array(obj)
        # The subclass is simply cast:
        assert_array_equal(base, res)

        # When the 0-D array-like is nested, __float__ is currently
        # guaranteed to be used.  We may want to change that; quantities
        # and masked arrays half make use of this.
        with pytest.raises(TypeError):
            np.array([obj])

        # The same holds for memoryview:
        obj = memoryview(base)
        res = np.array(obj)
        assert_array_equal(base, res)
        with pytest.raises(ValueError):
            # The error type does not matter much here.
            np.array([obj])

    def test_arraylike_classes(self):
        # Classes of array-likes should generally be storable inside a
        # numpy (object) array.  All special attributes are probed during
        # coercion, so all are exercised here.
        res = np.array(np.int64)
        assert res[()] is np.int64
        res = np.array([np.int64])
        assert res[0] is np.int64

        # Properties/unbound methods on the class must not confuse coercion:
        class ArrayLike:
            @property
            def __array_interface__(self):
                pass

            @property
            def __array_struct__(self):
                pass

            def __array__(self):
                pass

        res = np.array(ArrayLike)
        assert res[()] is ArrayLike
        res = np.array([ArrayLike])
        assert res[0] is ArrayLike

    @pytest.mark.skipif(
            np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
    def test_too_large_array_error_paths(self):
        """Test the error paths, including for memory leaks"""
        base = np.array(0, dtype="uint8")
        # Guarantees that a contiguous copy won't work:
        base = np.broadcast_to(base, 2**62)

        for _ in range(5):
            # repeat, so that caching cannot have an effect:
            with pytest.raises(MemoryError):
                np.array(base)
            with pytest.raises(MemoryError):
                np.array([base])

    @pytest.mark.parametrize("attribute",
        ["__array_interface__", "__array__", "__array_struct__"])
    @pytest.mark.parametrize("error", [RecursionError, MemoryError])
    def test_bad_array_like_attributes(self, attribute, error):
        # RecursionError and MemoryError are considered fatal. All errors
        # (except AttributeError) should probably be raised in the future,
        # but shapely made use of it, so it will require a deprecation.

        class BadInterface:
            def __getattr__(self, attr):
                if attr == attribute:
                    raise error
                super().__getattr__(attr)

        with pytest.raises(error):
            np.array(BadInterface())

    @pytest.mark.parametrize("error", [RecursionError, MemoryError])
    def test_bad_array_like_bad_length(self, error):
        # RecursionError and MemoryError are considered "critical" in
        # sequences. We could expand this more generally though. (NumPy 1.20)
        class BadSequence:
            def __len__(self):
                raise error
            def __getitem__(self):
                # must have getitem to be a Sequence
                return 1

        with pytest.raises(error):
            np.array(BadSequence())
|
| 762 |
+
|
| 763 |
+
|
| 764 |
+
class TestAsArray:
    """Test expected behaviors of ``asarray``."""

    def test_dtype_identity(self):
        """Confirm the intended behavior for the *dtype* kwarg.

        ``asarray()`` must return an array whose dtype is the provided
        descriptor object: distinct np.dtype instances force distinct array
        handles, while equivalent dtypes still share the underlying data
        (via the base object) with the original array.

        Ref https://github.com/numpy/numpy/issues/1468
        """
        int_array = np.array([1, 2, 3], dtype='i')
        # No dtype request: the input passes through unchanged.
        assert np.asarray(int_array) is int_array

        # The character code resolves to the singleton dtype object provided
        # by the numpy package, so the existing array already satisfies it.
        assert np.asarray(int_array, dtype='i') is int_array

        # Derive a dtype from np.dtype('i') but attach metadata so the
        # descriptor is a distinct (yet equivalent) object.
        unequal_type = np.dtype('i', metadata={'spam': True})
        annotated_int_array = np.asarray(int_array, dtype=unequal_type)
        assert annotated_int_array is not int_array
        assert annotated_int_array.base is int_array
        # An equivalent descriptor that is nevertheless a new, distinct
        # dtype instance.
        equivalent_requirement = np.dtype('i', metadata={'spam': True})
        annotated_int_array_alt = np.asarray(annotated_int_array,
                                             dtype=equivalent_requirement)
        assert unequal_type == equivalent_requirement
        assert unequal_type is not equivalent_requirement
        assert annotated_int_array_alt is not annotated_int_array
        assert annotated_int_array_alt.dtype is equivalent_requirement

        # Repeat the check with a pair of C integer type codes whose
        # equivalence varies between platforms; first find an equivalent
        # pair.
        integer_type_codes = ('i', 'l', 'q')
        integer_dtypes = [np.dtype(code) for code in integer_type_codes]
        typeA = None
        typeB = None
        for typeA, typeB in permutations(integer_dtypes, r=2):
            if typeA == typeB:
                assert typeA is not typeB
                break
        assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype)

        # These ``asarray()`` calls may produce a new view or a copy, but
        # never return the identical object.
        long_int_array = np.asarray(int_array, dtype='l')
        long_long_int_array = np.asarray(int_array, dtype='q')
        assert long_int_array is not int_array
        assert long_long_int_array is not int_array
        assert np.asarray(long_int_array, dtype='q') is not long_int_array
        array_a = np.asarray(int_array, dtype=typeA)
        assert typeA == typeB
        assert typeA is not typeB
        assert array_a.dtype is typeA
        assert array_a is not np.asarray(array_a, dtype=typeB)
        assert np.asarray(array_a, dtype=typeB).dtype is typeB
        assert array_a is np.asarray(array_a, dtype=typeB).base
|
| 828 |
+
|
| 829 |
+
|
| 830 |
+
class TestSpecialAttributeLookupFailure:
    # An exception raised while *fetching* the special array attributes
    # (not only while calling them) must propagate out of coercion.

    class WeirdArrayLike:
        @property
        def __array__(self):
            raise RuntimeError("oops!")

    class WeirdArrayInterface:
        @property
        def __array_interface__(self):
            raise RuntimeError("oops!")

    def test_deprecated(self):
        with pytest.raises(RuntimeError):
            np.array(self.WeirdArrayLike())
        with pytest.raises(RuntimeError):
            np.array(self.WeirdArrayInterface())
|
| 848 |
+
|
| 849 |
+
|
| 850 |
+
def test_subarray_from_array_construction():
|
| 851 |
+
# Arrays are more complex, since they "broadcast" on success:
|
| 852 |
+
arr = np.array([1, 2])
|
| 853 |
+
|
| 854 |
+
res = arr.astype("(2)i,")
|
| 855 |
+
assert_array_equal(res, [[1, 1], [2, 2]])
|
| 856 |
+
|
| 857 |
+
res = np.array(arr, dtype="(2)i,")
|
| 858 |
+
|
| 859 |
+
assert_array_equal(res, [[1, 1], [2, 2]])
|
| 860 |
+
|
| 861 |
+
res = np.array([[(1,), (2,)], arr], dtype="(2)i,")
|
| 862 |
+
assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 1], [2, 2]]])
|
| 863 |
+
|
| 864 |
+
# Also try a multi-dimensional example:
|
| 865 |
+
arr = np.arange(5 * 2).reshape(5, 2)
|
| 866 |
+
expected = np.broadcast_to(arr[:, :, np.newaxis, np.newaxis], (5, 2, 2, 2))
|
| 867 |
+
|
| 868 |
+
res = arr.astype("(2,2)f")
|
| 869 |
+
assert_array_equal(res, expected)
|
| 870 |
+
|
| 871 |
+
res = np.array(arr, dtype="(2,2)f")
|
| 872 |
+
assert_array_equal(res, expected)
|
| 873 |
+
|
| 874 |
+
|
| 875 |
+
def test_empty_string():
    """Empty strings often come out as S1; check the S1 result is filled.

    We must fill the S1 and not a (possibly) detected S0 result.  Arguably
    S0 would be the better return, in which case the decision to return S1
    should move elsewhere.
    """
    got = np.array([""] * 10, dtype="S")
    assert_array_equal(got, np.array("\0", "S1"))
    assert got.dtype == "S1"

    objs = np.array([""] * 10, dtype=object)

    got = objs.astype("S")
    assert_array_equal(got, b"")
    assert got.dtype == "S1"

    got = np.array(objs, dtype="S")
    assert_array_equal(got, b"")
    # TODO: This is arguably weird/wrong, but seems old:
    assert got.dtype == f"S{np.dtype('O').itemsize}"

    got = np.array([[""] * 10, objs], dtype="S")
    assert_array_equal(got, b"")
    assert got.shape == (2, 10)
    assert got.dtype == "S1"
|
pllava/lib/python3.10/site-packages/numpy/core/tests/test_arrayprint.py
ADDED
|
@@ -0,0 +1,1047 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import gc
|
| 3 |
+
from hypothesis import given
|
| 4 |
+
from hypothesis.extra import numpy as hynp
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
from numpy.testing import (
|
| 9 |
+
assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
|
| 10 |
+
assert_raises_regex,
|
| 11 |
+
)
|
| 12 |
+
from numpy.core.arrayprint import _typelessdata
|
| 13 |
+
import textwrap
|
| 14 |
+
|
| 15 |
+
class TestArrayRepr:
    """Tests of repr()/str() for ndarrays, subclasses, and pathological
    object arrays (self-referencing, nested, fieldless)."""

    def test_nan_inf(self):
        # nan/inf print bare, with no sign padding when none is negative.
        x = np.array([np.nan, np.inf])
        assert_equal(repr(x), 'array([nan, inf])')

    def test_subclass(self):
        # The subclass name replaces "array" in the repr prefix, and
        # continuation lines are re-aligned to the new prefix width.
        class sub(np.ndarray): pass

        # one dimensional
        x1d = np.array([1, 2]).view(sub)
        assert_equal(repr(x1d), 'sub([1, 2])')

        # two dimensional
        x2d = np.array([[1, 2], [3, 4]]).view(sub)
        assert_equal(repr(x2d),
            'sub([[1, 2],\n'
            '     [3, 4]])')

        # two dimensional with flexible dtype
        xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
        assert_equal(repr(xstruct),
            "sub([[(1,), (1,)],\n"
            "     [(1,), (1,)]], dtype=[('a', '<i4')])"
        )

    @pytest.mark.xfail(reason="See gh-10544")
    def test_object_subclass(self):
        # A subclass whose __getitem__ rewraps every element in itself.
        class sub(np.ndarray):
            def __new__(cls, inp):
                obj = np.asarray(inp).view(cls)
                return obj

            def __getitem__(self, ind):
                ret = super().__getitem__(ind)
                return sub(ret)

        # test that object + subclass is OK:
        x = sub([None, None])
        assert_equal(repr(x), 'sub([None, None], dtype=object)')
        assert_equal(str(x), '[None None]')

        x = sub([None, sub([None, None])])
        assert_equal(repr(x),
            'sub([None, sub([None, None], dtype=object)], dtype=object)')
        assert_equal(str(x), '[None sub([None, None], dtype=object)]')

    def test_0d_object_subclass(self):
        # make sure that subclasses which return 0ds instead
        # of scalars don't cause infinite recursion in str
        class sub(np.ndarray):
            def __new__(cls, inp):
                obj = np.asarray(inp).view(cls)
                return obj

            def __getitem__(self, ind):
                ret = super().__getitem__(ind)
                return sub(ret)

        x = sub(1)
        assert_equal(repr(x), 'sub(1)')
        assert_equal(str(x), '1')

        x = sub([1, 1])
        assert_equal(repr(x), 'sub([1, 1])')
        assert_equal(str(x), '[1 1]')

        # check it works properly with object arrays too
        x = sub(None)
        assert_equal(repr(x), 'sub(None, dtype=object)')
        assert_equal(str(x), 'None')

        # plus recursive object arrays (even depth > 1)
        y = sub(None)
        x[()] = y
        y[()] = x
        # the cycle is rendered as '...' rather than recursing forever
        assert_equal(repr(x),
            'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
        assert_equal(str(x), '...')
        x[()] = 0  # resolve circular references for garbage collector

        # nested 0d-subclass-object
        x = sub(None)
        x[()] = sub(None)
        assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
        assert_equal(str(x), 'None')

        # gh-10663
        class DuckCounter(np.ndarray):
            def __getitem__(self, item):
                result = super().__getitem__(item)
                if not isinstance(result, DuckCounter):
                    result = result[...].view(DuckCounter)
                return result

            def to_string(self):
                return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')

            def __str__(self):
                if self.shape == ():
                    return self.to_string()
                else:
                    fmt = {'all': lambda x: x.to_string()}
                    return np.array2string(self, formatter=fmt)

        dc = np.arange(5).view(DuckCounter)
        assert_equal(str(dc), "[zero one two many many]")
        assert_equal(str(dc[0]), "zero")

    def test_self_containing(self):
        # Arrays that (directly or mutually) contain themselves must print
        # '...' at the cycle instead of recursing.
        arr0d = np.array(None)
        arr0d[()] = arr0d
        assert_equal(repr(arr0d),
            'array(array(..., dtype=object), dtype=object)')
        arr0d[()] = 0  # resolve recursion for garbage collector

        arr1d = np.array([None, None])
        arr1d[1] = arr1d
        assert_equal(repr(arr1d),
            'array([None, array(..., dtype=object)], dtype=object)')
        arr1d[1] = 0  # resolve recursion for garbage collector

        first = np.array(None)
        second = np.array(None)
        first[()] = second
        second[()] = first
        assert_equal(repr(first),
            'array(array(array(..., dtype=object), dtype=object), dtype=object)')
        first[()] = 0  # resolve circular references for garbage collector

    def test_containing_list(self):
        # printing square brackets directly would be ambiguous, so lists
        # inside object arrays are shown as list([...])
        arr1d = np.array([None, None])
        arr1d[0] = [1, 2]
        arr1d[1] = [3]
        assert_equal(repr(arr1d),
            'array([list([1, 2]), list([3])], dtype=object)')

    def test_void_scalar_recursion(self):
        # gh-9345
        repr(np.void(b'test'))  # RecursionError ?

    def test_fieldless_structured(self):
        # gh-10366
        no_fields = np.dtype([])
        arr_no_fields = np.empty(4, dtype=no_fields)
        assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
class TestComplexArray:
    def test_str(self):
        """Exhaustive str() check for 1-element complex arrays.

        Crosses every real/imag combination of {0, 1, -1, inf, -inf, nan}
        with each complex dtype; the printed form must be identical for all
        three precisions (``wanted`` lists each expected string three times,
        one per dtype, in the same order as ``actual``).
        """
        rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
        cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
        dtypes = [np.complex64, np.cdouble, np.clongdouble]
        actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
        wanted = [
            '[0.+0.j]',    '[0.+0.j]',    '[0.+0.j]',
            '[0.+1.j]',    '[0.+1.j]',    '[0.+1.j]',
            '[0.-1.j]',    '[0.-1.j]',    '[0.-1.j]',
            '[0.+infj]',   '[0.+infj]',   '[0.+infj]',
            '[0.-infj]',   '[0.-infj]',   '[0.-infj]',
            '[0.+nanj]',   '[0.+nanj]',   '[0.+nanj]',
            '[1.+0.j]',    '[1.+0.j]',    '[1.+0.j]',
            '[1.+1.j]',    '[1.+1.j]',    '[1.+1.j]',
            '[1.-1.j]',    '[1.-1.j]',    '[1.-1.j]',
            '[1.+infj]',   '[1.+infj]',   '[1.+infj]',
            '[1.-infj]',   '[1.-infj]',   '[1.-infj]',
            '[1.+nanj]',   '[1.+nanj]',   '[1.+nanj]',
            '[-1.+0.j]',   '[-1.+0.j]',   '[-1.+0.j]',
            '[-1.+1.j]',   '[-1.+1.j]',   '[-1.+1.j]',
            '[-1.-1.j]',   '[-1.-1.j]',   '[-1.-1.j]',
            '[-1.+infj]',  '[-1.+infj]',  '[-1.+infj]',
            '[-1.-infj]',  '[-1.-infj]',  '[-1.-infj]',
            '[-1.+nanj]',  '[-1.+nanj]',  '[-1.+nanj]',
            '[inf+0.j]',   '[inf+0.j]',   '[inf+0.j]',
            '[inf+1.j]',   '[inf+1.j]',   '[inf+1.j]',
            '[inf-1.j]',   '[inf-1.j]',   '[inf-1.j]',
            '[inf+infj]',  '[inf+infj]',  '[inf+infj]',
            '[inf-infj]',  '[inf-infj]',  '[inf-infj]',
            '[inf+nanj]',  '[inf+nanj]',  '[inf+nanj]',
            '[-inf+0.j]',  '[-inf+0.j]',  '[-inf+0.j]',
            '[-inf+1.j]',  '[-inf+1.j]',  '[-inf+1.j]',
            '[-inf-1.j]',  '[-inf-1.j]',  '[-inf-1.j]',
            '[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
            '[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
            '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
            '[nan+0.j]',   '[nan+0.j]',   '[nan+0.j]',
            '[nan+1.j]',   '[nan+1.j]',   '[nan+1.j]',
            '[nan-1.j]',   '[nan-1.j]',   '[nan-1.j]',
            '[nan+infj]',  '[nan+infj]',  '[nan+infj]',
            '[nan-infj]',  '[nan-infj]',  '[nan-infj]',
            '[nan+nanj]',  '[nan+nanj]',  '[nan+nanj]']

        for res, val in zip(actual, wanted):
            assert_equal(res, val)
|
| 209 |
+
|
| 210 |
+
class TestArray2String:
    """Tests of np.array2string: formatters, line wrapping, summarization,
    structured/multiline elements, and the legacy='1.13' printing mode.

    NOTE(review): many expected strings below contain alignment padding
    (multiple spaces) restored from the upstream numpy test file; the diff
    rendering this was recovered from collapsed whitespace — verify against
    upstream before relying on exact spacing.
    """

    def test_basic(self):
        """Basic test of array2string."""
        a = np.arange(3)
        assert_(np.array2string(a) == '[0 1 2]')
        assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
        assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')

    def test_unexpected_kwarg(self):
        # ensure that an appropriate TypeError
        # is raised when array2string receives
        # an unexpected kwarg

        with assert_raises_regex(TypeError, 'nonsense'):
            np.array2string(np.array([1, 2, 3]),
                            nonsense=None)

    def test_format_function(self):
        """Test custom format function for each element in array."""
        def _format_function(x):
            if np.abs(x) < 1:
                return '.'
            elif np.abs(x) < 2:
                return 'o'
            else:
                return 'O'

        x = np.arange(3)
        x_hex = "[0x0 0x1 0x2]"
        x_oct = "[0o0 0o1 0o2]"
        # 'all' and the matching kind-specific key behave identically
        assert_(np.array2string(x, formatter={'all':_format_function}) ==
                "[. o O]")
        assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
                "[. o O]")
        assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
                "[0.0000 1.0000 2.0000]")
        assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
                x_hex)
        assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
                x_oct)

        x = np.arange(3.)
        assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
                "[0.00 1.00 2.00]")
        assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
                "[0.00 1.00 2.00]")

        s = np.array(['abc', 'def'])
        assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
                '[abcabc defdef]')

    def test_structure_format_mixed(self):
        dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
        x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
        assert_equal(np.array2string(x),
                "[('Sarah', [8., 7.]) ('John', [6., 7.])]")

        np.set_printoptions(legacy='1.13')
        try:
            # for issue #5692
            A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
            A[5:].fill(np.datetime64('NaT'))
            assert_equal(
                np.array2string(A),
                textwrap.dedent("""\
                [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
                 ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
                 ('NaT',) ('NaT',) ('NaT',)]""")
            )
        finally:
            # always restore the default, even if the assert fails
            np.set_printoptions(legacy=False)

        # same again, but with non-legacy behavior (NaT entries are padded
        # to the datetime column width)
        assert_equal(
            np.array2string(A),
            textwrap.dedent("""\
            [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
             ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
             ('1970-01-01T00:00:00',) (                'NaT',)
             (                'NaT',) (                'NaT',)
             (                'NaT',) (                'NaT',)]""")
        )

        # and again, with timedeltas
        A = np.full(10, 123456, dtype=[("A", "m8[s]")])
        A[5:].fill(np.datetime64('NaT'))
        assert_equal(
            np.array2string(A),
            textwrap.dedent("""\
            [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
             ( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
        )

    def test_structure_format_int(self):
        # See #8160
        struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
        assert_equal(np.array2string(struct_int),
                "[([  1,  -1],) ([123,   1],)]")
        struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
                dtype=[('B', 'i4', (2, 2))])
        assert_equal(np.array2string(struct_2dint),
                "[([[ 0,  1], [ 2,  3]],) ([[12,  0], [ 0,  0]],)]")

    def test_structure_format_float(self):
        # See #8172
        array_scalar = np.array(
                (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
        assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")

    def test_unstructured_void_repr(self):
        # unstructured voids print their raw bytes, and the repr must
        # round-trip through eval()
        a = np.array([27, 91, 50, 75,  7, 65, 10,  8,
                      27, 91, 51, 49, 109, 82, 101, 100], dtype='u1').view('V8')
        assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
        assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
        assert_equal(repr(a),
            r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
            r"       b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")

        assert_equal(eval(repr(a), vars(np)), a)
        assert_equal(eval(repr(a[0]), vars(np)), a[0])

    def test_edgeitems_kwarg(self):
        # previously the global print options would be taken over the kwarg
        arr = np.zeros(3, int)
        assert_equal(
            np.array2string(arr, edgeitems=1, threshold=0),
            "[0 ... 0]"
        )

    def test_summarize_1d(self):
        A = np.arange(1001)
        strA = '[   0    1    2 ...  998  999 1000]'
        assert_equal(str(A), strA)

        reprA = 'array([   0,    1,    2, ...,  998,  999, 1000])'
        assert_equal(repr(A), reprA)

    def test_summarize_2d(self):
        A = np.arange(1002).reshape(2, 501)
        strA = '[[   0    1    2 ...  498  499  500]\n' \
               ' [ 501  502  503 ...  999 1000 1001]]'
        assert_equal(str(A), strA)

        reprA = 'array([[   0,    1,    2, ...,  498,  499,  500],\n' \
                '       [ 501,  502,  503, ...,  999, 1000, 1001]])'
        assert_equal(repr(A), reprA)

    def test_summarize_structure(self):
        A = (np.arange(2002, dtype="<i8").reshape(2, 1001)
             .view([('i', "<i8", (1001,))]))
        strA = ("[[([   0,    1,    2, ...,  998,  999, 1000],)]\n"
                " [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]]")
        assert_equal(str(A), strA)

        reprA = ("array([[([   0,    1,    2, ...,  998,  999, 1000],)],\n"
                 "       [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]],\n"
                 "      dtype=[('i', '<i8', (1001,))])")
        assert_equal(repr(A), reprA)

        B = np.ones(2002, dtype=">i8").view([('i', ">i8", (2, 1001))])
        strB = "[([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)]"
        assert_equal(str(B), strB)

        reprB = (
            "array([([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)],\n"
            "      dtype=[('i', '>i8', (2, 1001))])"
        )
        assert_equal(repr(B), reprB)

        C = (np.arange(22, dtype="<i8").reshape(2, 11)
             .view([('i1', "<i8"), ('i10', "<i8", (10,))]))
        strC = "[[( 0, [ 1, ..., 10])]\n [(11, [12, ..., 21])]]"
        assert_equal(np.array2string(C, threshold=1, edgeitems=1), strC)

    def test_linewidth(self):
        a = np.full(6, 1)

        def make_str(a, width, **kw):
            return np.array2string(a, separator="", max_line_width=width, **kw)

        # legacy mode wraps one character later than the modern mode
        assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
        assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
        assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
                                                    ' 11]')

        assert_equal(make_str(a, 8), '[111111]')
        assert_equal(make_str(a, 7), '[11111\n'
                                     ' 1]')
        assert_equal(make_str(a, 5), '[111\n'
                                     ' 111]')

        b = a[None, None, :]

        assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
        assert_equal(make_str(b, 9, legacy='1.13'),  '[[[111111]]]')
        assert_equal(make_str(b, 8, legacy='1.13'),  '[[[11111\n'
                                                     '   1]]]')

        assert_equal(make_str(b, 12), '[[[111111]]]')
        assert_equal(make_str(b, 9),  '[[[111\n'
                                      '   111]]]')
        assert_equal(make_str(b, 8),  '[[[11\n'
                                      '   11\n'
                                      '   11]]]')

    def test_wide_element(self):
        # a single element wider than max_line_width is never split
        a = np.array(['xxxxx'])
        assert_equal(
            np.array2string(a, max_line_width=5),
            "['xxxxx']"
        )
        assert_equal(
            np.array2string(a, max_line_width=5, legacy='1.13'),
            "[ 'xxxxx']"
        )

    def test_multiline_repr(self):
        # elements whose repr spans multiple lines get their continuation
        # lines indented to stay aligned inside the array output
        class MultiLine:
            def __repr__(self):
                return "Line 1\nLine 2"

        a = np.array([[None, MultiLine()], [MultiLine(), None]])

        assert_equal(
            np.array2string(a),
            '[[None Line 1\n'
            '       Line 2]\n'
            ' [Line 1\n'
            '  Line 2 None]]'
        )
        assert_equal(
            np.array2string(a, max_line_width=5),
            '[[None\n'
            '  Line 1\n'
            '  Line 2]\n'
            ' [Line 1\n'
            '  Line 2\n'
            '  None]]'
        )
        assert_equal(
            repr(a),
            'array([[None, Line 1\n'
            '              Line 2],\n'
            '       [Line 1\n'
            '        Line 2, None]], dtype=object)'
        )

        class MultiLineLong:
            def __repr__(self):
                return "Line 1\nLooooooooooongestLine2\nLongerLine 3"

        a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]])
        assert_equal(
            repr(a),
            'array([[None, Line 1\n'
            '              LooooooooooongestLine2\n'
            '              LongerLine 3          ],\n'
            '       [Line 1\n'
            '        LooooooooooongestLine2\n'
            '        LongerLine 3          , None]], dtype=object)'
        )
        assert_equal(
            np.array_repr(a, 20),
            'array([[None,\n'
            '        Line 1\n'
            '        LooooooooooongestLine2\n'
            '        LongerLine 3          ],\n'
            '       [Line 1\n'
            '        LooooooooooongestLine2\n'
            '        LongerLine 3          ,\n'
            '        None]],\n'
            '      dtype=object)'
        )

    def test_nested_array_repr(self):
        a = np.empty((2, 2), dtype=object)
        a[0, 0] = np.eye(2)
        a[0, 1] = np.eye(3)
        a[1, 0] = None
        a[1, 1] = np.ones((3, 1))
        assert_equal(
            repr(a),
            'array([[array([[1., 0.],\n'
            '               [0., 1.]]), array([[1., 0., 0.],\n'
            '                                  [0., 1., 0.],\n'
            '                                  [0., 0., 1.]])],\n'
            '       [None, array([[1.],\n'
            '                     [1.],\n'
            '                     [1.]])]], dtype=object)'
        )

    @given(hynp.from_dtype(np.dtype("U")))
    def test_any_text(self, text):
        # This test checks that, given any value that can be represented in an
        # array of dtype("U") (i.e. unicode string), ...
        a = np.array([text, text, text])
        # casting a list of them to an array does not e.g. truncate the value
        assert_equal(a[0], text)
        # and that np.array2string puts a newline in the expected location
        expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
        result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
        assert_equal(result, expected_repr)

    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_refcount(self):
        # make sure we do not hold references to the array due to a recursive
        # closure (gh-10620)
        gc.disable()
        a = np.arange(2)
        r1 = sys.getrefcount(a)
        np.array2string(a)
        np.array2string(a)
        r2 = sys.getrefcount(a)
        gc.collect()
        gc.enable()
        assert_(r1 == r2)
|
| 526 |
+
|
| 527 |
+
class TestPrintOptions:
|
| 528 |
+
"""Test getting and setting global print options."""
|
| 529 |
+
|
| 530 |
+
    def setup_method(self):
        # Snapshot the global print options so each test can mutate them
        # freely; teardown_method restores this snapshot.
        self.oldopts = np.get_printoptions()
|
| 532 |
+
|
| 533 |
+
    def teardown_method(self):
        # Restore the print options captured in setup_method so option
        # changes never leak into other tests.
        np.set_printoptions(**self.oldopts)
|
| 535 |
+
|
| 536 |
+
    def test_basic(self):
        # Default precision (8) pads all elements to a common width;
        # lowering precision shortens the column accordingly.
        # NOTE(review): interior spacing restored from upstream numpy — the
        # diff rendering collapsed runs of spaces; confirm against upstream.
        x = np.array([1.5, 0, 1.234567890])
        assert_equal(repr(x), "array([1.5       , 0.        , 1.23456789])")
        np.set_printoptions(precision=4)
        assert_equal(repr(x), "array([1.5   , 0.    , 1.2346])")
|
| 541 |
+
|
| 542 |
+
    def test_precision_zero(self):
        # precision=0 rounds floats to integers but keeps the trailing '.';
        # plain ints and complex values are unaffected by rounding rules.
        np.set_printoptions(precision=0)
        for values, string in (
                ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
                ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
                ([100.], "100."), ([.2, -1, 122.51], "  0.,  -1., 123."),
                ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
            x = np.array(values)
            assert_equal(repr(x), "array([%s])" % string)
|
| 551 |
+
|
| 552 |
+
    def test_formatter(self):
        # A global 'all' formatter applies to every element of every dtype.
        x = np.arange(3)
        np.set_printoptions(formatter={'all':lambda x: str(x-1)})
        assert_equal(repr(x), "array([-1, 0, 1])")
|
| 556 |
+
|
| 557 |
+
    def test_formatter_reset(self):
        # Passing None for a formatter key (or a broader kind covering it)
        # resets that formatter back to the default.
        x = np.arange(3)
        np.set_printoptions(formatter={'all':lambda x: str(x-1)})
        assert_equal(repr(x), "array([-1, 0, 1])")
        np.set_printoptions(formatter={'int':None})
        assert_equal(repr(x), "array([0, 1, 2])")

        np.set_printoptions(formatter={'all':lambda x: str(x-1)})
        assert_equal(repr(x), "array([-1, 0, 1])")
        np.set_printoptions(formatter={'all':None})
        assert_equal(repr(x), "array([0, 1, 2])")

        # 'int_kind' resets a formatter installed under the narrower 'int'
        np.set_printoptions(formatter={'int':lambda x: str(x-1)})
        assert_equal(repr(x), "array([-1, 0, 1])")
        np.set_printoptions(formatter={'int_kind':None})
        assert_equal(repr(x), "array([0, 1, 2])")

        x = np.arange(3.)
        np.set_printoptions(formatter={'float':lambda x: str(x-1)})
        assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
        np.set_printoptions(formatter={'float_kind':None})
        assert_equal(repr(x), "array([0., 1., 2.])")
|
| 579 |
+
|
| 580 |
+
    def test_0d_arrays(self):
        # str() of a 0d array is the bare scalar; repr() keeps array(...)
        assert_equal(str(np.array('café', '<U4')), 'café')

        assert_equal(repr(np.array('café', '<U4')),
                     "array('café', dtype='<U4')")
        assert_equal(str(np.array('test', np.str_)), 'test')

        a = np.zeros(1, dtype=[('a', '<i4', (3,))])
        assert_equal(str(a[0]), '([0, 0, 0],)')

        assert_equal(repr(np.datetime64('2005-02-25')[...]),
                     "array('2005-02-25', dtype='datetime64[D]')")

        assert_equal(repr(np.timedelta64('10', 'Y')[...]),
                     "array(10, dtype='timedelta64[Y]')")

        # repr of 0d arrays is affected by printoptions
        x = np.array(1)
        np.set_printoptions(formatter={'all':lambda x: "test"})
        assert_equal(repr(x), "array(test)")
        # str is unaffected
        assert_equal(str(x), "1")

        # check `style` arg raises
        assert_warns(DeprecationWarning, np.array2string,
                     np.array(1.), style=repr)
        # but not in legacy mode
        np.array2string(np.array(1.), style=repr, legacy='1.13')
        # gh-10934 style was broken in legacy mode, check it works
        np.array2string(np.array(1.), legacy='1.13')
|
| 610 |
+
|
| 611 |
+
def test_float_spacing(self):
|
| 612 |
+
x = np.array([1., 2., 3.])
|
| 613 |
+
y = np.array([1., 2., -10.])
|
| 614 |
+
z = np.array([100., 2., -1.])
|
| 615 |
+
w = np.array([-100., 2., 1.])
|
| 616 |
+
|
| 617 |
+
assert_equal(repr(x), 'array([1., 2., 3.])')
|
| 618 |
+
assert_equal(repr(y), 'array([ 1., 2., -10.])')
|
| 619 |
+
assert_equal(repr(np.array(y[0])), 'array(1.)')
|
| 620 |
+
assert_equal(repr(np.array(y[-1])), 'array(-10.)')
|
| 621 |
+
assert_equal(repr(z), 'array([100., 2., -1.])')
|
| 622 |
+
assert_equal(repr(w), 'array([-100., 2., 1.])')
|
| 623 |
+
|
| 624 |
+
assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
|
| 625 |
+
assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
|
| 626 |
+
|
| 627 |
+
x = np.array([np.inf, 100000, 1.1234])
|
| 628 |
+
y = np.array([np.inf, 100000, -1.1234])
|
| 629 |
+
z = np.array([np.inf, 1.1234, -1e120])
|
| 630 |
+
np.set_printoptions(precision=2)
|
| 631 |
+
assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
|
| 632 |
+
assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
|
| 633 |
+
assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
|
| 634 |
+
|
| 635 |
+
def test_bool_spacing(self):
|
| 636 |
+
assert_equal(repr(np.array([True, True])),
|
| 637 |
+
'array([ True, True])')
|
| 638 |
+
assert_equal(repr(np.array([True, False])),
|
| 639 |
+
'array([ True, False])')
|
| 640 |
+
assert_equal(repr(np.array([True])),
|
| 641 |
+
'array([ True])')
|
| 642 |
+
assert_equal(repr(np.array(True)),
|
| 643 |
+
'array(True)')
|
| 644 |
+
assert_equal(repr(np.array(False)),
|
| 645 |
+
'array(False)')
|
| 646 |
+
|
| 647 |
+
def test_sign_spacing(self):
|
| 648 |
+
a = np.arange(4.)
|
| 649 |
+
b = np.array([1.234e9])
|
| 650 |
+
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
|
| 651 |
+
|
| 652 |
+
assert_equal(repr(a), 'array([0., 1., 2., 3.])')
|
| 653 |
+
assert_equal(repr(np.array(1.)), 'array(1.)')
|
| 654 |
+
assert_equal(repr(b), 'array([1.234e+09])')
|
| 655 |
+
assert_equal(repr(np.array([0.])), 'array([0.])')
|
| 656 |
+
assert_equal(repr(c),
|
| 657 |
+
"array([1. +1.j , 1.12345679+1.12345679j])")
|
| 658 |
+
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
|
| 659 |
+
|
| 660 |
+
np.set_printoptions(sign=' ')
|
| 661 |
+
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
|
| 662 |
+
assert_equal(repr(np.array(1.)), 'array( 1.)')
|
| 663 |
+
assert_equal(repr(b), 'array([ 1.234e+09])')
|
| 664 |
+
assert_equal(repr(c),
|
| 665 |
+
"array([ 1. +1.j , 1.12345679+1.12345679j])")
|
| 666 |
+
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
|
| 667 |
+
|
| 668 |
+
np.set_printoptions(sign='+')
|
| 669 |
+
assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
|
| 670 |
+
assert_equal(repr(np.array(1.)), 'array(+1.)')
|
| 671 |
+
assert_equal(repr(b), 'array([+1.234e+09])')
|
| 672 |
+
assert_equal(repr(c),
|
| 673 |
+
"array([+1. +1.j , +1.12345679+1.12345679j])")
|
| 674 |
+
|
| 675 |
+
np.set_printoptions(legacy='1.13')
|
| 676 |
+
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
|
| 677 |
+
assert_equal(repr(b), 'array([ 1.23400000e+09])')
|
| 678 |
+
assert_equal(repr(-b), 'array([ -1.23400000e+09])')
|
| 679 |
+
assert_equal(repr(np.array(1.)), 'array(1.0)')
|
| 680 |
+
assert_equal(repr(np.array([0.])), 'array([ 0.])')
|
| 681 |
+
assert_equal(repr(c),
|
| 682 |
+
"array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
|
| 683 |
+
# gh-10383
|
| 684 |
+
assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
|
| 685 |
+
|
| 686 |
+
assert_raises(TypeError, np.set_printoptions, wrongarg=True)
|
| 687 |
+
|
| 688 |
+
def test_float_overflow_nowarn(self):
|
| 689 |
+
# make sure internal computations in FloatingFormat don't
|
| 690 |
+
# warn about overflow
|
| 691 |
+
repr(np.array([1e4, 0.1], dtype='f2'))
|
| 692 |
+
|
| 693 |
+
def test_sign_spacing_structured(self):
|
| 694 |
+
a = np.ones(2, dtype='<f,<f')
|
| 695 |
+
assert_equal(repr(a),
|
| 696 |
+
"array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
|
| 697 |
+
assert_equal(repr(a[0]), "(1., 1.)")
|
| 698 |
+
|
| 699 |
+
def test_floatmode(self):
|
| 700 |
+
x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
|
| 701 |
+
0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
|
| 702 |
+
y = np.array([0.2918820979355541, 0.5064172631089138,
|
| 703 |
+
0.2848750619642916, 0.4342965294660567,
|
| 704 |
+
0.7326538397312751, 0.3459503329096204,
|
| 705 |
+
0.0862072768214508, 0.39112753029631175],
|
| 706 |
+
dtype=np.float64)
|
| 707 |
+
z = np.arange(6, dtype=np.float16)/10
|
| 708 |
+
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
|
| 709 |
+
|
| 710 |
+
# also make sure 1e23 is right (is between two fp numbers)
|
| 711 |
+
w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
|
| 712 |
+
# note: we construct w from the strings `1eXX` instead of doing
|
| 713 |
+
# `10.**arange(24)` because it turns out the two are not equivalent in
|
| 714 |
+
# python. On some architectures `1e23 != 10.**23`.
|
| 715 |
+
wp = np.array([1.234e1, 1e2, 1e123])
|
| 716 |
+
|
| 717 |
+
# unique mode
|
| 718 |
+
np.set_printoptions(floatmode='unique')
|
| 719 |
+
assert_equal(repr(x),
|
| 720 |
+
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
|
| 721 |
+
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
|
| 722 |
+
assert_equal(repr(y),
|
| 723 |
+
"array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
|
| 724 |
+
" 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
|
| 725 |
+
" 0.0862072768214508 , 0.39112753029631175])")
|
| 726 |
+
assert_equal(repr(z),
|
| 727 |
+
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
|
| 728 |
+
assert_equal(repr(w),
|
| 729 |
+
"array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
|
| 730 |
+
" 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
|
| 731 |
+
" 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
|
| 732 |
+
" 1.e+24])")
|
| 733 |
+
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
|
| 734 |
+
assert_equal(repr(c),
|
| 735 |
+
"array([1. +1.j , 1.123456789+1.123456789j])")
|
| 736 |
+
|
| 737 |
+
# maxprec mode, precision=8
|
| 738 |
+
np.set_printoptions(floatmode='maxprec', precision=8)
|
| 739 |
+
assert_equal(repr(x),
|
| 740 |
+
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
|
| 741 |
+
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
|
| 742 |
+
assert_equal(repr(y),
|
| 743 |
+
"array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
|
| 744 |
+
" 0.34595033, 0.08620728, 0.39112753])")
|
| 745 |
+
assert_equal(repr(z),
|
| 746 |
+
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
|
| 747 |
+
assert_equal(repr(w[::5]),
|
| 748 |
+
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
|
| 749 |
+
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
|
| 750 |
+
assert_equal(repr(c),
|
| 751 |
+
"array([1. +1.j , 1.12345679+1.12345679j])")
|
| 752 |
+
|
| 753 |
+
# fixed mode, precision=4
|
| 754 |
+
np.set_printoptions(floatmode='fixed', precision=4)
|
| 755 |
+
assert_equal(repr(x),
|
| 756 |
+
"array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
|
| 757 |
+
" 0.2383, 0.4226], dtype=float16)")
|
| 758 |
+
assert_equal(repr(y),
|
| 759 |
+
"array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
|
| 760 |
+
assert_equal(repr(z),
|
| 761 |
+
"array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
|
| 762 |
+
assert_equal(repr(w[::5]),
|
| 763 |
+
"array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
|
| 764 |
+
assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
|
| 765 |
+
assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
|
| 766 |
+
assert_equal(repr(c),
|
| 767 |
+
"array([1.0000+1.0000j, 1.1235+1.1235j])")
|
| 768 |
+
# for larger precision, representation error becomes more apparent:
|
| 769 |
+
np.set_printoptions(floatmode='fixed', precision=8)
|
| 770 |
+
assert_equal(repr(z),
|
| 771 |
+
"array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
|
| 772 |
+
" 0.50000000], dtype=float16)")
|
| 773 |
+
|
| 774 |
+
# maxprec_equal mode, precision=8
|
| 775 |
+
np.set_printoptions(floatmode='maxprec_equal', precision=8)
|
| 776 |
+
assert_equal(repr(x),
|
| 777 |
+
"array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
|
| 778 |
+
" 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
|
| 779 |
+
assert_equal(repr(y),
|
| 780 |
+
"array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
|
| 781 |
+
" 0.34595033, 0.08620728, 0.39112753])")
|
| 782 |
+
assert_equal(repr(z),
|
| 783 |
+
"array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
|
| 784 |
+
assert_equal(repr(w[::5]),
|
| 785 |
+
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
|
| 786 |
+
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
|
| 787 |
+
assert_equal(repr(c),
|
| 788 |
+
"array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
|
| 789 |
+
|
| 790 |
+
# test unique special case (gh-18609)
|
| 791 |
+
a = np.float64.fromhex('-1p-97')
|
| 792 |
+
assert_equal(np.float64(np.array2string(a, floatmode='unique')), a)
|
| 793 |
+
|
| 794 |
+
def test_legacy_mode_scalars(self):
|
| 795 |
+
# in legacy mode, str of floats get truncated, and complex scalars
|
| 796 |
+
# use * for non-finite imaginary part
|
| 797 |
+
np.set_printoptions(legacy='1.13')
|
| 798 |
+
assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
|
| 799 |
+
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
|
| 800 |
+
|
| 801 |
+
np.set_printoptions(legacy=False)
|
| 802 |
+
assert_equal(str(np.float64(1.123456789123456789)),
|
| 803 |
+
'1.1234567891234568')
|
| 804 |
+
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
|
| 805 |
+
|
| 806 |
+
def test_legacy_stray_comma(self):
|
| 807 |
+
np.set_printoptions(legacy='1.13')
|
| 808 |
+
assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
|
| 809 |
+
|
| 810 |
+
np.set_printoptions(legacy=False)
|
| 811 |
+
assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
|
| 812 |
+
|
| 813 |
+
def test_dtype_linewidth_wrapping(self):
|
| 814 |
+
np.set_printoptions(linewidth=75)
|
| 815 |
+
assert_equal(repr(np.arange(10,20., dtype='f4')),
|
| 816 |
+
"array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
|
| 817 |
+
assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
|
| 818 |
+
array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
|
| 819 |
+
dtype=float32)"""))
|
| 820 |
+
|
| 821 |
+
styp = '<U4'
|
| 822 |
+
assert_equal(repr(np.ones(3, dtype=styp)),
|
| 823 |
+
"array(['1', '1', '1'], dtype='{}')".format(styp))
|
| 824 |
+
assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
|
| 825 |
+
array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
|
| 826 |
+
dtype='{}')""".format(styp)))
|
| 827 |
+
|
| 828 |
+
@pytest.mark.parametrize(
|
| 829 |
+
['native'],
|
| 830 |
+
[
|
| 831 |
+
('bool',),
|
| 832 |
+
('uint8',),
|
| 833 |
+
('uint16',),
|
| 834 |
+
('uint32',),
|
| 835 |
+
('uint64',),
|
| 836 |
+
('int8',),
|
| 837 |
+
('int16',),
|
| 838 |
+
('int32',),
|
| 839 |
+
('int64',),
|
| 840 |
+
('float16',),
|
| 841 |
+
('float32',),
|
| 842 |
+
('float64',),
|
| 843 |
+
('U1',), # 4-byte width string
|
| 844 |
+
],
|
| 845 |
+
)
|
| 846 |
+
def test_dtype_endianness_repr(self, native):
|
| 847 |
+
'''
|
| 848 |
+
there was an issue where
|
| 849 |
+
repr(array([0], dtype='<u2')) and repr(array([0], dtype='>u2'))
|
| 850 |
+
both returned the same thing:
|
| 851 |
+
array([0], dtype=uint16)
|
| 852 |
+
even though their dtypes have different endianness.
|
| 853 |
+
'''
|
| 854 |
+
native_dtype = np.dtype(native)
|
| 855 |
+
non_native_dtype = native_dtype.newbyteorder()
|
| 856 |
+
non_native_repr = repr(np.array([1], non_native_dtype))
|
| 857 |
+
native_repr = repr(np.array([1], native_dtype))
|
| 858 |
+
# preserve the sensible default of only showing dtype if nonstandard
|
| 859 |
+
assert ('dtype' in native_repr) ^ (native_dtype in _typelessdata),\
|
| 860 |
+
("an array's repr should show dtype if and only if the type "
|
| 861 |
+
'of the array is NOT one of the standard types '
|
| 862 |
+
'(e.g., int32, bool, float64).')
|
| 863 |
+
if non_native_dtype.itemsize > 1:
|
| 864 |
+
# if the type is >1 byte, the non-native endian version
|
| 865 |
+
# must show endianness.
|
| 866 |
+
assert non_native_repr != native_repr
|
| 867 |
+
assert f"dtype='{non_native_dtype.byteorder}" in non_native_repr
|
| 868 |
+
|
| 869 |
+
def test_linewidth_repr(self):
|
| 870 |
+
a = np.full(7, fill_value=2)
|
| 871 |
+
np.set_printoptions(linewidth=17)
|
| 872 |
+
assert_equal(
|
| 873 |
+
repr(a),
|
| 874 |
+
textwrap.dedent("""\
|
| 875 |
+
array([2, 2, 2,
|
| 876 |
+
2, 2, 2,
|
| 877 |
+
2])""")
|
| 878 |
+
)
|
| 879 |
+
np.set_printoptions(linewidth=17, legacy='1.13')
|
| 880 |
+
assert_equal(
|
| 881 |
+
repr(a),
|
| 882 |
+
textwrap.dedent("""\
|
| 883 |
+
array([2, 2, 2,
|
| 884 |
+
2, 2, 2, 2])""")
|
| 885 |
+
)
|
| 886 |
+
|
| 887 |
+
a = np.full(8, fill_value=2)
|
| 888 |
+
|
| 889 |
+
np.set_printoptions(linewidth=18, legacy=False)
|
| 890 |
+
assert_equal(
|
| 891 |
+
repr(a),
|
| 892 |
+
textwrap.dedent("""\
|
| 893 |
+
array([2, 2, 2,
|
| 894 |
+
2, 2, 2,
|
| 895 |
+
2, 2])""")
|
| 896 |
+
)
|
| 897 |
+
|
| 898 |
+
np.set_printoptions(linewidth=18, legacy='1.13')
|
| 899 |
+
assert_equal(
|
| 900 |
+
repr(a),
|
| 901 |
+
textwrap.dedent("""\
|
| 902 |
+
array([2, 2, 2, 2,
|
| 903 |
+
2, 2, 2, 2])""")
|
| 904 |
+
)
|
| 905 |
+
|
| 906 |
+
def test_linewidth_str(self):
|
| 907 |
+
a = np.full(18, fill_value=2)
|
| 908 |
+
np.set_printoptions(linewidth=18)
|
| 909 |
+
assert_equal(
|
| 910 |
+
str(a),
|
| 911 |
+
textwrap.dedent("""\
|
| 912 |
+
[2 2 2 2 2 2 2 2
|
| 913 |
+
2 2 2 2 2 2 2 2
|
| 914 |
+
2 2]""")
|
| 915 |
+
)
|
| 916 |
+
np.set_printoptions(linewidth=18, legacy='1.13')
|
| 917 |
+
assert_equal(
|
| 918 |
+
str(a),
|
| 919 |
+
textwrap.dedent("""\
|
| 920 |
+
[2 2 2 2 2 2 2 2 2
|
| 921 |
+
2 2 2 2 2 2 2 2 2]""")
|
| 922 |
+
)
|
| 923 |
+
|
| 924 |
+
def test_edgeitems(self):
|
| 925 |
+
np.set_printoptions(edgeitems=1, threshold=1)
|
| 926 |
+
a = np.arange(27).reshape((3, 3, 3))
|
| 927 |
+
assert_equal(
|
| 928 |
+
repr(a),
|
| 929 |
+
textwrap.dedent("""\
|
| 930 |
+
array([[[ 0, ..., 2],
|
| 931 |
+
...,
|
| 932 |
+
[ 6, ..., 8]],
|
| 933 |
+
|
| 934 |
+
...,
|
| 935 |
+
|
| 936 |
+
[[18, ..., 20],
|
| 937 |
+
...,
|
| 938 |
+
[24, ..., 26]]])""")
|
| 939 |
+
)
|
| 940 |
+
|
| 941 |
+
b = np.zeros((3, 3, 1, 1))
|
| 942 |
+
assert_equal(
|
| 943 |
+
repr(b),
|
| 944 |
+
textwrap.dedent("""\
|
| 945 |
+
array([[[[0.]],
|
| 946 |
+
|
| 947 |
+
...,
|
| 948 |
+
|
| 949 |
+
[[0.]]],
|
| 950 |
+
|
| 951 |
+
|
| 952 |
+
...,
|
| 953 |
+
|
| 954 |
+
|
| 955 |
+
[[[0.]],
|
| 956 |
+
|
| 957 |
+
...,
|
| 958 |
+
|
| 959 |
+
[[0.]]]])""")
|
| 960 |
+
)
|
| 961 |
+
|
| 962 |
+
# 1.13 had extra trailing spaces, and was missing newlines
|
| 963 |
+
np.set_printoptions(legacy='1.13')
|
| 964 |
+
|
| 965 |
+
assert_equal(
|
| 966 |
+
repr(a),
|
| 967 |
+
textwrap.dedent("""\
|
| 968 |
+
array([[[ 0, ..., 2],
|
| 969 |
+
...,
|
| 970 |
+
[ 6, ..., 8]],
|
| 971 |
+
|
| 972 |
+
...,
|
| 973 |
+
[[18, ..., 20],
|
| 974 |
+
...,
|
| 975 |
+
[24, ..., 26]]])""")
|
| 976 |
+
)
|
| 977 |
+
|
| 978 |
+
assert_equal(
|
| 979 |
+
repr(b),
|
| 980 |
+
textwrap.dedent("""\
|
| 981 |
+
array([[[[ 0.]],
|
| 982 |
+
|
| 983 |
+
...,
|
| 984 |
+
[[ 0.]]],
|
| 985 |
+
|
| 986 |
+
|
| 987 |
+
...,
|
| 988 |
+
[[[ 0.]],
|
| 989 |
+
|
| 990 |
+
...,
|
| 991 |
+
[[ 0.]]]])""")
|
| 992 |
+
)
|
| 993 |
+
|
| 994 |
+
def test_edgeitems_structured(self):
|
| 995 |
+
np.set_printoptions(edgeitems=1, threshold=1)
|
| 996 |
+
A = np.arange(5*2*3, dtype="<i8").view([('i', "<i8", (5, 2, 3))])
|
| 997 |
+
reprA = (
|
| 998 |
+
"array([([[[ 0, ..., 2], [ 3, ..., 5]], ..., "
|
| 999 |
+
"[[24, ..., 26], [27, ..., 29]]],)],\n"
|
| 1000 |
+
" dtype=[('i', '<i8', (5, 2, 3))])"
|
| 1001 |
+
)
|
| 1002 |
+
assert_equal(repr(A), reprA)
|
| 1003 |
+
|
| 1004 |
+
def test_bad_args(self):
|
| 1005 |
+
assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
|
| 1006 |
+
assert_raises(TypeError, np.set_printoptions, threshold='1')
|
| 1007 |
+
assert_raises(TypeError, np.set_printoptions, threshold=b'1')
|
| 1008 |
+
|
| 1009 |
+
assert_raises(TypeError, np.set_printoptions, precision='1')
|
| 1010 |
+
assert_raises(TypeError, np.set_printoptions, precision=1.5)
|
| 1011 |
+
|
| 1012 |
+
def test_unicode_object_array():
|
| 1013 |
+
expected = "array(['é'], dtype=object)"
|
| 1014 |
+
x = np.array(['\xe9'], dtype=object)
|
| 1015 |
+
assert_equal(repr(x), expected)
|
| 1016 |
+
|
| 1017 |
+
|
| 1018 |
+
class TestContextManager:
|
| 1019 |
+
def test_ctx_mgr(self):
|
| 1020 |
+
# test that context manager actually works
|
| 1021 |
+
with np.printoptions(precision=2):
|
| 1022 |
+
s = str(np.array([2.0]) / 3)
|
| 1023 |
+
assert_equal(s, '[0.67]')
|
| 1024 |
+
|
| 1025 |
+
def test_ctx_mgr_restores(self):
|
| 1026 |
+
# test that print options are actually restrored
|
| 1027 |
+
opts = np.get_printoptions()
|
| 1028 |
+
with np.printoptions(precision=opts['precision'] - 1,
|
| 1029 |
+
linewidth=opts['linewidth'] - 4):
|
| 1030 |
+
pass
|
| 1031 |
+
assert_equal(np.get_printoptions(), opts)
|
| 1032 |
+
|
| 1033 |
+
def test_ctx_mgr_exceptions(self):
|
| 1034 |
+
# test that print options are restored even if an exception is raised
|
| 1035 |
+
opts = np.get_printoptions()
|
| 1036 |
+
try:
|
| 1037 |
+
with np.printoptions(precision=2, linewidth=11):
|
| 1038 |
+
raise ValueError
|
| 1039 |
+
except ValueError:
|
| 1040 |
+
pass
|
| 1041 |
+
assert_equal(np.get_printoptions(), opts)
|
| 1042 |
+
|
| 1043 |
+
def test_ctx_mgr_as_smth(self):
|
| 1044 |
+
opts = {"precision": 2}
|
| 1045 |
+
with np.printoptions(**opts) as ctx:
|
| 1046 |
+
saved_opts = ctx.copy()
|
| 1047 |
+
assert_equal({k: saved_opts[k] for k in opts}, opts)
|
pllava/lib/python3.10/site-packages/numpy/core/tests/test_custom_dtypes.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import assert_array_equal
|
| 5 |
+
from numpy.core._multiarray_umath import (
|
| 6 |
+
_discover_array_parameters as discover_array_params, _get_sfloat_dtype)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
SF = _get_sfloat_dtype()
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TestSFloat:
|
| 13 |
+
def _get_array(self, scaling, aligned=True):
|
| 14 |
+
if not aligned:
|
| 15 |
+
a = np.empty(3*8 + 1, dtype=np.uint8)[1:]
|
| 16 |
+
a = a.view(np.float64)
|
| 17 |
+
a[:] = [1., 2., 3.]
|
| 18 |
+
else:
|
| 19 |
+
a = np.array([1., 2., 3.])
|
| 20 |
+
|
| 21 |
+
a *= 1./scaling # the casting code also uses the reciprocal.
|
| 22 |
+
return a.view(SF(scaling))
|
| 23 |
+
|
| 24 |
+
def test_sfloat_rescaled(self):
|
| 25 |
+
sf = SF(1.)
|
| 26 |
+
sf2 = sf.scaled_by(2.)
|
| 27 |
+
assert sf2.get_scaling() == 2.
|
| 28 |
+
sf6 = sf2.scaled_by(3.)
|
| 29 |
+
assert sf6.get_scaling() == 6.
|
| 30 |
+
|
| 31 |
+
def test_class_discovery(self):
|
| 32 |
+
# This does not test much, since we always discover the scaling as 1.
|
| 33 |
+
# But most of NumPy (when writing) does not understand DType classes
|
| 34 |
+
dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
|
| 35 |
+
assert dt == SF(1.)
|
| 36 |
+
|
| 37 |
+
@pytest.mark.parametrize("scaling", [1., -1., 2.])
|
| 38 |
+
def test_scaled_float_from_floats(self, scaling):
|
| 39 |
+
a = np.array([1., 2., 3.], dtype=SF(scaling))
|
| 40 |
+
|
| 41 |
+
assert a.dtype.get_scaling() == scaling
|
| 42 |
+
assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
|
| 43 |
+
|
| 44 |
+
def test_repr(self):
|
| 45 |
+
# Check the repr, mainly to cover the code paths:
|
| 46 |
+
assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"
|
| 47 |
+
|
| 48 |
+
def test_dtype_name(self):
|
| 49 |
+
assert SF(1.).name == "_ScaledFloatTestDType64"
|
| 50 |
+
|
| 51 |
+
@pytest.mark.parametrize("scaling", [1., -1., 2.])
|
| 52 |
+
def test_sfloat_from_float(self, scaling):
|
| 53 |
+
a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))
|
| 54 |
+
|
| 55 |
+
assert a.dtype.get_scaling() == scaling
|
| 56 |
+
assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
|
| 57 |
+
|
| 58 |
+
@pytest.mark.parametrize("aligned", [True, False])
|
| 59 |
+
@pytest.mark.parametrize("scaling", [1., -1., 2.])
|
| 60 |
+
def test_sfloat_getitem(self, aligned, scaling):
|
| 61 |
+
a = self._get_array(1., aligned)
|
| 62 |
+
assert a.tolist() == [1., 2., 3.]
|
| 63 |
+
|
| 64 |
+
@pytest.mark.parametrize("aligned", [True, False])
|
| 65 |
+
def test_sfloat_casts(self, aligned):
|
| 66 |
+
a = self._get_array(1., aligned)
|
| 67 |
+
|
| 68 |
+
assert np.can_cast(a, SF(-1.), casting="equiv")
|
| 69 |
+
assert not np.can_cast(a, SF(-1.), casting="no")
|
| 70 |
+
na = a.astype(SF(-1.))
|
| 71 |
+
assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))
|
| 72 |
+
|
| 73 |
+
assert np.can_cast(a, SF(2.), casting="same_kind")
|
| 74 |
+
assert not np.can_cast(a, SF(2.), casting="safe")
|
| 75 |
+
a2 = a.astype(SF(2.))
|
| 76 |
+
assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))
|
| 77 |
+
|
| 78 |
+
@pytest.mark.parametrize("aligned", [True, False])
|
| 79 |
+
def test_sfloat_cast_internal_errors(self, aligned):
|
| 80 |
+
a = self._get_array(2e300, aligned)
|
| 81 |
+
|
| 82 |
+
with pytest.raises(TypeError,
|
| 83 |
+
match="error raised inside the core-loop: non-finite factor!"):
|
| 84 |
+
a.astype(SF(2e-300))
|
| 85 |
+
|
| 86 |
+
def test_sfloat_promotion(self):
|
| 87 |
+
assert np.result_type(SF(2.), SF(3.)) == SF(3.)
|
| 88 |
+
assert np.result_type(SF(3.), SF(2.)) == SF(3.)
|
| 89 |
+
# Float64 -> SF(1.) and then promotes normally, so both of this work:
|
| 90 |
+
assert np.result_type(SF(3.), np.float64) == SF(3.)
|
| 91 |
+
assert np.result_type(np.float64, SF(0.5)) == SF(1.)
|
| 92 |
+
|
| 93 |
+
# Test an undefined promotion:
|
| 94 |
+
with pytest.raises(TypeError):
|
| 95 |
+
np.result_type(SF(1.), np.int64)
|
| 96 |
+
|
| 97 |
+
def test_basic_multiply(self):
|
| 98 |
+
a = self._get_array(2.)
|
| 99 |
+
b = self._get_array(4.)
|
| 100 |
+
|
| 101 |
+
res = a * b
|
| 102 |
+
# multiplies dtype scaling and content separately:
|
| 103 |
+
assert res.dtype.get_scaling() == 8.
|
| 104 |
+
expected_view = a.view(np.float64) * b.view(np.float64)
|
| 105 |
+
assert_array_equal(res.view(np.float64), expected_view)
|
| 106 |
+
|
| 107 |
+
def test_possible_and_impossible_reduce(self):
|
| 108 |
+
# For reductions to work, the first and last operand must have the
|
| 109 |
+
# same dtype. For this parametric DType that is not necessarily true.
|
| 110 |
+
a = self._get_array(2.)
|
| 111 |
+
# Addition reductin works (as of writing requires to pass initial
|
| 112 |
+
# because setting a scaled-float from the default `0` fails).
|
| 113 |
+
res = np.add.reduce(a, initial=0.)
|
| 114 |
+
assert res == a.astype(np.float64).sum()
|
| 115 |
+
|
| 116 |
+
# But each multiplication changes the factor, so a reduction is not
|
| 117 |
+
# possible (the relaxed version of the old refusal to handle any
|
| 118 |
+
# flexible dtype).
|
| 119 |
+
with pytest.raises(TypeError,
|
| 120 |
+
match="the resolved dtypes are not compatible"):
|
| 121 |
+
np.multiply.reduce(a)
|
| 122 |
+
|
| 123 |
+
def test_basic_ufunc_at(self):
|
| 124 |
+
float_a = np.array([1., 2., 3.])
|
| 125 |
+
b = self._get_array(2.)
|
| 126 |
+
|
| 127 |
+
float_b = b.view(np.float64).copy()
|
| 128 |
+
np.multiply.at(float_b, [1, 1, 1], float_a)
|
| 129 |
+
np.multiply.at(b, [1, 1, 1], float_a)
|
| 130 |
+
|
| 131 |
+
assert_array_equal(b.view(np.float64), float_b)
|
| 132 |
+
|
| 133 |
+
def test_basic_multiply_promotion(self):
|
| 134 |
+
float_a = np.array([1., 2., 3.])
|
| 135 |
+
b = self._get_array(2.)
|
| 136 |
+
|
| 137 |
+
res1 = float_a * b
|
| 138 |
+
res2 = b * float_a
|
| 139 |
+
|
| 140 |
+
# one factor is one, so we get the factor of b:
|
| 141 |
+
assert res1.dtype == res2.dtype == b.dtype
|
| 142 |
+
expected_view = float_a * b.view(np.float64)
|
| 143 |
+
assert_array_equal(res1.view(np.float64), expected_view)
|
| 144 |
+
assert_array_equal(res2.view(np.float64), expected_view)
|
| 145 |
+
|
| 146 |
+
# Check that promotion works when `out` is used:
|
| 147 |
+
np.multiply(b, float_a, out=res2)
|
| 148 |
+
with pytest.raises(TypeError):
|
| 149 |
+
# The promoter accepts this (maybe it should not), but the SFloat
|
| 150 |
+
# result cannot be cast to integer:
|
| 151 |
+
np.multiply(b, float_a, out=np.arange(3))
|
| 152 |
+
|
| 153 |
+
def test_basic_addition(self):
|
| 154 |
+
a = self._get_array(2.)
|
| 155 |
+
b = self._get_array(4.)
|
| 156 |
+
|
| 157 |
+
res = a + b
|
| 158 |
+
# addition uses the type promotion rules for the result:
|
| 159 |
+
assert res.dtype == np.result_type(a.dtype, b.dtype)
|
| 160 |
+
expected_view = (a.astype(res.dtype).view(np.float64) +
|
| 161 |
+
b.astype(res.dtype).view(np.float64))
|
| 162 |
+
assert_array_equal(res.view(np.float64), expected_view)
|
| 163 |
+
|
| 164 |
+
def test_addition_cast_safety(self):
|
| 165 |
+
"""The addition method is special for the scaled float, because it
|
| 166 |
+
includes the "cast" between different factors, thus cast-safety
|
| 167 |
+
is influenced by the implementation.
|
| 168 |
+
"""
|
| 169 |
+
a = self._get_array(2.)
|
| 170 |
+
b = self._get_array(-2.)
|
| 171 |
+
c = self._get_array(3.)
|
| 172 |
+
|
| 173 |
+
# sign change is "equiv":
|
| 174 |
+
np.add(a, b, casting="equiv")
|
| 175 |
+
with pytest.raises(TypeError):
|
| 176 |
+
np.add(a, b, casting="no")
|
| 177 |
+
|
| 178 |
+
# Different factor is "same_kind" (default) so check that "safe" fails
|
| 179 |
+
with pytest.raises(TypeError):
|
| 180 |
+
np.add(a, c, casting="safe")
|
| 181 |
+
|
| 182 |
+
# Check that casting the output fails also (done by the ufunc here)
|
| 183 |
+
with pytest.raises(TypeError):
|
| 184 |
+
np.add(a, a, out=c, casting="safe")
|
| 185 |
+
|
| 186 |
+
@pytest.mark.parametrize("ufunc",
|
| 187 |
+
[np.logical_and, np.logical_or, np.logical_xor])
|
| 188 |
+
def test_logical_ufuncs_casts_to_bool(self, ufunc):
|
| 189 |
+
a = self._get_array(2.)
|
| 190 |
+
a[0] = 0. # make sure first element is considered False.
|
| 191 |
+
|
| 192 |
+
float_equiv = a.astype(float)
|
| 193 |
+
expected = ufunc(float_equiv, float_equiv)
|
| 194 |
+
res = ufunc(a, a)
|
| 195 |
+
assert_array_equal(res, expected)
|
| 196 |
+
|
| 197 |
+
# also check that the same works for reductions:
|
| 198 |
+
expected = ufunc.reduce(float_equiv)
|
| 199 |
+
res = ufunc.reduce(a)
|
| 200 |
+
assert_array_equal(res, expected)
|
| 201 |
+
|
| 202 |
+
# The output casting does not match the bool, bool -> bool loop:
|
| 203 |
+
with pytest.raises(TypeError):
|
| 204 |
+
ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")
|
| 205 |
+
|
| 206 |
+
def test_wrapped_and_wrapped_reductions(self):
|
| 207 |
+
a = self._get_array(2.)
|
| 208 |
+
float_equiv = a.astype(float)
|
| 209 |
+
|
| 210 |
+
expected = np.hypot(float_equiv, float_equiv)
|
| 211 |
+
res = np.hypot(a, a)
|
| 212 |
+
assert res.dtype == a.dtype
|
| 213 |
+
res_float = res.view(np.float64) * 2
|
| 214 |
+
assert_array_equal(res_float, expected)
|
| 215 |
+
|
| 216 |
+
# Also check reduction (keepdims, due to incorrect getitem)
|
| 217 |
+
res = np.hypot.reduce(a, keepdims=True)
|
| 218 |
+
assert res.dtype == a.dtype
|
| 219 |
+
expected = np.hypot.reduce(float_equiv, keepdims=True)
|
| 220 |
+
assert res.view(np.float64) * 2 == expected
|
| 221 |
+
|
| 222 |
+
def test_astype_class(self):
|
| 223 |
+
# Very simple test that we accept `.astype()` also on the class.
|
| 224 |
+
# ScaledFloat always returns the default descriptor, but it does
|
| 225 |
+
# check the relevant code paths.
|
| 226 |
+
arr = np.array([1., 2., 3.], dtype=object)
|
| 227 |
+
|
| 228 |
+
res = arr.astype(SF) # passing the class class
|
| 229 |
+
expected = arr.astype(SF(1.)) # above will have discovered 1. scaling
|
| 230 |
+
assert_array_equal(res.view(np.float64), expected.view(np.float64))
|
| 231 |
+
|
| 232 |
+
def test_creation_class(self):
|
| 233 |
+
arr1 = np.array([1., 2., 3.], dtype=SF)
|
| 234 |
+
assert arr1.dtype == SF(1.)
|
| 235 |
+
arr2 = np.array([1., 2., 3.], dtype=SF(1.))
|
| 236 |
+
assert_array_equal(arr1.view(np.float64), arr2.view(np.float64))
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def test_type_pickle():
|
| 240 |
+
# can't actually unpickle, but we can pickle (if in namespace)
|
| 241 |
+
import pickle
|
| 242 |
+
|
| 243 |
+
np._ScaledFloatTestDType = SF
|
| 244 |
+
|
| 245 |
+
s = pickle.dumps(SF)
|
| 246 |
+
res = pickle.loads(s)
|
| 247 |
+
assert res is SF
|
| 248 |
+
|
| 249 |
+
del np._ScaledFloatTestDType
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def test_is_numeric():
|
| 253 |
+
assert SF._is_numeric
|