ZTWHHH committed on
Commit
7d8711c
·
verified ·
1 Parent(s): 478372a

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +5 -0
  2. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so +3 -0
  3. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/__init__.py +210 -0
  4. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc +0 -0
  5. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc +0 -0
  6. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/_laplacian.py +563 -0
  7. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/_validation.py +66 -0
  8. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__init__.py +0 -0
  9. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  10. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc +0 -0
  11. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc +0 -0
  12. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc +0 -0
  13. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc +0 -0
  14. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc +0 -0
  15. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc +0 -0
  16. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc +0 -0
  17. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc +0 -0
  18. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc +0 -0
  19. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc +0 -0
  20. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py +119 -0
  21. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_conversions.py +61 -0
  22. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_flow.py +209 -0
  23. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py +368 -0
  24. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_matching.py +295 -0
  25. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_pydata_sparse.py +194 -0
  26. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_reordering.py +70 -0
  27. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py +484 -0
  28. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py +66 -0
  29. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_traversal.py +148 -0
  30. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsmr.cpython-310.pyc +0 -0
  31. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_gcrotmk.cpython-310.pyc +0 -0
  32. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_iterative.cpython-310.pyc +0 -0
  33. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsmr.cpython-310.pyc +0 -0
  34. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsqr.cpython-310.pyc +0 -0
  35. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_minres.cpython-310.pyc +0 -0
  36. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_utils.cpython-310.pyc +0 -0
  37. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py +183 -0
  38. mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc +3 -0
  39. moondream/lib/python3.10/site-packages/contourpy-1.3.1.dist-info/INSTALLER +1 -0
  40. moondream/lib/python3.10/site-packages/contourpy-1.3.1.dist-info/METADATA +93 -0
  41. moondream/lib/python3.10/site-packages/contourpy-1.3.1.dist-info/RECORD +43 -0
  42. moondream/lib/python3.10/site-packages/contourpy-1.3.1.dist-info/REQUESTED +0 -0
  43. moondream/lib/python3.10/site-packages/matplotlib/_cm_multivar.py +166 -0
  44. moondream/lib/python3.10/site-packages/matplotlib/_internal_utils.py +64 -0
  45. moondream/lib/python3.10/site-packages/matplotlib/_layoutgrid.py +547 -0
  46. moondream/lib/python3.10/site-packages/matplotlib/_text_helpers.py +82 -0
  47. moondream/lib/python3.10/site-packages/matplotlib/artist.pyi +199 -0
  48. moondream/lib/python3.10/site-packages/matplotlib/axis.py +0 -0
  49. moondream/lib/python3.10/site-packages/matplotlib/backend_tools.py +998 -0
  50. moondream/lib/python3.10/site-packages/matplotlib/bezier.py +602 -0
.gitattributes CHANGED
@@ -574,3 +574,8 @@ moondream/lib/python3.10/site-packages/narwhals/__pycache__/series.cpython-310.p
574
  moondream/lib/python3.10/site-packages/narwhals/__pycache__/expr.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
575
  moondream/lib/python3.10/site-packages/narwhals/__pycache__/dataframe.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
576
  mantis_evalkit/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1900 filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
574
  moondream/lib/python3.10/site-packages/narwhals/__pycache__/expr.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
575
  moondream/lib/python3.10/site-packages/narwhals/__pycache__/dataframe.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
576
  mantis_evalkit/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1900 filter=lfs diff=lfs merge=lfs -text
577
+ mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
578
+ mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
579
+ moondream/lib/python3.10/site-packages/numpy/random/mtrand.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
580
+ moondream/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
581
+ moondream/lib/python3.10/site-packages/onnx/onnx_cpp2py_export.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbc4aa657a759ee9130cc8babb9c626a2e554ae582a34ae90a35564312269ab3
3
+ size 839504
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/__init__.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
r"""
Compressed sparse graph routines (:mod:`scipy.sparse.csgraph`)
==============================================================

.. currentmodule:: scipy.sparse.csgraph

Fast graph algorithms based on sparse matrix representations.

Contents
--------

.. autosummary::
   :toctree: generated/

   connected_components -- determine connected components of a graph
   laplacian -- compute the laplacian of a graph
   shortest_path -- compute the shortest path between points on a positive graph
   dijkstra -- use Dijkstra's algorithm for shortest path
   floyd_warshall -- use the Floyd-Warshall algorithm for shortest path
   bellman_ford -- use the Bellman-Ford algorithm for shortest path
   johnson -- use Johnson's algorithm for shortest path
   yen -- use Yen's algorithm for K-shortest paths between two nodes
   breadth_first_order -- compute a breadth-first order of nodes
   depth_first_order -- compute a depth-first order of nodes
   breadth_first_tree -- construct the breadth-first tree from a given node
   depth_first_tree -- construct a depth-first tree from a given node
   minimum_spanning_tree -- construct the minimum spanning tree of a graph
   reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering
   maximum_flow -- solve the maximum flow problem for a graph
   maximum_bipartite_matching -- compute a maximum matching of a bipartite graph
   min_weight_full_bipartite_matching -- compute a minimum weight full matching of a bipartite graph
   structural_rank -- compute the structural rank of a graph
   NegativeCycleError

.. autosummary::
   :toctree: generated/

   construct_dist_matrix
   csgraph_from_dense
   csgraph_from_masked
   csgraph_masked_from_dense
   csgraph_to_dense
   csgraph_to_masked
   reconstruct_path

Graph Representations
---------------------
This module uses graphs which are stored in a matrix format. A
graph with N nodes can be represented by an (N x N) adjacency matrix G.
If there is a connection from node i to node j, then G[i, j] = w, where
w is the weight of the connection. For nodes i and j which are
not connected, the value depends on the representation:

- for dense array representations, non-edges are represented by
  G[i, j] = 0, infinity, or NaN.

- for dense masked representations (of type np.ma.MaskedArray), non-edges
  are represented by masked values. This can be useful when graphs with
  zero-weight edges are desired.

- for sparse array representations, non-edges are represented by
  non-entries in the matrix. This sort of sparse representation also
  allows for edges with zero weights.

As a concrete example, imagine that you would like to represent the following
undirected graph::

              G

             (0)
            /   \
           1     2
          /       \
        (2)       (1)

This graph has three nodes, where node 0 and 1 are connected by an edge of
weight 2, and nodes 0 and 2 are connected by an edge of weight 1.
We can construct the dense, masked, and sparse representations as follows,
keeping in mind that an undirected graph is represented by a symmetric matrix::

    >>> import numpy as np
    >>> G_dense = np.array([[0, 2, 1],
    ...                     [2, 0, 0],
    ...                     [1, 0, 0]])
    >>> G_masked = np.ma.masked_values(G_dense, 0)
    >>> from scipy.sparse import csr_array
    >>> G_sparse = csr_array(G_dense)

This becomes more difficult when zero edges are significant. For example,
consider the situation when we slightly modify the above graph::

             G2

             (0)
            /   \
           0     2
          /       \
        (2)       (1)

This is identical to the previous graph, except nodes 0 and 2 are connected
by an edge of zero weight. In this case, the dense representation above
leads to ambiguities: how can non-edges be represented if zero is a meaningful
value? In this case, either a masked or sparse representation must be used
to eliminate the ambiguity::

    >>> import numpy as np
    >>> G2_data = np.array([[np.inf, 2, 0],
    ...                     [2, np.inf, np.inf],
    ...                     [0, np.inf, np.inf]])
    >>> G2_masked = np.ma.masked_invalid(G2_data)
    >>> from scipy.sparse.csgraph import csgraph_from_dense
    >>> # G2_sparse = csr_array(G2_data) would give the wrong result
    >>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
    >>> G2_sparse.data
    array([ 2., 0., 2., 0.])

Here we have used a utility routine from the csgraph submodule in order to
convert the dense representation to a sparse representation which can be
understood by the algorithms in this submodule. By viewing the data array, we
can see that the zero values are explicitly encoded in the graph.

Directed vs. undirected
^^^^^^^^^^^^^^^^^^^^^^^
Matrices may represent either directed or undirected graphs. This is
specified throughout the csgraph module by a boolean keyword. Graphs are
assumed to be directed by default. In a directed graph, traversal from node
i to node j can be accomplished over the edge G[i, j], but not the edge
G[j, i]. Consider the following dense graph::

    >>> import numpy as np
    >>> G_dense = np.array([[0, 1, 0],
    ...                     [2, 0, 3],
    ...                     [0, 4, 0]])

When ``directed=True`` we get the graph::

      ---1--> ---3-->
    (0)     (1)     (2)
      <--2--- <--4---

In a non-directed graph, traversal from node i to node j can be
accomplished over either G[i, j] or G[j, i]. If both edges are not null,
and the two have unequal weights, then the smaller of the two is used.

So for the same graph, when ``directed=False`` we get the graph::

    (0)--1--(1)--3--(2)

Note that a symmetric matrix will represent an undirected graph, regardless
of whether the 'directed' keyword is set to True or False. In this case,
using ``directed=True`` generally leads to more efficient computation.

The routines in this module accept as input either scipy.sparse representations
(csr, csc, or lil format), masked representations, or dense representations
with non-edges indicated by zeros, infinities, and NaN entries.
"""  # noqa: E501

__docformat__ = "restructuredtext en"

__all__ = ['connected_components',
           'laplacian',
           'shortest_path',
           'floyd_warshall',
           'dijkstra',
           'bellman_ford',
           'johnson',
           'yen',
           'breadth_first_order',
           'depth_first_order',
           'breadth_first_tree',
           'depth_first_tree',
           'minimum_spanning_tree',
           'reverse_cuthill_mckee',
           'maximum_flow',
           'maximum_bipartite_matching',
           'min_weight_full_bipartite_matching',
           'structural_rank',
           'construct_dist_matrix',
           'reconstruct_path',
           'csgraph_masked_from_dense',
           'csgraph_from_dense',
           'csgraph_from_masked',
           'csgraph_to_dense',
           'csgraph_to_masked',
           'NegativeCycleError']

from ._laplacian import laplacian
from ._shortest_path import (
    shortest_path, floyd_warshall, dijkstra, bellman_ford, johnson, yen,
    NegativeCycleError
)
from ._traversal import (
    breadth_first_order, depth_first_order, breadth_first_tree,
    depth_first_tree, connected_components
)
from ._min_spanning_tree import minimum_spanning_tree
from ._flow import maximum_flow
from ._matching import (
    maximum_bipartite_matching, min_weight_full_bipartite_matching
)
from ._reordering import reverse_cuthill_mckee, structural_rank
from ._tools import (
    construct_dist_matrix, reconstruct_path, csgraph_from_dense,
    csgraph_to_dense, csgraph_masked_from_dense, csgraph_from_masked,
    csgraph_to_masked
)

from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc ADDED
Binary file (16.7 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc ADDED
Binary file (1.65 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/_laplacian.py ADDED
@@ -0,0 +1,563 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Laplacian of a compressed-sparse graph
3
+ """
4
+
5
+ import numpy as np
6
+ from scipy.sparse import issparse
7
+ from scipy.sparse.linalg import LinearOperator
8
+ from scipy.sparse._sputils import convert_pydata_sparse_to_scipy, is_pydata_spmatrix
9
+
10
+
11
###############################################################################
# Graph laplacian
def laplacian(
    csgraph,
    normed=False,
    return_diag=False,
    use_out_degree=False,
    *,
    copy=True,
    form="array",
    dtype=None,
    symmetrized=False,
):
    """
    Return the Laplacian of a directed graph.

    Parameters
    ----------
    csgraph : array_like or sparse array or matrix, 2 dimensions
        compressed-sparse graph, with shape (N, N).
    normed : bool, optional
        If True, then compute symmetrically normalized Laplacian.
        Default: False.
    return_diag : bool, optional
        If True, then also return an array related to vertex degrees.
        Default: False.
    use_out_degree : bool, optional
        If True, then use out-degree instead of in-degree.
        This distinction matters only if the graph is asymmetric.
        Default: False.
    copy : bool, optional
        If False, then change `csgraph` in place if possible,
        avoiding doubling the memory use.
        Default: True, for backward compatibility.
    form : 'array', or 'function', or 'lo'
        Determines the format of the output Laplacian:

        * 'array' is a numpy array;
        * 'function' is a pointer to evaluating the Laplacian-vector
          or Laplacian-matrix product;
        * 'lo' results in the format of the `LinearOperator`.

        Choosing 'function' or 'lo' always avoids doubling
        the memory use, ignoring `copy` value.
        Default: 'array', for backward compatibility.
    dtype : None or one of numeric numpy dtypes, optional
        The dtype of the output. If ``dtype=None``, the dtype of the
        output matches the dtype of the input csgraph, except for
        the case ``normed=True`` and integer-like csgraph, where
        the output dtype is 'float' allowing accurate normalization,
        but dramatically increasing the memory use.
        Default: None, for backward compatibility.
    symmetrized : bool, optional
        If True, then the output Laplacian is symmetric/Hermitian.
        The symmetrization is done by ``csgraph + csgraph.T.conj``
        without dividing by 2 to preserve integer dtypes if possible
        prior to the construction of the Laplacian.
        Default: False, for backward compatibility.

    Returns
    -------
    lap : ndarray, or sparse array or matrix, or `LinearOperator`
        The N x N Laplacian of csgraph, in the representation
        requested by `form`.
    diag : ndarray, optional
        The length-N main diagonal of the Laplacian matrix.
        For the normalized Laplacian, this is the array of square roots
        of vertex degrees or 1 if the degree is zero.
        Only returned when ``return_diag=True``.

    Notes
    -----
    Only the symmetric normalization is implemented. It uses the inverse
    square roots of row-sums of the input adjacency matrix (with diagonal
    entries ignored and treated as zeros), and thus may fail if the
    row-sums contain negative values or complex values with a non-zero
    imaginary part. If the input adjacency matrix is not symmetric, the
    Laplacian is also non-symmetric unless ``symmetrized=True`` is used.

    References
    ----------
    .. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix
    """
    # Pydata/sparse inputs are converted to scipy.sparse for the
    # computation and converted back just before returning.
    came_from_pydata = is_pydata_spmatrix(csgraph)
    if came_from_pydata:
        pydata_cls = csgraph.__class__
        csgraph = convert_pydata_sparse_to_scipy(csgraph)

    if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
        raise ValueError('csgraph must be a square matrix or array')

    # Normalization divides by square roots of vertex degrees, so
    # integer-like graphs must be promoted to float first.
    if normed and (np.issubdtype(csgraph.dtype, np.signedinteger)
                   or np.issubdtype(csgraph.dtype, np.uint)):
        csgraph = csgraph.astype(np.float64)

    # 'array' materializes the Laplacian explicitly; 'function'/'lo'
    # build a matrix-free evaluator instead.
    if form == "array":
        create_lap = _laplacian_sparse if issparse(csgraph) else _laplacian_dense
    else:
        create_lap = (_laplacian_sparse_flo if issparse(csgraph)
                      else _laplacian_dense_flo)

    lap, d = create_lap(
        csgraph,
        normed=normed,
        axis=1 if use_out_degree else 0,
        copy=copy,
        form=form,
        dtype=dtype,
        symmetrized=symmetrized,
    )
    if came_from_pydata:
        lap = pydata_cls.from_scipy_sparse(lap)
    return (lap, d) if return_diag else lap
374
+
375
+
376
+ def _setdiag_dense(m, d):
377
+ step = len(d) + 1
378
+ m.flat[::step] = d
379
+
380
+
381
+ def _laplace(m, d):
382
+ return lambda v: v * d[:, np.newaxis] - m @ v
383
+
384
+
385
+ def _laplace_normed(m, d, nd):
386
+ laplace = _laplace(m, d)
387
+ return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis])
388
+
389
+
390
+ def _laplace_sym(m, d):
391
+ return (
392
+ lambda v: v * d[:, np.newaxis]
393
+ - m @ v
394
+ - np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m))
395
+ )
396
+
397
+
398
+ def _laplace_normed_sym(m, d, nd):
399
+ laplace_sym = _laplace_sym(m, d)
400
+ return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis])
401
+
402
+
403
+ def _linearoperator(mv, shape, dtype):
404
+ return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype)
405
+
406
+
407
def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized):
    """Build a matrix-free Laplacian evaluator for a sparse graph.

    Returns ``(op, diag)`` where ``op`` is a callable (``form='function'``)
    or a `LinearOperator` (``form='lo'``) evaluating the Laplacian product,
    and ``diag`` is the Laplacian diagonal — or the degree square roots
    (1 for isolated nodes) when ``normed=True``.
    """
    # The keyword argument `copy` is unused: nothing is materialized here.
    del copy

    if dtype is None:
        dtype = graph.dtype

    row_sum = np.asarray(graph.sum(axis=axis)).ravel()
    diagonal = graph.diagonal()
    # Diagonal entries of the input are not graph edges, so they are
    # subtracted out of the degrees.
    diag = row_sum - diagonal
    if symmetrized:
        # Degrees of graph + graph^H: add the sums along the other axis.
        row_sum = row_sum + np.asarray(graph.sum(axis=1 - axis)).ravel()
        diag = row_sum - diagonal - diagonal

    if normed:
        # Isolated nodes get a scaling factor of 1 instead of 1/sqrt(0).
        w = np.where(diag == 0, 1, np.sqrt(diag))
        factory = _laplace_normed_sym if symmetrized else _laplace_normed
        md = factory(graph, row_sum, 1.0 / w)
        returned_diag = w
    else:
        factory = _laplace_sym if symmetrized else _laplace
        md = factory(graph, row_sum)
        returned_diag = diag

    if form == "function":
        return md, returned_diag.astype(dtype, copy=False)
    if form == "lo":
        lo = _linearoperator(md, shape=graph.shape, dtype=dtype)
        return lo, returned_diag.astype(dtype, copy=False)
    raise ValueError(f"Invalid form: {form!r}")
447
+
448
+
449
def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized):
    """Sparse-graph Laplacian materialized as a sparse matrix/array.

    Returns ``(lap, w)`` where ``lap`` is the (optionally normalized and/or
    symmetrized) Laplacian in COO (or DIA) format and ``w`` is the degree
    vector (sqrt-degree weights when ``normed``).
    """
    # The keyword argument `form` is unused and has no effect here.
    del form

    if dtype is None:
        dtype = graph.dtype

    needs_copy = False
    if graph.format in ('lil', 'dok'):
        # Formats without efficient arithmetic; tocoo() already copies.
        m = graph.tocoo()
    else:
        m = graph
        if copy:
            needs_copy = True

    if symmetrized:
        m += m.T.conj()

    # Degree vector: axis sums minus any self-loop (diagonal) contribution.
    w = np.asarray(m.sum(axis=axis)).ravel() - m.diagonal()
    if normed:
        m = m.tocoo(copy=needs_copy)
        # Isolated vertices get weight 1 to avoid division by zero below.
        isolated_node_mask = (w == 0)
        w = np.where(isolated_node_mask, 1, np.sqrt(w))
        m.data /= w[m.row]
        m.data /= w[m.col]
        m.data *= -1
        # Normalized Laplacian has unit diagonal except at isolated nodes.
        m.setdiag(1 - isolated_node_mask)
    else:
        if m.format == 'dia':
            # DIA supports setdiag directly; copy to avoid mutating input.
            m = m.copy()
        else:
            m = m.tocoo(copy=needs_copy)
        m.data *= -1
        m.setdiag(w)

    return m.astype(dtype, copy=False), w.astype(dtype)
485
+
486
+
487
def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized):
    """Dense-graph Laplacian in "function" or "lo" (LinearOperator) form.

    Mirrors `_laplacian_sparse_flo` for NumPy-array inputs: returns
    ``(lap, d)`` with ``lap`` a matvec callable or LinearOperator and
    ``d`` the degree (or sqrt-degree scaling) vector.
    """

    if copy:
        m = np.array(graph)
    else:
        m = np.asarray(graph)

    if dtype is None:
        dtype = m.dtype

    # Degrees: axis sums with the self-loop (diagonal) removed.
    graph_sum = m.sum(axis=axis)
    graph_diagonal = m.diagonal()
    diag = graph_sum - graph_diagonal
    if symmetrized:
        # Add the other-axis sums; the diagonal was counted in both sums.
        graph_sum += m.sum(axis=1 - axis)
        diag = graph_sum - graph_diagonal - graph_diagonal

    if normed:
        # Weight 1 for isolated vertices keeps 1/w finite below.
        isolated_node_mask = diag == 0
        w = np.where(isolated_node_mask, 1, np.sqrt(diag))
        if symmetrized:
            md = _laplace_normed_sym(m, graph_sum, 1.0 / w)
        else:
            md = _laplace_normed(m, graph_sum, 1.0 / w)
        if form == "function":
            return md, w.astype(dtype, copy=False)
        elif form == "lo":
            m = _linearoperator(md, shape=graph.shape, dtype=dtype)
            return m, w.astype(dtype, copy=False)
        else:
            raise ValueError(f"Invalid form: {form!r}")
    else:
        if symmetrized:
            md = _laplace_sym(m, graph_sum)
        else:
            md = _laplace(m, graph_sum)
        if form == "function":
            return md, diag.astype(dtype, copy=False)
        elif form == "lo":
            m = _linearoperator(md, shape=graph.shape, dtype=dtype)
            return m, diag.astype(dtype, copy=False)
        else:
            raise ValueError(f"Invalid form: {form!r}")
530
+
531
+
532
def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized):
    """Dense-graph Laplacian materialized as a NumPy array.

    Returns ``(lap, w)`` where ``lap`` is the (optionally normalized and/or
    symmetrized) Laplacian as a dense array and ``w`` is the degree vector.
    ``form`` must be "array" here; the callable/LinearOperator variants are
    handled by `_laplacian_dense_flo`.

    NOTE: with ``copy=False`` the computation works in place on ``graph``
    (``np.asarray`` does not copy an ndarray input), so the caller's array
    is modified.
    """
    if form != "array":
        raise ValueError(f'{form!r} must be "array"')

    if copy:
        m = np.array(graph)
    else:
        m = np.asarray(graph)

    # Resolve the output dtype once, after `m` exists.  (The original code
    # checked `dtype is None` twice -- before and after the conversion --
    # so the second check was dead; `m` has the same dtype as `graph`.)
    if dtype is None:
        dtype = m.dtype

    if symmetrized:
        m += m.T.conj()
    # Self-loops do not contribute to the Laplacian: zero the diagonal
    # before computing degrees.
    np.fill_diagonal(m, 0)
    w = m.sum(axis=axis)
    if normed:
        # Isolated vertices get weight 1 to avoid division by zero.
        isolated_node_mask = (w == 0)
        w = np.where(isolated_node_mask, 1, np.sqrt(w))
        m /= w
        m /= w[:, np.newaxis]
        m *= -1
        # Normalized Laplacian has unit diagonal except at isolated nodes.
        _setdiag_dense(m, 1 - isolated_node_mask)
    else:
        m *= -1
        _setdiag_dense(m, w)

    return m.astype(dtype, copy=False), w.astype(dtype, copy=False)
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/_validation.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.sparse import issparse
3
+ from scipy.sparse._sputils import convert_pydata_sparse_to_scipy
4
+ from scipy.sparse.csgraph._tools import (
5
+ csgraph_to_dense, csgraph_from_dense,
6
+ csgraph_masked_from_dense, csgraph_from_masked
7
+ )
8
+
9
+ DTYPE = np.float64
10
+
11
+
12
def validate_graph(csgraph, directed, dtype=DTYPE,
                   csr_output=True, dense_output=True,
                   copy_if_dense=False, copy_if_sparse=False,
                   null_value_in=0, null_value_out=np.inf,
                   infinity_null=True, nan_null=True):
    """Routine for validation and conversion of csgraph inputs

    Normalizes sparse / masked / dense inputs to a float64 CSR matrix
    (when ``csr_output``) or a dense/masked representation (when
    ``dense_output``), mapping "null" (absent-edge) markers from
    ``null_value_in`` (plus inf/nan if enabled) to ``null_value_out``,
    and validates that the result is a square 2-D graph.

    NOTE(review): the ``dtype`` parameter is accepted but the conversions
    below always use the module-level ``DTYPE``.
    """
    if not (csr_output or dense_output):
        raise ValueError("Internal: dense or csr output must be true")

    # Fill values treated as "no edge" when converting pydata/sparse input.
    accept_fv = [null_value_in]
    if infinity_null:
        accept_fv.append(np.inf)
    if nan_null:
        accept_fv.append(np.nan)
    csgraph = convert_pydata_sparse_to_scipy(csgraph, accept_fv=accept_fv)

    # if undirected and csc storage, then transposing in-place
    # is quicker than later converting to csr.
    if (not directed) and issparse(csgraph) and csgraph.format == "csc":
        csgraph = csgraph.T

    if issparse(csgraph):
        if csr_output:
            csgraph = csgraph.tocsr(copy=copy_if_sparse).astype(DTYPE, copy=False)
        else:
            csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)
    elif np.ma.isMaskedArray(csgraph):
        if dense_output:
            # Masked entries denote missing edges: fill with null_value_out.
            mask = csgraph.mask
            csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)
            csgraph[mask] = null_value_out
        else:
            csgraph = csgraph_from_masked(csgraph)
    else:
        if dense_output:
            # Mask the null markers first, then fill them with null_value_out.
            csgraph = csgraph_masked_from_dense(csgraph,
                                                copy=copy_if_dense,
                                                null_value=null_value_in,
                                                nan_null=nan_null,
                                                infinity_null=infinity_null)
            mask = csgraph.mask
            csgraph = np.asarray(csgraph.data, dtype=DTYPE)
            csgraph[mask] = null_value_out
        else:
            csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,
                                         infinity_null=infinity_null,
                                         nan_null=nan_null)

    if csgraph.ndim != 2:
        raise ValueError("compressed-sparse graph must be 2-D")

    if csgraph.shape[0] != csgraph.shape[1]:
        raise ValueError("compressed-sparse graph must be shape (N, N)")

    return csgraph
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__init__.py ADDED
File without changes
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc ADDED
Binary file (3.22 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc ADDED
Binary file (7.29 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc ADDED
Binary file (7.7 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc ADDED
Binary file (4.54 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc ADDED
Binary file (2.91 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc ADDED
Binary file (1.53 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc ADDED
Binary file (3.88 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_equal, assert_array_almost_equal
3
+ from scipy.sparse import csgraph, csr_array
4
+
5
+
6
def test_weak_connections():
    """Weakly-connected components of a directed graph, dense and sparse."""
    dense = np.array([[0, 1, 0],
                      [0, 0, 0],
                      [0, 0, 0]])
    as_sparse = csgraph.csgraph_from_dense(dense, null_value=0)

    for graph in (as_sparse, dense):
        n_components, labels = csgraph.connected_components(
            graph, directed=True, connection='weak')
        assert_equal(n_components, 2)
        assert_array_almost_equal(labels, [0, 0, 1])
20
+
21
+
22
def test_strong_connections():
    """Strongly-connected components with and without the reverse edge."""
    one_way = np.array([[0, 1, 0],
                        [0, 0, 0],
                        [0, 0, 0]])
    two_way = one_way + one_way.T

    cases = [
        (one_way, 3, [0, 1, 2]),   # single edge: every node is its own SCC
        (two_way, 2, [0, 0, 1]),   # 2-cycle merges nodes 0 and 1
    ]
    for dense, expected_n, expected_labels in cases:
        for graph in (csgraph.csgraph_from_dense(dense, null_value=0), dense):
            n_components, labels = csgraph.connected_components(
                graph, directed=True, connection='strong')
            assert_equal(n_components, expected_n)
            labels.sort()
            assert_array_almost_equal(labels, expected_labels)
48
+
49
+
50
def test_strong_connections2():
    """Five strongly-connected components in a 6-node directed graph."""
    adjacency = np.array([[0, 0, 0, 0, 0, 0],
                          [1, 0, 1, 0, 0, 0],
                          [0, 0, 0, 1, 0, 0],
                          [0, 0, 1, 0, 1, 0],
                          [0, 0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 1, 0]])
    n_components, labels = csgraph.connected_components(
        adjacency, directed=True, connection='strong')
    assert_equal(n_components, 5)
    # Only nodes 2 and 3 form a cycle and share a label.
    assert_array_almost_equal(sorted(labels), [0, 1, 2, 2, 3, 4])
63
+
64
+
65
def test_weak_connections2():
    """Two weakly-connected components in a 6-node directed graph."""
    adjacency = np.array([[0, 0, 0, 0, 0, 0],
                          [1, 0, 0, 0, 0, 0],
                          [0, 0, 0, 1, 0, 0],
                          [0, 0, 1, 0, 1, 0],
                          [0, 0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 1, 0]])
    n_components, labels = csgraph.connected_components(
        adjacency, directed=True, connection='weak')
    assert_equal(n_components, 2)
    assert_array_almost_equal(sorted(labels), [0, 0, 1, 1, 1, 1])
78
+
79
+
80
def test_ticket1876():
    """Regression: two distinct 2-cycles must give two strong components.

    The original implementation merged them into one.
    """
    adjacency = np.array([[0, 1, 1, 0],
                          [1, 0, 0, 1],
                          [0, 0, 0, 1],
                          [0, 0, 1, 0]])
    n_components, labels = csgraph.connected_components(adjacency,
                                                        connection='strong')
    assert_equal(n_components, 2)
    assert_equal(labels[0], labels[1])
    assert_equal(labels[2], labels[3])
92
+
93
+
94
def test_fully_connected_graph():
    """A dense all-ones matrix is a single component (gh-3818 regression:
    fully connected dense inputs used to raise)."""
    n_components, _labels = csgraph.connected_components(np.ones((4, 4)))
    assert_equal(n_components, 1)
100
+
101
+
102
+ def test_int64_indices_undirected():
103
+ # See https://github.com/scipy/scipy/issues/18716
104
+ g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2))
105
+ assert g.indices.dtype == np.int64
106
+ n, labels = csgraph.connected_components(g, directed=False)
107
+ assert n == 1
108
+ assert_array_almost_equal(labels, [0, 0])
109
+
110
+
111
+ def test_int64_indices_directed():
112
+ # See https://github.com/scipy/scipy/issues/18716
113
+ g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2))
114
+ assert g.indices.dtype == np.int64
115
+ n, labels = csgraph.connected_components(g, directed=True,
116
+ connection='strong')
117
+ assert n == 2
118
+ assert_array_almost_equal(labels, [1, 0])
119
+
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_conversions.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_array_almost_equal
3
+ from scipy.sparse import csr_array
4
+ from scipy.sparse.csgraph import csgraph_from_dense, csgraph_to_dense
5
+
6
+
7
def test_csgraph_from_dense():
    """Dense -> CSR conversion treats 0 / nan / inf markers as null edges."""
    np.random.seed(1234)
    dense = np.random.random((10, 10))
    some_nulls = dense < 0.4
    all_nulls = dense < 0.8

    # Each marker must behave exactly like an explicit zero.
    for marker in [0, np.nan, np.inf]:
        dense[all_nulls] = marker
        with np.errstate(invalid="ignore"):
            as_csr = csgraph_from_dense(dense, null_value=0)
        dense[all_nulls] = 0
        assert_array_almost_equal(dense, as_csr.toarray())

    # nan / inf entries are nulled even when only null_value=0 is given.
    for marker in [np.nan, np.inf]:
        dense[all_nulls] = 0
        dense[some_nulls] = marker
        with np.errstate(invalid="ignore"):
            as_csr = csgraph_from_dense(dense, null_value=0)
        dense[all_nulls] = 0
        assert_array_almost_equal(dense, as_csr.toarray())
29
+
30
+
31
def test_csgraph_to_dense():
    """csgraph_to_dense fills null entries with the requested null_value."""
    np.random.seed(1234)
    dense = np.random.random((10, 10))
    null_mask = dense < 0.8
    dense[null_mask] = np.inf

    as_csr = csgraph_from_dense(dense)

    for fill in [0, 10, -np.inf, np.inf]:
        dense[null_mask] = fill
        assert_array_almost_equal(dense, csgraph_to_dense(as_csr, fill))
42
+
43
+
44
def test_multiple_edges():
    """Duplicate edges: toarray() sums them, csgraph_to_dense keeps the min."""
    np.random.seed(1234)
    dense = np.random.random((10, 10))
    as_csr = csr_array(dense)

    # Redirect every other stored column index onto its neighbour,
    # creating duplicate (row, col) entries.
    as_csr.indices[::2] = as_csr.indices[1::2]

    # Plain sparse densification sums the duplicated edges.
    summed = as_csr.toarray()
    assert_array_almost_equal(summed[:, 1::2],
                              dense[:, ::2] + dense[:, 1::2])

    # csgraph_to_dense chooses the minimum of each duplicated edge.
    minimized = csgraph_to_dense(as_csr)
    assert_array_almost_equal(minimized[:, 1::2],
                              np.minimum(dense[:, ::2], dense[:, 1::2]))
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_flow.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_array_equal
3
+ import pytest
4
+
5
+ from scipy.sparse import csr_array, csc_array, csr_matrix
6
+ from scipy.sparse.csgraph import maximum_flow
7
+ from scipy.sparse.csgraph._flow import (
8
+ _add_reverse_edges, _make_edge_pointers, _make_tails
9
+ )
10
+
11
+ methods = ['edmonds_karp', 'dinic']
12
+
13
def test_raises_on_dense_input():
    """Dense (non-sparse) input must raise TypeError for every method."""
    graph = np.array([[0, 1], [0, 0]])
    # The two calls previously shared one `pytest.raises` block, so the
    # second (edmonds_karp) call never executed: the first had already
    # raised.  Each call now gets its own context.
    with pytest.raises(TypeError):
        maximum_flow(graph, 0, 1)
    with pytest.raises(TypeError):
        maximum_flow(graph, 0, 1, method='edmonds_karp')
18
+
19
+
20
def test_raises_on_csc_input():
    """CSC-format input must raise TypeError for every method."""
    graph = csc_array([[0, 1], [0, 0]])
    # Previously both calls shared one `pytest.raises` block, leaving the
    # edmonds_karp call unreachable after the first call raised.
    with pytest.raises(TypeError):
        maximum_flow(graph, 0, 1)
    with pytest.raises(TypeError):
        maximum_flow(graph, 0, 1, method='edmonds_karp')
25
+
26
+
27
def test_raises_on_floating_point_input():
    """Floating-point capacities must raise ValueError for every method."""
    graph = csr_array([[0, 1.5], [0, 0]], dtype=np.float64)
    # Previously both calls shared one `pytest.raises` block, leaving the
    # edmonds_karp call unreachable after the first call raised.
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 1)
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 1, method='edmonds_karp')
32
+
33
+
34
def test_raises_on_non_square_input():
    """A non-square matrix is not a valid flow network."""
    graph = csr_array([[0, 1, 2], [2, 1, 0]])
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 1)
38
+
39
+
40
def test_raises_when_source_is_sink():
    """source == sink must raise ValueError for every method."""
    graph = csr_array([[0, 1], [0, 0]])
    # Previously both calls shared one `pytest.raises` block, leaving the
    # edmonds_karp call unreachable after the first call raised.
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 0)
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 0, method='edmonds_karp')
45
+
46
+
47
+ @pytest.mark.parametrize('method', methods)
48
+ @pytest.mark.parametrize('source', [-1, 2, 3])
49
+ def test_raises_when_source_is_out_of_bounds(source, method):
50
+ with pytest.raises(ValueError):
51
+ graph = csr_array([[0, 1], [0, 0]])
52
+ maximum_flow(graph, source, 1, method=method)
53
+
54
+
55
+ @pytest.mark.parametrize('method', methods)
56
+ @pytest.mark.parametrize('sink', [-1, 2, 3])
57
+ def test_raises_when_sink_is_out_of_bounds(sink, method):
58
+ with pytest.raises(ValueError):
59
+ graph = csr_array([[0, 1], [0, 0]])
60
+ maximum_flow(graph, 0, sink, method=method)
61
+
62
+
63
+ @pytest.mark.parametrize('method', methods)
64
+ def test_simple_graph(method):
65
+ # This graph looks as follows:
66
+ # (0) --5--> (1)
67
+ graph = csr_array([[0, 5], [0, 0]])
68
+ res = maximum_flow(graph, 0, 1, method=method)
69
+ assert res.flow_value == 5
70
+ expected_flow = np.array([[0, 5], [-5, 0]])
71
+ assert_array_equal(res.flow.toarray(), expected_flow)
72
+
73
+
74
+ @pytest.mark.parametrize('method', methods)
75
+ def test_return_type(method):
76
+ graph = csr_array([[0, 5], [0, 0]])
77
+ assert isinstance(maximum_flow(graph, 0, 1, method=method).flow, csr_array)
78
+ graph = csr_matrix([[0, 5], [0, 0]])
79
+ assert isinstance(maximum_flow(graph, 0, 1, method=method).flow, csr_matrix)
80
+
81
+
82
+ @pytest.mark.parametrize('method', methods)
83
+ def test_bottle_neck_graph(method):
84
+ # This graph cannot use the full capacity between 0 and 1:
85
+ # (0) --5--> (1) --3--> (2)
86
+ graph = csr_array([[0, 5, 0], [0, 0, 3], [0, 0, 0]])
87
+ res = maximum_flow(graph, 0, 2, method=method)
88
+ assert res.flow_value == 3
89
+ expected_flow = np.array([[0, 3, 0], [-3, 0, 3], [0, -3, 0]])
90
+ assert_array_equal(res.flow.toarray(), expected_flow)
91
+
92
+
93
+ @pytest.mark.parametrize('method', methods)
94
+ def test_backwards_flow(method):
95
+ # This example causes backwards flow between vertices 3 and 4,
96
+ # and so this test ensures that we handle that accordingly. See
97
+ # https://stackoverflow.com/q/38843963/5085211
98
+ # for more information.
99
+ graph = csr_array([[0, 10, 0, 0, 10, 0, 0, 0],
100
+ [0, 0, 10, 0, 0, 0, 0, 0],
101
+ [0, 0, 0, 10, 0, 0, 0, 0],
102
+ [0, 0, 0, 0, 0, 0, 0, 10],
103
+ [0, 0, 0, 10, 0, 10, 0, 0],
104
+ [0, 0, 0, 0, 0, 0, 10, 0],
105
+ [0, 0, 0, 0, 0, 0, 0, 10],
106
+ [0, 0, 0, 0, 0, 0, 0, 0]])
107
+ res = maximum_flow(graph, 0, 7, method=method)
108
+ assert res.flow_value == 20
109
+ expected_flow = np.array([[0, 10, 0, 0, 10, 0, 0, 0],
110
+ [-10, 0, 10, 0, 0, 0, 0, 0],
111
+ [0, -10, 0, 10, 0, 0, 0, 0],
112
+ [0, 0, -10, 0, 0, 0, 0, 10],
113
+ [-10, 0, 0, 0, 0, 10, 0, 0],
114
+ [0, 0, 0, 0, -10, 0, 10, 0],
115
+ [0, 0, 0, 0, 0, -10, 0, 10],
116
+ [0, 0, 0, -10, 0, 0, -10, 0]])
117
+ assert_array_equal(res.flow.toarray(), expected_flow)
118
+
119
+
120
+ @pytest.mark.parametrize('method', methods)
121
+ def test_example_from_clrs_chapter_26_1(method):
122
+ # See page 659 in CLRS second edition, but note that the maximum flow
123
+ # we find is slightly different than the one in CLRS; we push a flow of
124
+ # 12 to v_1 instead of v_2.
125
+ graph = csr_array([[0, 16, 13, 0, 0, 0],
126
+ [0, 0, 10, 12, 0, 0],
127
+ [0, 4, 0, 0, 14, 0],
128
+ [0, 0, 9, 0, 0, 20],
129
+ [0, 0, 0, 7, 0, 4],
130
+ [0, 0, 0, 0, 0, 0]])
131
+ res = maximum_flow(graph, 0, 5, method=method)
132
+ assert res.flow_value == 23
133
+ expected_flow = np.array([[0, 12, 11, 0, 0, 0],
134
+ [-12, 0, 0, 12, 0, 0],
135
+ [-11, 0, 0, 0, 11, 0],
136
+ [0, -12, 0, 0, -7, 19],
137
+ [0, 0, -11, 7, 0, 4],
138
+ [0, 0, 0, -19, -4, 0]])
139
+ assert_array_equal(res.flow.toarray(), expected_flow)
140
+
141
+
142
+ @pytest.mark.parametrize('method', methods)
143
+ def test_disconnected_graph(method):
144
+ # This tests the following disconnected graph:
145
+ # (0) --5--> (1) (2) --3--> (3)
146
+ graph = csr_array([[0, 5, 0, 0],
147
+ [0, 0, 0, 0],
148
+ [0, 0, 9, 3],
149
+ [0, 0, 0, 0]])
150
+ res = maximum_flow(graph, 0, 3, method=method)
151
+ assert res.flow_value == 0
152
+ expected_flow = np.zeros((4, 4), dtype=np.int32)
153
+ assert_array_equal(res.flow.toarray(), expected_flow)
154
+
155
+
156
+ @pytest.mark.parametrize('method', methods)
157
+ def test_add_reverse_edges_large_graph(method):
158
+ # Regression test for https://github.com/scipy/scipy/issues/14385
159
+ n = 100_000
160
+ indices = np.arange(1, n)
161
+ indptr = np.array(list(range(n)) + [n - 1])
162
+ data = np.ones(n - 1, dtype=np.int32)
163
+ graph = csr_array((data, indices, indptr), shape=(n, n))
164
+ res = maximum_flow(graph, 0, n - 1, method=method)
165
+ assert res.flow_value == 1
166
+ expected_flow = graph - graph.transpose()
167
+ assert_array_equal(res.flow.data, expected_flow.data)
168
+ assert_array_equal(res.flow.indices, expected_flow.indices)
169
+ assert_array_equal(res.flow.indptr, expected_flow.indptr)
170
+
171
+
172
+ @pytest.mark.parametrize("a,b_data_expected", [
173
+ ([[]], []),
174
+ ([[0], [0]], []),
175
+ ([[1, 0, 2], [0, 0, 0], [0, 3, 0]], [1, 2, 0, 0, 3]),
176
+ ([[9, 8, 7], [4, 5, 6], [0, 0, 0]], [9, 8, 7, 4, 5, 6, 0, 0])])
177
+ def test_add_reverse_edges(a, b_data_expected):
178
+ """Test that the reversal of the edges of the input graph works
179
+ as expected.
180
+ """
181
+ a = csr_array(a, dtype=np.int32, shape=(len(a), len(a)))
182
+ b = _add_reverse_edges(a)
183
+ assert_array_equal(b.data, b_data_expected)
184
+
185
+
186
+ @pytest.mark.parametrize("a,expected", [
187
+ ([[]], []),
188
+ ([[0]], []),
189
+ ([[1]], [0]),
190
+ ([[0, 1], [10, 0]], [1, 0]),
191
+ ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 3, 4, 1, 2])
192
+ ])
193
+ def test_make_edge_pointers(a, expected):
194
+ a = csr_array(a, dtype=np.int32)
195
+ rev_edge_ptr = _make_edge_pointers(a)
196
+ assert_array_equal(rev_edge_ptr, expected)
197
+
198
+
199
+ @pytest.mark.parametrize("a,expected", [
200
+ ([[]], []),
201
+ ([[0]], []),
202
+ ([[1]], [0]),
203
+ ([[0, 1], [10, 0]], [0, 1]),
204
+ ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 0, 1, 2, 2])
205
+ ])
206
+ def test_make_tails(a, expected):
207
+ a = csr_array(a, dtype=np.int32)
208
+ tails = _make_tails(a)
209
+ assert_array_equal(tails, expected)
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py ADDED
@@ -0,0 +1,368 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import numpy as np
3
+ from numpy.testing import assert_allclose
4
+ from pytest import raises as assert_raises
5
+ from scipy import sparse
6
+
7
+ from scipy.sparse import csgraph
8
+ from scipy._lib._util import np_long, np_ulong
9
+
10
+
11
+ def check_int_type(mat):
12
+ return np.issubdtype(mat.dtype, np.signedinteger) or np.issubdtype(
13
+ mat.dtype, np_ulong
14
+ )
15
+
16
+
17
def test_laplacian_value_error():
    """Non-square or non-2-D inputs must be rejected for every dtype."""
    bad_inputs = (
        [1, 1],                      # 1-D
        [[[1]]],                     # 3-D
        [[1, 2, 3], [4, 5, 6]],      # wide
        [[1, 2], [3, 4], [5, 5]],    # tall
    )
    for scalar_type in (int, float, complex):
        for bad in bad_inputs:
            arr = np.array(bad, dtype=scalar_type)
            assert_raises(ValueError, csgraph.laplacian, arr)
25
+
26
+
27
+ def _explicit_laplacian(x, normed=False):
28
+ if sparse.issparse(x):
29
+ x = x.toarray()
30
+ x = np.asarray(x)
31
+ y = -1.0 * x
32
+ for j in range(y.shape[0]):
33
+ y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
34
+ if normed:
35
+ d = np.diag(y).copy()
36
+ d[d == 0] = 1.0
37
+ y /= d[:,None]**.5
38
+ y /= d[None,:]**.5
39
+ return y
40
+
41
+
42
+ def _check_symmetric_graph_laplacian(mat, normed, copy=True):
43
+ if not hasattr(mat, 'shape'):
44
+ mat = eval(mat, dict(np=np, sparse=sparse))
45
+
46
+ if sparse.issparse(mat):
47
+ sp_mat = mat
48
+ mat = sp_mat.toarray()
49
+ else:
50
+ sp_mat = sparse.csr_array(mat)
51
+
52
+ mat_copy = np.copy(mat)
53
+ sp_mat_copy = sparse.csr_array(sp_mat, copy=True)
54
+
55
+ n_nodes = mat.shape[0]
56
+ explicit_laplacian = _explicit_laplacian(mat, normed=normed)
57
+ laplacian = csgraph.laplacian(mat, normed=normed, copy=copy)
58
+ sp_laplacian = csgraph.laplacian(sp_mat, normed=normed,
59
+ copy=copy)
60
+
61
+ if copy:
62
+ assert_allclose(mat, mat_copy)
63
+ _assert_allclose_sparse(sp_mat, sp_mat_copy)
64
+ else:
65
+ if not (normed and check_int_type(mat)):
66
+ assert_allclose(laplacian, mat)
67
+ if sp_mat.format == 'coo':
68
+ _assert_allclose_sparse(sp_laplacian, sp_mat)
69
+
70
+ assert_allclose(laplacian, sp_laplacian.toarray())
71
+
72
+ for tested in [laplacian, sp_laplacian.toarray()]:
73
+ if not normed:
74
+ assert_allclose(tested.sum(axis=0), np.zeros(n_nodes))
75
+ assert_allclose(tested.T, tested)
76
+ assert_allclose(tested, explicit_laplacian)
77
+
78
+
79
+ def test_symmetric_graph_laplacian():
80
+ symmetric_mats = (
81
+ 'np.arange(10) * np.arange(10)[:, np.newaxis]',
82
+ 'np.ones((7, 7))',
83
+ 'np.eye(19)',
84
+ 'sparse.diags([1, 1], [-1, 1], shape=(4, 4))',
85
+ 'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).toarray()',
86
+ 'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).todense()',
87
+ 'np.vander(np.arange(4)) + np.vander(np.arange(4)).T'
88
+ )
89
+ for mat in symmetric_mats:
90
+ for normed in True, False:
91
+ for copy in True, False:
92
+ _check_symmetric_graph_laplacian(mat, normed, copy)
93
+
94
+
95
+ def _assert_allclose_sparse(a, b, **kwargs):
96
+ # helper function that can deal with sparse matrices
97
+ if sparse.issparse(a):
98
+ a = a.toarray()
99
+ if sparse.issparse(b):
100
+ b = b.toarray()
101
+ assert_allclose(a, b, **kwargs)
102
+
103
+
104
+ def _check_laplacian_dtype_none(
105
+ A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
106
+ ):
107
+ mat = arr_type(A, dtype=dtype)
108
+ L, d = csgraph.laplacian(
109
+ mat,
110
+ normed=normed,
111
+ return_diag=True,
112
+ use_out_degree=use_out_degree,
113
+ copy=copy,
114
+ dtype=None,
115
+ )
116
+ if normed and check_int_type(mat):
117
+ assert L.dtype == np.float64
118
+ assert d.dtype == np.float64
119
+ _assert_allclose_sparse(L, desired_L, atol=1e-12)
120
+ _assert_allclose_sparse(d, desired_d, atol=1e-12)
121
+ else:
122
+ assert L.dtype == dtype
123
+ assert d.dtype == dtype
124
+ desired_L = np.asarray(desired_L).astype(dtype)
125
+ desired_d = np.asarray(desired_d).astype(dtype)
126
+ _assert_allclose_sparse(L, desired_L, atol=1e-12)
127
+ _assert_allclose_sparse(d, desired_d, atol=1e-12)
128
+
129
+ if not copy:
130
+ if not (normed and check_int_type(mat)):
131
+ if type(mat) is np.ndarray:
132
+ assert_allclose(L, mat)
133
+ elif mat.format == "coo":
134
+ _assert_allclose_sparse(L, mat)
135
+
136
+
137
+ def _check_laplacian_dtype(
138
+ A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
139
+ ):
140
+ mat = arr_type(A, dtype=dtype)
141
+ L, d = csgraph.laplacian(
142
+ mat,
143
+ normed=normed,
144
+ return_diag=True,
145
+ use_out_degree=use_out_degree,
146
+ copy=copy,
147
+ dtype=dtype,
148
+ )
149
+ assert L.dtype == dtype
150
+ assert d.dtype == dtype
151
+ desired_L = np.asarray(desired_L).astype(dtype)
152
+ desired_d = np.asarray(desired_d).astype(dtype)
153
+ _assert_allclose_sparse(L, desired_L, atol=1e-12)
154
+ _assert_allclose_sparse(d, desired_d, atol=1e-12)
155
+
156
+ if not copy:
157
+ if not (normed and check_int_type(mat)):
158
+ if type(mat) is np.ndarray:
159
+ assert_allclose(L, mat)
160
+ elif mat.format == 'coo':
161
+ _assert_allclose_sparse(L, mat)
162
+
163
+
164
+ INT_DTYPES = (np.intc, np_long, np.longlong)
165
+ REAL_DTYPES = (np.float32, np.float64, np.longdouble)
166
+ COMPLEX_DTYPES = (np.complex64, np.complex128, np.clongdouble)
167
+ DTYPES = INT_DTYPES + REAL_DTYPES + COMPLEX_DTYPES
168
+
169
+
170
+ @pytest.mark.parametrize("dtype", DTYPES)
171
+ @pytest.mark.parametrize("arr_type", [np.array,
172
+ sparse.csr_matrix,
173
+ sparse.coo_matrix,
174
+ sparse.csr_array,
175
+ sparse.coo_array])
176
+ @pytest.mark.parametrize("copy", [True, False])
177
+ @pytest.mark.parametrize("normed", [True, False])
178
+ @pytest.mark.parametrize("use_out_degree", [True, False])
179
+ def test_asymmetric_laplacian(use_out_degree, normed,
180
+ copy, dtype, arr_type):
181
+ # adjacency matrix
182
+ A = [[0, 1, 0],
183
+ [4, 2, 0],
184
+ [0, 0, 0]]
185
+ A = arr_type(np.array(A), dtype=dtype)
186
+ A_copy = A.copy()
187
+
188
+ if not normed and use_out_degree:
189
+ # Laplacian matrix using out-degree
190
+ L = [[1, -1, 0],
191
+ [-4, 4, 0],
192
+ [0, 0, 0]]
193
+ d = [1, 4, 0]
194
+
195
+ if normed and use_out_degree:
196
+ # normalized Laplacian matrix using out-degree
197
+ L = [[1, -0.5, 0],
198
+ [-2, 1, 0],
199
+ [0, 0, 0]]
200
+ d = [1, 2, 1]
201
+
202
+ if not normed and not use_out_degree:
203
+ # Laplacian matrix using in-degree
204
+ L = [[4, -1, 0],
205
+ [-4, 1, 0],
206
+ [0, 0, 0]]
207
+ d = [4, 1, 0]
208
+
209
+ if normed and not use_out_degree:
210
+ # normalized Laplacian matrix using in-degree
211
+ L = [[1, -0.5, 0],
212
+ [-2, 1, 0],
213
+ [0, 0, 0]]
214
+ d = [2, 1, 1]
215
+
216
+ _check_laplacian_dtype_none(
217
+ A,
218
+ L,
219
+ d,
220
+ normed=normed,
221
+ use_out_degree=use_out_degree,
222
+ copy=copy,
223
+ dtype=dtype,
224
+ arr_type=arr_type,
225
+ )
226
+
227
+ _check_laplacian_dtype(
228
+ A_copy,
229
+ L,
230
+ d,
231
+ normed=normed,
232
+ use_out_degree=use_out_degree,
233
+ copy=copy,
234
+ dtype=dtype,
235
+ arr_type=arr_type,
236
+ )
237
+
238
+
239
+ @pytest.mark.parametrize("fmt", ['csr', 'csc', 'coo', 'lil',
240
+ 'dok', 'dia', 'bsr'])
241
+ @pytest.mark.parametrize("normed", [True, False])
242
+ @pytest.mark.parametrize("copy", [True, False])
243
+ def test_sparse_formats(fmt, normed, copy):
244
+ mat = sparse.diags_array([1, 1], offsets=[-1, 1], shape=(4, 4), format=fmt)
245
+ _check_symmetric_graph_laplacian(mat, normed, copy)
246
+
247
+
248
+ @pytest.mark.parametrize(
249
+ "arr_type", [np.asarray,
250
+ sparse.csr_matrix,
251
+ sparse.coo_matrix,
252
+ sparse.csr_array,
253
+ sparse.coo_array]
254
+ )
255
+ @pytest.mark.parametrize("form", ["array", "function", "lo"])
256
+ def test_laplacian_symmetrized(arr_type, form):
257
+ # adjacency matrix
258
+ n = 3
259
+ mat = arr_type(np.arange(n * n).reshape(n, n))
260
+ L_in, d_in = csgraph.laplacian(
261
+ mat,
262
+ return_diag=True,
263
+ form=form,
264
+ )
265
+ L_out, d_out = csgraph.laplacian(
266
+ mat,
267
+ return_diag=True,
268
+ use_out_degree=True,
269
+ form=form,
270
+ )
271
+ Ls, ds = csgraph.laplacian(
272
+ mat,
273
+ return_diag=True,
274
+ symmetrized=True,
275
+ form=form,
276
+ )
277
+ Ls_normed, ds_normed = csgraph.laplacian(
278
+ mat,
279
+ return_diag=True,
280
+ symmetrized=True,
281
+ normed=True,
282
+ form=form,
283
+ )
284
+ mat += mat.T
285
+ Lss, dss = csgraph.laplacian(mat, return_diag=True, form=form)
286
+ Lss_normed, dss_normed = csgraph.laplacian(
287
+ mat,
288
+ return_diag=True,
289
+ normed=True,
290
+ form=form,
291
+ )
292
+
293
+ assert_allclose(ds, d_in + d_out)
294
+ assert_allclose(ds, dss)
295
+ assert_allclose(ds_normed, dss_normed)
296
+
297
+ d = {}
298
+ for L in ["L_in", "L_out", "Ls", "Ls_normed", "Lss", "Lss_normed"]:
299
+ if form == "array":
300
+ d[L] = eval(L)
301
+ else:
302
+ d[L] = eval(L)(np.eye(n, dtype=mat.dtype))
303
+
304
+ _assert_allclose_sparse(d["Ls"], d["L_in"] + d["L_out"].T)
305
+ _assert_allclose_sparse(d["Ls"], d["Lss"])
306
+ _assert_allclose_sparse(d["Ls_normed"], d["Lss_normed"])
307
+
308
+
309
+ @pytest.mark.parametrize(
310
+ "arr_type", [np.asarray,
311
+ sparse.csr_matrix,
312
+ sparse.coo_matrix,
313
+ sparse.csr_array,
314
+ sparse.coo_array]
315
+ )
316
+ @pytest.mark.parametrize("dtype", DTYPES)
317
+ @pytest.mark.parametrize("normed", [True, False])
318
+ @pytest.mark.parametrize("symmetrized", [True, False])
319
+ @pytest.mark.parametrize("use_out_degree", [True, False])
320
+ @pytest.mark.parametrize("form", ["function", "lo"])
321
+ def test_format(dtype, arr_type, normed, symmetrized, use_out_degree, form):
322
+ n = 3
323
+ mat = [[0, 1, 0], [4, 2, 0], [0, 0, 0]]
324
+ mat = arr_type(np.array(mat), dtype=dtype)
325
+ Lo, do = csgraph.laplacian(
326
+ mat,
327
+ return_diag=True,
328
+ normed=normed,
329
+ symmetrized=symmetrized,
330
+ use_out_degree=use_out_degree,
331
+ dtype=dtype,
332
+ )
333
+ La, da = csgraph.laplacian(
334
+ mat,
335
+ return_diag=True,
336
+ normed=normed,
337
+ symmetrized=symmetrized,
338
+ use_out_degree=use_out_degree,
339
+ dtype=dtype,
340
+ form="array",
341
+ )
342
+ assert_allclose(do, da)
343
+ _assert_allclose_sparse(Lo, La)
344
+
345
+ L, d = csgraph.laplacian(
346
+ mat,
347
+ return_diag=True,
348
+ normed=normed,
349
+ symmetrized=symmetrized,
350
+ use_out_degree=use_out_degree,
351
+ dtype=dtype,
352
+ form=form,
353
+ )
354
+ assert_allclose(d, do)
355
+ assert d.dtype == dtype
356
+ Lm = L(np.eye(n, dtype=mat.dtype)).astype(dtype)
357
+ _assert_allclose_sparse(Lm, Lo, rtol=2e-7, atol=2e-7)
358
+ x = np.arange(6).reshape(3, 2)
359
+ if not (normed and dtype in INT_DTYPES):
360
+ assert_allclose(L(x), Lo @ x)
361
+ else:
362
+ # Normalized Lo is casted to integer, but L() is not
363
+ pass
364
+
365
+
366
+ def test_format_error_message():
367
+ with pytest.raises(ValueError, match="Invalid form: 'toto'"):
368
+ _ = csgraph.laplacian(np.eye(1), form='toto')
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_matching.py ADDED
@@ -0,0 +1,295 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import product
2
+
3
+ import numpy as np
4
+ from numpy.testing import assert_array_equal, assert_equal
5
+ import pytest
6
+
7
+ from scipy.sparse import csr_array, diags_array
8
+ from scipy.sparse.csgraph import (
9
+ maximum_bipartite_matching, min_weight_full_bipartite_matching
10
+ )
11
+
12
+
13
def test_maximum_bipartite_matching_raises_on_dense_input():
    """A dense ndarray is not an accepted graph type: expect TypeError."""
    dense_graph = np.array([[0, 1], [0, 0]])
    with pytest.raises(TypeError):
        maximum_bipartite_matching(dense_graph)
+
18
+
19
def test_maximum_bipartite_matching_empty_graph():
    """Both matching orientations of a 0x0 graph are empty arrays."""
    empty = csr_array((0, 0))
    for perm_type in ('row', 'column'):
        matching = maximum_bipartite_matching(empty, perm_type=perm_type)
        assert_array_equal(np.array([]), matching)
26
+
27
+
28
def test_maximum_bipartite_matching_empty_left_partition():
    """With zero columns the row-perm is empty and the column-perm is all -1."""
    graph = csr_array((2, 0))
    assert_array_equal(
        np.array([]), maximum_bipartite_matching(graph, perm_type='row'))
    assert_array_equal(
        np.array([-1, -1]),
        maximum_bipartite_matching(graph, perm_type='column'))
34
+
35
+
36
def test_maximum_bipartite_matching_empty_right_partition():
    """With zero rows the row-perm is all -1 and the column-perm is empty."""
    graph = csr_array((0, 3))
    assert_array_equal(
        np.array([-1, -1, -1]),
        maximum_bipartite_matching(graph, perm_type='row'))
    assert_array_equal(
        np.array([]), maximum_bipartite_matching(graph, perm_type='column'))
42
+
43
+
44
def test_maximum_bipartite_matching_graph_with_no_edges():
    """An edgeless square graph matches nothing in either orientation."""
    graph = csr_array((2, 2))
    unmatched = np.array([-1, -1])
    assert_array_equal(
        unmatched, maximum_bipartite_matching(graph, perm_type='row'))
    assert_array_equal(
        unmatched, maximum_bipartite_matching(graph, perm_type='column'))
50
+
51
+
52
def test_maximum_bipartite_matching_graph_that_causes_augmentation():
    # In this graph, column 1 is initially assigned to row 1, but it should be
    # reassigned to make room for row 2.
    graph = csr_array([[1, 1], [1, 0]])
    expected = np.array([1, 0])
    for perm_type in ('column', 'row'):
        matching = maximum_bipartite_matching(graph, perm_type=perm_type)
        assert_array_equal(expected, matching)
61
+
62
+
63
def test_maximum_bipartite_matching_graph_with_more_rows_than_columns():
    """On a tall graph, some rows stay unmatched (-1 entries)."""
    graph = csr_array([[1, 1], [1, 0], [0, 1]])
    by_column = maximum_bipartite_matching(graph, perm_type='column')
    by_row = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, -1, 1]), by_column)
    assert_array_equal(np.array([0, 2]), by_row)
69
+
70
+
71
def test_maximum_bipartite_matching_graph_with_more_columns_than_rows():
    """On a wide graph, some columns stay unmatched (-1 entries)."""
    graph = csr_array([[1, 1, 0], [0, 0, 1]])
    by_column = maximum_bipartite_matching(graph, perm_type='column')
    by_row = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, 2]), by_column)
    assert_array_equal(np.array([0, -1, 1]), by_row)
77
+
78
+
79
def test_maximum_bipartite_matching_explicit_zeros_count_as_edges():
    """Explicitly stored zeros belong to the sparsity pattern and are edges."""
    # CSR triple: data, indices, indptr -- an anti-diagonal of stored zeros.
    graph = csr_array(([0, 0], [1, 0], [0, 1, 2]), shape=(2, 2))
    expected = np.array([1, 0])
    assert_array_equal(
        expected, maximum_bipartite_matching(graph, perm_type='row'))
    assert_array_equal(
        expected, maximum_bipartite_matching(graph, perm_type='column'))
89
+
90
+
91
def test_maximum_bipartite_matching_feasibility_of_result():
    # This is a regression test for GitHub issue #11458
    data = np.ones(50, dtype=int)
    indices = [11, 12, 19, 22, 23, 5, 22, 3, 8, 10, 5, 6, 11, 12, 13, 5, 13,
               14, 20, 22, 3, 15, 3, 13, 14, 11, 12, 19, 22, 23, 5, 22, 3, 8,
               10, 5, 6, 11, 12, 13, 5, 13, 14, 20, 22, 3, 15, 3, 13, 14]
    indptr = [0, 5, 7, 10, 10, 15, 20, 22, 22, 23, 25, 30, 32, 35, 35, 40, 45,
              47, 47, 48, 50]
    graph = csr_array((data, indices, indptr), shape=(20, 25))
    row_match = maximum_bipartite_matching(graph, perm_type='row')
    col_match = maximum_bipartite_matching(graph, perm_type='column')
    assert (row_match != -1).sum() == 13
    assert (col_match != -1).sum() == 13
    # Every matched pair must correspond to an actual edge of the graph.
    for row, col in enumerate(col_match):
        if col != -1:
            assert graph[row, col]
    for col, row in enumerate(row_match):
        if row != -1:
            assert graph[row, col]
111
+
112
+
113
+ def test_matching_large_random_graph_with_one_edge_incident_to_each_vertex():
114
+ np.random.seed(42)
115
+ A = diags_array(np.ones(25), offsets=0, format='csr')
116
+ rand_perm = np.random.permutation(25)
117
+ rand_perm2 = np.random.permutation(25)
118
+
119
+ Rrow = np.arange(25)
120
+ Rcol = rand_perm
121
+ Rdata = np.ones(25, dtype=int)
122
+ Rmat = csr_array((Rdata, (Rrow, Rcol)))
123
+
124
+ Crow = rand_perm2
125
+ Ccol = np.arange(25)
126
+ Cdata = np.ones(25, dtype=int)
127
+ Cmat = csr_array((Cdata, (Crow, Ccol)))
128
+ # Randomly permute identity matrix
129
+ B = Rmat @ A @ Cmat
130
+
131
+ # Row permute
132
+ perm = maximum_bipartite_matching(B, perm_type='row')
133
+ Rrow = np.arange(25)
134
+ Rcol = perm
135
+ Rdata = np.ones(25, dtype=int)
136
+ Rmat = csr_array((Rdata, (Rrow, Rcol)))
137
+ C1 = Rmat @ B
138
+
139
+ # Column permute
140
+ perm2 = maximum_bipartite_matching(B, perm_type='column')
141
+ Crow = perm2
142
+ Ccol = np.arange(25)
143
+ Cdata = np.ones(25, dtype=int)
144
+ Cmat = csr_array((Cdata, (Crow, Ccol)))
145
+ C2 = B @ Cmat
146
+
147
+ # Should get identity matrix back
148
+ assert_equal(any(C1.diagonal() == 0), False)
149
+ assert_equal(any(C2.diagonal() == 0), False)
150
+
151
+
152
@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)])
def test_min_weight_full_matching_trivial_graph(num_rows, num_cols):
    """Degenerate (empty-partition) biadjacency matrices yield empty matchings."""
    biadjacency = csr_array((num_cols, num_rows))
    row_ind, col_ind = min_weight_full_bipartite_matching(biadjacency)
    assert len(row_ind) == 0
    assert len(col_ind) == 0
158
+
159
+
160
@pytest.mark.parametrize('biadjacency',
                         [
                             [[1, 1, 1], [1, 0, 0], [1, 0, 0]],
                             [[1, 1, 1], [0, 0, 1], [0, 0, 1]],
                             [[1, 0, 0, 1], [1, 1, 0, 1], [0, 0, 0, 0]],
                             [[1, 0, 0], [2, 0, 0]],
                             [[0, 1, 0], [0, 2, 0]],
                             [[1, 0], [2, 0], [5, 0]]
                         ])
def test_min_weight_full_matching_infeasible_problems(biadjacency):
    """Graphs that admit no full matching must raise ValueError."""
    sparse_biadjacency = csr_array(biadjacency)
    with pytest.raises(ValueError):
        min_weight_full_bipartite_matching(sparse_biadjacency)
172
+
173
+
174
def test_min_weight_full_matching_large_infeasible():
    # Regression test for GitHub issue #17269
    # Build the 22x22 cost matrix sparsely: rows 0-8 each carry a single
    # tiny entry on a shifted diagonal, the remaining rows a handful of
    # larger weights; the resulting bipartite graph has no full matching.
    cost = np.zeros((22, 22))
    for i in range(9):
        cost[i, 13 + i] = 0.001
    nonzeros = {
        (9, 1): 0.11687445, (9, 4): 0.01319788,
        (9, 5): 0.07509257, (9, 9): 0.74228317,
        (10, 3): 0.81087935,
        (11, 4): 0.8408466, (11, 9): 0.01194389,
        (12, 1): 0.82994211, (12, 5): 0.11468516, (12, 9): 0.11173505,
        (13, 0): 0.18796507, (13, 2): 0.04002318, (13, 8): 0.75883335,
        (14, 2): 0.71545464, (14, 8): 0.02748488,
        (15, 0): 0.78470564, (15, 8): 0.14829198,
        (16, 1): 0.10870609, (16, 5): 0.8918677, (16, 9): 0.06306644,
        (17, 12): 0.63844085,
        (18, 6): 0.7442354,
        (19, 7): 0.09850549, (19, 10): 0.18638258, (19, 11): 0.2769244,
        (20, 7): 0.73182464, (20, 10): 0.46443561, (20, 11): 0.38589284,
        (21, 0): 0.29510278, (21, 8): 0.09666032,
    }
    for (i, j), weight in nonzeros.items():
        cost[i, j] = weight
    with pytest.raises(ValueError, match='no full matching exists'):
        min_weight_full_bipartite_matching(csr_array(cost))
226
+
227
+
228
@pytest.mark.thread_unsafe
def test_explicit_zero_causes_warning():
    """An explicitly stored zero weight must trigger a UserWarning."""
    biadjacency = csr_array(((2, 0, 3), (0, 1, 1), (0, 2, 3)))
    with pytest.warns(UserWarning):
        min_weight_full_bipartite_matching(biadjacency)
233
+
234
+
235
# General test for linear sum assignment solvers to make it possible to rely
# on the same tests for scipy.optimize.linear_sum_assignment.
def linear_sum_assignment_assertions(
    solver, array_type, sign, test_case
):
    """Run ``solver`` on ``test_case`` and its transpose.

    ``sign`` flips minimization (+1) into maximization (-1) by negating
    the cost matrix and the expected selected costs.
    """
    cost_matrix, expected_cost = test_case
    maximize = sign == -1
    cost_matrix = sign * array_type(cost_matrix)
    expected_cost = sign * np.array(expected_cost)

    # Original orientation: row indices are sorted and the selected costs
    # must match the expectation position-wise.
    row_ind, col_ind = solver(cost_matrix, maximize=maximize)
    assert_array_equal(row_ind, np.sort(row_ind))
    selected = np.array(cost_matrix[row_ind, col_ind]).flatten()
    assert_array_equal(expected_cost, selected)

    # Transposed orientation: the multiset of selected costs is unchanged.
    cost_matrix = cost_matrix.T
    row_ind, col_ind = solver(cost_matrix, maximize=maximize)
    assert_array_equal(row_ind, np.sort(row_ind))
    selected = np.sort(np.array(cost_matrix[row_ind, col_ind])).flatten()
    assert_array_equal(np.sort(expected_cost), selected)
256
+
257
+
258
# Cases shared with the dense linear-sum-assignment tests: pairs of
# (cost matrix, expected selected costs), each run with both signs.
_lsa_cases = [
    # Square
    ([[400, 150, 400],
      [400, 450, 600],
      [300, 225, 300]],
     [150, 400, 300]),

    # Rectangular variant
    ([[400, 150, 400, 1],
      [400, 450, 600, 2],
      [300, 225, 300, 3]],
     [150, 2, 300]),

    ([[10, 10, 8],
      [9, 8, 1],
      [9, 7, 4]],
     [10, 1, 7]),

    # Square
    ([[10, 10, 8, 11],
      [9, 8, 1, 1],
      [9, 7, 4, 10]],
     [10, 1, 4]),

    # Rectangular variant
    ([[10, float("inf"), float("inf")],
      [float("inf"), float("inf"), 1],
      [float("inf"), 7, float("inf")]],
     [10, 1, 7]),
]

linear_sum_assignment_test_cases = product([-1, 1], _lsa_cases)
290
+
291
+
292
@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases)
def test_min_weight_full_matching_small_inputs(sign, test_case):
    """Run the shared linear-sum-assignment cases against the sparse solver."""
    solver = min_weight_full_bipartite_matching
    linear_sum_assignment_assertions(solver, csr_array, sign, test_case)
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_pydata_sparse.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import numpy as np
4
+ import scipy.sparse as sp
5
+ import scipy.sparse.csgraph as spgraph
6
+ from scipy._lib import _pep440
7
+
8
+ from numpy.testing import assert_equal
9
+
10
try:
    import sparse
except Exception:
    sparse = None

# Without pydata/sparse installed, every test in this module is skipped.
pytestmark = pytest.mark.skipif(
    sparse is None, reason="pydata/sparse not installed"
)


msg = "pydata/sparse (0.15.1) does not implement necessary operations"


# DOK lacks operations the csgraph routines need, hence the expected failure.
sparse_params = (
    pytest.param("COO"),
    pytest.param("DOK", marks=[pytest.mark.xfail(reason=msg)]),
)
24
+
25
+
26
def check_sparse_version(min_ver):
    """Return a pytest mark that skips unless pydata/sparse >= ``min_ver``."""
    if sparse is None:
        return pytest.mark.skip(reason="sparse is not installed")
    too_old = _pep440.parse(sparse.__version__) < _pep440.Version(min_ver)
    return pytest.mark.skipif(
        too_old, reason=f"sparse version >= {min_ver} required"
    )
33
+
34
+
35
@pytest.fixture(params=sparse_params)
def sparse_cls(request):
    """Parametrized fixture yielding the pydata/sparse container class."""
    cls_name = request.param
    return getattr(sparse, cls_name)
38
+
39
+
40
@pytest.fixture
def graphs(sparse_cls):
    """Return one small directed graph as (dense ndarray, pydata/sparse)."""
    adjacency = [
        [0, 1, 1, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0],
    ]
    A_dense = np.array(adjacency)
    return A_dense, sparse_cls(A_dense)
52
+
53
+
54
@pytest.mark.parametrize(
    "func",
    [
        spgraph.shortest_path,
        spgraph.dijkstra,
        spgraph.floyd_warshall,
        spgraph.bellman_ford,
        spgraph.johnson,
        spgraph.reverse_cuthill_mckee,
        spgraph.maximum_bipartite_matching,
        spgraph.structural_rank,
    ]
)
def test_csgraph_equiv(func, graphs):
    """Each routine must give the same answer for pydata/sparse as for scipy."""
    A_dense, A_sparse = graphs
    assert_equal(func(A_sparse), func(sp.csc_array(A_dense)))
72
+
73
+
74
def test_connected_components(graphs):
    """connected_components must agree between pydata/sparse and scipy input."""
    A_dense, A_sparse = graphs

    n_sparse, labels_sparse = spgraph.connected_components(A_sparse)
    n_scipy, labels_scipy = spgraph.connected_components(sp.csc_array(A_dense))

    assert n_sparse == n_scipy
    assert_equal(labels_sparse, labels_scipy)
83
+
84
+
85
def test_laplacian(graphs):
    """laplacian must preserve the input container type and the values."""
    A_dense, A_sparse = graphs

    actual = spgraph.laplacian(A_sparse)
    desired = spgraph.laplacian(sp.csc_array(A_dense))

    # The pydata/sparse container type must round-trip through the call.
    assert isinstance(actual, type(A_sparse))
    assert_equal(actual.todense(), desired.todense())
96
+
97
+
98
@pytest.mark.parametrize(
    "func", [spgraph.breadth_first_order, spgraph.depth_first_order]
)
def test_order_search(graphs, func):
    """Traversal orders from node 0 must agree across input types."""
    A_dense, A_sparse = graphs
    assert_equal(func(A_sparse, 0), func(sp.csc_array(A_dense), 0))
108
+
109
+
110
@pytest.mark.parametrize(
    "func", [spgraph.breadth_first_tree, spgraph.depth_first_tree]
)
def test_tree_search(graphs, func):
    """Search trees keep the pydata/sparse type and match scipy's values."""
    A_dense, A_sparse = graphs

    actual = func(A_sparse, 0)
    desired = func(sp.csc_array(A_dense), 0)

    assert isinstance(actual, type(A_sparse))
    assert_equal(actual.todense(), desired.todense())
123
+
124
+
125
def test_minimum_spanning_tree(graphs):
    """minimum_spanning_tree keeps the input type and matches scipy."""
    A_dense, A_sparse = graphs

    actual = spgraph.minimum_spanning_tree(A_sparse)
    desired = spgraph.minimum_spanning_tree(sp.csc_array(A_dense))

    assert isinstance(actual, type(A_sparse))
    assert_equal(actual.todense(), desired.todense())
136
+
137
+
138
def test_maximum_flow(graphs):
    """maximum_flow agrees in value and per-edge flows across input types."""
    A_dense, A_sparse = graphs

    actual = spgraph.maximum_flow(A_sparse, 0, 2)
    desired = spgraph.maximum_flow(sp.csr_array(A_dense), 0, 2)

    assert actual.flow_value == desired.flow_value
    assert isinstance(actual.flow, type(A_sparse))
    assert_equal(actual.flow.todense(), desired.flow.todense())
150
+
151
+
152
def test_min_weight_full_bipartite_matching(graphs):
    """The sparse matching solver handles a pydata/sparse submatrix."""
    A_dense, A_sparse = graphs
    func = spgraph.min_weight_full_bipartite_matching
    assert_equal(func(A_sparse[0:2, 1:3]),
                 func(sp.csc_array(A_dense)[0:2, 1:3]))
160
+
161
+
162
@check_sparse_version("0.15.4")
@pytest.mark.parametrize(
    "func",
    [
        spgraph.shortest_path,
        spgraph.dijkstra,
        spgraph.floyd_warshall,
        spgraph.bellman_ford,
        spgraph.johnson,
        spgraph.minimum_spanning_tree,
    ]
)
@pytest.mark.parametrize(
    "fill_value, comp_func",
    [(np.inf, np.isposinf), (np.nan, np.isnan)],
)
def test_nonzero_fill_value(graphs, func, fill_value, comp_func):
    """Routines must tolerate pydata/sparse arrays with non-finite fill values."""
    A_dense, A_sparse = graphs
    A_sparse = A_sparse.astype(float)
    A_sparse.fill_value = fill_value

    actual = func(A_sparse)
    desired = func(sp.csc_array(A_dense))

    if func is not spgraph.minimum_spanning_tree:
        assert_equal(actual, desired)
        return
    # The MST result keeps the container type and the non-finite fill value;
    # zero those entries before comparing to the scipy result.
    assert isinstance(actual, type(A_sparse))
    assert comp_func(actual.fill_value)
    dense = actual.todense()
    dense[comp_func(dense)] = 0.0
    assert_equal(dense, desired.todense())
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_reordering.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_equal
3
+ from scipy.sparse.csgraph import reverse_cuthill_mckee, structural_rank
4
+ from scipy.sparse import csc_array, csr_array, coo_array
5
+
6
+
7
def test_graph_reverse_cuthill_mckee():
    """RCM on a small symmetric graph, with both int32 and int64 indices."""
    A = np.array([[1, 0, 0, 0, 1, 0, 0, 0],
                  [0, 1, 1, 0, 0, 1, 0, 1],
                  [0, 1, 1, 0, 1, 0, 0, 0],
                  [0, 0, 0, 1, 0, 0, 1, 0],
                  [1, 0, 1, 0, 1, 0, 0, 0],
                  [0, 1, 0, 0, 0, 1, 0, 1],
                  [0, 0, 0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)
    expected = np.array([6, 3, 7, 5, 1, 2, 4, 0])

    graph = csr_array(A)
    assert_equal(reverse_cuthill_mckee(graph), expected)

    # Test int64 indices input
    graph.indices = graph.indices.astype('int64')
    graph.indptr = graph.indptr.astype('int64')
    assert_equal(reverse_cuthill_mckee(graph, True), expected)
27
+
28
+
29
def test_graph_reverse_cuthill_mckee_ordering():
    """RCM ordering of a larger 16-vertex symmetric pattern."""
    weights = np.ones(63, dtype=int)
    rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
                     2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
                     6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
                     9, 10, 10, 10, 10, 10, 11, 11, 11, 11,
                     12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
                     14, 15, 15, 15, 15, 15])
    cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
                     7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13,
                     15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
                     1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
                     4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
                     5, 7, 10, 13, 15])
    graph = csr_array((weights, (rows, cols)))
    expected = np.array([12, 14, 4, 6, 10, 8, 2, 15,
                         0, 13, 7, 5, 9, 11, 1, 3])
    assert_equal(reverse_cuthill_mckee(graph), expected)
48
+
49
+
50
def test_graph_structural_rank():
    """structural_rank on full-rank, deficient and rectangular inputs."""
    # Square matrix with full structural rank.
    A = csc_array([[1, 1, 0],
                   [1, 0, 1],
                   [0, 1, 0]])
    assert_equal(structural_rank(A), 3)

    # Structurally rank-deficient 8x8 matrix.
    rows = np.array([0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
                     3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7])
    cols = np.array([0, 1, 2, 3, 4, 2, 5, 2, 6, 0, 1, 3,
                     5, 6, 7, 4, 5, 5, 6, 2, 6, 2, 4])
    B = coo_array((np.ones_like(rows), (rows, cols)), shape=(8, 8))
    assert_equal(structural_rank(B), 6)

    # Wide and tall rectangular matrices.
    C = csc_array([[1, 0, 2, 0],
                   [2, 0, 4, 0]])
    assert_equal(structural_rank(C), 2)
    assert_equal(structural_rank(C.T), 2)
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py ADDED
@@ -0,0 +1,484 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import StringIO
2
+ import warnings
3
+ import numpy as np
4
+ from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose
5
+ from pytest import raises as assert_raises
6
+ from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson,
7
+ bellman_ford, construct_dist_matrix, yen,
8
+ NegativeCycleError)
9
+ import scipy.sparse
10
+ from scipy.io import mmread
11
+ import pytest
12
+
13
# ---------------------------------------------------------------------------
# Shared fixture graphs and their precomputed shortest-path solutions.
# ---------------------------------------------------------------------------
inf = np.inf  # shorthand for unreachable-pair distances

directed_G = np.array([[0, 3, 3, 0, 0],
                       [0, 0, 0, 2, 4],
                       [0, 0, 0, 0, 0],
                       [1, 0, 0, 0, 0],
                       [2, 0, 0, 2, 0]], dtype=float)

undirected_G = np.array([[0, 3, 3, 1, 2],
                         [3, 0, 0, 2, 4],
                         [3, 0, 0, 0, 0],
                         [1, 2, 0, 0, 2],
                         [2, 4, 0, 2, 0]], dtype=float)

# Same directed graph with every edge weight collapsed to 1.
unweighted_G = (directed_G > 0).astype(float)

directed_SP = [[0, 3, 3, 5, 7],
               [3, 0, 6, 2, 4],
               [inf, inf, 0, inf, inf],
               [1, 4, 4, 0, 8],
               [2, 5, 5, 2, 0]]

# Predecessor matrices of the two shortest paths from node 0 to node 3.
directed_2SP_0_to_3 = [[-9999, 0, -9999, 1, -9999],
                       [-9999, 0, -9999, 4, 1]]

# Directed sparse graph with a zero-weight edge and two components.
directed_sparse_zero_G = scipy.sparse.csr_array(
    (
        [0, 1, 2, 3, 1],
        ([0, 1, 2, 3, 4], [1, 2, 0, 4, 3]),
    ),
    shape=(5, 5),
)

directed_sparse_zero_SP = [[0, 0, 1, inf, inf],
                           [3, 0, 1, inf, inf],
                           [2, 2, 0, inf, inf],
                           [inf, inf, inf, 0, 3],
                           [inf, inf, inf, 1, 0]]

undirected_sparse_zero_G = scipy.sparse.csr_array(
    (
        [0, 0, 1, 1, 2, 2, 1, 1],
        ([0, 1, 1, 2, 2, 0, 3, 4], [1, 0, 2, 1, 0, 2, 4, 3])
    ),
    shape=(5, 5),
)

undirected_sparse_zero_SP = [[0, 0, 1, inf, inf],
                             [0, 0, 1, inf, inf],
                             [1, 1, 0, inf, inf],
                             [inf, inf, inf, 0, 1],
                             [inf, inf, inf, 1, 0]]

directed_pred = np.array([[-9999, 0, 0, 1, 1],
                          [3, -9999, 0, 1, 1],
                          [-9999, -9999, -9999, -9999, -9999],
                          [3, 0, 0, -9999, 1],
                          [4, 0, 0, 4, -9999]], dtype=float)

undirected_SP = np.array([[0, 3, 3, 1, 2],
                          [3, 0, 6, 2, 4],
                          [3, 6, 0, 4, 5],
                          [1, 2, 4, 0, 2],
                          [2, 4, 5, 2, 0]], dtype=float)

# Distances when the Dijkstra search radius is limited to 2 (resp. 0).
undirected_SP_limit_2 = np.array([[0, inf, inf, 1, 2],
                                  [inf, 0, inf, 2, inf],
                                  [inf, inf, 0, inf, inf],
                                  [1, 2, inf, 0, 2],
                                  [2, inf, inf, 2, 0]], dtype=float)

undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5)
undirected_SP_limit_0[undirected_SP_limit_0 > 0] = inf

undirected_pred = np.array([[-9999, 0, 0, 0, 0],
                            [1, -9999, 0, 1, 1],
                            [2, 0, -9999, 0, 0],
                            [3, 3, 0, -9999, 3],
                            [4, 4, 0, 4, -9999]], dtype=float)

directed_negative_weighted_G = np.array([[0, 0, 0],
                                         [-1, 0, 0],
                                         [0, -1, 0]], dtype=float)

directed_negative_weighted_SP = np.array([[0, inf, inf],
                                          [-1, 0, inf],
                                          [-2, -1, 0]], dtype=float)

# Method codes accepted by shortest_path().
methods = ['auto', 'FW', 'D', 'BF', 'J']
100
+
101
+
102
def test_dijkstra_limit():
    """Dijkstra with limits 0, 2 and inf against precomputed distances."""
    expected_by_limit = {0: undirected_SP_limit_0,
                         2: undirected_SP_limit_2,
                         np.inf: undirected_SP}
    for limit, expected in expected_by_limit.items():
        SP = dijkstra(undirected_G, directed=False, limit=limit)
        assert_array_almost_equal(SP, expected)
114
+
115
+
116
def test_directed():
    """Every method must reproduce the precomputed directed distances."""
    for method in methods:
        SP = shortest_path(directed_G, method=method, directed=True,
                           overwrite=False)
        assert_array_almost_equal(SP, directed_SP)
124
+
125
+
126
def test_undirected():
    """Symmetrizing the directed graph (directed=False) and passing the
    symmetric graph directly must both give the undirected distances."""
    for method in methods:
        # Directed input, symmetrized by the routine.
        SP1 = shortest_path(directed_G, method=method, directed=False,
                            overwrite=False)
        assert_array_almost_equal(SP1, undirected_SP)
        # Symmetric input, treated as directed.
        SP2 = shortest_path(undirected_G, method=method, directed=True,
                            overwrite=False)
        assert_array_almost_equal(SP2, undirected_SP)
140
+
141
+
142
def test_directed_sparse_zero():
    # test directed sparse graph with zero-weight edge and two connected components
    for method in methods:
        SP = shortest_path(directed_sparse_zero_G, method=method,
                           directed=True, overwrite=False)
        assert_array_almost_equal(SP, directed_sparse_zero_SP)
151
+
152
+
153
def test_undirected_sparse_zero():
    """Zero-weight edges must survive symmetrization of the sparse graph."""
    for method in methods:
        SP1 = shortest_path(directed_sparse_zero_G, method=method,
                            directed=False, overwrite=False)
        assert_array_almost_equal(SP1, undirected_sparse_zero_SP)
        SP2 = shortest_path(undirected_sparse_zero_G, method=method,
                            directed=True, overwrite=False)
        assert_array_almost_equal(SP2, undirected_sparse_zero_SP)
167
+
168
+
169
@pytest.mark.parametrize('directed, SP_ans',
                         ((True, directed_SP),
                          (False, undirected_SP)))
@pytest.mark.parametrize('indices', ([0, 2, 4], [0, 4], [3, 4], [0, 0]))
def test_dijkstra_indices_min_only(directed, SP_ans, indices):
    """min_only=True returns, per node, the nearest source and its distance."""
    SP_ans = np.array(SP_ans)
    indices = np.array(indices, dtype=np.int64)
    # Expected nearest source and distance for every node.
    min_ind_ans = indices[np.argmin(SP_ans[indices, :], axis=0)]
    node_idx = np.arange(SP_ans.shape[0])
    min_d_ans = SP_ans[min_ind_ans, node_idx]
    # Unreachable nodes carry the -9999 sentinel instead of a source.
    min_ind_ans[np.isinf(min_d_ans)] = -9999

    SP, pred, sources = dijkstra(directed_G,
                                 directed=directed,
                                 indices=indices,
                                 min_only=True,
                                 return_predecessors=True)
    assert_array_almost_equal(SP, min_d_ans)
    assert_array_equal(min_ind_ans, sources)

    # Without predecessors only the distance array is returned.
    SP = dijkstra(directed_G,
                  directed=directed,
                  indices=indices,
                  min_only=True,
                  return_predecessors=False)
    assert_array_almost_equal(SP, min_d_ans)
195
+
196
+
197
+ @pytest.mark.parametrize('n', (10, 100, 1000))
198
+ def test_dijkstra_min_only_random(n):
199
+ rng = np.random.default_rng(7345782358920239234)
200
+ data = scipy.sparse.random_array((n, n), density=0.5, format='lil',
201
+ rng=rng, dtype=np.float64)
202
+ data.setdiag(np.zeros(n, dtype=np.bool_))
203
+ # choose some random vertices
204
+ v = np.arange(n)
205
+ rng.shuffle(v)
206
+ indices = v[:int(n*.1)]
207
+ ds, pred, sources = dijkstra(data,
208
+ directed=True,
209
+ indices=indices,
210
+ min_only=True,
211
+ return_predecessors=True)
212
+ for k in range(n):
213
+ p = pred[k]
214
+ s = sources[k]
215
+ while p != -9999:
216
+ assert sources[p] == s
217
+ p = pred[p]
218
+
219
+
220
def test_dijkstra_random():
    # reproduces the hang observed in gh-17782
    n = 10
    data = [0.33629, 0.40458, 0.47493, 0.42757, 0.11497, 0.91653, 0.69084,
            0.64979, 0.62555, 0.743, 0.01724, 0.99945, 0.31095, 0.15557,
            0.02439, 0.65814, 0.23478, 0.24072]
    indices = [0, 4, 4, 5, 7, 9, 0, 6, 2, 3, 7, 9, 1, 2, 9, 2, 5, 6]
    indptr = [0, 0, 2, 5, 6, 7, 8, 12, 15, 18, 18]
    g = scipy.sparse.csr_array((data, indices, indptr), shape=(n, n))
    # Must terminate (and not hang) with predecessors requested.
    dijkstra(g, directed=True, return_predecessors=True)
230
+
231
+
232
+ def test_gh_17782_segfault():
233
+ text = """%%MatrixMarket matrix coordinate real general
234
+ 84 84 22
235
+ 2 1 4.699999809265137e+00
236
+ 6 14 1.199999973177910e-01
237
+ 9 6 1.199999973177910e-01
238
+ 10 16 2.012000083923340e+01
239
+ 11 10 1.422000026702881e+01
240
+ 12 1 9.645999908447266e+01
241
+ 13 18 2.012000083923340e+01
242
+ 14 13 4.679999828338623e+00
243
+ 15 11 1.199999973177910e-01
244
+ 16 12 1.199999973177910e-01
245
+ 18 15 1.199999973177910e-01
246
+ 32 2 2.299999952316284e+00
247
+ 33 20 6.000000000000000e+00
248
+ 33 32 5.000000000000000e+00
249
+ 36 9 3.720000028610229e+00
250
+ 36 37 3.720000028610229e+00
251
+ 36 38 3.720000028610229e+00
252
+ 37 44 8.159999847412109e+00
253
+ 38 32 7.903999328613281e+01
254
+ 43 20 2.400000000000000e+01
255
+ 43 33 4.000000000000000e+00
256
+ 44 43 6.028000259399414e+01
257
+ """
258
+ data = mmread(StringIO(text), spmatrix=False)
259
+ dijkstra(data, directed=True, return_predecessors=True)
260
+
261
+
262
def test_shortest_path_indices():
    """Indices may be passed in any shape; the FW method rejects them."""
    indices = np.arange(4)

    for indshape in [(4,), (4, 1), (2, 2)]:
        outshape = indshape + (5,)
        for func in (dijkstra, bellman_ford, johnson, shortest_path):
            SP = func(directed_G, directed=False,
                      indices=indices.reshape(indshape))
            assert_array_almost_equal(
                SP, undirected_SP[indices].reshape(outshape))

    assert_raises(ValueError, shortest_path, directed_G, method='FW',
                  indices=indices)
277
+
278
+
279
def test_predecessors():
    """Distance and predecessor matrices for every method/direction combo."""
    SP_res = {True: directed_SP, False: undirected_SP}
    pred_res = {True: directed_pred, False: undirected_pred}

    for method in methods:
        for directed in (True, False):
            SP, pred = shortest_path(directed_G, method, directed=directed,
                                     overwrite=False,
                                     return_predecessors=True)
            assert_array_almost_equal(SP, SP_res[directed])
            assert_array_almost_equal(pred, pred_res[directed])
295
+
296
+
297
def test_construct_shortest_path():
    """construct_dist_matrix must rebuild distances from predecessors.

    The original looped over ``methods`` but never passed the method through
    to ``shortest_path``, so the identical check ran five times per
    direction; run it once per direction instead.
    """
    for directed in (True, False):
        SP1, pred = shortest_path(directed_G,
                                  directed=directed,
                                  overwrite=False,
                                  return_predecessors=True)
        SP2 = construct_dist_matrix(directed_G, pred, directed=directed)
        assert_array_almost_equal(SP1, SP2)
309
+
310
+
311
+ def test_unweighted_path():
312
+ def check(method, directed):
313
+ SP1 = shortest_path(directed_G,
314
+ directed=directed,
315
+ overwrite=False,
316
+ unweighted=True)
317
+ SP2 = shortest_path(unweighted_G,
318
+ directed=directed,
319
+ overwrite=False,
320
+ unweighted=False)
321
+ assert_array_almost_equal(SP1, SP2)
322
+
323
+ for method in methods:
324
+ for directed in (True, False):
325
+ check(method, directed)
326
+
327
+
328
+ def test_negative_cycles():
329
+ # create a small graph with a negative cycle
330
+ graph = np.ones([5, 5])
331
+ graph.flat[::6] = 0
332
+ graph[1, 2] = -2
333
+
334
+ def check(method, directed):
335
+ assert_raises(NegativeCycleError, shortest_path, graph, method,
336
+ directed)
337
+
338
+ for directed in (True, False):
339
+ for method in ['FW', 'J', 'BF']:
340
+ check(method, directed)
341
+
342
+ assert_raises(NegativeCycleError, yen, graph, 0, 1, 1,
343
+ directed=directed)
344
+
345
+
346
+ @pytest.mark.parametrize("method", ['FW', 'J', 'BF'])
347
+ def test_negative_weights(method):
348
+ SP = shortest_path(directed_negative_weighted_G, method, directed=True)
349
+ assert_allclose(SP, directed_negative_weighted_SP, atol=1e-10)
350
+
351
+
352
+ def test_masked_input():
353
+ np.ma.masked_equal(directed_G, 0)
354
+
355
+ def check(method):
356
+ SP = shortest_path(directed_G, method=method, directed=True,
357
+ overwrite=False)
358
+ assert_array_almost_equal(SP, directed_SP)
359
+
360
+ for method in methods:
361
+ check(method)
362
+
363
+
364
+ def test_overwrite():
365
+ G = np.array([[0, 3, 3, 1, 2],
366
+ [3, 0, 0, 2, 4],
367
+ [3, 0, 0, 0, 0],
368
+ [1, 2, 0, 0, 2],
369
+ [2, 4, 0, 2, 0]], dtype=float)
370
+ foo = G.copy()
371
+ shortest_path(foo, overwrite=False)
372
+ assert_array_equal(foo, G)
373
+
374
+
375
+ @pytest.mark.parametrize('method', methods)
376
+ def test_buffer(method):
377
+ # Smoke test that sparse matrices with read-only buffers (e.g., those from
378
+ # joblib workers) do not cause::
379
+ #
380
+ # ValueError: buffer source array is read-only
381
+ #
382
+ G = scipy.sparse.csr_array([[1.]])
383
+ G.data.flags['WRITEABLE'] = False
384
+ shortest_path(G, method=method)
385
+
386
+
387
+ def test_NaN_warnings():
388
+ with warnings.catch_warnings(record=True) as record:
389
+ shortest_path(np.array([[0, 1], [np.nan, 0]]))
390
+ for r in record:
391
+ assert r.category is not RuntimeWarning
392
+
393
+
394
+ def test_sparse_matrices():
395
+ # Test that using lil,csr and csc sparse matrix do not cause error
396
+ G_dense = np.array([[0, 3, 0, 0, 0],
397
+ [0, 0, -1, 0, 0],
398
+ [0, 0, 0, 2, 0],
399
+ [0, 0, 0, 0, 4],
400
+ [0, 0, 0, 0, 0]], dtype=float)
401
+ SP = shortest_path(G_dense)
402
+ G_csr = scipy.sparse.csr_array(G_dense)
403
+ G_csc = scipy.sparse.csc_array(G_dense)
404
+ G_lil = scipy.sparse.lil_array(G_dense)
405
+ assert_array_almost_equal(SP, shortest_path(G_csr))
406
+ assert_array_almost_equal(SP, shortest_path(G_csc))
407
+ assert_array_almost_equal(SP, shortest_path(G_lil))
408
+
409
+
410
+ def test_yen_directed():
411
+ distances, predecessors = yen(
412
+ directed_G,
413
+ source=0,
414
+ sink=3,
415
+ K=2,
416
+ return_predecessors=True
417
+ )
418
+ assert_allclose(distances, [5., 9.])
419
+ assert_allclose(predecessors, directed_2SP_0_to_3)
420
+
421
+
422
+ def test_yen_undirected():
423
+ distances = yen(
424
+ undirected_G,
425
+ source=0,
426
+ sink=3,
427
+ K=4,
428
+ )
429
+ assert_allclose(distances, [1., 4., 5., 8.])
430
+
431
+ def test_yen_unweighted():
432
+ # Ask for more paths than there are, verify only the available paths are returned
433
+ distances, predecessors = yen(
434
+ directed_G,
435
+ source=0,
436
+ sink=3,
437
+ K=4,
438
+ unweighted=True,
439
+ return_predecessors=True,
440
+ )
441
+ assert_allclose(distances, [2., 3.])
442
+ assert_allclose(predecessors, directed_2SP_0_to_3)
443
+
444
+ def test_yen_no_paths():
445
+ distances = yen(
446
+ directed_G,
447
+ source=2,
448
+ sink=3,
449
+ K=1,
450
+ )
451
+ assert distances.size == 0
452
+
453
+ def test_yen_negative_weights():
454
+ distances = yen(
455
+ directed_negative_weighted_G,
456
+ source=2,
457
+ sink=0,
458
+ K=1,
459
+ )
460
+ assert_allclose(distances, [-2.])
461
+
462
+
463
+ @pytest.mark.parametrize("min_only", (True, False))
464
+ @pytest.mark.parametrize("directed", (True, False))
465
+ @pytest.mark.parametrize("return_predecessors", (True, False))
466
+ @pytest.mark.parametrize("index_dtype", (np.int32, np.int64))
467
+ @pytest.mark.parametrize("indices", (None, [1]))
468
+ def test_20904(min_only, directed, return_predecessors, index_dtype, indices):
469
+ """Test two failures from gh-20904: int32 and indices-as-None."""
470
+ adj_mat = scipy.sparse.eye_array(4, format="csr")
471
+ adj_mat = scipy.sparse.csr_array(
472
+ (
473
+ adj_mat.data,
474
+ adj_mat.indices.astype(index_dtype),
475
+ adj_mat.indptr.astype(index_dtype),
476
+ ),
477
+ )
478
+ dijkstra(
479
+ adj_mat,
480
+ directed,
481
+ indices=indices,
482
+ min_only=min_only,
483
+ return_predecessors=return_predecessors,
484
+ )
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Test the minimum spanning tree function"""
2
+ import numpy as np
3
+ from numpy.testing import assert_
4
+ import numpy.testing as npt
5
+ from scipy.sparse import csr_array
6
+ from scipy.sparse.csgraph import minimum_spanning_tree
7
+
8
+
9
+ def test_minimum_spanning_tree():
10
+
11
+ # Create a graph with two connected components.
12
+ graph = [[0,1,0,0,0],
13
+ [1,0,0,0,0],
14
+ [0,0,0,8,5],
15
+ [0,0,8,0,1],
16
+ [0,0,5,1,0]]
17
+ graph = np.asarray(graph)
18
+
19
+ # Create the expected spanning tree.
20
+ expected = [[0,1,0,0,0],
21
+ [0,0,0,0,0],
22
+ [0,0,0,0,5],
23
+ [0,0,0,0,1],
24
+ [0,0,0,0,0]]
25
+ expected = np.asarray(expected)
26
+
27
+ # Ensure minimum spanning tree code gives this expected output.
28
+ csgraph = csr_array(graph)
29
+ mintree = minimum_spanning_tree(csgraph)
30
+ mintree_array = mintree.toarray()
31
+ npt.assert_array_equal(mintree_array, expected,
32
+ 'Incorrect spanning tree found.')
33
+
34
+ # Ensure that the original graph was not modified.
35
+ npt.assert_array_equal(csgraph.toarray(), graph,
36
+ 'Original graph was modified.')
37
+
38
+ # Now let the algorithm modify the csgraph in place.
39
+ mintree = minimum_spanning_tree(csgraph, overwrite=True)
40
+ npt.assert_array_equal(mintree.toarray(), expected,
41
+ 'Graph was not properly modified to contain MST.')
42
+
43
+ np.random.seed(1234)
44
+ for N in (5, 10, 15, 20):
45
+
46
+ # Create a random graph.
47
+ graph = 3 + np.random.random((N, N))
48
+ csgraph = csr_array(graph)
49
+
50
+ # The spanning tree has at most N - 1 edges.
51
+ mintree = minimum_spanning_tree(csgraph)
52
+ assert_(mintree.nnz < N)
53
+
54
+ # Set the sub diagonal to 1 to create a known spanning tree.
55
+ idx = np.arange(N-1)
56
+ graph[idx,idx+1] = 1
57
+ csgraph = csr_array(graph)
58
+ mintree = minimum_spanning_tree(csgraph)
59
+
60
+ # We expect to see this pattern in the spanning tree and otherwise
61
+ # have this zero.
62
+ expected = np.zeros((N, N))
63
+ expected[idx, idx+1] = 1
64
+
65
+ npt.assert_array_equal(mintree.toarray(), expected,
66
+ 'Incorrect spanning tree found.')
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_traversal.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_array_almost_equal
4
+ from scipy.sparse import csr_array, csr_matrix, coo_array, coo_matrix
5
+ from scipy.sparse.csgraph import (breadth_first_tree, depth_first_tree,
6
+ csgraph_to_dense, csgraph_from_dense, csgraph_masked_from_dense)
7
+
8
+
9
+ def test_graph_breadth_first():
10
+ csgraph = np.array([[0, 1, 2, 0, 0],
11
+ [1, 0, 0, 0, 3],
12
+ [2, 0, 0, 7, 0],
13
+ [0, 0, 7, 0, 1],
14
+ [0, 3, 0, 1, 0]])
15
+ csgraph = csgraph_from_dense(csgraph, null_value=0)
16
+
17
+ bfirst = np.array([[0, 1, 2, 0, 0],
18
+ [0, 0, 0, 0, 3],
19
+ [0, 0, 0, 7, 0],
20
+ [0, 0, 0, 0, 0],
21
+ [0, 0, 0, 0, 0]])
22
+
23
+ for directed in [True, False]:
24
+ bfirst_test = breadth_first_tree(csgraph, 0, directed)
25
+ assert_array_almost_equal(csgraph_to_dense(bfirst_test),
26
+ bfirst)
27
+
28
+
29
+ def test_graph_depth_first():
30
+ csgraph = np.array([[0, 1, 2, 0, 0],
31
+ [1, 0, 0, 0, 3],
32
+ [2, 0, 0, 7, 0],
33
+ [0, 0, 7, 0, 1],
34
+ [0, 3, 0, 1, 0]])
35
+ csgraph = csgraph_from_dense(csgraph, null_value=0)
36
+
37
+ dfirst = np.array([[0, 1, 0, 0, 0],
38
+ [0, 0, 0, 0, 3],
39
+ [0, 0, 0, 0, 0],
40
+ [0, 0, 7, 0, 0],
41
+ [0, 0, 0, 1, 0]])
42
+
43
+ for directed in [True, False]:
44
+ dfirst_test = depth_first_tree(csgraph, 0, directed)
45
+ assert_array_almost_equal(csgraph_to_dense(dfirst_test), dfirst)
46
+
47
+
48
+ def test_return_type():
49
+ from .._laplacian import laplacian
50
+ from .._min_spanning_tree import minimum_spanning_tree
51
+
52
+ np_csgraph = np.array([[0, 1, 2, 0, 0],
53
+ [1, 0, 0, 0, 3],
54
+ [2, 0, 0, 7, 0],
55
+ [0, 0, 7, 0, 1],
56
+ [0, 3, 0, 1, 0]])
57
+ csgraph = csr_array(np_csgraph)
58
+ assert isinstance(laplacian(csgraph), coo_array)
59
+ assert isinstance(minimum_spanning_tree(csgraph), csr_array)
60
+ for directed in [True, False]:
61
+ assert isinstance(depth_first_tree(csgraph, 0, directed), csr_array)
62
+ assert isinstance(breadth_first_tree(csgraph, 0, directed), csr_array)
63
+
64
+ csgraph = csgraph_from_dense(np_csgraph, null_value=0)
65
+ assert isinstance(csgraph, csr_array)
66
+ assert isinstance(laplacian(csgraph), coo_array)
67
+ assert isinstance(minimum_spanning_tree(csgraph), csr_array)
68
+ for directed in [True, False]:
69
+ assert isinstance(depth_first_tree(csgraph, 0, directed), csr_array)
70
+ assert isinstance(breadth_first_tree(csgraph, 0, directed), csr_array)
71
+
72
+ csgraph = csgraph_masked_from_dense(np_csgraph, null_value=0)
73
+ assert isinstance(csgraph, np.ma.MaskedArray)
74
+ assert csgraph._baseclass is np.ndarray
75
+ # laplacian doesnt work with masked arrays so not here
76
+ assert isinstance(minimum_spanning_tree(csgraph), csr_array)
77
+ for directed in [True, False]:
78
+ assert isinstance(depth_first_tree(csgraph, 0, directed), csr_array)
79
+ assert isinstance(breadth_first_tree(csgraph, 0, directed), csr_array)
80
+
81
+ # start of testing with matrix/spmatrix types
82
+ with np.testing.suppress_warnings() as sup:
83
+ sup.filter(DeprecationWarning, "the matrix subclass.*")
84
+ sup.filter(PendingDeprecationWarning, "the matrix subclass.*")
85
+
86
+ nm_csgraph = np.matrix([[0, 1, 2, 0, 0],
87
+ [1, 0, 0, 0, 3],
88
+ [2, 0, 0, 7, 0],
89
+ [0, 0, 7, 0, 1],
90
+ [0, 3, 0, 1, 0]])
91
+
92
+ csgraph = csr_matrix(nm_csgraph)
93
+ assert isinstance(laplacian(csgraph), coo_matrix)
94
+ assert isinstance(minimum_spanning_tree(csgraph), csr_matrix)
95
+ for directed in [True, False]:
96
+ assert isinstance(depth_first_tree(csgraph, 0, directed), csr_matrix)
97
+ assert isinstance(breadth_first_tree(csgraph, 0, directed), csr_matrix)
98
+
99
+ csgraph = csgraph_from_dense(nm_csgraph, null_value=0)
100
+ assert isinstance(csgraph, csr_matrix)
101
+ assert isinstance(laplacian(csgraph), coo_matrix)
102
+ assert isinstance(minimum_spanning_tree(csgraph), csr_matrix)
103
+ for directed in [True, False]:
104
+ assert isinstance(depth_first_tree(csgraph, 0, directed), csr_matrix)
105
+ assert isinstance(breadth_first_tree(csgraph, 0, directed), csr_matrix)
106
+
107
+ mm_csgraph = csgraph_masked_from_dense(nm_csgraph, null_value=0)
108
+ assert isinstance(mm_csgraph, np.ma.MaskedArray)
109
+ # laplacian doesnt work with masked arrays so not here
110
+ assert isinstance(minimum_spanning_tree(csgraph), csr_matrix)
111
+ for directed in [True, False]:
112
+ assert isinstance(depth_first_tree(csgraph, 0, directed), csr_matrix)
113
+ assert isinstance(breadth_first_tree(csgraph, 0, directed), csr_matrix)
114
+ # end of testing with matrix/spmatrix types
115
+
116
+
117
+ def test_graph_breadth_first_trivial_graph():
118
+ csgraph = np.array([[0]])
119
+ csgraph = csgraph_from_dense(csgraph, null_value=0)
120
+
121
+ bfirst = np.array([[0]])
122
+
123
+ for directed in [True, False]:
124
+ bfirst_test = breadth_first_tree(csgraph, 0, directed)
125
+ assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst)
126
+
127
+
128
+ def test_graph_depth_first_trivial_graph():
129
+ csgraph = np.array([[0]])
130
+ csgraph = csgraph_from_dense(csgraph, null_value=0)
131
+
132
+ bfirst = np.array([[0]])
133
+
134
+ for directed in [True, False]:
135
+ bfirst_test = depth_first_tree(csgraph, 0, directed)
136
+ assert_array_almost_equal(csgraph_to_dense(bfirst_test),
137
+ bfirst)
138
+
139
+
140
+ @pytest.mark.parametrize('directed', [True, False])
141
+ @pytest.mark.parametrize('tree_func', [breadth_first_tree, depth_first_tree])
142
+ def test_int64_indices(tree_func, directed):
143
+ # See https://github.com/scipy/scipy/issues/18716
144
+ g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2))
145
+ assert g.indices.dtype == np.int64
146
+ tree = tree_func(g, 0, directed=directed)
147
+ assert_array_almost_equal(csgraph_to_dense(tree), [[0, 1], [0, 0]])
148
+
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsmr.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_gcrotmk.cpython-310.pyc ADDED
Binary file (5.71 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_iterative.cpython-310.pyc ADDED
Binary file (21.8 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsmr.cpython-310.pyc ADDED
Binary file (6.63 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsqr.cpython-310.pyc ADDED
Binary file (3.04 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_minres.cpython-310.pyc ADDED
Binary file (3.15 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_utils.cpython-310.pyc ADDED
Binary file (591 Bytes). View file
 
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ """Tests for the linalg._isolve.gcrotmk module
3
+ """
4
+
5
+ import threading
6
+ from numpy.testing import (assert_, assert_allclose, assert_equal,
7
+ suppress_warnings)
8
+
9
+ import numpy as np
10
+ from numpy import zeros, array, allclose
11
+ from scipy.linalg import norm
12
+ from scipy.sparse import csr_array, eye_array, random_array
13
+
14
+ from scipy.sparse.linalg._interface import LinearOperator
15
+ from scipy.sparse.linalg import splu
16
+ from scipy.sparse.linalg._isolve import gcrotmk, gmres
17
+
18
+
19
+ Am = csr_array(array([[-2,1,0,0,0,9],
20
+ [1,-2,1,0,5,0],
21
+ [0,1,-2,1,0,0],
22
+ [0,0,1,-2,1,0],
23
+ [0,3,0,1,-2,1],
24
+ [1,0,0,0,1,-2]]))
25
+ b = array([1,2,3,4,5,6])
26
+ count = threading.local() # [0]
27
+ niter = threading.local() # [0]
28
+
29
+
30
+ def matvec(v):
31
+ if not hasattr(count, 'c'):
32
+ count.c = [0]
33
+ count.c[0] += 1
34
+ return Am@v
35
+
36
+
37
+ def cb(v):
38
+ if not hasattr(niter, 'n'):
39
+ niter.n = [0]
40
+ niter.n[0] += 1
41
+
42
+
43
+ A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
44
+
45
+
46
+ def do_solve(**kw):
47
+ if not hasattr(niter, 'n'):
48
+ niter.n = [0]
49
+
50
+ if not hasattr(count, 'c'):
51
+ count.c = [0]
52
+
53
+ count.c[0] = 0
54
+ with suppress_warnings() as sup:
55
+ sup.filter(DeprecationWarning, ".*called without specifying.*")
56
+ x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), rtol=1e-14, **kw)
57
+ count_0 = count.c[0]
58
+ assert_(allclose(A@x0, b, rtol=1e-12, atol=1e-12), norm(A@x0-b))
59
+ return x0, count_0
60
+
61
+
62
+ class TestGCROTMK:
63
+ def test_preconditioner(self):
64
+ # Check that preconditioning works
65
+ pc = splu(Am.tocsc())
66
+ M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
67
+
68
+ x0, count_0 = do_solve()
69
+ niter.n[0] = 0
70
+ x1, count_1 = do_solve(M=M, callback=cb)
71
+
72
+ assert_equal(count_1, 3)
73
+ assert count_1 < count_0/2
74
+ assert allclose(x1, x0, rtol=1e-14)
75
+ assert niter.n[0] < 3
76
+
77
+ def test_arnoldi(self):
78
+ rng = np.random.default_rng(1)
79
+
80
+ A = eye_array(2000) + random_array((2000, 2000), density=5e-4, rng=rng)
81
+ b = rng.random(2000)
82
+
83
+ # The inner arnoldi should be equivalent to gmres
84
+ with suppress_warnings() as sup:
85
+ sup.filter(DeprecationWarning, ".*called without specifying.*")
86
+ x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=10, k=0, maxiter=1)
87
+ x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=10, maxiter=1)
88
+
89
+ assert_equal(flag0, 1)
90
+ assert_equal(flag1, 1)
91
+ assert np.linalg.norm(A.dot(x0) - b) > 1e-4
92
+
93
+ assert_allclose(x0, x1)
94
+
95
+ def test_cornercase(self):
96
+ np.random.seed(1234)
97
+
98
+ # Rounding error may prevent convergence with tol=0 --- ensure
99
+ # that the return values in this case are correct, and no
100
+ # exceptions are raised
101
+
102
+ for n in [3, 5, 10, 100]:
103
+ A = 2*eye_array(n)
104
+
105
+ with suppress_warnings() as sup:
106
+ sup.filter(DeprecationWarning, ".*called without specifying.*")
107
+ b = np.ones(n)
108
+ x, info = gcrotmk(A, b, maxiter=10)
109
+ assert_equal(info, 0)
110
+ assert_allclose(A.dot(x) - b, 0, atol=1e-14)
111
+
112
+ x, info = gcrotmk(A, b, rtol=0, maxiter=10)
113
+ if info == 0:
114
+ assert_allclose(A.dot(x) - b, 0, atol=1e-14)
115
+
116
+ b = np.random.rand(n)
117
+ x, info = gcrotmk(A, b, maxiter=10)
118
+ assert_equal(info, 0)
119
+ assert_allclose(A.dot(x) - b, 0, atol=1e-14)
120
+
121
+ x, info = gcrotmk(A, b, rtol=0, maxiter=10)
122
+ if info == 0:
123
+ assert_allclose(A.dot(x) - b, 0, atol=1e-14)
124
+
125
+ def test_nans(self):
126
+ A = eye_array(3, format='lil')
127
+ A[1,1] = np.nan
128
+ b = np.ones(3)
129
+
130
+ with suppress_warnings() as sup:
131
+ sup.filter(DeprecationWarning, ".*called without specifying.*")
132
+ x, info = gcrotmk(A, b, rtol=0, maxiter=10)
133
+ assert_equal(info, 1)
134
+
135
+ def test_truncate(self):
136
+ np.random.seed(1234)
137
+ A = np.random.rand(30, 30) + np.eye(30)
138
+ b = np.random.rand(30)
139
+
140
+ for truncate in ['oldest', 'smallest']:
141
+ with suppress_warnings() as sup:
142
+ sup.filter(DeprecationWarning, ".*called without specifying.*")
143
+ x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate,
144
+ rtol=1e-4, maxiter=200)
145
+ assert_equal(info, 0)
146
+ assert_allclose(A.dot(x) - b, 0, atol=1e-3)
147
+
148
+ def test_CU(self):
149
+ for discard_C in (True, False):
150
+ # Check that C,U behave as expected
151
+ CU = []
152
+ x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
153
+ assert_(len(CU) > 0)
154
+ assert_(len(CU) <= 6)
155
+
156
+ if discard_C:
157
+ for c, u in CU:
158
+ assert_(c is None)
159
+
160
+ # should converge immediately
161
+ x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
162
+ if discard_C:
163
+ assert_equal(count_1, 2 + len(CU))
164
+ else:
165
+ assert_equal(count_1, 3)
166
+ assert_(count_1 <= count_0/2)
167
+ assert_allclose(x1, x0, atol=1e-14)
168
+
169
+ def test_denormals(self):
170
+ # Check that no warnings are emitted if the matrix contains
171
+ # numbers for which 1/x has no float representation, and that
172
+ # the solver behaves properly.
173
+ A = np.array([[1, 2], [3, 4]], dtype=float)
174
+ A *= 100 * np.nextafter(0, 1)
175
+
176
+ b = np.array([1, 1])
177
+
178
+ with suppress_warnings() as sup:
179
+ sup.filter(DeprecationWarning, ".*called without specifying.*")
180
+ xp, info = gcrotmk(A, b)
181
+
182
+ if info == 0:
183
+ assert_allclose(A.dot(xp), b)
mantis_evalkit/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:513e26cfb85855a7a0835f7faa0e0687257d1ac95aa0c31c3989fe1296da3d4f
3
+ size 160622
moondream/lib/python3.10/site-packages/contourpy-1.3.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
moondream/lib/python3.10/site-packages/contourpy-1.3.1.dist-info/METADATA ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: contourpy
3
+ Version: 1.3.1
4
+ Summary: Python library for calculating contours of 2D quadrilateral grids
5
+ Author-Email: Ian Thomas <ianthomas23@gmail.com>
6
+ License: BSD 3-Clause License
7
+
8
+ Copyright (c) 2021-2024, ContourPy Developers.
9
+ All rights reserved.
10
+
11
+ Redistribution and use in source and binary forms, with or without
12
+ modification, are permitted provided that the following conditions are met:
13
+
14
+ 1. Redistributions of source code must retain the above copyright notice, this
15
+ list of conditions and the following disclaimer.
16
+
17
+ 2. Redistributions in binary form must reproduce the above copyright notice,
18
+ this list of conditions and the following disclaimer in the documentation
19
+ and/or other materials provided with the distribution.
20
+
21
+ 3. Neither the name of the copyright holder nor the names of its
22
+ contributors may be used to endorse or promote products derived from
23
+ this software without specific prior written permission.
24
+
25
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
29
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
31
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
32
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
34
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35
+
36
+ Classifier: Development Status :: 5 - Production/Stable
37
+ Classifier: Intended Audience :: Developers
38
+ Classifier: Intended Audience :: Science/Research
39
+ Classifier: License :: OSI Approved :: BSD License
40
+ Classifier: Programming Language :: C++
41
+ Classifier: Programming Language :: Python :: 3
42
+ Classifier: Programming Language :: Python :: 3.10
43
+ Classifier: Programming Language :: Python :: 3.11
44
+ Classifier: Programming Language :: Python :: 3.12
45
+ Classifier: Programming Language :: Python :: 3.13
46
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
47
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
48
+ Classifier: Topic :: Scientific/Engineering :: Visualization
49
+ Project-URL: Homepage, https://github.com/contourpy/contourpy
50
+ Project-URL: Changelog, https://contourpy.readthedocs.io/en/latest/changelog.html
51
+ Project-URL: Documentation, https://contourpy.readthedocs.io
52
+ Project-URL: Repository, https://github.com/contourpy/contourpy
53
+ Requires-Python: >=3.10
54
+ Requires-Dist: numpy>=1.23
55
+ Provides-Extra: docs
56
+ Requires-Dist: furo; extra == "docs"
57
+ Requires-Dist: sphinx>=7.2; extra == "docs"
58
+ Requires-Dist: sphinx-copybutton; extra == "docs"
59
+ Provides-Extra: bokeh
60
+ Requires-Dist: bokeh; extra == "bokeh"
61
+ Requires-Dist: selenium; extra == "bokeh"
62
+ Provides-Extra: mypy
63
+ Requires-Dist: contourpy[bokeh,docs]; extra == "mypy"
64
+ Requires-Dist: docutils-stubs; extra == "mypy"
65
+ Requires-Dist: mypy==1.11.1; extra == "mypy"
66
+ Requires-Dist: types-Pillow; extra == "mypy"
67
+ Provides-Extra: test
68
+ Requires-Dist: contourpy[test-no-images]; extra == "test"
69
+ Requires-Dist: matplotlib; extra == "test"
70
+ Requires-Dist: Pillow; extra == "test"
71
+ Provides-Extra: test-no-images
72
+ Requires-Dist: pytest; extra == "test-no-images"
73
+ Requires-Dist: pytest-cov; extra == "test-no-images"
74
+ Requires-Dist: pytest-rerunfailures; extra == "test-no-images"
75
+ Requires-Dist: pytest-xdist; extra == "test-no-images"
76
+ Requires-Dist: wurlitzer; extra == "test-no-images"
77
+ Description-Content-Type: text/markdown
78
+
79
+ <img alt="ContourPy" src="https://raw.githubusercontent.com/contourpy/contourpy/main/docs/_static/contourpy_logo_horiz.svg" height="90">
80
+
81
+ ContourPy is a Python library for calculating contours of 2D quadrilateral grids. It is written in C++11 and wrapped using pybind11.
82
+
83
+ It contains the 2005 and 2014 algorithms used in Matplotlib as well as a newer algorithm that includes more features and is available in both serial and multithreaded versions. It provides an easy way for Python libraries to use contouring algorithms without having to include Matplotlib as a dependency.
84
+
85
+ * **Documentation**: https://contourpy.readthedocs.io
86
+ * **Source code**: https://github.com/contourpy/contourpy
87
+
88
+ | | |
89
+ | --- | --- |
90
+ | Latest release | [![PyPI version](https://img.shields.io/pypi/v/contourpy.svg?label=pypi&color=fdae61)](https://pypi.python.org/pypi/contourpy) [![conda-forge version](https://img.shields.io/conda/v/conda-forge/contourpy.svg?label=conda-forge&color=a6d96a)](https://anaconda.org/conda-forge/contourpy) |
91
+ | Downloads | [![PyPi downloads](https://img.shields.io/pypi/dm/contourpy?label=pypi&style=flat&color=fdae61)](https://pepy.tech/project/contourpy) |
92
+ | Python version | [![Platforms](https://img.shields.io/pypi/pyversions/contourpy?color=fdae61)](https://pypi.org/project/contourpy/) |
93
+ | Coverage | [![Codecov](https://img.shields.io/codecov/c/gh/contourpy/contourpy?color=fdae61&label=codecov)](https://app.codecov.io/gh/contourpy/contourpy) |
moondream/lib/python3.10/site-packages/contourpy-1.3.1.dist-info/RECORD ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ contourpy-1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ contourpy-1.3.1.dist-info/LICENSE,sha256=x9ChU7_6oQQERGPrxjN5PUUXIu_TE4tf_SUntA8VBaI,1534
3
+ contourpy-1.3.1.dist-info/METADATA,sha256=LQNae4q9MVNwpfb0FlnTCTe2tkw22GiKEJjpks9n7jk,5423
4
+ contourpy-1.3.1.dist-info/RECORD,,
5
+ contourpy-1.3.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ contourpy-1.3.1.dist-info/WHEEL,sha256=sZM_NeUMz2G4fDenMf11eikcCxcLaQWiYRmjwQBavQs,137
7
+ contourpy/__init__.py,sha256=Vi2YbtUhM9VxYPY3PBvxfu0xZYr6fBysl5gQPJEo88k,11831
8
+ contourpy/__pycache__/__init__.cpython-310.pyc,,
9
+ contourpy/__pycache__/_version.cpython-310.pyc,,
10
+ contourpy/__pycache__/array.cpython-310.pyc,,
11
+ contourpy/__pycache__/chunk.cpython-310.pyc,,
12
+ contourpy/__pycache__/convert.cpython-310.pyc,,
13
+ contourpy/__pycache__/dechunk.cpython-310.pyc,,
14
+ contourpy/__pycache__/enum_util.cpython-310.pyc,,
15
+ contourpy/__pycache__/typecheck.cpython-310.pyc,,
16
+ contourpy/__pycache__/types.cpython-310.pyc,,
17
+ contourpy/_contourpy.cpython-310-x86_64-linux-gnu.so,sha256=H8GWcYv0tePHkvGU0HhmBO_d_Chj3Bs_6QYgLRKjIMI,854312
18
+ contourpy/_contourpy.pyi,sha256=fvtccxkiZwGb6qYag7Fp4E8bsFmAIjAmobf8LNxqfgc,7122
19
+ contourpy/_version.py,sha256=-ypEJktJToAL9by62JJKWEzDo_KPCQtmE5kwFgX24z4,22
20
+ contourpy/array.py,sha256=4WwLuiZe30rizn_raymmY13OzE6hlCsDOO8kuVFOP18,8979
21
+ contourpy/chunk.py,sha256=8njDQqlpuD22RjaaCyA75FXQsSQDY5hZGJSrxFpvGGU,3279
22
+ contourpy/convert.py,sha256=mhyn7prEoWCnf0igaH-VqDwlk-CegFsZ4qOy2LL-hpU,26154
23
+ contourpy/dechunk.py,sha256=EgFL6hw5H54ccuof4tJ2ehdnktT7trgZjiZqppsH8QI,7756
24
+ contourpy/enum_util.py,sha256=o8MItJRs08oqzwPP3IwC75BBAY9Qq95saIzjkXBXwqA,1519
25
+ contourpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
26
+ contourpy/typecheck.py,sha256=t1nvvCuKMYva1Zx4fc30EpdKFcO0Enz3n_UFfXBsq9o,10747
27
+ contourpy/types.py,sha256=2K4T5tJpMIjYrkkg1Lqh3C2ZKlnOhnMtYmtwz92l_y8,247
28
+ contourpy/util/__init__.py,sha256=eVhJ_crOHL7nkG4Kb0dOo7NL4WHMy_Px665aAN_3d-8,118
29
+ contourpy/util/__pycache__/__init__.cpython-310.pyc,,
30
+ contourpy/util/__pycache__/_build_config.cpython-310.pyc,,
31
+ contourpy/util/__pycache__/bokeh_renderer.cpython-310.pyc,,
32
+ contourpy/util/__pycache__/bokeh_util.cpython-310.pyc,,
33
+ contourpy/util/__pycache__/data.cpython-310.pyc,,
34
+ contourpy/util/__pycache__/mpl_renderer.cpython-310.pyc,,
35
+ contourpy/util/__pycache__/mpl_util.cpython-310.pyc,,
36
+ contourpy/util/__pycache__/renderer.cpython-310.pyc,,
37
+ contourpy/util/_build_config.py,sha256=jzJKkuBQpyjnX1U_eltbhIAN_i6fbzTAQXMAP1YTlG0,1848
38
+ contourpy/util/bokeh_renderer.py,sha256=wNGBghEVA4x11wrSerb3dBbdRxX6E8kuoqlaKPoHTQ8,13769
39
+ contourpy/util/bokeh_util.py,sha256=wc-S3ewBUYWyIkEv9jkhFySIergjLQl4Z0UEVnE0HhA,2804
40
+ contourpy/util/data.py,sha256=-7SSGMLX_gN-1H2JzpNSEB_EcEF_uMtYdOo_ePRIcg8,2586
41
+ contourpy/util/mpl_renderer.py,sha256=avUxO7_MQRDQM84X5PZ9GbNtGxG0EXPSVQYV00xQMvQ,20089
42
+ contourpy/util/mpl_util.py,sha256=0Jz5f-aA9XMWlpO2pDnHbkVgxIiw4SY_ysxf_gACWEo,3452
43
+ contourpy/util/renderer.py,sha256=8CBHzPmVsFPfqsWxqrxGBhqFpJhVeFHFeDzVXAgT8Fc,5118
moondream/lib/python3.10/site-packages/contourpy-1.3.1.dist-info/REQUESTED ADDED
File without changes
moondream/lib/python3.10/site-packages/matplotlib/_cm_multivar.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # auto-generated by https://github.com/trygvrad/multivariate_colormaps
2
+ # date: 2024-05-28
3
+
4
+ from .colors import LinearSegmentedColormap, MultivarColormap
5
+ import matplotlib as mpl
6
# Resolution (number of entries) of each generated lookup table; read from
# the active rcParams so it matches the rest of the colormap machinery.
_LUTSIZE = mpl.rcParams['image.lut']

# Each *_data table below is a list of [R, G, B] control points (values in
# [0, 1]) for one channel of a multivariate colormap; they are interpolated
# into full lookup tables of length _LUTSIZE by
# LinearSegmentedColormap.from_list() at the bottom of this module.
# The "Add" tables start at black (additive blending), the "Sub" tables
# start at white (subtractive blending).
_2VarAddA0_data = [[0.000, 0.000, 0.000],
                   [0.020, 0.026, 0.031],
                   [0.049, 0.068, 0.085],
                   [0.075, 0.107, 0.135],
                   [0.097, 0.144, 0.183],
                   [0.116, 0.178, 0.231],
                   [0.133, 0.212, 0.279],
                   [0.148, 0.244, 0.326],
                   [0.161, 0.276, 0.374],
                   [0.173, 0.308, 0.422],
                   [0.182, 0.339, 0.471],
                   [0.190, 0.370, 0.521],
                   [0.197, 0.400, 0.572],
                   [0.201, 0.431, 0.623],
                   [0.204, 0.461, 0.675],
                   [0.204, 0.491, 0.728],
                   [0.202, 0.520, 0.783],
                   [0.197, 0.549, 0.838],
                   [0.187, 0.577, 0.895]]

_2VarAddA1_data = [[0.000, 0.000, 0.000],
                   [0.030, 0.023, 0.018],
                   [0.079, 0.060, 0.043],
                   [0.125, 0.093, 0.065],
                   [0.170, 0.123, 0.083],
                   [0.213, 0.151, 0.098],
                   [0.255, 0.177, 0.110],
                   [0.298, 0.202, 0.120],
                   [0.341, 0.226, 0.128],
                   [0.384, 0.249, 0.134],
                   [0.427, 0.271, 0.138],
                   [0.472, 0.292, 0.141],
                   [0.517, 0.313, 0.142],
                   [0.563, 0.333, 0.141],
                   [0.610, 0.353, 0.139],
                   [0.658, 0.372, 0.134],
                   [0.708, 0.390, 0.127],
                   [0.759, 0.407, 0.118],
                   [0.813, 0.423, 0.105]]

_2VarSubA0_data = [[1.000, 1.000, 1.000],
                   [0.959, 0.973, 0.986],
                   [0.916, 0.948, 0.974],
                   [0.874, 0.923, 0.965],
                   [0.832, 0.899, 0.956],
                   [0.790, 0.875, 0.948],
                   [0.748, 0.852, 0.940],
                   [0.707, 0.829, 0.934],
                   [0.665, 0.806, 0.927],
                   [0.624, 0.784, 0.921],
                   [0.583, 0.762, 0.916],
                   [0.541, 0.740, 0.910],
                   [0.500, 0.718, 0.905],
                   [0.457, 0.697, 0.901],
                   [0.414, 0.675, 0.896],
                   [0.369, 0.652, 0.892],
                   [0.320, 0.629, 0.888],
                   [0.266, 0.604, 0.884],
                   [0.199, 0.574, 0.881]]

_2VarSubA1_data = [[1.000, 1.000, 1.000],
                   [0.982, 0.967, 0.955],
                   [0.966, 0.935, 0.908],
                   [0.951, 0.902, 0.860],
                   [0.937, 0.870, 0.813],
                   [0.923, 0.838, 0.765],
                   [0.910, 0.807, 0.718],
                   [0.898, 0.776, 0.671],
                   [0.886, 0.745, 0.624],
                   [0.874, 0.714, 0.577],
                   [0.862, 0.683, 0.530],
                   [0.851, 0.653, 0.483],
                   [0.841, 0.622, 0.435],
                   [0.831, 0.592, 0.388],
                   [0.822, 0.561, 0.340],
                   [0.813, 0.530, 0.290],
                   [0.806, 0.498, 0.239],
                   [0.802, 0.464, 0.184],
                   [0.801, 0.426, 0.119]]

_3VarAddA0_data = [[0.000, 0.000, 0.000],
                   [0.018, 0.023, 0.028],
                   [0.040, 0.056, 0.071],
                   [0.059, 0.087, 0.110],
                   [0.074, 0.114, 0.147],
                   [0.086, 0.139, 0.183],
                   [0.095, 0.163, 0.219],
                   [0.101, 0.187, 0.255],
                   [0.105, 0.209, 0.290],
                   [0.107, 0.230, 0.326],
                   [0.105, 0.251, 0.362],
                   [0.101, 0.271, 0.398],
                   [0.091, 0.291, 0.434],
                   [0.075, 0.309, 0.471],
                   [0.046, 0.325, 0.507],
                   [0.021, 0.341, 0.546],
                   [0.021, 0.363, 0.584],
                   [0.022, 0.385, 0.622],
                   [0.023, 0.408, 0.661]]

_3VarAddA1_data = [[0.000, 0.000, 0.000],
                   [0.020, 0.024, 0.016],
                   [0.047, 0.058, 0.034],
                   [0.072, 0.088, 0.048],
                   [0.093, 0.116, 0.059],
                   [0.113, 0.142, 0.067],
                   [0.131, 0.167, 0.071],
                   [0.149, 0.190, 0.074],
                   [0.166, 0.213, 0.074],
                   [0.182, 0.235, 0.072],
                   [0.198, 0.256, 0.068],
                   [0.215, 0.276, 0.061],
                   [0.232, 0.296, 0.051],
                   [0.249, 0.314, 0.037],
                   [0.270, 0.330, 0.018],
                   [0.288, 0.347, 0.000],
                   [0.302, 0.369, 0.000],
                   [0.315, 0.391, 0.000],
                   [0.328, 0.414, 0.000]]

_3VarAddA2_data = [[0.000, 0.000, 0.000],
                   [0.029, 0.020, 0.023],
                   [0.072, 0.045, 0.055],
                   [0.111, 0.067, 0.084],
                   [0.148, 0.085, 0.109],
                   [0.184, 0.101, 0.133],
                   [0.219, 0.115, 0.155],
                   [0.254, 0.127, 0.176],
                   [0.289, 0.138, 0.195],
                   [0.323, 0.147, 0.214],
                   [0.358, 0.155, 0.232],
                   [0.393, 0.161, 0.250],
                   [0.429, 0.166, 0.267],
                   [0.467, 0.169, 0.283],
                   [0.507, 0.168, 0.298],
                   [0.546, 0.168, 0.313],
                   [0.580, 0.172, 0.328],
                   [0.615, 0.175, 0.341],
                   [0.649, 0.178, 0.355]]

# One LinearSegmentedColormap per channel, keyed by name, built by
# interpolating the control-point tables above to _LUTSIZE entries.
cmaps = {
    name: LinearSegmentedColormap.from_list(name, data, _LUTSIZE) for name, data in [
        ('2VarAddA0', _2VarAddA0_data),
        ('2VarAddA1', _2VarAddA1_data),
        ('2VarSubA0', _2VarSubA0_data),
        ('2VarSubA1', _2VarSubA1_data),
        ('3VarAddA0', _3VarAddA0_data),
        ('3VarAddA1', _3VarAddA1_data),
        ('3VarAddA2', _3VarAddA2_data),
    ]}

# Group the per-channel colormaps into MultivarColormap families; the second
# argument selects the blending mode ('sRGB_add' or 'sRGB_sub').
cmap_families = {
    '2VarAddA': MultivarColormap([cmaps[f'2VarAddA{i}'] for i in range(2)],
                                 'sRGB_add', name='2VarAddA'),
    '2VarSubA': MultivarColormap([cmaps[f'2VarSubA{i}'] for i in range(2)],
                                 'sRGB_sub', name='2VarSubA'),
    '3VarAddA': MultivarColormap([cmaps[f'3VarAddA{i}'] for i in range(3)],
                                 'sRGB_add', name='3VarAddA'),
    }
moondream/lib/python3.10/site-packages/matplotlib/_internal_utils.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Internal debugging utilities, that are not expected to be used in the rest of
3
+ the codebase.
4
+
5
+ WARNING: Code in this module may change without prior notice!
6
+ """
7
+
8
+ from io import StringIO
9
+ from pathlib import Path
10
+ import subprocess
11
+
12
+ from matplotlib.transforms import TransformNode
13
+
14
+
15
def graphviz_dump_transform(transform, dest, *, highlight=None):
    """
    Generate a graphical representation of the transform tree for *transform*
    using the :program:`dot` program (which this function depends on). The
    output format (png, dot, etc.) is determined from the suffix of *dest*.

    Parameters
    ----------
    transform : `~matplotlib.transform.Transform`
        The represented transform.
    dest : str
        Output filename.  The extension must be one of the formats supported
        by :program:`dot`, e.g. png, svg, dot, ...
        (see https://www.graphviz.org/doc/info/output.html).
    highlight : list of `~matplotlib.transform.Transform` or None
        The transforms in the tree to be drawn in bold.
        If *None*, *transform* is highlighted.

    Raises
    ------
    subprocess.CalledProcessError
        If the :program:`dot` invocation fails (check=True below).
    """

    if highlight is None:
        highlight = [transform]
    # ids of nodes already emitted, so shared subtrees are only visited once
    # (and any accidental cycle cannot recurse forever).
    seen = set()

    def recurse(root, buf):
        # Emit one "id [attrs];" node statement for *root*, then one edge per
        # child transform found among its attributes, depth-first.
        if id(root) in seen:
            return
        seen.add(id(root))
        props = {}
        label = type(root).__name__
        if root._invalid:
            label = f'[{label}]'  # brackets flag nodes with an invalidated cache
        if root in highlight:
            props['style'] = 'bold'
        props['shape'] = 'box'
        props['label'] = '"%s"' % label
        props = ' '.join(map('{0[0]}={0[1]}'.format, props.items()))
        buf.write(f'{id(root)} [{props}];\n')
        for key, val in vars(root).items():
            # Only follow attributes that are real children in the transform
            # graph, i.e. *root* has registered itself in val._parents.
            if isinstance(val, TransformNode) and id(root) in val._parents:
                buf.write(f'"{id(root)}" -> "{id(val)}" '
                          f'[label="{key}", fontsize=10];\n')
                recurse(val, buf)

    buf = StringIO()
    buf.write('digraph G {\n')
    recurse(transform, buf)
    buf.write('}\n')
    # Let dot pick the renderer from dest's suffix ("-T png" etc.).
    subprocess.run(
        ['dot', '-T', Path(dest).suffix[1:], '-o', dest],
        input=buf.getvalue().encode('utf-8'), check=True)
moondream/lib/python3.10/site-packages/matplotlib/_layoutgrid.py ADDED
@@ -0,0 +1,547 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A layoutgrid is a nrows by ncols set of boxes, meant to be used by
3
+ `._constrained_layout`, each box is analogous to a subplotspec element of
4
+ a gridspec.
5
+
6
+ Each box is defined by left[ncols], right[ncols], bottom[nrows] and top[nrows],
7
+ and by two editable margins for each side. The main margin gets its value
8
+ set by the size of ticklabels, titles, etc on each Axes that is in the figure.
9
+ The outer margin is the padding around the Axes, and space for any
10
+ colorbars.
11
+
12
+ The "inner" widths and heights of these boxes are then constrained to be the
13
+ same (relative the values of `width_ratios[ncols]` and `height_ratios[nrows]`).
14
+
15
+ The layoutgrid is then constrained to be contained within a parent layoutgrid,
16
+ its column(s) and row(s) specified when it is created.
17
+ """
18
+
19
+ import itertools
20
+ import kiwisolver as kiwi
21
+ import logging
22
+ import numpy as np
23
+
24
+ import matplotlib as mpl
25
+ import matplotlib.patches as mpatches
26
+ from matplotlib.transforms import Bbox
27
+
28
# Module-level logger for constrained-layout debugging.
_log = logging.getLogger(__name__)


class LayoutGrid:
    """
    Analogous to a gridspec, and contained in another LayoutGrid.

    Geometry is expressed as kiwisolver variables: per-column ``lefts``/
    ``rights``, per-row ``bottoms``/``tops``, and editable margin variables
    (``margins[todo][i]``) for each side plus a separate "cb" (colorbar)
    margin per side.  All cells in one column share left/right margins; all
    cells in one row share bottom/top margins.
    """

    def __init__(self, parent=None, parent_pos=(0, 0),
                 parent_inner=False, name='', ncols=1, nrows=1,
                 h_pad=None, w_pad=None, width_ratios=None,
                 height_ratios=None):
        # Local alias: every layout variable below is a kiwisolver Variable.
        Variable = kiwi.Variable
        self.parent_pos = parent_pos
        self.parent_inner = parent_inner
        # Append a unique sequential id so solver variable names never clash.
        self.name = name + seq_id()
        if isinstance(parent, LayoutGrid):
            self.name = f'{parent.name}.{self.name}'
        self.nrows = nrows
        self.ncols = ncols
        self.height_ratios = np.atleast_1d(height_ratios)
        if height_ratios is None:
            self.height_ratios = np.ones(nrows)
        self.width_ratios = np.atleast_1d(width_ratios)
        if width_ratios is None:
            self.width_ratios = np.ones(ncols)

        sn = self.name + '_'
        if not isinstance(parent, LayoutGrid):
            # parent can be a rect if not a LayoutGrid
            # allows specifying a rectangle to contain the layout.
            self.solver = kiwi.Solver()
        else:
            # Child grids share the root grid's solver.
            parent.add_child(self, *parent_pos)
            self.solver = parent.solver
        # keep track of artist associated w/ this layout.  Can be none
        self.artists = np.empty((nrows, ncols), dtype=object)
        self.children = np.empty((nrows, ncols), dtype=object)

        self.margins = {}
        self.margin_vals = {}
        # all the boxes in each column share the same left/right margins:
        for todo in ['left', 'right', 'leftcb', 'rightcb']:
            # track the value so we can change only if a margin is larger
            # than the current value
            self.margin_vals[todo] = np.zeros(ncols)

        sol = self.solver

        self.lefts = [Variable(f'{sn}lefts[{i}]') for i in range(ncols)]
        self.rights = [Variable(f'{sn}rights[{i}]') for i in range(ncols)]
        for todo in ['left', 'right', 'leftcb', 'rightcb']:
            self.margins[todo] = [Variable(f'{sn}margins[{todo}][{i}]')
                                  for i in range(ncols)]
            for i in range(ncols):
                # 'strong' edit strength: suggested margin sizes win over the
                # 'strong' width/height-ratio constraints only where required.
                sol.addEditVariable(self.margins[todo][i], 'strong')

        for todo in ['bottom', 'top', 'bottomcb', 'topcb']:
            # NOTE(review): these object arrays are immediately overwritten by
            # the list comprehensions below; only margin_vals survives.
            self.margins[todo] = np.empty((nrows), dtype=object)
            self.margin_vals[todo] = np.zeros(nrows)

        self.bottoms = [Variable(f'{sn}bottoms[{i}]') for i in range(nrows)]
        self.tops = [Variable(f'{sn}tops[{i}]') for i in range(nrows)]
        for todo in ['bottom', 'top', 'bottomcb', 'topcb']:
            self.margins[todo] = [Variable(f'{sn}margins[{todo}][{i}]')
                                  for i in range(nrows)]
            for i in range(nrows):
                sol.addEditVariable(self.margins[todo][i], 'strong')

        # set these margins to zero by default. They will be edited as
        # children are filled.
        self.reset_margins()
        self.add_constraints(parent)

        self.h_pad = h_pad
        self.w_pad = w_pad

    def __repr__(self):
        # NOTE(review): the local name shadows the builtin ``str``; harmless
        # inside this method but worth renaming in a behavior-changing pass.
        str = f'LayoutBox: {self.name:25s} {self.nrows}x{self.ncols},\n'
        for i in range(self.nrows):
            for j in range(self.ncols):
                str += f'{i}, {j}: '\
                       f'L{self.lefts[j].value():1.3f}, ' \
                       f'B{self.bottoms[i].value():1.3f}, ' \
                       f'R{self.rights[j].value():1.3f}, ' \
                       f'T{self.tops[i].value():1.3f}, ' \
                       f'ML{self.margins["left"][j].value():1.3f}, ' \
                       f'MR{self.margins["right"][j].value():1.3f}, ' \
                       f'MB{self.margins["bottom"][i].value():1.3f}, ' \
                       f'MT{self.margins["top"][i].value():1.3f}, \n'
        return str

    def reset_margins(self):
        """
        Reset all the margins to zero.  Must do this after changing
        figure size, for instance, because the relative size of the
        axes labels etc changes.
        """
        for todo in ['left', 'right', 'bottom', 'top',
                     'leftcb', 'rightcb', 'bottomcb', 'topcb']:
            self.edit_margins(todo, 0.0)

    def add_constraints(self, parent):
        """Install all constraints for this grid into the shared solver."""
        # define self-consistent constraints
        self.hard_constraints()
        # define relationship with parent layoutgrid:
        self.parent_constraints(parent)
        # define relative widths of the grid cells to each other
        # and stack horizontally and vertically.
        self.grid_constraints()

    def hard_constraints(self):
        """
        These are the redundant constraints, plus ones that make the
        rest of the code easier.
        """
        for i in range(self.ncols):
            # right >= left, and the inner (margin-reduced) width >= 0.
            hc = [self.rights[i] >= self.lefts[i],
                  (self.rights[i] - self.margins['right'][i] -
                    self.margins['rightcb'][i] >=
                    self.lefts[i] - self.margins['left'][i] -
                    self.margins['leftcb'][i])
                  ]
            for c in hc:
                self.solver.addConstraint(c | 'required')

        for i in range(self.nrows):
            # top >= bottom, and the inner (margin-reduced) height >= 0.
            hc = [self.tops[i] >= self.bottoms[i],
                  (self.tops[i] - self.margins['top'][i] -
                    self.margins['topcb'][i] >=
                    self.bottoms[i] - self.margins['bottom'][i] -
                    self.margins['bottomcb'][i])
                  ]
            for c in hc:
                self.solver.addConstraint(c | 'required')

    def add_child(self, child, i=0, j=0):
        """Record *child* as occupying rows *i* and columns *j* of this grid."""
        # np.ix_ returns the cross product of i and j indices
        self.children[np.ix_(np.atleast_1d(i), np.atleast_1d(j))] = child

    def parent_constraints(self, parent):
        # constraints that are due to the parent...
        # i.e. the first column's left is equal to the
        # parent's left, the last column right equal to the
        # parent's right...
        if not isinstance(parent, LayoutGrid):
            # specify a rectangle in figure coordinates
            hc = [self.lefts[0] == parent[0],
                  self.rights[-1] == parent[0] + parent[2],
                  # top and bottom reversed order...
                  self.tops[0] == parent[1] + parent[3],
                  self.bottoms[-1] == parent[1]]
        else:
            rows, cols = self.parent_pos
            rows = np.atleast_1d(rows)
            cols = np.atleast_1d(cols)

            left = parent.lefts[cols[0]]
            right = parent.rights[cols[-1]]
            top = parent.tops[rows[0]]
            bottom = parent.bottoms[rows[-1]]
            if self.parent_inner:
                # the layout grid is contained inside the inner
                # grid of the parent.
                left += parent.margins['left'][cols[0]]
                left += parent.margins['leftcb'][cols[0]]
                right -= parent.margins['right'][cols[-1]]
                right -= parent.margins['rightcb'][cols[-1]]
                top -= parent.margins['top'][rows[0]]
                top -= parent.margins['topcb'][rows[0]]
                bottom += parent.margins['bottom'][rows[-1]]
                bottom += parent.margins['bottomcb'][rows[-1]]
            hc = [self.lefts[0] == left,
                  self.rights[-1] == right,
                  # from top to bottom
                  self.tops[0] == top,
                  self.bottoms[-1] == bottom]
        for c in hc:
            self.solver.addConstraint(c | 'required')

    def grid_constraints(self):
        # constrain the ratio of the inner part of the grids
        # to be the same (relative to width_ratios)

        # constrain widths:
        w = (self.rights[0] - self.margins['right'][0] -
             self.margins['rightcb'][0])
        w = (w - self.lefts[0] - self.margins['left'][0] -
             self.margins['leftcb'][0])
        w0 = w / self.width_ratios[0]
        # from left to right
        for i in range(1, self.ncols):
            w = (self.rights[i] - self.margins['right'][i] -
                 self.margins['rightcb'][i])
            w = (w - self.lefts[i] - self.margins['left'][i] -
                 self.margins['leftcb'][i])
            c = (w == w0 * self.width_ratios[i])
            self.solver.addConstraint(c | 'strong')
            # constrain the grid cells to be directly next to each other.
            c = (self.rights[i - 1] == self.lefts[i])
            self.solver.addConstraint(c | 'strong')

        # constrain heights:
        h = self.tops[0] - self.margins['top'][0] - self.margins['topcb'][0]
        h = (h - self.bottoms[0] - self.margins['bottom'][0] -
             self.margins['bottomcb'][0])
        h0 = h / self.height_ratios[0]
        # from top to bottom:
        for i in range(1, self.nrows):
            h = (self.tops[i] - self.margins['top'][i] -
                 self.margins['topcb'][i])
            h = (h - self.bottoms[i] - self.margins['bottom'][i] -
                 self.margins['bottomcb'][i])
            c = (h == h0 * self.height_ratios[i])
            self.solver.addConstraint(c | 'strong')
            # constrain the grid cells to be directly above each other.
            c = (self.bottoms[i - 1] == self.tops[i])
            self.solver.addConstraint(c | 'strong')

    # Margin editing:  The margins are variable and meant to
    # contain things of a fixed size like axes labels, tick labels, titles
    # etc
    def edit_margin(self, todo, size, cell):
        """
        Change the size of the margin for one cell.

        Parameters
        ----------
        todo : string (one of 'left', 'right', 'bottom', 'top')
            margin to alter.

        size : float
            Size of the margin.  If it is larger than the existing minimum it
            updates the margin size. Fraction of figure size.

        cell : int
            Cell column or row to edit.
        """
        self.solver.suggestValue(self.margins[todo][cell], size)
        self.margin_vals[todo][cell] = size

    def edit_margin_min(self, todo, size, cell=0):
        """
        Change the minimum size of the margin for one cell.

        Parameters
        ----------
        todo : string (one of 'left', 'right', 'bottom', 'top')
            margin to alter.

        size : float
            Minimum size of the margin .  If it is larger than the
            existing minimum it updates the margin size. Fraction of
            figure size.

        cell : int
            Cell column or row to edit.
        """

        # Only ever grow the margin; shrinking is done via reset_margins().
        if size > self.margin_vals[todo][cell]:
            self.edit_margin(todo, size, cell)

    def edit_margins(self, todo, size):
        """
        Change the size of all the margin of all the cells in the layout grid.

        Parameters
        ----------
        todo : string (one of 'left', 'right', 'bottom', 'top')
            margin to alter.

        size : float
            Size to set the margins.  Fraction of figure size.
        """

        for i in range(len(self.margin_vals[todo])):
            self.edit_margin(todo, size, i)

    def edit_all_margins_min(self, todo, size):
        """
        Change the minimum size of all the margin of all
        the cells in the layout grid.

        Parameters
        ----------
        todo : {'left', 'right', 'bottom', 'top'}
            The margin to alter.

        size : float
            Minimum size of the margin.  If it is larger than the
            existing minimum it updates the margin size. Fraction of
            figure size.
        """

        for i in range(len(self.margin_vals[todo])):
            self.edit_margin_min(todo, size, i)

    def edit_outer_margin_mins(self, margin, ss):
        """
        Edit all four margin minimums in one statement.

        Parameters
        ----------
        margin : dict
            size of margins in a dict with keys 'left', 'right', 'bottom',
            'top'

        ss : SubplotSpec
            defines the subplotspec these margins should be applied to
        """

        self.edit_margin_min('left', margin['left'], ss.colspan.start)
        self.edit_margin_min('leftcb', margin['leftcb'], ss.colspan.start)
        self.edit_margin_min('right', margin['right'], ss.colspan.stop - 1)
        self.edit_margin_min('rightcb', margin['rightcb'], ss.colspan.stop - 1)
        # rows are from the top down:
        self.edit_margin_min('top', margin['top'], ss.rowspan.start)
        self.edit_margin_min('topcb', margin['topcb'], ss.rowspan.start)
        self.edit_margin_min('bottom', margin['bottom'], ss.rowspan.stop - 1)
        self.edit_margin_min('bottomcb', margin['bottomcb'],
                             ss.rowspan.stop - 1)

    def get_margins(self, todo, col):
        """Return the margin at this position"""
        return self.margin_vals[todo][col]

    def get_outer_bbox(self, rows=0, cols=0):
        """
        Return the outer bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        bbox = Bbox.from_extents(
            self.lefts[cols[0]].value(),
            self.bottoms[rows[-1]].value(),
            self.rights[cols[-1]].value(),
            self.tops[rows[0]].value())
        return bbox

    def get_inner_bbox(self, rows=0, cols=0):
        """
        Return the inner bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        # Inner box = outer box shrunk by both the main and the colorbar
        # margin on every side.
        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value() +
             self.margins['left'][cols[0]].value() +
             self.margins['leftcb'][cols[0]].value()),
            (self.bottoms[rows[-1]].value() +
             self.margins['bottom'][rows[-1]].value() +
             self.margins['bottomcb'][rows[-1]].value()),
            (self.rights[cols[-1]].value() -
             self.margins['right'][cols[-1]].value() -
             self.margins['rightcb'][cols[-1]].value()),
            (self.tops[rows[0]].value() -
             self.margins['top'][rows[0]].value() -
             self.margins['topcb'][rows[0]].value())
        )
        return bbox

    def get_bbox_for_cb(self, rows=0, cols=0):
        """
        Return the bounding box that includes the
        decorations but, *not* the colorbar...
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value() +
             self.margins['leftcb'][cols[0]].value()),
            (self.bottoms[rows[-1]].value() +
             self.margins['bottomcb'][rows[-1]].value()),
            (self.rights[cols[-1]].value() -
             self.margins['rightcb'][cols[-1]].value()),
            (self.tops[rows[0]].value() -
             self.margins['topcb'][rows[0]].value())
        )
        return bbox

    def get_left_margin_bbox(self, rows=0, cols=0):
        """
        Return the left margin bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value() +
             self.margins['leftcb'][cols[0]].value()),
            (self.bottoms[rows[-1]].value()),
            (self.lefts[cols[0]].value() +
             self.margins['leftcb'][cols[0]].value() +
             self.margins['left'][cols[0]].value()),
            (self.tops[rows[0]].value()))
        return bbox

    def get_bottom_margin_bbox(self, rows=0, cols=0):
        """
        Return the bottom margin bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value()),
            (self.bottoms[rows[-1]].value() +
             self.margins['bottomcb'][rows[-1]].value()),
            (self.rights[cols[-1]].value()),
            (self.bottoms[rows[-1]].value() +
             self.margins['bottom'][rows[-1]].value() +
             self.margins['bottomcb'][rows[-1]].value()
             ))
        return bbox

    def get_right_margin_bbox(self, rows=0, cols=0):
        """
        Return the right margin bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        bbox = Bbox.from_extents(
            (self.rights[cols[-1]].value() -
             self.margins['right'][cols[-1]].value() -
             self.margins['rightcb'][cols[-1]].value()),
            (self.bottoms[rows[-1]].value()),
            (self.rights[cols[-1]].value() -
             self.margins['rightcb'][cols[-1]].value()),
            (self.tops[rows[0]].value()))
        return bbox

    def get_top_margin_bbox(self, rows=0, cols=0):
        """
        Return the top margin bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value()),
            (self.tops[rows[0]].value() -
             self.margins['topcb'][rows[0]].value()),
            (self.rights[cols[-1]].value()),
            (self.tops[rows[0]].value() -
             self.margins['topcb'][rows[0]].value() -
             self.margins['top'][rows[0]].value()))
        return bbox

    def update_variables(self):
        """
        Update the variables for the solver attached to this layoutgrid.
        """
        self.solver.updateVariables()
491
+
492
# Module-level counter handing out a unique sequence number per layoutbox.
_layoutboxobjnum = itertools.count()


def seq_id():
    """Generate a short sequential id for layoutbox objects."""
    return format(next(_layoutboxobjnum), '06d')
498
+
499
+
500
def plot_children(fig, lg=None, level=0):
    """
    Simple plotting to show where boxes are.

    Draws, for every cell of *lg* (default: the figure's root layoutgrid),
    the outer box in grey, the inner box in the level's cycle color, and the
    four margin areas as translucent colored rectangles, then recurses into
    child layoutgrids with the next cycle color.
    """
    if lg is None:
        layoutgrids = fig.get_layout_engine().execute(fig)
        lg = layoutgrids[fig]
    edge_color = mpl.rcParams["axes.prop_cycle"].by_key()["color"][level]
    # (bbox getter, face color) for each of the four margin strips; the order
    # matches the original left/right/bottom/top drawing order.
    margin_specs = (
        (lg.get_left_margin_bbox, [0.5, 0.7, 0.5]),
        (lg.get_right_margin_bbox, [0.7, 0.5, 0.5]),
        (lg.get_bottom_margin_bbox, [0.5, 0.5, 0.7]),
        (lg.get_top_margin_bbox, [0.7, 0.2, 0.7]),
    )
    for row in range(lg.nrows):
        for col in range(lg.ncols):
            outer = lg.get_outer_bbox(rows=row, cols=col)
            fig.add_artist(
                mpatches.Rectangle(outer.p0, outer.width, outer.height,
                                   linewidth=1, edgecolor='0.7',
                                   facecolor='0.7', alpha=0.2,
                                   transform=fig.transFigure, zorder=-3))
            inner = lg.get_inner_bbox(rows=row, cols=col)
            fig.add_artist(
                mpatches.Rectangle(inner.p0, inner.width, inner.height,
                                   linewidth=2, edgecolor=edge_color,
                                   facecolor='none',
                                   transform=fig.transFigure, zorder=-2))

            for getter, face in margin_specs:
                mbox = getter(rows=row, cols=col)
                fig.add_artist(
                    mpatches.Rectangle(mbox.p0, mbox.width, mbox.height,
                                       linewidth=0, edgecolor='none',
                                       alpha=0.2, facecolor=face,
                                       transform=fig.transFigure, zorder=-2))
    for child in lg.children.flat:
        if child is not None:
            plot_children(fig, child, level=level + 1)
moondream/lib/python3.10/site-packages/matplotlib/_text_helpers.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Low-level text helper utilities.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import dataclasses
8
+
9
+ from . import _api
10
+ from .ft2font import FT2Font, Kerning, LoadFlags
11
+
12
+
13
# One positioned glyph produced by layout(): which font actually rendered the
# character (after fallback), the glyph index, the pen x-position, and the
# kerning applied before this glyph.
@dataclasses.dataclass(frozen=True)
class LayoutItem:
    ft_object: FT2Font   # font that supplies this character's glyph
    char: str            # the source character
    glyph_idx: int       # glyph index within ft_object
    x: float             # horizontal pen position at which to draw the glyph
    prev_kern: float     # kerning adjustment applied between previous glyph and this one
20
+
21
+
22
def warn_on_missing_glyph(codepoint, fontnames):
    """
    Emit an external warning that *codepoint* has no glyph in *fontnames*;
    if the codepoint falls in a script block known to need shaping support,
    add a second warning naming that script.
    """
    _api.warn_external(
        f"Glyph {codepoint} "
        f"({chr(codepoint).encode('ascii', 'namereplace').decode('ascii')}) "
        f"missing from font(s) {fontnames}.")

    # Unicode block ranges (inclusive) for complex scripts that Matplotlib
    # cannot shape natively.
    script_blocks = (
        (0x0590, 0x05ff, "Hebrew"),
        (0x0600, 0x06ff, "Arabic"),
        (0x0900, 0x097f, "Devanagari"),
        (0x0980, 0x09ff, "Bengali"),
        (0x0a00, 0x0a7f, "Gurmukhi"),
        (0x0a80, 0x0aff, "Gujarati"),
        (0x0b00, 0x0b7f, "Oriya"),
        (0x0b80, 0x0bff, "Tamil"),
        (0x0c00, 0x0c7f, "Telugu"),
        (0x0c80, 0x0cff, "Kannada"),
        (0x0d00, 0x0d7f, "Malayalam"),
        (0x0d80, 0x0dff, "Sinhala"),
    )
    block = next(
        (name for low, high, name in script_blocks if low <= codepoint <= high),
        None)
    if block:
        _api.warn_external(
            f"Matplotlib currently does not support {block} natively.")
44
+
45
+
46
def layout(string, font, *, kern_mode=Kerning.DEFAULT):
    """
    Render *string* with *font*.

    For each character in *string*, yield a LayoutItem instance. When such an instance
    is yielded, the font's glyph is set to the corresponding character.

    Parameters
    ----------
    string : str
        The string to be rendered.
    font : FT2Font
        The font.
    kern_mode : Kerning
        A FreeType kerning mode.

    Yields
    ------
    LayoutItem
    """
    x = 0  # running pen position; advanced after each yielded glyph
    prev_glyph_idx = None
    # Map each character to the (fallback) font that actually contains it.
    char_to_font = font._get_fontmap(string)
    base_font = font
    for char in string:
        # This has done the fallback logic
        font = char_to_font.get(char, base_font)
        glyph_idx = font.get_char_index(ord(char))
        # Kerning against the previous glyph; /64 presumably converts from
        # FreeType 26.6 fixed point to points — TODO confirm against ft2font.
        kern = (
            base_font.get_kerning(prev_glyph_idx, glyph_idx, kern_mode) / 64
            if prev_glyph_idx is not None else 0.
        )
        x += kern
        glyph = font.load_glyph(glyph_idx, flags=LoadFlags.NO_HINTING)
        yield LayoutItem(font, char, glyph_idx, x, kern)
        # Advance the pen; /65536 presumably converts linearHoriAdvance from
        # 16.16 fixed point — TODO confirm against ft2font.
        x += glyph.linearHoriAdvance / 65536
        prev_glyph_idx = glyph_idx
moondream/lib/python3.10/site-packages/matplotlib/artist.pyi ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .axes._base import _AxesBase
2
+ from .backend_bases import RendererBase, MouseEvent
3
+ from .figure import Figure, SubFigure
4
+ from .path import Path
5
+ from .patches import Patch
6
+ from .patheffects import AbstractPathEffect
7
+ from .transforms import (
8
+ BboxBase,
9
+ Bbox,
10
+ Transform,
11
+ TransformedPatchPath,
12
+ TransformedPath,
13
+ )
14
+
15
+ import numpy as np
16
+
17
+ from collections.abc import Callable, Iterable
18
+ from typing import Any, Literal, NamedTuple, TextIO, overload, TypeVar
19
+ from numpy.typing import ArrayLike
20
+
21
+ _T_Artist = TypeVar("_T_Artist", bound=Artist)
22
+
23
+ def allow_rasterization(draw): ...
24
+
25
+ class _XYPair(NamedTuple):
26
+ x: ArrayLike
27
+ y: ArrayLike
28
+
29
+ class _Unset: ...
30
+
31
+ class Artist:
32
+ zorder: float
33
+ stale_callback: Callable[[Artist, bool], None] | None
34
+ @property
35
+ def figure(self) -> Figure | SubFigure: ...
36
+ clipbox: BboxBase | None
37
+ def __init__(self) -> None: ...
38
+ def remove(self) -> None: ...
39
+ def have_units(self) -> bool: ...
40
+ # TODO units
41
+ def convert_xunits(self, x): ...
42
+ def convert_yunits(self, y): ...
43
+ @property
44
+ def axes(self) -> _AxesBase | None: ...
45
+ @axes.setter
46
+ def axes(self, new_axes: _AxesBase | None) -> None: ...
47
+ @property
48
+ def stale(self) -> bool: ...
49
+ @stale.setter
50
+ def stale(self, val: bool) -> None: ...
51
+ def get_window_extent(self, renderer: RendererBase | None = ...) -> Bbox: ...
52
+ def get_tightbbox(self, renderer: RendererBase | None = ...) -> Bbox | None: ...
53
+ def add_callback(self, func: Callable[[Artist], Any]) -> int: ...
54
+ def remove_callback(self, oid: int) -> None: ...
55
+ def pchanged(self) -> None: ...
56
+ def is_transform_set(self) -> bool: ...
57
+ def set_transform(self, t: Transform | None) -> None: ...
58
+ def get_transform(self) -> Transform: ...
59
+ def get_children(self) -> list[Artist]: ...
60
+ # TODO can these dicts be type narrowed? e.g. str keys
61
+ def contains(self, mouseevent: MouseEvent) -> tuple[bool, dict[Any, Any]]: ...
62
+ def pickable(self) -> bool: ...
63
+ def pick(self, mouseevent: MouseEvent) -> None: ...
64
+ def set_picker(
65
+ self,
66
+ picker: None
67
+ | bool
68
+ | float
69
+ | Callable[[Artist, MouseEvent], tuple[bool, dict[Any, Any]]],
70
+ ) -> None: ...
71
+ def get_picker(
72
+ self,
73
+ ) -> None | bool | float | Callable[
74
+ [Artist, MouseEvent], tuple[bool, dict[Any, Any]]
75
+ ]: ...
76
+ def get_url(self) -> str | None: ...
77
+ def set_url(self, url: str | None) -> None: ...
78
+ def get_gid(self) -> str | None: ...
79
+ def set_gid(self, gid: str | None) -> None: ...
80
+ def get_snap(self) -> bool | None: ...
81
+ def set_snap(self, snap: bool | None) -> None: ...
82
+ def get_sketch_params(self) -> tuple[float, float, float] | None: ...
83
+ def set_sketch_params(
84
+ self,
85
+ scale: float | None = ...,
86
+ length: float | None = ...,
87
+ randomness: float | None = ...,
88
+ ) -> None: ...
89
+ def set_path_effects(self, path_effects: list[AbstractPathEffect]) -> None: ...
90
+ def get_path_effects(self) -> list[AbstractPathEffect]: ...
91
+ @overload
92
+ def get_figure(self, root: Literal[True]) -> Figure | None: ...
93
+ @overload
94
+ def get_figure(self, root: Literal[False]) -> Figure | SubFigure | None: ...
95
+ @overload
96
+ def get_figure(self, root: bool = ...) -> Figure | SubFigure | None: ...
97
+ def set_figure(self, fig: Figure | SubFigure) -> None: ...
98
+ def set_clip_box(self, clipbox: BboxBase | None) -> None: ...
99
+ def set_clip_path(
100
+ self,
101
+ path: Patch | Path | TransformedPath | TransformedPatchPath | None,
102
+ transform: Transform | None = ...,
103
+ ) -> None: ...
104
+ def get_alpha(self) -> float | None: ...
105
+ def get_visible(self) -> bool: ...
106
+ def get_animated(self) -> bool: ...
107
+ def get_in_layout(self) -> bool: ...
108
+ def get_clip_on(self) -> bool: ...
109
+ def get_clip_box(self) -> Bbox | None: ...
110
+ def get_clip_path(
111
+ self,
112
+ ) -> Patch | Path | TransformedPath | TransformedPatchPath | None: ...
113
+ def get_transformed_clip_path_and_affine(
114
+ self,
115
+ ) -> tuple[None, None] | tuple[Path, Transform]: ...
116
+ def set_clip_on(self, b: bool) -> None: ...
117
+ def get_rasterized(self) -> bool: ...
118
+ def set_rasterized(self, rasterized: bool) -> None: ...
119
+ def get_agg_filter(self) -> Callable[[ArrayLike, float], tuple[np.ndarray, float, float]] | None: ...
120
+ def set_agg_filter(
121
+ self, filter_func: Callable[[ArrayLike, float], tuple[np.ndarray, float, float]] | None
122
+ ) -> None: ...
123
+ def draw(self, renderer: RendererBase) -> None: ...
124
+ def set_alpha(self, alpha: float | None) -> None: ...
125
+ def set_visible(self, b: bool) -> None: ...
126
+ def set_animated(self, b: bool) -> None: ...
127
+ def set_in_layout(self, in_layout: bool) -> None: ...
128
+ def get_label(self) -> object: ...
129
+ def set_label(self, s: object) -> None: ...
130
+ def get_zorder(self) -> float: ...
131
+ def set_zorder(self, level: float) -> None: ...
132
+ @property
133
+ def sticky_edges(self) -> _XYPair: ...
134
+ def update_from(self, other: Artist) -> None: ...
135
+ def properties(self) -> dict[str, Any]: ...
136
+ def update(self, props: dict[str, Any]) -> list[Any]: ...
137
+ def _internal_update(self, kwargs: Any) -> list[Any]: ...
138
+ def set(self, **kwargs: Any) -> list[Any]: ...
139
+
140
+ @overload
141
+ def findobj(
142
+ self,
143
+ match: None | Callable[[Artist], bool] = ...,
144
+ include_self: bool = ...,
145
+ ) -> list[Artist]: ...
146
+
147
+ @overload
148
+ def findobj(
149
+ self,
150
+ match: type[_T_Artist],
151
+ include_self: bool = ...,
152
+ ) -> list[_T_Artist]: ...
153
+
154
+ def get_cursor_data(self, event: MouseEvent) -> Any: ...
155
+ def format_cursor_data(self, data: Any) -> str: ...
156
+ def get_mouseover(self) -> bool: ...
157
+ def set_mouseover(self, mouseover: bool) -> None: ...
158
+ @property
159
+ def mouseover(self) -> bool: ...
160
+ @mouseover.setter
161
+ def mouseover(self, mouseover: bool) -> None: ...
162
+
163
+ class ArtistInspector:
164
+ oorig: Artist | type[Artist]
165
+ o: type[Artist]
166
+ aliasd: dict[str, set[str]]
167
+ def __init__(
168
+ self, o: Artist | type[Artist] | Iterable[Artist | type[Artist]]
169
+ ) -> None: ...
170
+ def get_aliases(self) -> dict[str, set[str]]: ...
171
+ def get_valid_values(self, attr: str) -> str | None: ...
172
+ def get_setters(self) -> list[str]: ...
173
+ @staticmethod
174
+ def number_of_parameters(func: Callable) -> int: ...
175
+ @staticmethod
176
+ def is_alias(method: Callable) -> bool: ...
177
+ def aliased_name(self, s: str) -> str: ...
178
+ def aliased_name_rest(self, s: str, target: str) -> str: ...
179
+ @overload
180
+ def pprint_setters(
181
+ self, prop: None = ..., leadingspace: int = ...
182
+ ) -> list[str]: ...
183
+ @overload
184
+ def pprint_setters(self, prop: str, leadingspace: int = ...) -> str: ...
185
+ @overload
186
+ def pprint_setters_rest(
187
+ self, prop: None = ..., leadingspace: int = ...
188
+ ) -> list[str]: ...
189
+ @overload
190
+ def pprint_setters_rest(self, prop: str, leadingspace: int = ...) -> str: ...
191
+ def properties(self) -> dict[str, Any]: ...
192
+ def pprint_getters(self) -> list[str]: ...
193
+
194
+ def getp(obj: Artist, property: str | None = ...) -> Any: ...
195
+
196
+ get = getp
197
+
198
+ def setp(obj: Artist, *args, file: TextIO | None = ..., **kwargs) -> list[Any] | None: ...
199
+ def kwdoc(artist: Artist | type[Artist] | Iterable[Artist | type[Artist]]) -> str: ...
moondream/lib/python3.10/site-packages/matplotlib/axis.py ADDED
The diff for this file is too large to render. See raw diff
 
moondream/lib/python3.10/site-packages/matplotlib/backend_tools.py ADDED
@@ -0,0 +1,998 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Abstract base classes define the primitives for Tools.
3
+ These tools are used by `matplotlib.backend_managers.ToolManager`
4
+
5
+ :class:`ToolBase`
6
+ Simple stateless tool
7
+
8
+ :class:`ToolToggleBase`
9
+ Tool that has two states, only one Toggle tool can be
10
+ active at any given time for the same
11
+ `matplotlib.backend_managers.ToolManager`
12
+ """
13
+
14
+ import enum
15
+ import functools
16
+ import re
17
+ import time
18
+ from types import SimpleNamespace
19
+ import uuid
20
+ from weakref import WeakKeyDictionary
21
+
22
+ import numpy as np
23
+
24
+ import matplotlib as mpl
25
+ from matplotlib._pylab_helpers import Gcf
26
+ from matplotlib import _api, cbook
27
+
28
+
29
+ class Cursors(enum.IntEnum): # Must subclass int for the macOS backend.
30
+ """Backend-independent cursor types."""
31
+ POINTER = enum.auto()
32
+ HAND = enum.auto()
33
+ SELECT_REGION = enum.auto()
34
+ MOVE = enum.auto()
35
+ WAIT = enum.auto()
36
+ RESIZE_HORIZONTAL = enum.auto()
37
+ RESIZE_VERTICAL = enum.auto()
38
+ cursors = Cursors # Backcompat.
39
+
40
+
41
+ # _tool_registry, _register_tool_class, and _find_tool_class implement a
42
+ # mechanism through which ToolManager.add_tool can determine whether a subclass
43
+ # of the requested tool class has been registered (either for the current
44
+ # canvas class or for a parent class), in which case that tool subclass will be
45
+ # instantiated instead. This is the mechanism used e.g. to allow different
46
+ # GUI backends to implement different specializations for ConfigureSubplots.
47
+
48
+
49
+ _tool_registry = set()
50
+
51
+
52
+ def _register_tool_class(canvas_cls, tool_cls=None):
53
+ """Decorator registering *tool_cls* as a tool class for *canvas_cls*."""
54
+ if tool_cls is None:
55
+ return functools.partial(_register_tool_class, canvas_cls)
56
+ _tool_registry.add((canvas_cls, tool_cls))
57
+ return tool_cls
58
+
59
+
60
+ def _find_tool_class(canvas_cls, tool_cls):
61
+ """Find a subclass of *tool_cls* registered for *canvas_cls*."""
62
+ for canvas_parent in canvas_cls.__mro__:
63
+ for tool_child in _api.recursive_subclasses(tool_cls):
64
+ if (canvas_parent, tool_child) in _tool_registry:
65
+ return tool_child
66
+ return tool_cls
67
+
68
+
69
+ # Views positions tool
70
+ _views_positions = 'viewpos'
71
+
72
+
73
+ class ToolBase:
74
+ """
75
+ Base tool class.
76
+
77
+ A base tool, only implements `trigger` method or no method at all.
78
+ The tool is instantiated by `matplotlib.backend_managers.ToolManager`.
79
+ """
80
+
81
+ default_keymap = None
82
+ """
83
+ Keymap to associate with this tool.
84
+
85
+ ``list[str]``: List of keys that will trigger this tool when a keypress
86
+ event is emitted on ``self.figure.canvas``. Note that this attribute is
87
+ looked up on the instance, and can therefore be a property (this is used
88
+ e.g. by the built-in tools to load the rcParams at instantiation time).
89
+ """
90
+
91
+ description = None
92
+ """
93
+ Description of the Tool.
94
+
95
+ `str`: Tooltip used if the Tool is included in a Toolbar.
96
+ """
97
+
98
+ image = None
99
+ """
100
+ Icon filename.
101
+
102
+ ``str | None``: Filename of the Toolbar icon; either absolute, or relative to the
103
+ directory containing the Python source file where the ``Tool.image`` class attribute
104
+ is defined (in the latter case, this cannot be defined as an instance attribute).
105
+ In either case, the extension is optional; leaving it off lets individual backends
106
+ select the icon format they prefer. If None, the *name* is used as a label in the
107
+ toolbar button.
108
+ """
109
+
110
+ def __init__(self, toolmanager, name):
111
+ self._name = name
112
+ self._toolmanager = toolmanager
113
+ self._figure = None
114
+
115
+ name = property(
116
+ lambda self: self._name,
117
+ doc="The tool id (str, must be unique among tools of a tool manager).")
118
+ toolmanager = property(
119
+ lambda self: self._toolmanager,
120
+ doc="The `.ToolManager` that controls this tool.")
121
+ canvas = property(
122
+ lambda self: self._figure.canvas if self._figure is not None else None,
123
+ doc="The canvas of the figure affected by this tool, or None.")
124
+
125
+ def set_figure(self, figure):
126
+ self._figure = figure
127
+
128
+ figure = property(
129
+ lambda self: self._figure,
130
+ # The setter must explicitly call self.set_figure so that subclasses can
131
+ # meaningfully override it.
132
+ lambda self, figure: self.set_figure(figure),
133
+ doc="The Figure affected by this tool, or None.")
134
+
135
+ def _make_classic_style_pseudo_toolbar(self):
136
+ """
137
+ Return a placeholder object with a single `canvas` attribute.
138
+
139
+ This is useful to reuse the implementations of tools already provided
140
+ by the classic Toolbars.
141
+ """
142
+ return SimpleNamespace(canvas=self.canvas)
143
+
144
+ def trigger(self, sender, event, data=None):
145
+ """
146
+ Called when this tool gets used.
147
+
148
+ This method is called by `.ToolManager.trigger_tool`.
149
+
150
+ Parameters
151
+ ----------
152
+ event : `.Event`
153
+ The canvas event that caused this tool to be called.
154
+ sender : object
155
+ Object that requested the tool to be triggered.
156
+ data : object
157
+ Extra data.
158
+ """
159
+ pass
160
+
161
+
162
+ class ToolToggleBase(ToolBase):
163
+ """
164
+ Toggleable tool.
165
+
166
+ Every time it is triggered, it switches between enable and disable.
167
+
168
+ Parameters
169
+ ----------
170
+ ``*args``
171
+ Variable length argument to be used by the Tool.
172
+ ``**kwargs``
173
+ `toggled` if present and True, sets the initial state of the Tool
174
+ Arbitrary keyword arguments to be consumed by the Tool
175
+ """
176
+
177
+ radio_group = None
178
+ """
179
+ Attribute to group 'radio' like tools (mutually exclusive).
180
+
181
+ `str` that identifies the group or **None** if not belonging to a group.
182
+ """
183
+
184
+ cursor = None
185
+ """Cursor to use when the tool is active."""
186
+
187
+ default_toggled = False
188
+ """Default of toggled state."""
189
+
190
+ def __init__(self, *args, **kwargs):
191
+ self._toggled = kwargs.pop('toggled', self.default_toggled)
192
+ super().__init__(*args, **kwargs)
193
+
194
+ def trigger(self, sender, event, data=None):
195
+ """Calls `enable` or `disable` based on `toggled` value."""
196
+ if self._toggled:
197
+ self.disable(event)
198
+ else:
199
+ self.enable(event)
200
+ self._toggled = not self._toggled
201
+
202
+ def enable(self, event=None):
203
+ """
204
+ Enable the toggle tool.
205
+
206
+ `trigger` calls this method when `toggled` is False.
207
+ """
208
+ pass
209
+
210
+ def disable(self, event=None):
211
+ """
212
+ Disable the toggle tool.
213
+
214
+ `trigger` call this method when `toggled` is True.
215
+
216
+ This can happen in different circumstances.
217
+
218
+ * Click on the toolbar tool button.
219
+ * Call to `matplotlib.backend_managers.ToolManager.trigger_tool`.
220
+ * Another `ToolToggleBase` derived tool is triggered
221
+ (from the same `.ToolManager`).
222
+ """
223
+ pass
224
+
225
+ @property
226
+ def toggled(self):
227
+ """State of the toggled tool."""
228
+ return self._toggled
229
+
230
+ def set_figure(self, figure):
231
+ toggled = self.toggled
232
+ if toggled:
233
+ if self.figure:
234
+ self.trigger(self, None)
235
+ else:
236
+ # if no figure the internal state is not changed
237
+ # we change it here so next call to trigger will change it back
238
+ self._toggled = False
239
+ super().set_figure(figure)
240
+ if toggled:
241
+ if figure:
242
+ self.trigger(self, None)
243
+ else:
244
+ # if there is no figure, trigger won't change the internal
245
+ # state we change it back
246
+ self._toggled = True
247
+
248
+
249
+ class ToolSetCursor(ToolBase):
250
+ """
251
+ Change to the current cursor while inaxes.
252
+
253
+ This tool, keeps track of all `ToolToggleBase` derived tools, and updates
254
+ the cursor when a tool gets triggered.
255
+ """
256
+ def __init__(self, *args, **kwargs):
257
+ super().__init__(*args, **kwargs)
258
+ self._id_drag = None
259
+ self._current_tool = None
260
+ self._default_cursor = cursors.POINTER
261
+ self._last_cursor = self._default_cursor
262
+ self.toolmanager.toolmanager_connect('tool_added_event',
263
+ self._add_tool_cbk)
264
+ for tool in self.toolmanager.tools.values(): # process current tools
265
+ self._add_tool_cbk(mpl.backend_managers.ToolEvent(
266
+ 'tool_added_event', self.toolmanager, tool))
267
+
268
+ def set_figure(self, figure):
269
+ if self._id_drag:
270
+ self.canvas.mpl_disconnect(self._id_drag)
271
+ super().set_figure(figure)
272
+ if figure:
273
+ self._id_drag = self.canvas.mpl_connect(
274
+ 'motion_notify_event', self._set_cursor_cbk)
275
+
276
+ def _add_tool_cbk(self, event):
277
+ """Process every newly added tool."""
278
+ if getattr(event.tool, 'cursor', None) is not None:
279
+ self.toolmanager.toolmanager_connect(
280
+ f'tool_trigger_{event.tool.name}', self._tool_trigger_cbk)
281
+
282
+ def _tool_trigger_cbk(self, event):
283
+ self._current_tool = event.tool if event.tool.toggled else None
284
+ self._set_cursor_cbk(event.canvasevent)
285
+
286
+ def _set_cursor_cbk(self, event):
287
+ if not event or not self.canvas:
288
+ return
289
+ if (self._current_tool and getattr(event, "inaxes", None)
290
+ and event.inaxes.get_navigate()):
291
+ if self._last_cursor != self._current_tool.cursor:
292
+ self.canvas.set_cursor(self._current_tool.cursor)
293
+ self._last_cursor = self._current_tool.cursor
294
+ elif self._last_cursor != self._default_cursor:
295
+ self.canvas.set_cursor(self._default_cursor)
296
+ self._last_cursor = self._default_cursor
297
+
298
+
299
+ class ToolCursorPosition(ToolBase):
300
+ """
301
+ Send message with the current pointer position.
302
+
303
+ This tool runs in the background reporting the position of the cursor.
304
+ """
305
+ def __init__(self, *args, **kwargs):
306
+ self._id_drag = None
307
+ super().__init__(*args, **kwargs)
308
+
309
+ def set_figure(self, figure):
310
+ if self._id_drag:
311
+ self.canvas.mpl_disconnect(self._id_drag)
312
+ super().set_figure(figure)
313
+ if figure:
314
+ self._id_drag = self.canvas.mpl_connect(
315
+ 'motion_notify_event', self.send_message)
316
+
317
+ def send_message(self, event):
318
+ """Call `matplotlib.backend_managers.ToolManager.message_event`."""
319
+ if self.toolmanager.messagelock.locked():
320
+ return
321
+
322
+ from matplotlib.backend_bases import NavigationToolbar2
323
+ message = NavigationToolbar2._mouse_event_to_message(event)
324
+ self.toolmanager.message_event(message, self)
325
+
326
+
327
+ class RubberbandBase(ToolBase):
328
+ """Draw and remove a rubberband."""
329
+ def trigger(self, sender, event, data=None):
330
+ """Call `draw_rubberband` or `remove_rubberband` based on data."""
331
+ if not self.figure.canvas.widgetlock.available(sender):
332
+ return
333
+ if data is not None:
334
+ self.draw_rubberband(*data)
335
+ else:
336
+ self.remove_rubberband()
337
+
338
+ def draw_rubberband(self, *data):
339
+ """
340
+ Draw rubberband.
341
+
342
+ This method must get implemented per backend.
343
+ """
344
+ raise NotImplementedError
345
+
346
+ def remove_rubberband(self):
347
+ """
348
+ Remove rubberband.
349
+
350
+ This method should get implemented per backend.
351
+ """
352
+ pass
353
+
354
+
355
+ class ToolQuit(ToolBase):
356
+ """Tool to call the figure manager destroy method."""
357
+
358
+ description = 'Quit the figure'
359
+ default_keymap = property(lambda self: mpl.rcParams['keymap.quit'])
360
+
361
+ def trigger(self, sender, event, data=None):
362
+ Gcf.destroy_fig(self.figure)
363
+
364
+
365
+ class ToolQuitAll(ToolBase):
366
+ """Tool to call the figure manager destroy method."""
367
+
368
+ description = 'Quit all figures'
369
+ default_keymap = property(lambda self: mpl.rcParams['keymap.quit_all'])
370
+
371
+ def trigger(self, sender, event, data=None):
372
+ Gcf.destroy_all()
373
+
374
+
375
+ class ToolGrid(ToolBase):
376
+ """Tool to toggle the major grids of the figure."""
377
+
378
+ description = 'Toggle major grids'
379
+ default_keymap = property(lambda self: mpl.rcParams['keymap.grid'])
380
+
381
+ def trigger(self, sender, event, data=None):
382
+ sentinel = str(uuid.uuid4())
383
+ # Trigger grid switching by temporarily setting :rc:`keymap.grid`
384
+ # to a unique key and sending an appropriate event.
385
+ with (cbook._setattr_cm(event, key=sentinel),
386
+ mpl.rc_context({'keymap.grid': sentinel})):
387
+ mpl.backend_bases.key_press_handler(event, self.figure.canvas)
388
+
389
+
390
+ class ToolMinorGrid(ToolBase):
391
+ """Tool to toggle the major and minor grids of the figure."""
392
+
393
+ description = 'Toggle major and minor grids'
394
+ default_keymap = property(lambda self: mpl.rcParams['keymap.grid_minor'])
395
+
396
+ def trigger(self, sender, event, data=None):
397
+ sentinel = str(uuid.uuid4())
398
+ # Trigger grid switching by temporarily setting :rc:`keymap.grid_minor`
399
+ # to a unique key and sending an appropriate event.
400
+ with (cbook._setattr_cm(event, key=sentinel),
401
+ mpl.rc_context({'keymap.grid_minor': sentinel})):
402
+ mpl.backend_bases.key_press_handler(event, self.figure.canvas)
403
+
404
+
405
+ class ToolFullScreen(ToolBase):
406
+ """Tool to toggle full screen."""
407
+
408
+ description = 'Toggle fullscreen mode'
409
+ default_keymap = property(lambda self: mpl.rcParams['keymap.fullscreen'])
410
+
411
+ def trigger(self, sender, event, data=None):
412
+ self.figure.canvas.manager.full_screen_toggle()
413
+
414
+
415
+ class AxisScaleBase(ToolToggleBase):
416
+ """Base Tool to toggle between linear and logarithmic."""
417
+
418
+ def trigger(self, sender, event, data=None):
419
+ if event.inaxes is None:
420
+ return
421
+ super().trigger(sender, event, data)
422
+
423
+ def enable(self, event=None):
424
+ self.set_scale(event.inaxes, 'log')
425
+ self.figure.canvas.draw_idle()
426
+
427
+ def disable(self, event=None):
428
+ self.set_scale(event.inaxes, 'linear')
429
+ self.figure.canvas.draw_idle()
430
+
431
+
432
+ class ToolYScale(AxisScaleBase):
433
+ """Tool to toggle between linear and logarithmic scales on the Y axis."""
434
+
435
+ description = 'Toggle scale Y axis'
436
+ default_keymap = property(lambda self: mpl.rcParams['keymap.yscale'])
437
+
438
+ def set_scale(self, ax, scale):
439
+ ax.set_yscale(scale)
440
+
441
+
442
+ class ToolXScale(AxisScaleBase):
443
+ """Tool to toggle between linear and logarithmic scales on the X axis."""
444
+
445
+ description = 'Toggle scale X axis'
446
+ default_keymap = property(lambda self: mpl.rcParams['keymap.xscale'])
447
+
448
+ def set_scale(self, ax, scale):
449
+ ax.set_xscale(scale)
450
+
451
+
452
+ class ToolViewsPositions(ToolBase):
453
+ """
454
+ Auxiliary Tool to handle changes in views and positions.
455
+
456
+ Runs in the background and should get used by all the tools that
457
+ need to access the figure's history of views and positions, e.g.
458
+
459
+ * `ToolZoom`
460
+ * `ToolPan`
461
+ * `ToolHome`
462
+ * `ToolBack`
463
+ * `ToolForward`
464
+ """
465
+
466
+ def __init__(self, *args, **kwargs):
467
+ self.views = WeakKeyDictionary()
468
+ self.positions = WeakKeyDictionary()
469
+ self.home_views = WeakKeyDictionary()
470
+ super().__init__(*args, **kwargs)
471
+
472
+ def add_figure(self, figure):
473
+ """Add the current figure to the stack of views and positions."""
474
+
475
+ if figure not in self.views:
476
+ self.views[figure] = cbook._Stack()
477
+ self.positions[figure] = cbook._Stack()
478
+ self.home_views[figure] = WeakKeyDictionary()
479
+ # Define Home
480
+ self.push_current(figure)
481
+ # Make sure we add a home view for new Axes as they're added
482
+ figure.add_axobserver(lambda fig: self.update_home_views(fig))
483
+
484
+ def clear(self, figure):
485
+ """Reset the Axes stack."""
486
+ if figure in self.views:
487
+ self.views[figure].clear()
488
+ self.positions[figure].clear()
489
+ self.home_views[figure].clear()
490
+ self.update_home_views()
491
+
492
+ def update_view(self):
493
+ """
494
+ Update the view limits and position for each Axes from the current
495
+ stack position. If any Axes are present in the figure that aren't in
496
+ the current stack position, use the home view limits for those Axes and
497
+ don't update *any* positions.
498
+ """
499
+
500
+ views = self.views[self.figure]()
501
+ if views is None:
502
+ return
503
+ pos = self.positions[self.figure]()
504
+ if pos is None:
505
+ return
506
+ home_views = self.home_views[self.figure]
507
+ all_axes = self.figure.get_axes()
508
+ for a in all_axes:
509
+ if a in views:
510
+ cur_view = views[a]
511
+ else:
512
+ cur_view = home_views[a]
513
+ a._set_view(cur_view)
514
+
515
+ if set(all_axes).issubset(pos):
516
+ for a in all_axes:
517
+ # Restore both the original and modified positions
518
+ a._set_position(pos[a][0], 'original')
519
+ a._set_position(pos[a][1], 'active')
520
+
521
+ self.figure.canvas.draw_idle()
522
+
523
+ def push_current(self, figure=None):
524
+ """
525
+ Push the current view limits and position onto their respective stacks.
526
+ """
527
+ if not figure:
528
+ figure = self.figure
529
+ views = WeakKeyDictionary()
530
+ pos = WeakKeyDictionary()
531
+ for a in figure.get_axes():
532
+ views[a] = a._get_view()
533
+ pos[a] = self._axes_pos(a)
534
+ self.views[figure].push(views)
535
+ self.positions[figure].push(pos)
536
+
537
+ def _axes_pos(self, ax):
538
+ """
539
+ Return the original and modified positions for the specified Axes.
540
+
541
+ Parameters
542
+ ----------
543
+ ax : matplotlib.axes.Axes
544
+ The `.Axes` to get the positions for.
545
+
546
+ Returns
547
+ -------
548
+ original_position, modified_position
549
+ A tuple of the original and modified positions.
550
+ """
551
+
552
+ return (ax.get_position(True).frozen(),
553
+ ax.get_position().frozen())
554
+
555
+ def update_home_views(self, figure=None):
556
+ """
557
+ Make sure that ``self.home_views`` has an entry for all Axes present
558
+ in the figure.
559
+ """
560
+
561
+ if not figure:
562
+ figure = self.figure
563
+ for a in figure.get_axes():
564
+ if a not in self.home_views[figure]:
565
+ self.home_views[figure][a] = a._get_view()
566
+
567
+ def home(self):
568
+ """Recall the first view and position from the stack."""
569
+ self.views[self.figure].home()
570
+ self.positions[self.figure].home()
571
+
572
+ def back(self):
573
+ """Back one step in the stack of views and positions."""
574
+ self.views[self.figure].back()
575
+ self.positions[self.figure].back()
576
+
577
+ def forward(self):
578
+ """Forward one step in the stack of views and positions."""
579
+ self.views[self.figure].forward()
580
+ self.positions[self.figure].forward()
581
+
582
+
583
+ class ViewsPositionsBase(ToolBase):
584
+ """Base class for `ToolHome`, `ToolBack` and `ToolForward`."""
585
+
586
+ _on_trigger = None
587
+
588
+ def trigger(self, sender, event, data=None):
589
+ self.toolmanager.get_tool(_views_positions).add_figure(self.figure)
590
+ getattr(self.toolmanager.get_tool(_views_positions),
591
+ self._on_trigger)()
592
+ self.toolmanager.get_tool(_views_positions).update_view()
593
+
594
+
595
+ class ToolHome(ViewsPositionsBase):
596
+ """Restore the original view limits."""
597
+
598
+ description = 'Reset original view'
599
+ image = 'mpl-data/images/home'
600
+ default_keymap = property(lambda self: mpl.rcParams['keymap.home'])
601
+ _on_trigger = 'home'
602
+
603
+
604
+ class ToolBack(ViewsPositionsBase):
605
+ """Move back up the view limits stack."""
606
+
607
+ description = 'Back to previous view'
608
+ image = 'mpl-data/images/back'
609
+ default_keymap = property(lambda self: mpl.rcParams['keymap.back'])
610
+ _on_trigger = 'back'
611
+
612
+
613
+ class ToolForward(ViewsPositionsBase):
614
+ """Move forward in the view lim stack."""
615
+
616
+ description = 'Forward to next view'
617
+ image = 'mpl-data/images/forward'
618
+ default_keymap = property(lambda self: mpl.rcParams['keymap.forward'])
619
+ _on_trigger = 'forward'
620
+
621
+
622
+ class ConfigureSubplotsBase(ToolBase):
623
+ """Base tool for the configuration of subplots."""
624
+
625
+ description = 'Configure subplots'
626
+ image = 'mpl-data/images/subplots'
627
+
628
+
629
+ class SaveFigureBase(ToolBase):
630
+ """Base tool for figure saving."""
631
+
632
+ description = 'Save the figure'
633
+ image = 'mpl-data/images/filesave'
634
+ default_keymap = property(lambda self: mpl.rcParams['keymap.save'])
635
+
636
+
637
class ZoomPanBase(ToolToggleBase):
    """Base class for `ToolZoom` and `ToolPan`."""
    def __init__(self, *args):
        super().__init__(*args)
        self._button_pressed = None  # mouse button held during the gesture
        self._xypress = None         # press info recorded by the subclass
        self._idPress = None         # canvas cid for button_press_event
        self._idRelease = None       # canvas cid for button_release_event
        self._idScroll = None        # canvas cid for scroll_event
        self.base_scale = 2.         # zoom factor applied per scroll step
        self.scrollthresh = .5  # .5 second scroll threshold
        self.lastscroll = time.time()-self.scrollthresh

    def enable(self, event=None):
        """Connect press/release events and lock the canvas."""
        self.figure.canvas.widgetlock(self)
        self._idPress = self.figure.canvas.mpl_connect(
            'button_press_event', self._press)
        self._idRelease = self.figure.canvas.mpl_connect(
            'button_release_event', self._release)
        self._idScroll = self.figure.canvas.mpl_connect(
            'scroll_event', self.scroll_zoom)

    def disable(self, event=None):
        """Release the canvas and disconnect press/release events."""
        # Abort any gesture in progress before tearing down the callbacks.
        self._cancel_action()
        self.figure.canvas.widgetlock.release(self)
        self.figure.canvas.mpl_disconnect(self._idPress)
        self.figure.canvas.mpl_disconnect(self._idRelease)
        self.figure.canvas.mpl_disconnect(self._idScroll)

    def trigger(self, sender, event, data=None):
        # Make sure the view-lim stack knows about this figure before toggling.
        self.toolmanager.get_tool(_views_positions).add_figure(self.figure)
        super().trigger(sender, event, data)
        # Mirror the toggle state into every axes' navigate mode
        # (e.g. "ZOOM"/"PAN", or None when toggled off).
        new_navigate_mode = self.name.upper() if self.toggled else None
        for ax in self.figure.axes:
            ax.set_navigate_mode(new_navigate_mode)

    def scroll_zoom(self, event):
        # https://gist.github.com/tacaswell/3144287
        if event.inaxes is None:
            return

        if event.button == 'up':
            # deal with zoom in
            scl = self.base_scale
        elif event.button == 'down':
            # deal with zoom out
            scl = 1/self.base_scale
        else:
            # deal with something that should never happen
            scl = 1

        ax = event.inaxes
        ax._set_view_from_bbox([event.x, event.y, scl])

        # If last scroll was done within the timing threshold, delete the
        # previous view
        if (time.time()-self.lastscroll) < self.scrollthresh:
            self.toolmanager.get_tool(_views_positions).back()

        self.figure.canvas.draw_idle()  # force re-draw

        self.lastscroll = time.time()
        self.toolmanager.get_tool(_views_positions).push_current()
702
+
703
+
704
class ToolZoom(ZoomPanBase):
    """A Tool for zooming using a rectangle selector."""

    description = 'Zoom to rectangle'
    image = 'mpl-data/images/zoom_to_rect'
    default_keymap = property(lambda self: mpl.rcParams['keymap.zoom'])
    cursor = cursors.SELECT_REGION
    radio_group = 'default'

    def __init__(self, *args):
        super().__init__(*args)
        # cids of the motion/key callbacks active only during a drag.
        self._ids_zoom = []

    def _cancel_action(self):
        """Abort an in-progress zoom gesture and reset all drag state."""
        for zoom_id in self._ids_zoom:
            self.figure.canvas.mpl_disconnect(zoom_id)
        # Trigger the rubberband tool with no data to clear the rectangle.
        self.toolmanager.trigger_tool('rubberband', self)
        self.figure.canvas.draw_idle()
        self._xypress = None
        self._button_pressed = None
        self._ids_zoom = []
        return

    def _press(self, event):
        """Callback for mouse button presses in zoom-to-rectangle mode."""

        # If we're already in the middle of a zoom, pressing another
        # button works to "cancel"
        if self._ids_zoom:
            self._cancel_action()

        if event.button == 1:
            self._button_pressed = 1
        elif event.button == 3:
            self._button_pressed = 3
        else:
            self._cancel_action()
            return

        x, y = event.x, event.y

        # Record press position and the current view for every zoomable axes
        # under the cursor.
        self._xypress = []
        for i, a in enumerate(self.figure.get_axes()):
            if (x is not None and y is not None and a.in_axes(event) and
                    a.get_navigate() and a.can_zoom()):
                self._xypress.append((x, y, a, i, a._get_view()))

        id1 = self.figure.canvas.mpl_connect(
            'motion_notify_event', self._mouse_move)
        id2 = self.figure.canvas.mpl_connect(
            'key_press_event', self._switch_on_zoom_mode)
        id3 = self.figure.canvas.mpl_connect(
            'key_release_event', self._switch_off_zoom_mode)

        self._ids_zoom = id1, id2, id3
        # Key held at press time constrains zooming to one axis ("x"/"y");
        # see _mouse_move.
        self._zoom_mode = event.key

    def _switch_on_zoom_mode(self, event):
        # A key press during the drag constrains the zoom axis.
        self._zoom_mode = event.key
        self._mouse_move(event)

    def _switch_off_zoom_mode(self, event):
        # Releasing the key removes the axis constraint.
        self._zoom_mode = None
        self._mouse_move(event)

    def _mouse_move(self, event):
        """Callback for mouse moves in zoom-to-rectangle mode."""

        if self._xypress:
            x, y = event.x, event.y
            lastx, lasty, a, ind, view = self._xypress[0]
            # Clip the rubberband rectangle to the axes bounding box.
            (x1, y1), (x2, y2) = np.clip(
                [[lastx, lasty], [x, y]], a.bbox.min, a.bbox.max)
            if self._zoom_mode == "x":
                # x-only zoom: span the full y extent.
                y1, y2 = a.bbox.intervaly
            elif self._zoom_mode == "y":
                # y-only zoom: span the full x extent.
                x1, x2 = a.bbox.intervalx
            self.toolmanager.trigger_tool(
                'rubberband', self, data=(x1, y1, x2, y2))

    def _release(self, event):
        """Callback for mouse button releases in zoom-to-rectangle mode."""

        for zoom_id in self._ids_zoom:
            self.figure.canvas.mpl_disconnect(zoom_id)
        self._ids_zoom = []

        if not self._xypress:
            self._cancel_action()
            return

        done_ax = []

        for cur_xypress in self._xypress:
            x, y = event.x, event.y
            lastx, lasty, a, _ind, view = cur_xypress
            # ignore singular clicks - 5 pixels is a threshold
            if abs(x - lastx) < 5 or abs(y - lasty) < 5:
                self._cancel_action()
                return

            # detect twinx, twiny Axes and avoid double zooming
            twinx = any(a.get_shared_x_axes().joined(a, a1) for a1 in done_ax)
            twiny = any(a.get_shared_y_axes().joined(a, a1) for a1 in done_ax)
            done_ax.append(a)

            # Button 1 zooms in to the rectangle, button 3 zooms out.
            if self._button_pressed == 1:
                direction = 'in'
            elif self._button_pressed == 3:
                direction = 'out'
            else:
                continue

            a._set_view_from_bbox((lastx, lasty, x, y), direction,
                                  self._zoom_mode, twinx, twiny)

        self._zoom_mode = None
        # Record the new view so back/forward navigation can restore it.
        self.toolmanager.get_tool(_views_positions).push_current()
        self._cancel_action()
823
+
824
+
825
class ToolPan(ZoomPanBase):
    """Pan Axes with left mouse, zoom with right."""

    default_keymap = property(lambda self: mpl.rcParams['keymap.pan'])
    description = 'Pan axes with left mouse, zoom with right'
    image = 'mpl-data/images/move'
    cursor = cursors.MOVE
    radio_group = 'default'

    def __init__(self, *args):
        super().__init__(*args)
        # canvas cid for the motion_notify_event callback during a drag.
        self._id_drag = None

    def _cancel_action(self):
        """Abort an in-progress pan gesture and reset the drag state."""
        self._button_pressed = None
        self._xypress = []
        self.figure.canvas.mpl_disconnect(self._id_drag)
        self.toolmanager.messagelock.release(self)
        self.figure.canvas.draw_idle()

    def _press(self, event):
        """Callback for mouse button presses in pan/zoom mode."""
        # Left button pans, right button zooms; anything else cancels.
        if event.button == 1:
            self._button_pressed = 1
        elif event.button == 3:
            self._button_pressed = 3
        else:
            self._cancel_action()
            return

        x, y = event.x, event.y

        # Start a pan on every navigable axes under the cursor.
        self._xypress = []
        for i, a in enumerate(self.figure.get_axes()):
            if (x is not None and y is not None and a.in_axes(event) and
                    a.get_navigate() and a.can_pan()):
                a.start_pan(x, y, event.button)
                self._xypress.append((a, i))
                # Hold the message lock so the cursor-position message is not
                # overwritten while dragging.
                self.toolmanager.messagelock(self)
                self._id_drag = self.figure.canvas.mpl_connect(
                    'motion_notify_event', self._mouse_move)

    def _release(self, event):
        """Callback for mouse button releases in pan/zoom mode."""
        if self._button_pressed is None:
            self._cancel_action()
            return

        self.figure.canvas.mpl_disconnect(self._id_drag)
        self.toolmanager.messagelock.release(self)

        for a, _ind in self._xypress:
            a.end_pan()
        if not self._xypress:
            # Nothing was being panned; just reset.
            self._cancel_action()
            return

        # Record the new view so back/forward navigation can restore it.
        self.toolmanager.get_tool(_views_positions).push_current()
        self._cancel_action()

    def _mouse_move(self, event):
        """Callback for mouse moves while a pan is in progress."""
        for a, _ind in self._xypress:
            # safer to use the recorded button at the _press than current
            # button: # multiple button can get pressed during motion...
            a.drag_pan(self._button_pressed, event.key, event.x, event.y)
        self.toolmanager.canvas.draw_idle()
889
+
890
+
891
class ToolHelpBase(ToolBase):
    """Base tool for displaying the available tools, their keymaps and descriptions."""

    description = 'Print tool list, shortcuts and description'
    default_keymap = property(lambda self: mpl.rcParams['keymap.help'])
    image = 'mpl-data/images/help'

    @staticmethod
    def format_shortcut(key_sequence):
        """
        Convert a shortcut string from the notation used in rc config to the
        standard notation for displaying shortcuts, e.g. 'ctrl+a' -> 'Ctrl+A'.
        """
        # Single characters (e.g. "h") stay as-is; otherwise the Shift implied
        # by an uppercase letter is made explicit before title-casing.
        return (key_sequence if len(key_sequence) == 1 else
                re.sub(r"\+[A-Z]", r"+Shift\g<0>", key_sequence).title())

    def _format_tool_keymap(self, name):
        """Return a comma-separated display string of *name*'s keymaps."""
        keymaps = self.toolmanager.get_tool_keymap(name)
        return ", ".join(self.format_shortcut(keymap) for keymap in keymaps)

    def _get_help_entries(self):
        """Return (name, shortcuts, description) rows for documented tools."""
        return [(name, self._format_tool_keymap(name), tool.description)
                for name, tool in sorted(self.toolmanager.tools.items())
                if tool.description]

    def _get_help_text(self):
        """Return a plain-text help listing, one tool per line."""
        entries = self._get_help_entries()
        entries = ["{}: {}\n\t{}".format(*entry) for entry in entries]
        return "\n".join(entries)

    def _get_help_html(self):
        """Return the help listing rendered as an HTML table."""
        fmt = "<tr><td>{}</td><td>{}</td><td>{}</td></tr>"
        rows = [fmt.format(
            "<b>Action</b>", "<b>Shortcuts</b>", "<b>Description</b>")]
        rows += [fmt.format(*row) for row in self._get_help_entries()]
        # Bug fix: the body rows must be concatenated ("".join) inside a single
        # <tbody>; the previous "<tbody>".join(rows[1:]) fused with the
        # preceding "</thead>" literal and used "</thead><tbody>" as a row
        # *separator*, producing malformed HTML.
        return ("<style>td {padding: 0px 4px}</style>"
                "<table><thead>" + rows[0] + "</thead>"
                "<tbody>" + "".join(rows[1:]) + "</tbody></table>")
927
+
928
+
929
class ToolCopyToClipboardBase(ToolBase):
    """Tool to copy the figure to the clipboard (stub: backends override)."""

    default_keymap = property(lambda self: mpl.rcParams['keymap.copy'])
    description = 'Copy the canvas figure to clipboard'

    def trigger(self, *args, **kwargs):
        # This base implementation only reports that no backend support exists.
        self.toolmanager.message_event("Copy tool is not available", self)
938
+
939
+
940
# Mapping of canonical tool names to their (backend-agnostic) implementing
# classes; consumed by `add_tools_to_manager`.
default_tools = {'home': ToolHome, 'back': ToolBack, 'forward': ToolForward,
                 'zoom': ToolZoom, 'pan': ToolPan,
                 'subplots': ConfigureSubplotsBase,
                 'save': SaveFigureBase,
                 'grid': ToolGrid,
                 'grid_minor': ToolMinorGrid,
                 'fullscreen': ToolFullScreen,
                 'quit': ToolQuit,
                 'quit_all': ToolQuitAll,
                 'xscale': ToolXScale,
                 'yscale': ToolYScale,
                 'position': ToolCursorPosition,
                 _views_positions: ToolViewsPositions,
                 'cursor': ToolSetCursor,
                 'rubberband': RubberbandBase,
                 'help': ToolHelpBase,
                 'copy': ToolCopyToClipboardBase,
                 }
958
+
959
# Default toolbar layout as (group name, ordered tool names) pairs; consumed
# by `add_tools_to_container`.
default_toolbar_tools = [['navigation', ['home', 'back', 'forward']],
                         ['zoompan', ['pan', 'zoom', 'subplots']],
                         ['io', ['save', 'help']]]
962
+
963
+
964
def add_tools_to_manager(toolmanager, tools=default_tools):
    """
    Register multiple tools with a `.ToolManager`.

    Parameters
    ----------
    toolmanager : `.backend_managers.ToolManager`
        Manager to which the tools are added.
    tools : {str: class_like}, optional
        The tools to add in a {name: tool} dict, see
        `.backend_managers.ToolManager.add_tool` for more info.
    """
    for tool_name, tool_class in tools.items():
        toolmanager.add_tool(tool_name, tool_class)
979
+
980
+
981
def add_tools_to_container(container, tools=default_toolbar_tools):
    """
    Populate a tool container from a grouped tool layout.

    Parameters
    ----------
    container : Container
        `.backend_bases.ToolContainerBase` object that will get the tools
        added.
    tools : list, optional
        List in the form ``[[group1, [tool1, tool2 ...]], [group2, [...]]]``
        where the tools ``[tool1, tool2, ...]`` will display in group1.
        See `.backend_bases.ToolContainerBase.add_tool` for details.
    """
    for group_name, group_tools in tools:
        for index, tool_name in enumerate(group_tools):
            container.add_tool(tool_name, group_name, index)
moondream/lib/python3.10/site-packages/matplotlib/bezier.py ADDED
@@ -0,0 +1,602 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A module providing some utility functions regarding Bézier path manipulation.
3
+ """
4
+
5
+ from functools import lru_cache
6
+ import math
7
+ import warnings
8
+
9
+ import numpy as np
10
+
11
+ from matplotlib import _api
12
+
13
+
14
+ # same algorithm as 3.8's math.comb
15
+ @np.vectorize
16
+ @lru_cache(maxsize=128)
17
+ def _comb(n, k):
18
+ if k > n:
19
+ return 0
20
+ k = min(k, n - k)
21
+ i = np.arange(1, k + 1)
22
+ return np.prod((n + 1 - i)/i).astype(int)
23
+
24
+
25
class NonIntersectingPathException(ValueError):
    # Raised when a Bézier segment and a closed path are expected to cross
    # but both endpoints lie on the same side of the path.
    pass
27
+
28
+
29
+ # some functions
30
+
31
+
32
def get_intersection(cx1, cy1, cos_t1, sin_t1,
                     cx2, cy2, cos_t2, sin_t2):
    """
    Return the intersection between the line through (*cx1*, *cy1*) at angle
    *t1* and the line through (*cx2*, *cy2*) at angle *t2*.
    """
    # Each line i satisfies  sin_ti * x - cos_ti * y = sin_ti*cxi - cos_ti*cyi,
    # giving a 2x2 linear system [[a, b], [c, d]] @ [x, y] = [rhs1, rhs2].
    rhs1 = sin_t1 * cx1 - cos_t1 * cy1
    rhs2 = sin_t2 * cx2 - cos_t2 * cy2

    a, b = sin_t1, -cos_t1
    c, d = sin_t2, -cos_t2

    det = a * d - b * c
    if abs(det) < 1e-12:
        # (Near-)parallel lines: the system is singular.
        raise ValueError("Given lines do not intersect. Please verify that "
                         "the angles are not equal or differ by 180 degrees.")

    # Explicit 2x2 inverse, scaled by 1/det.
    inv_a, inv_b, inv_c, inv_d = (k / det for k in (d, -b, -c, a))

    return (inv_a * rhs1 + inv_b * rhs2,
            inv_c * rhs1 + inv_d * rhs2)
63
+
64
+
65
def get_normal_points(cx, cy, cos_t, sin_t, length):
    """
    For a line passing through (*cx*, *cy*) and having an angle *t*, return
    locations of the two points located along its perpendicular line at the
    distance of *length*.
    """
    if length == 0.:
        return cx, cy, cx, cy

    # Offsets along the two normals (t - 90deg and t + 90deg).
    dx = length * sin_t
    dy = length * cos_t
    return cx + dx, cy - dy, cx - dx, cy + dy
82
+
83
+
84
+ # BEZIER routines
85
+
86
+ # subdividing bezier curve
87
+ # http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
88
+
89
+
90
+ def _de_casteljau1(beta, t):
91
+ next_beta = beta[:-1] * (1 - t) + beta[1:] * t
92
+ return next_beta
93
+
94
+
95
def split_de_casteljau(beta, t):
    """
    Split a Bézier segment defined by its control points *beta* into two
    separate segments divided at *t* and return their control points.
    """
    # Run de Casteljau reduction steps, keeping every intermediate level.
    levels = [np.asarray(beta)]
    while len(levels[-1]) > 1:
        current = levels[-1]
        levels.append(current[:-1] * (1 - t) + current[1:] * t)

    # First point of each level gives the left segment; last point of each
    # level, from the deepest up, gives the right segment.
    left_beta = [level[0] for level in levels]
    right_beta = [level[-1] for level in reversed(levels)]

    return left_beta, right_beta
111
+
112
+
113
def find_bezier_t_intersecting_with_closedpath(
        bezier_point_at_t, inside_closedpath, t0=0., t1=1., tolerance=0.01):
    """
    Find the intersection of the Bézier curve with a closed path.

    The intersection point *t* is approximated by two parameters *t0*, *t1*
    such that *t0* <= *t* <= *t1*.

    Search starts from *t0* and *t1* and uses a simple bisecting algorithm
    therefore one of the end points must be inside the path while the other
    doesn't. The search stops when the distance of the points parametrized by
    *t0* and *t1* gets smaller than the given *tolerance*.

    Parameters
    ----------
    bezier_point_at_t : callable
        A function returning x, y coordinates of the Bézier at parameter *t*.
        It must have the signature::

            bezier_point_at_t(t: float) -> tuple[float, float]

    inside_closedpath : callable
        A function returning True if a given point (x, y) is inside the
        closed path. It must have the signature::

            inside_closedpath(point: tuple[float, float]) -> bool

    t0, t1 : float
        Start parameters for the search.

    tolerance : float
        Maximal allowed distance between the final points.

    Returns
    -------
    t0, t1 : float
        The Bézier path parameters.
    """
    start = bezier_point_at_t(t0)
    end = bezier_point_at_t(t1)

    start_inside = inside_closedpath(start)
    end_inside = inside_closedpath(end)

    # Bisection requires the endpoints to straddle the path boundary
    # (identical endpoints are tolerated as a degenerate case).
    if start_inside == end_inside and start != end:
        raise NonIntersectingPathException(
            "Both points are on the same side of the closed path")

    # Bisect until the bracketing points are within tolerance of each other.
    while np.hypot(start[0] - end[0], start[1] - end[1]) >= tolerance:
        middle_t = 0.5 * (t0 + t1)
        middle = bezier_point_at_t(middle_t)
        middle_inside = inside_closedpath(middle)

        if start_inside ^ middle_inside:
            # Crossing lies in [t0, middle_t]: shrink from the right.
            t1 = middle_t
            if end == middle:
                # Edge case where infinite loop is possible
                # Caused by large numbers relative to tolerance
                break
            end = middle
        else:
            # Crossing lies in [middle_t, t1]: shrink from the left.
            t0 = middle_t
            if start == middle:
                # Edge case where infinite loop is possible
                # Caused by large numbers relative to tolerance
                break
            start = middle
            start_inside = middle_inside

    return t0, t1
187
+
188
+
189
class BezierSegment:
    """
    A d-dimensional Bézier segment.

    Parameters
    ----------
    control_points : (N, d) array
        Location of the *N* control points.
    """

    def __init__(self, control_points):
        self._cpoints = np.asarray(control_points)
        # N control points, each of dimension d.
        self._N, self._d = self._cpoints.shape
        # Bernstein exponents 0..N-1, reused in __call__.
        self._orders = np.arange(self._N)
        # Binomial coefficients C(N-1, i) of the Bernstein basis.
        coeff = [math.factorial(self._N - 1)
                 // (math.factorial(i) * math.factorial(self._N - 1 - i))
                 for i in range(self._N)]
        # Control points pre-scaled by their Bernstein coefficients.
        self._px = (self._cpoints.T * coeff).T

    def __call__(self, t):
        """
        Evaluate the Bézier curve at point(s) *t* in [0, 1].

        Parameters
        ----------
        t : (k,) array-like
            Points at which to evaluate the curve.

        Returns
        -------
        (k, d) array
            Value of the curve for each point in *t*.
        """
        t = np.asarray(t)
        # Bernstein basis: (1-t)^(N-1-j) * t^j times the pre-scaled points.
        return (np.power.outer(1 - t, self._orders[::-1])
                * np.power.outer(t, self._orders)) @ self._px

    def point_at_t(self, t):
        """
        Evaluate the curve at a single point, returning a tuple of *d* floats.
        """
        return tuple(self(t))

    @property
    def control_points(self):
        """The control points of the curve."""
        return self._cpoints

    @property
    def dimension(self):
        """The dimension of the curve."""
        return self._d

    @property
    def degree(self):
        """Degree of the polynomial. One less the number of control points."""
        return self._N - 1

    @property
    def polynomial_coefficients(self):
        r"""
        The polynomial coefficients of the Bézier curve.

        .. warning:: Follows opposite convention from `numpy.polyval`.

        Returns
        -------
        (n+1, d) array
            Coefficients after expanding in polynomial basis, where :math:`n`
            is the degree of the Bézier curve and :math:`d` its dimension.
            These are the numbers (:math:`C_j`) such that the curve can be
            written :math:`\sum_{j=0}^n C_j t^j`.

        Notes
        -----
        The coefficients are calculated as

        .. math::

            {n \choose j} \sum_{i=0}^j (-1)^{i+j} {j \choose i} P_i

        where :math:`P_i` are the control points of the curve.
        """
        n = self.degree
        # matplotlib uses n <= 4. overflow plausible starting around n = 15.
        if n > 10:
            warnings.warn("Polynomial coefficients formula unstable for high "
                          "order Bezier curves!", RuntimeWarning)
        P = self.control_points
        j = np.arange(n+1)[:, None]
        i = np.arange(n+1)[None, :]  # _comb is non-zero for i <= j
        prefactor = (-1)**(i + j) * _comb(j, i)  # j on axis 0, i on axis 1
        return _comb(n, j) * prefactor @ P  # j on axis 0, self.dimension on 1

    def axis_aligned_extrema(self):
        """
        Return the dimension and location of the curve's interior extrema.

        The extrema are the points along the curve where one of its partial
        derivatives is zero.

        Returns
        -------
        dims : array of int
            Index :math:`i` of the partial derivative which is zero at each
            interior extrema.
        dzeros : array of float
            Of same size as dims. The :math:`t` such that :math:`d/dx_i B(t) =
            0`
        """
        n = self.degree
        if n <= 1:
            # Lines have no interior extrema.
            return np.array([]), np.array([])
        Cj = self.polynomial_coefficients
        # Coefficients of the derivative polynomial: d/dt sum C_j t^j.
        dCj = np.arange(1, n+1)[:, None] * Cj[1:]
        dims = []
        roots = []
        for i, pi in enumerate(dCj.T):
            # np.roots expects highest-order coefficient first, hence [::-1].
            r = np.roots(pi[::-1])
            roots.append(r)
            dims.append(np.full_like(r, i))
        roots = np.concatenate(roots)
        dims = np.concatenate(dims)
        # Keep only real roots within the parameter range [0, 1].
        in_range = np.isreal(roots) & (roots >= 0) & (roots <= 1)
        return dims[in_range], np.real(roots)[in_range]
314
+
315
+
316
def split_bezier_intersecting_with_closedpath(
        bezier, inside_closedpath, tolerance=0.01):
    """
    Split a Bézier curve into two at the intersection with a closed path.

    Parameters
    ----------
    bezier : (N, 2) array-like
        Control points of the Bézier segment. See `.BezierSegment`.
    inside_closedpath : callable
        A function returning True if a given point (x, y) is inside the
        closed path. See also `.find_bezier_t_intersecting_with_closedpath`.
    tolerance : float
        The tolerance for the intersection. See also
        `.find_bezier_t_intersecting_with_closedpath`.

    Returns
    -------
    left, right
        Lists of control points for the two Bézier segments.
    """
    segment = BezierSegment(bezier)
    # Bracket the crossing parameter, then split at the bracket midpoint.
    t0, t1 = find_bezier_t_intersecting_with_closedpath(
        segment.point_at_t, inside_closedpath, tolerance=tolerance)
    return split_de_casteljau(bezier, (t0 + t1) / 2.)
346
+
347
+
348
+ # matplotlib specific
349
+
350
+
351
def split_path_inout(path, inside, tolerance=0.01, reorder_inout=False):
    """
    Divide a path into two segments at the point where ``inside(x, y)`` becomes
    False.
    """
    from .path import Path
    path_iter = path.iter_segments()

    ctl_points, command = next(path_iter)
    begin_inside = inside(ctl_points[-2:])  # true if begin point is inside

    ctl_points_old = ctl_points

    # i indexes the first vertex of the *next* segment in path.vertices;
    # iold is the corresponding index for the segment before it.
    iold = 0
    i = 1

    for ctl_points, command in path_iter:
        iold = i
        i += len(ctl_points) // 2  # each segment contributes len/2 vertices
        if inside(ctl_points[-2:]) != begin_inside:
            # The crossing happens within this segment; assemble the full
            # Bézier (previous end point + this segment's control points).
            bezier_path = np.concatenate([ctl_points_old[-2:], ctl_points])
            break
        ctl_points_old = ctl_points
    else:
        raise ValueError("The path does not intersect with the patch")

    bp = bezier_path.reshape((-1, 2))
    left, right = split_bezier_intersecting_with_closedpath(
        bp, inside, tolerance)
    # Path codes depend on the split segment's degree:
    # 2 points -> line, 3 -> quadratic, 4 -> cubic.
    if len(left) == 2:
        codes_left = [Path.LINETO]
        codes_right = [Path.MOVETO, Path.LINETO]
    elif len(left) == 3:
        codes_left = [Path.CURVE3, Path.CURVE3]
        codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
    elif len(left) == 4:
        codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
        codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
    else:
        raise AssertionError("This should never be reached")

    # left[0] is the shared start vertex, already present in the path.
    verts_left = left[1:]
    verts_right = right[:]

    if path.codes is None:
        path_in = Path(np.concatenate([path.vertices[:i], verts_left]))
        path_out = Path(np.concatenate([verts_right, path.vertices[i:]]))

    else:
        path_in = Path(np.concatenate([path.vertices[:iold], verts_left]),
                       np.concatenate([path.codes[:iold], codes_left]))

        path_out = Path(np.concatenate([verts_right, path.vertices[i:]]),
                        np.concatenate([codes_right, path.codes[i:]]))

    # Optionally guarantee that the first returned path is the "inside" one.
    if reorder_inout and not begin_inside:
        path_in, path_out = path_out, path_in

    return path_in, path_out
410
+
411
+
412
def inside_circle(cx, cy, r):
    """
    Return a function that checks whether a point is in a circle with center
    (*cx*, *cy*) and radius *r*.

    The returned function has the signature::

        f(xy: tuple[float, float]) -> bool
    """
    # Compare squared distances to avoid a sqrt per call.
    r_squared = r ** 2

    def _checker(xy):
        px, py = xy
        return (px - cx) ** 2 + (py - cy) ** 2 < r_squared

    return _checker
427
+
428
+
429
+ # quadratic Bezier lines
430
+
431
def get_cos_sin(x0, y0, x1, y1):
    """
    Return (cos, sin) of the direction from (*x0*, *y0*) to (*x1*, *y1*);
    (0, 0) when the two points coincide.
    """
    run, rise = x1 - x0, y1 - y0
    length = (run * run + rise * rise) ** .5
    if length == 0:
        # Degenerate segment: no defined direction.
        return 0.0, 0.0
    return run / length, rise / length
438
+
439
+
440
def check_if_parallel(dx1, dy1, dx2, dy2, tolerance=1.e-5):
    """
    Check if two lines are parallel.

    Parameters
    ----------
    dx1, dy1, dx2, dy2 : float
        The gradients *dy*/*dx* of the two lines.
    tolerance : float
        The angular tolerance in radians up to which the lines are considered
        parallel.

    Returns
    -------
    is_parallel
        - 1 if two lines are parallel in same direction.
        - -1 if two lines are parallel in opposite direction.
        - False otherwise.
    """
    angle_delta = abs(np.arctan2(dx1, dy1) - np.arctan2(dx2, dy2))
    if angle_delta < tolerance:
        return 1
    if abs(angle_delta - np.pi) < tolerance:
        return -1
    return False
468
+
469
+
470
def get_parallels(bezier2, width):
    """
    Given the quadratic Bézier control points *bezier2*, returns
    control points of quadratic Bézier lines roughly parallel to given
    one separated by *width*.

    Parameters
    ----------
    bezier2 : (3, 2) array-like
        Control points of the quadratic Bézier segment.
    width : float
        Distance separating each returned parallel curve from the input one.

    Returns
    -------
    path_left, path_right : list of three (x, y) tuples
        Control points of the left and right parallel quadratic Béziers.
    """

    # The parallel Bezier lines are constructed by following ways.
    # c1 and c2 are control points representing the start and end of the
    # Bezier line.
    # cm is the middle point

    c1x, c1y = bezier2[0]
    cmx, cmy = bezier2[1]
    c2x, c2y = bezier2[2]

    parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,
                                      cmx - c2x, cmy - c2y)

    if parallel_test == -1:
        # Degenerate case: the two halves point in opposite directions, so the
        # tangents cannot intersect; fall back to a straight line.
        _api.warn_external(
            "Lines do not intersect. A straight line is used instead.")
        cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)
        cos_t2, sin_t2 = cos_t1, sin_t1
    else:
        # t1 and t2 is the angle between c1 and cm, cm, c2. They are
        # also an angle of the tangential line of the path at c1 and c2
        cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
        cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)

    # find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
    # Bezier path at a distance of width. Same thing for c2_left and
    # c2_right with respect to c2.
    c1x_left, c1y_left, c1x_right, c1y_right = (
        get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
    )
    c2x_left, c2y_left, c2x_right, c2y_right = (
        get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
    )

    # find cm_left which is the intersecting point of a line through
    # c1_left with angle t1 and a line through c2_left with angle
    # t2. Same with cm_right.
    try:
        cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1,
                                              sin_t1, c2x_left, c2y_left,
                                              cos_t2, sin_t2)
        cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1,
                                                sin_t1, c2x_right, c2y_right,
                                                cos_t2, sin_t2)
    except ValueError:
        # Special case straight lines, i.e., angle between two lines is
        # less than the threshold used by get_intersection (we don't use
        # check_if_parallel as the threshold is not the same).
        cmx_left, cmy_left = (
            0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)
        )
        cmx_right, cmy_right = (
            0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)
        )

    # the parallel Bezier lines are created with control points of
    # [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
    path_left = [(c1x_left, c1y_left),
                 (cmx_left, cmy_left),
                 (c2x_left, c2y_left)]
    path_right = [(c1x_right, c1y_right),
                  (cmx_right, cmy_right),
                  (c2x_right, c2y_right)]

    return path_left, path_right
542
+
543
+
544
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
    """
    Find control points of the Bézier curve passing through (*c1x*, *c1y*),
    (*mmx*, *mmy*), and (*c2x*, *c2y*), at parametric values 0, 0.5, and 1.
    """
    # Solving B(0.5) == (mmx, mmy) for the middle control point gives
    # cm = 2*mm - (c1 + c2)/2.
    mid_x = .5 * (4 * mmx - (c1x + c2x))
    mid_y = .5 * (4 * mmy - (c1y + c2y))
    return [(c1x, c1y), (mid_x, mid_y), (c2x, c2y)]
552
+
553
+
554
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
    """
    Being similar to `get_parallels`, returns control points of two quadratic
    Bézier lines having a width roughly parallel to given one separated by
    *width*.

    Parameters
    ----------
    bezier2 : (3, 2) array-like
        Control points of the quadratic Bézier segment.
    width : float
        Base separation between the two returned curves.
    w1, wm, w2 : float
        Relative width factors applied at the start, middle and end of the
        curve, producing the wedge shape.
    """

    # c1, cm, c2
    c1x, c1y = bezier2[0]
    cmx, cmy = bezier2[1]
    c3x, c3y = bezier2[2]

    # t1 and t2 is the angle between c1 and cm, cm, c3.
    # They are also an angle of the tangential line of the path at c1 and c3
    cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
    cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)

    # find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
    # Bezier path at a distance of width. Same thing for c3_left and
    # c3_right with respect to c3.
    c1x_left, c1y_left, c1x_right, c1y_right = (
        get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)
    )
    c3x_left, c3y_left, c3x_right, c3y_right = (
        get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)
    )

    # find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and
    # c12-c23
    c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5
    c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5
    c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5

    # tangential angle of c123 (angle between c12 and c23)
    cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)

    # c123 is the curve point at t = 0.5; offset it by the middle width.
    c123x_left, c123y_left, c123x_right, c123y_right = (
        get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)
    )

    # Fit quadratic Béziers through the three offset points on each side.
    path_left = find_control_points(c1x_left, c1y_left,
                                    c123x_left, c123y_left,
                                    c3x_left, c3y_left)
    path_right = find_control_points(c1x_right, c1y_right,
                                     c123x_right, c123y_right,
                                     c3x_right, c3y_right)

    return path_left, path_right