ZTWHHH committed on
Commit
dc8f7b3
·
verified ·
1 Parent(s): 79530ba

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/attrmatrix.py +465 -0
  2. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/spectrum.py +186 -0
  3. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__init__.py +0 -0
  4. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  5. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-310.pyc +0 -0
  6. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-310.pyc +0 -0
  7. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__pycache__/test_laplacian.cpython-310.pyc +0 -0
  8. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__pycache__/test_spectrum.cpython-310.pyc +0 -0
  9. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py +402 -0
  10. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_attrmatrix.py +108 -0
  11. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_graphmatrix.py +276 -0
  12. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_laplacian.py +336 -0
  13. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_modularity.py +87 -0
  14. mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_spectrum.py +71 -0
  15. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/graphml.py +1053 -0
  16. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/leda.py +108 -0
  17. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/pajek.py +286 -0
  18. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/sparse6.py +377 -0
  19. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/__init__.py +0 -0
  20. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-310.pyc +0 -0
  21. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-310.pyc +0 -0
  22. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-310.pyc +0 -0
  23. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_adjlist.py +262 -0
  24. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_edgelist.py +314 -0
  25. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_gexf.py +557 -0
  26. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_gml.py +744 -0
  27. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_graph6.py +168 -0
  28. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_graphml.py +1531 -0
  29. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_leda.py +30 -0
  30. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_p2g.py +62 -0
  31. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_pajek.py +126 -0
  32. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_sparse6.py +166 -0
  33. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_text.py +1742 -0
  34. mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/text.py +852 -0
  35. pllava/lib/python3.10/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h +90 -0
  36. pllava/lib/python3.10/site-packages/numpy/core/include/numpy/npy_3kcompat.h +595 -0
  37. pllava/lib/python3.10/site-packages/numpy/core/include/numpy/npy_cpu.h +129 -0
  38. pllava/lib/python3.10/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h +20 -0
  39. pllava/lib/python3.10/site-packages/numpy/core/include/numpy/npy_os.h +42 -0
  40. pllava/lib/python3.10/site-packages/numpy/core/include/numpy/random/LICENSE.txt +21 -0
  41. pllava/lib/python3.10/site-packages/numpy/core/include/numpy/random/bitgen.h +20 -0
  42. pllava/lib/python3.10/site-packages/numpy/core/include/numpy/random/libdivide.h +2079 -0
  43. pllava/lib/python3.10/site-packages/numpy/core/include/numpy/ufuncobject.h +359 -0
  44. pllava/lib/python3.10/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl +3 -0
  45. pllava/lib/python3.10/site-packages/numpy/fft/__init__.py +212 -0
  46. pllava/lib/python3.10/site-packages/numpy/fft/__init__.pyi +29 -0
  47. pllava/lib/python3.10/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-310.pyc +0 -0
  48. pllava/lib/python3.10/site-packages/numpy/fft/__pycache__/helper.cpython-310.pyc +0 -0
  49. pllava/lib/python3.10/site-packages/numpy/fft/_pocketfft.pyi +108 -0
  50. pllava/lib/python3.10/site-packages/numpy/fft/helper.py +221 -0
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/attrmatrix.py ADDED
@@ -0,0 +1,465 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions for constructing matrix-like objects from graph attributes.
3
+ """
4
+
5
+ import networkx as nx
6
+
7
+ __all__ = ["attr_matrix", "attr_sparse_matrix"]
8
+
9
+
10
+ def _node_value(G, node_attr):
11
+ """Returns a function that returns a value from G.nodes[u].
12
+
13
+ We return a function expecting a node as its sole argument. Then, in the
14
+ simplest scenario, the returned function will return G.nodes[u][node_attr].
15
+ However, we also handle the case when `node_attr` is None or when it is a
16
+ function itself.
17
+
18
+ Parameters
19
+ ----------
20
+ G : graph
21
+ A NetworkX graph
22
+
23
+ node_attr : {None, str, callable}
24
+ Specification of how the value of the node attribute should be obtained
25
+ from the node attribute dictionary.
26
+
27
+ Returns
28
+ -------
29
+ value : function
30
+ A function expecting a node as its sole argument. The function will
31
+ returns a value from G.nodes[u] that depends on `edge_attr`.
32
+
33
+ """
34
+ if node_attr is None:
35
+
36
+ def value(u):
37
+ return u
38
+
39
+ elif not callable(node_attr):
40
+ # assume it is a key for the node attribute dictionary
41
+ def value(u):
42
+ return G.nodes[u][node_attr]
43
+
44
+ else:
45
+ # Advanced: Allow users to specify something else.
46
+ #
47
+ # For example,
48
+ # node_attr = lambda u: G.nodes[u].get('size', .5) * 3
49
+ #
50
+ value = node_attr
51
+
52
+ return value
53
+
54
+
55
+ def _edge_value(G, edge_attr):
56
+ """Returns a function that returns a value from G[u][v].
57
+
58
+ Suppose there exists an edge between u and v. Then we return a function
59
+ expecting u and v as arguments. For Graph and DiGraph, G[u][v] is
60
+ the edge attribute dictionary, and the function (essentially) returns
61
+ G[u][v][edge_attr]. However, we also handle cases when `edge_attr` is None
62
+ and when it is a function itself. For MultiGraph and MultiDiGraph, G[u][v]
63
+ is a dictionary of all edges between u and v. In this case, the returned
64
+ function sums the value of `edge_attr` for every edge between u and v.
65
+
66
+ Parameters
67
+ ----------
68
+ G : graph
69
+ A NetworkX graph
70
+
71
+ edge_attr : {None, str, callable}
72
+ Specification of how the value of the edge attribute should be obtained
73
+ from the edge attribute dictionary, G[u][v]. For multigraphs, G[u][v]
74
+ is a dictionary of all the edges between u and v. This allows for
75
+ special treatment of multiedges.
76
+
77
+ Returns
78
+ -------
79
+ value : function
80
+ A function expecting two nodes as parameters. The nodes should
81
+ represent the from- and to- node of an edge. The function will
82
+ return a value from G[u][v] that depends on `edge_attr`.
83
+
84
+ """
85
+
86
+ if edge_attr is None:
87
+ # topological count of edges
88
+
89
+ if G.is_multigraph():
90
+
91
+ def value(u, v):
92
+ return len(G[u][v])
93
+
94
+ else:
95
+
96
+ def value(u, v):
97
+ return 1
98
+
99
+ elif not callable(edge_attr):
100
+ # assume it is a key for the edge attribute dictionary
101
+
102
+ if edge_attr == "weight":
103
+ # provide a default value
104
+ if G.is_multigraph():
105
+
106
+ def value(u, v):
107
+ return sum(d.get(edge_attr, 1) for d in G[u][v].values())
108
+
109
+ else:
110
+
111
+ def value(u, v):
112
+ return G[u][v].get(edge_attr, 1)
113
+
114
+ else:
115
+ # otherwise, the edge attribute MUST exist for each edge
116
+ if G.is_multigraph():
117
+
118
+ def value(u, v):
119
+ return sum(d[edge_attr] for d in G[u][v].values())
120
+
121
+ else:
122
+
123
+ def value(u, v):
124
+ return G[u][v][edge_attr]
125
+
126
+ else:
127
+ # Advanced: Allow users to specify something else.
128
+ #
129
+ # Alternative default value:
130
+ # edge_attr = lambda u,v: G[u][v].get('thickness', .5)
131
+ #
132
+ # Function on an attribute:
133
+ # edge_attr = lambda u,v: abs(G[u][v]['weight'])
134
+ #
135
+ # Handle Multi(Di)Graphs differently:
136
+ # edge_attr = lambda u,v: numpy.prod([d['size'] for d in G[u][v].values()])
137
+ #
138
+ # Ignore multiple edges
139
+ # edge_attr = lambda u,v: 1 if len(G[u][v]) else 0
140
+ #
141
+ value = edge_attr
142
+
143
+ return value
144
+
145
+
146
@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
def attr_matrix(
    G,
    edge_attr=None,
    node_attr=None,
    normalized=False,
    rc_order=None,
    dtype=None,
    order=None,
):
    """Returns the attribute matrix using attributes from `G` as a numpy array.

    If only `G` is passed in, then the adjacency matrix is constructed.

    Let A be a discrete set of values for the node attribute `node_attr`. Then
    the elements of A represent the rows and columns of the constructed matrix.
    Now, iterate through every edge e=(u,v) in `G` and consider the value
    of the edge attribute `edge_attr`. If ua and va are the values of the
    node attribute `node_attr` for u and v, respectively, then the value of
    the edge attribute is added to the matrix element at (ua, va).

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the attribute matrix.

    edge_attr : str, optional
        Each element of the matrix represents a running total of the
        specified edge attribute for edges whose node attributes correspond
        to the rows/cols of the matrix. The attribute must be present for
        all edges in the graph. If no attribute is specified, then we
        just count the number of edges whose node attributes correspond
        to the matrix element.

    node_attr : str, optional
        Each row and column in the matrix represents a particular value
        of the node attribute. The attribute must be present for all nodes
        in the graph. Note, the values of this attribute should be reliably
        hashable. So, float values are not recommended. If no attribute is
        specified, then the rows and columns will be the nodes of the graph.

    normalized : bool, optional
        If True, then each row is normalized by the summation of its values.

    rc_order : list, optional
        A list of the node attribute values. This list specifies the ordering
        of rows and columns of the array. If no ordering is provided, then
        the ordering will be random (and also, a return value).

    Other Parameters
    ----------------
    dtype : NumPy data-type, optional
        A valid NumPy dtype used to initialize the array. Keep in mind certain
        dtypes can yield unexpected results if the array is to be normalized.
        The parameter is passed to numpy.zeros(). If unspecified, the NumPy
        default is used.

    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. This parameter is passed to
        numpy.zeros(). If unspecified, the NumPy default is used.

    Returns
    -------
    M : 2D NumPy ndarray
        The attribute matrix.

    ordering : list
        If `rc_order` was specified, then only the attribute matrix is returned.
        However, if `rc_order` was None, then the ordering used to construct
        the matrix is returned as well.

    Examples
    --------
    Construct an adjacency matrix:

    >>> G = nx.Graph()
    >>> G.add_edge(0, 1, thickness=1, weight=3)
    >>> G.add_edge(0, 2, thickness=2)
    >>> G.add_edge(1, 2, thickness=3)
    >>> nx.attr_matrix(G, rc_order=[0, 1, 2])
    array([[0., 1., 1.],
           [1., 0., 1.],
           [1., 1., 0.]])

    Alternatively, we can obtain the matrix describing edge thickness.

    >>> nx.attr_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2])
    array([[0., 1., 2.],
           [1., 0., 3.],
           [2., 3., 0.]])

    We can also color the nodes and ask for the probability distribution over
    all edges (u,v) describing:

        Pr(v has color Y | u has color X)

    >>> G.nodes[0]["color"] = "red"
    >>> G.nodes[1]["color"] = "red"
    >>> G.nodes[2]["color"] = "blue"
    >>> rc = ["red", "blue"]
    >>> nx.attr_matrix(G, node_attr="color", normalized=True, rc_order=rc)
    array([[0.33333333, 0.66666667],
           [1.        , 0.        ]])

    For example, the above tells us that for all edges (u,v):

        Pr( v is red  | u is red)  = 1/3
        Pr( v is blue | u is red)  = 2/3

        Pr( v is red  | u is blue) = 1
        Pr( v is blue | u is blue) = 0

    Finally, we can obtain the total weights listed by the node colors.

    >>> nx.attr_matrix(G, edge_attr="weight", node_attr="color", rc_order=rc)
    array([[3., 2.],
           [2., 0.]])

    Thus, the total weight over all edges (u,v) with u and v having colors:

        (red, red)   is 3   # the sole contribution is from edge (0,1)
        (red, blue)  is 2   # contributions from edges (0,2) and (1,2)
        (blue, red)  is 2   # same as (red, blue) since graph is undirected
        (blue, blue) is 0   # there are no edges with blue endpoints

    """
    import numpy as np

    get_edge = _edge_value(G, edge_attr)
    get_node = _node_value(G, node_attr)

    # Either take the caller's row/column ordering, or derive one from the
    # distinct node-attribute values (arbitrary order; returned to the caller).
    ordering = list({get_node(n) for n in G}) if rc_order is None else rc_order

    n_vals = len(ordering)
    is_undirected = not G.is_directed()
    pos = {val: k for k, val in enumerate(ordering)}
    M = np.zeros((n_vals, n_vals), dtype=dtype, order=order)

    done = set()
    for u, nbrdict in G.adjacency():
        row = pos[get_node(u)]
        for v in nbrdict:
            # For undirected graphs, skip neighbors already fully processed
            # so each edge contributes exactly once.
            if v in done:
                continue
            col = pos[get_node(v)]
            M[row, col] += get_edge(u, v)
            if is_undirected:
                M[col, row] = M[row, col]
        if is_undirected:
            done.add(u)

    if normalized:
        # Row-normalize so each row sums to 1.
        M /= M.sum(axis=1).reshape((n_vals, 1))

    if rc_order is None:
        return M, ordering
    else:
        return M
308
+
309
+
310
@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
def attr_sparse_matrix(
    G, edge_attr=None, node_attr=None, normalized=False, rc_order=None, dtype=None
):
    """Returns a SciPy sparse array using attributes from G.

    If only `G` is passed in, then the adjacency matrix is constructed.

    Let A be a discrete set of values for the node attribute `node_attr`. Then
    the elements of A represent the rows and columns of the constructed matrix.
    Now, iterate through every edge e=(u,v) in `G` and consider the value
    of the edge attribute `edge_attr`. If ua and va are the values of the
    node attribute `node_attr` for u and v, respectively, then the value of
    the edge attribute is added to the matrix element at (ua, va).

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the NumPy matrix.

    edge_attr : str, optional
        Each element of the matrix represents a running total of the
        specified edge attribute for edges whose node attributes correspond
        to the rows/cols of the matrix. The attribute must be present for
        all edges in the graph. If no attribute is specified, then we
        just count the number of edges whose node attributes correspond
        to the matrix element.

    node_attr : str, optional
        Each row and column in the matrix represents a particular value
        of the node attribute. The attribute must be present for all nodes
        in the graph. Note, the values of this attribute should be reliably
        hashable. So, float values are not recommended. If no attribute is
        specified, then the rows and columns will be the nodes of the graph.

    normalized : bool, optional
        If True, then each row is normalized by the summation of its values.

    rc_order : list, optional
        A list of the node attribute values. This list specifies the ordering
        of rows and columns of the array. If no ordering is provided, then
        the ordering will be random (and also, a return value).

    Other Parameters
    ----------------
    dtype : NumPy data-type, optional
        A valid NumPy dtype used to initialize the array. Keep in mind certain
        dtypes can yield unexpected results if the array is to be normalized.
        The parameter is passed to numpy.zeros(). If unspecified, the NumPy
        default is used.

    Returns
    -------
    M : SciPy sparse array
        The attribute matrix.

    ordering : list
        If `rc_order` was specified, then only the matrix is returned.
        However, if `rc_order` was None, then the ordering used to construct
        the matrix is returned as well.

    Examples
    --------
    Construct an adjacency matrix:

    >>> G = nx.Graph()
    >>> G.add_edge(0, 1, thickness=1, weight=3)
    >>> G.add_edge(0, 2, thickness=2)
    >>> G.add_edge(1, 2, thickness=3)
    >>> M = nx.attr_sparse_matrix(G, rc_order=[0, 1, 2])
    >>> M.toarray()
    array([[0., 1., 1.],
           [1., 0., 1.],
           [1., 1., 0.]])

    Alternatively, we can obtain the matrix describing edge thickness.

    >>> M = nx.attr_sparse_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2])
    >>> M.toarray()
    array([[0., 1., 2.],
           [1., 0., 3.],
           [2., 3., 0.]])

    We can also color the nodes and ask for the probability distribution over
    all edges (u,v) describing:

        Pr(v has color Y | u has color X)

    >>> G.nodes[0]["color"] = "red"
    >>> G.nodes[1]["color"] = "red"
    >>> G.nodes[2]["color"] = "blue"
    >>> rc = ["red", "blue"]
    >>> M = nx.attr_sparse_matrix(G, node_attr="color", normalized=True, rc_order=rc)
    >>> M.toarray()
    array([[0.33333333, 0.66666667],
           [1.        , 0.        ]])

    For example, the above tells us that for all edges (u,v):

        Pr( v is red  | u is red)  = 1/3
        Pr( v is blue | u is red)  = 2/3

        Pr( v is red  | u is blue) = 1
        Pr( v is blue | u is blue) = 0

    Finally, we can obtain the total weights listed by the node colors.

    >>> M = nx.attr_sparse_matrix(G, edge_attr="weight", node_attr="color", rc_order=rc)
    >>> M.toarray()
    array([[3., 2.],
           [2., 0.]])

    Thus, the total weight over all edges (u,v) with u and v having colors:

        (red, red)   is 3   # the sole contribution is from edge (0,1)
        (red, blue)  is 2   # contributions from edges (0,2) and (1,2)
        (blue, red)  is 2   # same as (red, blue) since graph is undirected
        (blue, blue) is 0   # there are no edges with blue endpoints

    """
    import numpy as np
    import scipy as sp

    get_edge = _edge_value(G, edge_attr)
    get_node = _node_value(G, node_attr)

    # Either take the caller's row/column ordering, or derive one from the
    # distinct node-attribute values (arbitrary order; returned to the caller).
    ordering = list({get_node(n) for n in G}) if rc_order is None else rc_order

    n_vals = len(ordering)
    is_undirected = not G.is_directed()
    pos = {val: k for k, val in enumerate(ordering)}
    # LIL format supports efficient incremental element updates.
    M = sp.sparse.lil_array((n_vals, n_vals), dtype=dtype)

    done = set()
    for u, nbrdict in G.adjacency():
        row = pos[get_node(u)]
        for v in nbrdict:
            # For undirected graphs, skip neighbors already fully processed
            # so each edge contributes exactly once.
            if v in done:
                continue
            col = pos[get_node(v)]
            M[row, col] += get_edge(u, v)
            if is_undirected:
                M[col, row] = M[row, col]
        if is_undirected:
            done.add(u)

    if normalized:
        M *= 1 / M.sum(axis=1)[:, np.newaxis]  # in-place mult preserves sparse

    if rc_order is None:
        return M, ordering
    else:
        return M
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/spectrum.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Eigenvalue spectrum of graphs.
3
+ """
4
+
5
+ import networkx as nx
6
+
7
+ __all__ = [
8
+ "laplacian_spectrum",
9
+ "adjacency_spectrum",
10
+ "modularity_spectrum",
11
+ "normalized_laplacian_spectrum",
12
+ "bethe_hessian_spectrum",
13
+ ]
14
+
15
+
16
@nx._dispatchable(edge_attrs="weight")
def laplacian_spectrum(G, weight="weight"):
    """Returns eigenvalues of the Laplacian of G

    Parameters
    ----------
    G : graph
        A NetworkX graph

    weight : string or None, optional (default='weight')
        The edge data key used to compute each value in the matrix.
        If None, then each edge has weight 1.

    Returns
    -------
    evals : NumPy array
        Eigenvalues

    Notes
    -----
    For MultiGraph/MultiDiGraph, the edges weights are summed.
    See :func:`~networkx.convert_matrix.to_numpy_array` for other options.

    See Also
    --------
    laplacian_matrix

    Examples
    --------
    The multiplicity of 0 as an eigenvalue of the laplacian matrix is equal
    to the number of connected components of G.

    >>> G = nx.Graph()  # Create a graph with 5 nodes and 3 connected components
    >>> G.add_nodes_from(range(5))
    >>> G.add_edges_from([(0, 2), (3, 4)])
    >>> nx.laplacian_spectrum(G)
    array([0., 0., 0., 2., 2.])

    """
    import scipy as sp

    # The Laplacian is symmetric, so the Hermitian solver applies and the
    # eigenvalues come back real.
    lap = nx.laplacian_matrix(G, weight=weight).todense()
    return sp.linalg.eigvalsh(lap)
58
+
59
+
60
@nx._dispatchable(edge_attrs="weight")
def normalized_laplacian_spectrum(G, weight="weight"):
    """Return eigenvalues of the normalized Laplacian of G

    Parameters
    ----------
    G : graph
        A NetworkX graph

    weight : string or None, optional (default='weight')
        The edge data key used to compute each value in the matrix.
        If None, then each edge has weight 1.

    Returns
    -------
    evals : NumPy array
        Eigenvalues

    Notes
    -----
    For MultiGraph/MultiDiGraph, the edges weights are summed.
    See to_numpy_array for other options.

    See Also
    --------
    normalized_laplacian_matrix
    """
    import scipy as sp

    # Symmetric matrix -> Hermitian eigenvalue solver, real eigenvalues.
    norm_lap = nx.normalized_laplacian_matrix(G, weight=weight).todense()
    return sp.linalg.eigvalsh(norm_lap)
92
+
93
+
94
@nx._dispatchable(edge_attrs="weight")
def adjacency_spectrum(G, weight="weight"):
    """Returns eigenvalues of the adjacency matrix of G.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    weight : string or None, optional (default='weight')
        The edge data key used to compute each value in the matrix.
        If None, then each edge has weight 1.

    Returns
    -------
    evals : NumPy array
        Eigenvalues

    Notes
    -----
    For MultiGraph/MultiDiGraph, the edges weights are summed.
    See to_numpy_array for other options.

    See Also
    --------
    adjacency_matrix
    """
    import scipy as sp

    # The adjacency matrix of a directed graph need not be symmetric, so use
    # the general (possibly complex) eigenvalue solver.
    adj = nx.adjacency_matrix(G, weight=weight).todense()
    return sp.linalg.eigvals(adj)
124
+
125
+
126
@nx._dispatchable
def modularity_spectrum(G):
    """Returns eigenvalues of the modularity matrix of G.

    Parameters
    ----------
    G : Graph
        A NetworkX Graph or DiGraph

    Returns
    -------
    evals : NumPy array
        Eigenvalues

    See Also
    --------
    modularity_matrix

    References
    ----------
    .. [1] M. E. J. Newman, "Modularity and community structure in networks",
       Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
    """
    import scipy as sp

    # Directed graphs use the directed variant of the modularity matrix; the
    # matrix may be non-symmetric either way, so use the general solver.
    if G.is_directed():
        mod = nx.directed_modularity_matrix(G)
    else:
        mod = nx.modularity_matrix(G)
    return sp.linalg.eigvals(mod)
155
+
156
+
157
@nx._dispatchable
def bethe_hessian_spectrum(G, r=None):
    """Returns eigenvalues of the Bethe Hessian matrix of G.

    Parameters
    ----------
    G : Graph
        A NetworkX Graph or DiGraph

    r : float
        Regularizer parameter

    Returns
    -------
    evals : NumPy array
        Eigenvalues

    See Also
    --------
    bethe_hessian_matrix

    References
    ----------
    .. [1] A. Saade, F. Krzakala and L. Zdeborová
       "Spectral clustering of graphs with the bethe hessian",
       Advances in Neural Information Processing Systems. 2014.
    """
    import scipy as sp

    # The Bethe Hessian is symmetric, so the Hermitian solver applies.
    bethe = nx.bethe_hessian_matrix(G, r).todense()
    return sp.linalg.eigvalsh(bethe)
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__init__.py ADDED
File without changes
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (177 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-310.pyc ADDED
Binary file (5.61 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__pycache__/test_laplacian.cpython-310.pyc ADDED
Binary file (8.16 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/__pycache__/test_spectrum.cpython-310.pyc ADDED
Binary file (3.21 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from math import sqrt
2
+
3
+ import pytest
4
+
5
+ np = pytest.importorskip("numpy")
6
+
7
+
8
+ import networkx as nx
9
+
10
+ methods = ("tracemin_pcg", "tracemin_lu", "lanczos", "lobpcg")
11
+
12
+
13
def test_algebraic_connectivity_tracemin_chol():
    """Test that "tracemin_chol" raises an exception."""
    # The Cholesky-based solver needs an optional dependency and is rejected.
    pytest.importorskip("scipy")
    graph = nx.barbell_graph(5, 4)
    with pytest.raises(nx.NetworkXError):
        nx.algebraic_connectivity(graph, method="tracemin_chol")
19
+
20
+
21
def test_fiedler_vector_tracemin_chol():
    """Test that "tracemin_chol" raises an exception."""
    # Same rejection as for algebraic_connectivity, via the Fiedler-vector API.
    pytest.importorskip("scipy")
    graph = nx.barbell_graph(5, 4)
    with pytest.raises(nx.NetworkXError):
        nx.fiedler_vector(graph, method="tracemin_chol")
27
+
28
+
29
def test_spectral_ordering_tracemin_chol():
    """Test that "tracemin_chol" raises an exception."""
    # Same rejection as above, via the spectral-ordering API.
    pytest.importorskip("scipy")
    graph = nx.barbell_graph(5, 4)
    with pytest.raises(nx.NetworkXError):
        nx.spectral_ordering(graph, method="tracemin_chol")
35
+
36
+
37
def test_fiedler_vector_tracemin_unknown():
    """Test that "tracemin_unknown" raises an exception."""
    pytest.importorskip("scipy")
    graph = nx.barbell_graph(5, 4)
    lap = nx.laplacian_matrix(graph)
    # Random initial guess with one column, shaped (n, 1).
    X = np.asarray(np.random.normal(size=(1, lap.shape[0]))).T
    with pytest.raises(nx.NetworkXError, match="Unknown linear system solver"):
        nx.linalg.algebraicconnectivity._tracemin_fiedler(
            lap, X, normalized=False, tol=1e-8, method="tracemin_unknown"
        )
47
+
48
+
49
def test_spectral_bisection():
    pytest.importorskip("scipy")
    graph = nx.barbell_graph(3, 0)
    halves = nx.spectral_bisection(graph)
    assert halves == ({0, 1, 2}, {3, 4, 5})

    # The bisection must be stable under node relabeling.
    relabel = dict(enumerate("badfec"))
    relabeled = nx.relabel_nodes(graph, relabel)
    halves = nx.spectral_bisection(relabeled)
    expected = (
        {relabel[0], relabel[1], relabel[2]},
        {relabel[3], relabel[4], relabel[5]},
    )
    assert halves == expected
62
+
63
+
64
def check_eigenvector(A, l, x):
    """Assert that `x` is a nonzero eigenvector of `A` with eigenvalue `l`.

    Checks three things: `x` is not (numerically) zero, `A @ x` is collinear
    with `x`, and the scaling factor equals the expected eigenvalue `l`.
    """
    # NOTE: the original local was named `nx`, shadowing the module-level
    # `import networkx as nx`; renamed for clarity.
    norm_x = np.linalg.norm(x)
    # Check zeroness.
    assert norm_x != pytest.approx(0, abs=1e-07)
    y = A @ x
    norm_y = np.linalg.norm(y)
    # Check collinearity.
    assert x @ y == pytest.approx(norm_x * norm_y, abs=1e-7)
    # Check eigenvalue.
    assert norm_y == pytest.approx(l * norm_x, abs=1e-7)
74
+
75
+
76
class TestAlgebraicConnectivity:
    """Tests for ``nx.algebraic_connectivity`` and ``nx.fiedler_vector``.

    Each test is parametrized over the solver ``methods`` defined at module
    level; eigenvalues/eigenvectors are verified with ``check_eigenvector``.
    """

    @pytest.mark.parametrize("method", methods)
    def test_directed(self, method):
        # Directed graphs are not supported by either function.
        G = nx.DiGraph()
        pytest.raises(
            nx.NetworkXNotImplemented, nx.algebraic_connectivity, G, method=method
        )
        pytest.raises(nx.NetworkXNotImplemented, nx.fiedler_vector, G, method=method)

    @pytest.mark.parametrize("method", methods)
    def test_null_and_singleton(self, method):
        # Graphs with fewer than two nodes (even with a self-loop) are errors.
        G = nx.Graph()
        pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method=method)
        pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method)
        G.add_edge(0, 0)
        pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method=method)
        pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method)

    @pytest.mark.parametrize("method", methods)
    def test_disconnected(self, method):
        # A disconnected graph has algebraic connectivity 0 and no Fiedler
        # vector; a zero-weight edge is treated the same as no edge.
        G = nx.Graph()
        G.add_nodes_from(range(2))
        assert nx.algebraic_connectivity(G) == 0
        pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method)
        G.add_edge(0, 1, weight=0)
        assert nx.algebraic_connectivity(G) == 0
        pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method)

    def test_unrecognized_method(self):
        pytest.importorskip("scipy")
        G = nx.path_graph(4)
        pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method="unknown")
        pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method="unknown")

    @pytest.mark.parametrize("method", methods)
    def test_two_nodes(self, method):
        # K2 with unit weight: nonzero Laplacian eigenvalue is exactly 2.
        pytest.importorskip("scipy")
        G = nx.Graph()
        G.add_edge(0, 1, weight=1)
        A = nx.laplacian_matrix(G)
        assert nx.algebraic_connectivity(G, tol=1e-12, method=method) == pytest.approx(
            2, abs=1e-7
        )
        x = nx.fiedler_vector(G, tol=1e-12, method=method)
        check_eigenvector(A, 2, x)

    @pytest.mark.parametrize("method", methods)
    def test_two_nodes_multigraph(self, method):
        pytest.importorskip("scipy")
        G = nx.MultiGraph()
        # Self-loop does not contribute to the Laplacian; the two parallel
        # edges sum to weight -1, so -3 * L has eigenvalue 6.
        G.add_edge(0, 0, spam=1e8)
        G.add_edge(0, 1, spam=1)
        G.add_edge(0, 1, spam=-2)
        A = -3 * nx.laplacian_matrix(G, weight="spam")
        assert nx.algebraic_connectivity(
            G, weight="spam", tol=1e-12, method=method
        ) == pytest.approx(6, abs=1e-7)
        x = nx.fiedler_vector(G, weight="spam", tol=1e-12, method=method)
        check_eigenvector(A, 6, x)

    def test_abbreviation_of_method(self):
        # "tracemin" is accepted as an abbreviation of the tracemin_* methods.
        pytest.importorskip("scipy")
        G = nx.path_graph(8)
        A = nx.laplacian_matrix(G)
        sigma = 2 - sqrt(2 + sqrt(2))
        ac = nx.algebraic_connectivity(G, tol=1e-12, method="tracemin")
        assert ac == pytest.approx(sigma, abs=1e-7)
        x = nx.fiedler_vector(G, tol=1e-12, method="tracemin")
        check_eigenvector(A, sigma, x)

    @pytest.mark.parametrize("method", methods)
    def test_path(self, method):
        # Closed form for P8: sigma = 2 - sqrt(2 + sqrt(2)).
        pytest.importorskip("scipy")
        G = nx.path_graph(8)
        A = nx.laplacian_matrix(G)
        sigma = 2 - sqrt(2 + sqrt(2))
        ac = nx.algebraic_connectivity(G, tol=1e-12, method=method)
        assert ac == pytest.approx(sigma, abs=1e-7)
        x = nx.fiedler_vector(G, tol=1e-12, method=method)
        check_eigenvector(A, sigma, x)

    @pytest.mark.parametrize("method", methods)
    def test_problematic_graph_issue_2381(self, method):
        # Regression test for networkx issue #2381.
        pytest.importorskip("scipy")
        G = nx.path_graph(4)
        G.add_edges_from([(4, 2), (5, 1)])
        A = nx.laplacian_matrix(G)
        sigma = 0.438447187191
        ac = nx.algebraic_connectivity(G, tol=1e-12, method=method)
        assert ac == pytest.approx(sigma, abs=1e-7)
        x = nx.fiedler_vector(G, tol=1e-12, method=method)
        check_eigenvector(A, sigma, x)

    @pytest.mark.parametrize("method", methods)
    def test_cycle(self, method):
        # Closed form for C8: sigma = 2 - sqrt(2).
        pytest.importorskip("scipy")
        G = nx.cycle_graph(8)
        A = nx.laplacian_matrix(G)
        sigma = 2 - sqrt(2)
        ac = nx.algebraic_connectivity(G, tol=1e-12, method=method)
        assert ac == pytest.approx(sigma, abs=1e-7)
        x = nx.fiedler_vector(G, tol=1e-12, method=method)
        check_eigenvector(A, sigma, x)

    @pytest.mark.parametrize("method", methods)
    def test_seed_argument(self, method):
        # Same as test_cycle, but exercising the ``seed`` keyword.
        pytest.importorskip("scipy")
        G = nx.cycle_graph(8)
        A = nx.laplacian_matrix(G)
        sigma = 2 - sqrt(2)
        ac = nx.algebraic_connectivity(G, tol=1e-12, method=method, seed=1)
        assert ac == pytest.approx(sigma, abs=1e-7)
        x = nx.fiedler_vector(G, tol=1e-12, method=method, seed=1)
        check_eigenvector(A, sigma, x)

    @pytest.mark.parametrize(
        ("normalized", "sigma", "laplacian_fn"),
        (
            (False, 0.2434017461399311, nx.laplacian_matrix),
            (True, 0.08113391537997749, nx.normalized_laplacian_matrix),
        ),
    )
    @pytest.mark.parametrize("method", methods)
    def test_buckminsterfullerene(self, normalized, sigma, laplacian_fn, method):
        # C60 "buckyball" graph: 60 nodes, 90 edges.
        pytest.importorskip("scipy")
        G = nx.Graph(
            [
                (1, 10),
                (1, 41),
                (1, 59),
                (2, 12),
                (2, 42),
                (2, 60),
                (3, 6),
                (3, 43),
                (3, 57),
                (4, 8),
                (4, 44),
                (4, 58),
                (5, 13),
                (5, 56),
                (5, 57),
                (6, 10),
                (6, 31),
                (7, 14),
                (7, 56),
                (7, 58),
                (8, 12),
                (8, 32),
                (9, 23),
                (9, 53),
                (9, 59),
                (10, 15),
                (11, 24),
                (11, 53),
                (11, 60),
                (12, 16),
                (13, 14),
                (13, 25),
                (14, 26),
                (15, 27),
                (15, 49),
                (16, 28),
                (16, 50),
                (17, 18),
                (17, 19),
                (17, 54),
                (18, 20),
                (18, 55),
                (19, 23),
                (19, 41),
                (20, 24),
                (20, 42),
                (21, 31),
                (21, 33),
                (21, 57),
                (22, 32),
                (22, 34),
                (22, 58),
                (23, 24),
                (25, 35),
                (25, 43),
                (26, 36),
                (26, 44),
                (27, 51),
                (27, 59),
                (28, 52),
                (28, 60),
                (29, 33),
                (29, 34),
                (29, 56),
                (30, 51),
                (30, 52),
                (30, 53),
                (31, 47),
                (32, 48),
                (33, 45),
                (34, 46),
                (35, 36),
                (35, 37),
                (36, 38),
                (37, 39),
                (37, 49),
                (38, 40),
                (38, 50),
                (39, 40),
                (39, 51),
                (40, 52),
                (41, 47),
                (42, 48),
                (43, 49),
                (44, 50),
                (45, 46),
                (45, 54),
                (46, 55),
                (47, 54),
                (48, 55),
            ]
        )
        A = laplacian_fn(G)
        try:
            assert nx.algebraic_connectivity(
                G, normalized=normalized, tol=1e-12, method=method
            ) == pytest.approx(sigma, abs=1e-7)
            x = nx.fiedler_vector(G, normalized=normalized, tol=1e-12, method=method)
            check_eigenvector(A, sigma, x)
        except nx.NetworkXError as err:
            # Optional sparse solvers may be absent; only those specific
            # failures are tolerated, anything else is a real error.
            if err.args not in (
                ("Cholesky solver unavailable.",),
                ("LU solver unavailable.",),
            ):
                raise
308
+
309
+
310
class TestSpectralOrdering:
    """Tests for ``nx.spectral_ordering`` across all four graph classes."""

    _graphs = (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)

    @pytest.mark.parametrize("graph", _graphs)
    def test_nullgraph(self, graph):
        # An empty graph has no spectral ordering.
        G = graph()
        pytest.raises(nx.NetworkXError, nx.spectral_ordering, G)

    @pytest.mark.parametrize("graph", _graphs)
    def test_singleton(self, graph):
        # A single node orders trivially; self-loops do not change that.
        G = graph()
        G.add_node("x")
        assert nx.spectral_ordering(G) == ["x"]
        G.add_edge("x", "x", weight=33)
        G.add_edge("x", "x", weight=33)
        assert nx.spectral_ordering(G) == ["x"]

    def test_unrecognized_method(self):
        G = nx.path_graph(4)
        pytest.raises(nx.NetworkXError, nx.spectral_ordering, G, method="unknown")

    @pytest.mark.parametrize("method", methods)
    def test_three_nodes(self, method):
        pytest.importorskip("scipy")
        G = nx.Graph()
        G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1)], weight="spam")
        order = nx.spectral_ordering(G, weight="spam", method=method)
        assert set(order) == set(G)
        # The heaviest pair (1, 3) must end up adjacent in the ordering.
        assert {1, 3} in (set(order[:-1]), set(order[1:]))

    @pytest.mark.parametrize("method", methods)
    def test_three_nodes_multigraph(self, method):
        pytest.importorskip("scipy")
        G = nx.MultiDiGraph()
        G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1), (2, 3, 2)])
        order = nx.spectral_ordering(G, method=method)
        assert set(order) == set(G)
        # Parallel edges (2, 3) sum to the heaviest weight, so 2 and 3 are adjacent.
        assert {2, 3} in (set(order[:-1]), set(order[1:]))

    @pytest.mark.parametrize("method", methods)
    def test_path(self, method):
        # A shuffled path must be recovered up to reversal.
        pytest.importorskip("scipy")
        path = list(range(10))
        np.random.shuffle(path)
        G = nx.Graph()
        nx.add_path(G, path)
        order = nx.spectral_ordering(G, method=method)
        assert order in [path, list(reversed(path))]

    @pytest.mark.parametrize("method", methods)
    def test_seed_argument(self, method):
        # Same as test_path, but exercising the ``seed`` keyword.
        pytest.importorskip("scipy")
        path = list(range(10))
        np.random.shuffle(path)
        G = nx.Graph()
        nx.add_path(G, path)
        order = nx.spectral_ordering(G, method=method, seed=1)
        assert order in [path, list(reversed(path))]

    @pytest.mark.parametrize("method", methods)
    def test_disconnected(self, method):
        # Two disjoint 5-paths: each component must appear contiguously,
        # each in path order (either direction).
        pytest.importorskip("scipy")
        G = nx.Graph()
        nx.add_path(G, range(0, 10, 2))
        nx.add_path(G, range(1, 10, 2))
        order = nx.spectral_ordering(G, method=method)
        assert set(order) == set(G)
        seqs = [
            list(range(0, 10, 2)),
            list(range(8, -1, -2)),
            list(range(1, 10, 2)),
            list(range(9, -1, -2)),
        ]
        assert order[:5] in seqs
        assert order[5:] in seqs

    @pytest.mark.parametrize(
        ("normalized", "expected_order"),
        (
            (False, [[1, 2, 0, 3, 4, 5, 6, 9, 7, 8], [8, 7, 9, 6, 5, 4, 3, 0, 2, 1]]),
            (True, [[1, 2, 3, 0, 4, 5, 9, 6, 7, 8], [8, 7, 6, 9, 5, 4, 0, 3, 2, 1]]),
        ),
    )
    @pytest.mark.parametrize("method", methods)
    def test_cycle(self, normalized, expected_order, method):
        # A weighted cycle with one weak link; the ordering should "cut"
        # at the light edge. Expected results differ with normalization.
        pytest.importorskip("scipy")
        path = list(range(10))
        G = nx.Graph()
        nx.add_path(G, path, weight=5)
        G.add_edge(path[-1], path[0], weight=1)
        A = nx.laplacian_matrix(G).todense()
        order = nx.spectral_ordering(G, normalized=normalized, method=method)
        assert order in expected_order
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_attrmatrix.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ np = pytest.importorskip("numpy")
4
+
5
+ import networkx as nx
6
+
7
+
8
def test_attr_matrix():
    """attr_matrix with callable node and edge attribute functions."""
    G = nx.Graph()
    # Adding (0, 1) twice simply overwrites its data in a simple Graph.
    G.add_edges_from(
        [
            (0, 1, {"thickness": 1, "weight": 3}),
            (0, 1, {"thickness": 1, "weight": 3}),
            (0, 2, {"thickness": 2}),
            (1, 2, {"thickness": 3}),
        ]
    )

    def node_attr(u):
        # No node carries "size", so every node maps to 0.5 * 3 == 1.5.
        return G.nodes[u].get("size", 0.5) * 3

    def edge_attr(u, v):
        return G[u][v].get("thickness", 0.5)

    matrix, ordering = nx.attr_matrix(G, edge_attr=edge_attr, node_attr=node_attr)
    # All nodes collapse to the single attribute value 1.5, whose total
    # edge thickness is 1 + 2 + 3 = 6.
    np.testing.assert_equal(matrix, np.array([[6.0]]))
    assert ordering == [1.5]
24
+
25
+
26
def test_attr_matrix_directed():
    """Default attr_matrix on a DiGraph is the (directed) 0/1 adjacency matrix."""
    G = nx.DiGraph()
    # Re-adding (0, 1) just overwrites the edge data.
    G.add_edge(0, 1, thickness=1, weight=3)
    G.add_edge(0, 1, thickness=1, weight=3)
    G.add_edge(0, 2, thickness=2)
    G.add_edge(1, 2, thickness=3)
    M = nx.attr_matrix(G, rc_order=[0, 1, 2])
    # Build the expected matrix directly from the edge set; direction matters.
    expected = np.zeros((3, 3))
    for u, v in G.edges():
        expected[u, v] = 1.0
    np.testing.assert_equal(M, expected)
41
+
42
+
43
def test_attr_matrix_multigraph():
    """attr_matrix on a MultiGraph: parallel edges aggregate per attribute."""
    G = nx.MultiGraph()
    # Three parallel (0, 1) edges are kept, not overwritten.
    G.add_edge(0, 1, thickness=1, weight=3)
    G.add_edge(0, 1, thickness=1, weight=3)
    G.add_edge(0, 1, thickness=1, weight=3)
    G.add_edge(0, 2, thickness=2)
    G.add_edge(1, 2, thickness=3)
    # Default edge_attr counts parallel edges.
    M = nx.attr_matrix(G, rc_order=[0, 1, 2])
    # fmt: off
    data = np.array(
        [[0., 3., 1.],
         [3., 0., 1.],
         [1., 1., 0.]]
    )
    # fmt: on
    np.testing.assert_equal(M, np.array(data))
    # "weight" sums over parallel edges: 3 * 3 == 9 for (0, 1).
    M = nx.attr_matrix(G, edge_attr="weight", rc_order=[0, 1, 2])
    # fmt: off
    data = np.array(
        [[0., 9., 1.],
         [9., 0., 1.],
         [1., 1., 0.]]
    )
    # fmt: on
    np.testing.assert_equal(M, np.array(data))
    # "thickness" sums to 1 + 1 + 1 == 3 for (0, 1).
    M = nx.attr_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2])
    # fmt: off
    data = np.array(
        [[0., 3., 2.],
         [3., 0., 3.],
         [2., 3., 0.]]
    )
    # fmt: on
    np.testing.assert_equal(M, np.array(data))
77
+
78
+
79
def test_attr_sparse_matrix():
    """attr_sparse_matrix defaults to the 0/1 adjacency structure."""
    pytest.importorskip("scipy")
    G = nx.Graph()
    G.add_edges_from(
        [
            (0, 1, {"thickness": 1, "weight": 3}),
            (0, 2, {"thickness": 2}),
            (1, 2, {"thickness": 3}),
        ]
    )
    mtx, ordering = nx.attr_sparse_matrix(G)
    # The graph is a triangle: ones everywhere off the diagonal.
    expected = np.ones((3, 3), float) - np.eye(3)
    np.testing.assert_equal(mtx.todense(), expected)
    assert ordering == [0, 1, 2]
91
+
92
+
93
def test_attr_sparse_matrix_directed():
    """attr_sparse_matrix on a DiGraph keeps the matrix asymmetric."""
    pytest.importorskip("scipy")
    G = nx.DiGraph()
    # Re-adding (0, 1) just overwrites the edge data.
    G.add_edge(0, 1, thickness=1, weight=3)
    G.add_edge(0, 1, thickness=1, weight=3)
    G.add_edge(0, 2, thickness=2)
    G.add_edge(1, 2, thickness=3)
    M = nx.attr_sparse_matrix(G, rc_order=[0, 1, 2])
    # Derive the expected matrix from the edge set; direction matters.
    expected = np.zeros((3, 3))
    for u, v in G.edges():
        expected[u, v] = 1.0
    np.testing.assert_equal(M.todense(), expected)
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_graphmatrix.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ np = pytest.importorskip("numpy")
4
+ pytest.importorskip("scipy")
5
+
6
+ import networkx as nx
7
+ from networkx.exception import NetworkXError
8
+ from networkx.generators.degree_seq import havel_hakimi_graph
9
+
10
+
11
+ def test_incidence_matrix_simple():
12
+ deg = [3, 2, 2, 1, 0]
13
+ G = havel_hakimi_graph(deg)
14
+ deg = [(1, 0), (1, 0), (1, 0), (2, 0), (1, 0), (2, 1), (0, 1), (0, 1)]
15
+ MG = nx.random_clustered_graph(deg, seed=42)
16
+
17
+ I = nx.incidence_matrix(G, dtype=int).todense()
18
+ # fmt: off
19
+ expected = np.array(
20
+ [[1, 1, 1, 0],
21
+ [0, 1, 0, 1],
22
+ [1, 0, 0, 1],
23
+ [0, 0, 1, 0],
24
+ [0, 0, 0, 0]]
25
+ )
26
+ # fmt: on
27
+ np.testing.assert_equal(I, expected)
28
+
29
+ I = nx.incidence_matrix(MG, dtype=int).todense()
30
+ # fmt: off
31
+ expected = np.array(
32
+ [[1, 0, 0, 0, 0, 0, 0],
33
+ [1, 0, 0, 0, 0, 0, 0],
34
+ [0, 1, 0, 0, 0, 0, 0],
35
+ [0, 0, 0, 0, 0, 0, 0],
36
+ [0, 1, 0, 0, 0, 0, 0],
37
+ [0, 0, 0, 0, 1, 1, 0],
38
+ [0, 0, 0, 0, 0, 1, 1],
39
+ [0, 0, 0, 0, 1, 0, 1]]
40
+ )
41
+ # fmt: on
42
+ np.testing.assert_equal(I, expected)
43
+
44
+ with pytest.raises(NetworkXError):
45
+ nx.incidence_matrix(G, nodelist=[0, 1])
46
+
47
+
48
class TestGraphMatrix:
    """Tests for incidence and adjacency matrices over (multi/weighted) graphs."""

    @classmethod
    def setup_class(cls):
        # Base graph from a fixed degree sequence (node 4 is isolated).
        deg = [3, 2, 2, 1, 0]
        cls.G = havel_hakimi_graph(deg)
        # Oriented incidence matrix of cls.G (rows: nodes, cols: sorted edges).
        # fmt: off
        cls.OI = np.array(
            [[-1, -1, -1, 0],
             [1, 0, 0, -1],
             [0, 1, 0, 1],
             [0, 0, 1, 0],
             [0, 0, 0, 0]]
        )
        # Adjacency matrix of cls.G.
        cls.A = np.array(
            [[0, 1, 1, 1, 0],
             [1, 0, 1, 0, 0],
             [1, 1, 0, 0, 0],
             [1, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]]
        )
        # fmt: on
        # Same topology with "weight" and "other" attributes on every edge.
        cls.WG = havel_hakimi_graph(deg)
        cls.WG.add_edges_from(
            (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges()
        )
        # fmt: off
        cls.WA = np.array(
            [[0, 0.5, 0.5, 0.5, 0],
             [0.5, 0, 0.5, 0, 0],
             [0.5, 0.5, 0, 0, 0],
             [0.5, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]]
        )
        # fmt: on
        # Multigraph copies; MG2 gains a parallel (0, 1) edge.
        cls.MG = nx.MultiGraph(cls.G)
        cls.MG2 = cls.MG.copy()
        cls.MG2.add_edge(0, 1)
        # fmt: off
        cls.MG2A = np.array(
            [[0, 2, 1, 1, 0],
             [2, 0, 1, 0, 0],
             [1, 1, 0, 0, 0],
             [1, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]]
        )
        # Oriented incidence matrix of MG2 (the parallel edge adds a column).
        cls.MGOI = np.array(
            [[-1, -1, -1, -1, 0],
             [1, 1, 0, 0, -1],
             [0, 0, 1, 0, 1],
             [0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0]]
        )
        # fmt: on
        # Graph whose requested nodelist [1, 3] spans no edges.
        cls.no_edges_G = nx.Graph([(1, 2), (3, 2, {"weight": 8})])
        cls.no_edges_A = np.array([[0, 0], [0, 0]])

    def test_incidence_matrix(self):
        "Conversion to incidence matrix"
        I = nx.incidence_matrix(
            self.G,
            nodelist=sorted(self.G),
            edgelist=sorted(self.G.edges()),
            oriented=True,
            dtype=int,
        ).todense()
        np.testing.assert_equal(I, self.OI)

        # Unoriented variant drops the signs.
        I = nx.incidence_matrix(
            self.G,
            nodelist=sorted(self.G),
            edgelist=sorted(self.G.edges()),
            oriented=False,
            dtype=int,
        ).todense()
        np.testing.assert_equal(I, np.abs(self.OI))

        # A MultiGraph with no parallel edges matches the simple graph.
        I = nx.incidence_matrix(
            self.MG,
            nodelist=sorted(self.MG),
            edgelist=sorted(self.MG.edges()),
            oriented=True,
            dtype=int,
        ).todense()
        np.testing.assert_equal(I, self.OI)

        I = nx.incidence_matrix(
            self.MG,
            nodelist=sorted(self.MG),
            edgelist=sorted(self.MG.edges()),
            oriented=False,
            dtype=int,
        ).todense()
        np.testing.assert_equal(I, np.abs(self.OI))

        # With a parallel edge, an extra column appears.
        I = nx.incidence_matrix(
            self.MG2,
            nodelist=sorted(self.MG2),
            edgelist=sorted(self.MG2.edges()),
            oriented=True,
            dtype=int,
        ).todense()
        np.testing.assert_equal(I, self.MGOI)

        I = nx.incidence_matrix(
            self.MG2,
            nodelist=sorted(self.MG),
            edgelist=sorted(self.MG2.edges()),
            oriented=False,
            dtype=int,
        ).todense()
        np.testing.assert_equal(I, np.abs(self.MGOI))

        # The requested dtype is honored.
        I = nx.incidence_matrix(self.G, dtype=np.uint8)
        assert I.dtype == np.uint8

    def test_weighted_incidence_matrix(self):
        """Edge weights scale the incidence matrix entries."""
        # With integer dtype and no weight key, entries stay +-1.
        I = nx.incidence_matrix(
            self.WG,
            nodelist=sorted(self.WG),
            edgelist=sorted(self.WG.edges()),
            oriented=True,
            dtype=int,
        ).todense()
        np.testing.assert_equal(I, self.OI)

        I = nx.incidence_matrix(
            self.WG,
            nodelist=sorted(self.WG),
            edgelist=sorted(self.WG.edges()),
            oriented=False,
            dtype=int,
        ).todense()
        np.testing.assert_equal(I, np.abs(self.OI))

        # np.testing.assert_equal(nx.incidence_matrix(self.WG,oriented=True,
        #                                     weight='weight').todense(),0.5*self.OI)
        # np.testing.assert_equal(nx.incidence_matrix(self.WG,weight='weight').todense(),
        #              np.abs(0.5*self.OI))
        # np.testing.assert_equal(nx.incidence_matrix(self.WG,oriented=True,weight='other').todense(),
        #              0.3*self.OI)

        # "weight" == 0.5 on every edge scales the oriented matrix by 0.5.
        I = nx.incidence_matrix(
            self.WG,
            nodelist=sorted(self.WG),
            edgelist=sorted(self.WG.edges()),
            oriented=True,
            weight="weight",
        ).todense()
        np.testing.assert_equal(I, 0.5 * self.OI)

        I = nx.incidence_matrix(
            self.WG,
            nodelist=sorted(self.WG),
            edgelist=sorted(self.WG.edges()),
            oriented=False,
            weight="weight",
        ).todense()
        np.testing.assert_equal(I, np.abs(0.5 * self.OI))

        # An alternative attribute key ("other" == 0.3) is also honored.
        I = nx.incidence_matrix(
            self.WG,
            nodelist=sorted(self.WG),
            edgelist=sorted(self.WG.edges()),
            oriented=True,
            weight="other",
        ).todense()
        np.testing.assert_equal(I, 0.3 * self.OI)

        # WMG=nx.MultiGraph(self.WG)
        # WMG.add_edge(0,1,weight=0.5,other=0.3)
        # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='weight').todense(),
        #              np.abs(0.5*self.MGOI))
        # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='weight',oriented=True).todense(),
        #              0.5*self.MGOI)
        # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='other',oriented=True).todense(),
        #              0.3*self.MGOI)

        # Weighted multigraph: parallel edges need keys in the edgelist.
        WMG = nx.MultiGraph(self.WG)
        WMG.add_edge(0, 1, weight=0.5, other=0.3)

        I = nx.incidence_matrix(
            WMG,
            nodelist=sorted(WMG),
            edgelist=sorted(WMG.edges(keys=True)),
            oriented=True,
            weight="weight",
        ).todense()
        np.testing.assert_equal(I, 0.5 * self.MGOI)

        I = nx.incidence_matrix(
            WMG,
            nodelist=sorted(WMG),
            edgelist=sorted(WMG.edges(keys=True)),
            oriented=False,
            weight="weight",
        ).todense()
        np.testing.assert_equal(I, np.abs(0.5 * self.MGOI))

        I = nx.incidence_matrix(
            WMG,
            nodelist=sorted(WMG),
            edgelist=sorted(WMG.edges(keys=True)),
            oriented=True,
            weight="other",
        ).todense()
        np.testing.assert_equal(I, 0.3 * self.MGOI)

    def test_adjacency_matrix(self):
        "Conversion to adjacency matrix"
        np.testing.assert_equal(nx.adjacency_matrix(self.G).todense(), self.A)
        np.testing.assert_equal(nx.adjacency_matrix(self.MG).todense(), self.A)
        np.testing.assert_equal(nx.adjacency_matrix(self.MG2).todense(), self.MG2A)
        # A nodelist restricts the matrix to the induced subgraph.
        np.testing.assert_equal(
            nx.adjacency_matrix(self.G, nodelist=[0, 1]).todense(), self.A[:2, :2]
        )
        np.testing.assert_equal(nx.adjacency_matrix(self.WG).todense(), self.WA)
        # weight=None falls back to 0/1 entries.
        np.testing.assert_equal(
            nx.adjacency_matrix(self.WG, weight=None).todense(), self.A
        )
        np.testing.assert_equal(
            nx.adjacency_matrix(self.MG2, weight=None).todense(), self.MG2A
        )
        # "other" is 0.3 per edge == 0.6 * WA's 0.5 entries.
        np.testing.assert_equal(
            nx.adjacency_matrix(self.WG, weight="other").todense(), 0.6 * self.WA
        )
        # A nodelist spanning no edges yields an all-zero matrix.
        np.testing.assert_equal(
            nx.adjacency_matrix(self.no_edges_G, nodelist=[1, 3]).todense(),
            self.no_edges_A,
        )
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_laplacian.py ADDED
@@ -0,0 +1,336 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ np = pytest.importorskip("numpy")
4
+ pytest.importorskip("scipy")
5
+
6
+ import networkx as nx
7
+ from networkx.generators.degree_seq import havel_hakimi_graph
8
+ from networkx.generators.expanders import margulis_gabber_galil_graph
9
+
10
+
11
class TestLaplacian:
    """Tests for the (normalized) graph Laplacian over several graph types."""

    @classmethod
    def setup_class(cls):
        deg = [3, 2, 2, 1, 0]
        cls.G = havel_hakimi_graph(deg)
        # Same topology with uniform "weight" and "other" edge attributes.
        cls.WG = nx.Graph(
            (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges()
        )
        cls.WG.add_node(4)
        cls.MG = nx.MultiGraph(cls.G)

        # Graph with self-loops on every node.
        cls.Gsl = cls.G.copy()
        for node in cls.Gsl.nodes():
            cls.Gsl.add_edge(node, node)

        # Graph used as an example in Sec. 4.1 of Langville and Meyer,
        # "Google's PageRank and Beyond".
        cls.DiG = nx.DiGraph()
        cls.DiG.add_edges_from(
            (
                (1, 2),
                (1, 3),
                (3, 1),
                (3, 2),
                (3, 5),
                (4, 5),
                (4, 6),
                (5, 4),
                (5, 6),
                (6, 4),
            )
        )
        cls.DiMG = nx.MultiDiGraph(cls.DiG)
        cls.DiWG = nx.DiGraph(
            (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.DiG.edges()
        )
        # Directed graph with self-loops added on every node.
        cls.DiGsl = cls.DiG.copy()
        for node in cls.DiGsl.nodes():
            cls.DiGsl.add_edge(node, node)

    def test_laplacian(self):
        "Graph Laplacian"
        # Expected L = D - A for the undirected test graph.
        # fmt: off
        NL = np.array([[ 3, -1, -1, -1, 0],
                       [-1, 2, -1, 0, 0],
                       [-1, -1, 2, 0, 0],
                       [-1, 0, 0, 1, 0],
                       [ 0, 0, 0, 0, 0]])
        # fmt: on
        # Uniform edge weights simply scale the Laplacian.
        WL = 0.5 * NL
        OL = 0.3 * NL
        # Expected (out-degree) Laplacian for the directed test graph.
        # fmt: off
        DiNL = np.array([[ 2, -1, -1, 0, 0, 0],
                         [ 0, 0, 0, 0, 0, 0],
                         [-1, -1, 3, -1, 0, 0],
                         [ 0, 0, 0, 2, -1, -1],
                         [ 0, 0, 0, -1, 2, -1],
                         [ 0, 0, 0, 0, -1, 1]])
        # fmt: on
        DiWL = 0.5 * DiNL
        DiOL = 0.3 * DiNL
        np.testing.assert_equal(nx.laplacian_matrix(self.G).todense(), NL)
        np.testing.assert_equal(nx.laplacian_matrix(self.MG).todense(), NL)
        np.testing.assert_equal(
            nx.laplacian_matrix(self.G, nodelist=[0, 1]).todense(),
            np.array([[1, -1], [-1, 1]]),
        )
        np.testing.assert_equal(nx.laplacian_matrix(self.WG).todense(), WL)
        np.testing.assert_equal(nx.laplacian_matrix(self.WG, weight=None).todense(), NL)
        np.testing.assert_equal(
            nx.laplacian_matrix(self.WG, weight="other").todense(), OL
        )

        np.testing.assert_equal(nx.laplacian_matrix(self.DiG).todense(), DiNL)
        np.testing.assert_equal(nx.laplacian_matrix(self.DiMG).todense(), DiNL)
        np.testing.assert_equal(
            nx.laplacian_matrix(self.DiG, nodelist=[1, 2]).todense(),
            np.array([[1, -1], [0, 0]]),
        )
        np.testing.assert_equal(nx.laplacian_matrix(self.DiWG).todense(), DiWL)
        np.testing.assert_equal(
            nx.laplacian_matrix(self.DiWG, weight=None).todense(), DiNL
        )
        np.testing.assert_equal(
            nx.laplacian_matrix(self.DiWG, weight="other").todense(), DiOL
        )

    def test_normalized_laplacian(self):
        "Generalized Graph Laplacian"
        # Expected normalized Laplacians; entries to 3-4 decimal places.
        # Uniform weights cancel in the normalization, so WG matches GL too.
        # fmt: off
        G = np.array([[ 1. , -0.408, -0.408, -0.577, 0.],
                      [-0.408, 1. , -0.5 , 0. , 0.],
                      [-0.408, -0.5 , 1. , 0. , 0.],
                      [-0.577, 0. , 0. , 1. , 0.],
                      [ 0. , 0. , 0. , 0. , 0.]])
        GL = np.array([[ 1. , -0.408, -0.408, -0.577, 0. ],
                       [-0.408, 1. , -0.5 , 0. , 0. ],
                       [-0.408, -0.5 , 1. , 0. , 0. ],
                       [-0.577, 0. , 0. , 1. , 0. ],
                       [ 0. , 0. , 0. , 0. , 0. ]])
        Lsl = np.array([[ 0.75 , -0.2887, -0.2887, -0.3536, 0. ],
                        [-0.2887, 0.6667, -0.3333, 0. , 0. ],
                        [-0.2887, -0.3333, 0.6667, 0. , 0. ],
                        [-0.3536, 0. , 0. , 0.5 , 0. ],
                        [ 0. , 0. , 0. , 0. , 0. ]])

        DiG = np.array([[ 1. , 0. , -0.4082, 0. , 0. , 0. ],
                        [ 0. , 0. , 0. , 0. , 0. , 0. ],
                        [-0.4082, 0. , 1. , 0. , -0.4082, 0. ],
                        [ 0. , 0. , 0. , 1. , -0.5 , -0.7071],
                        [ 0. , 0. , 0. , -0.5 , 1. , -0.7071],
                        [ 0. , 0. , 0. , -0.7071, 0. , 1. ]])
        DiGL = np.array([[ 1. , 0. , -0.4082, 0. , 0. , 0. ],
                         [ 0. , 0. , 0. , 0. , 0. , 0. ],
                         [-0.4082, 0. , 1. , -0.4082, 0. , 0. ],
                         [ 0. , 0. , 0. , 1. , -0.5 , -0.7071],
                         [ 0. , 0. , 0. , -0.5 , 1. , -0.7071],
                         [ 0. , 0. , 0. , 0. , -0.7071, 1. ]])
        DiLsl = np.array([[ 0.6667, -0.5774, -0.2887, 0. , 0. , 0. ],
                          [ 0. , 0. , 0. , 0. , 0. , 0. ],
                          [-0.2887, -0.5 , 0.75 , -0.2887, 0. , 0. ],
                          [ 0. , 0. , 0. , 0.6667, -0.3333, -0.4082],
                          [ 0. , 0. , 0. , -0.3333, 0.6667, -0.4082],
                          [ 0. , 0. , 0. , 0. , -0.4082, 0.5 ]])
        # fmt: on

        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.G, nodelist=range(5)).todense(),
            G,
            decimal=3,
        )
        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.G).todense(), GL, decimal=3
        )
        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.MG).todense(), GL, decimal=3
        )
        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.WG).todense(), GL, decimal=3
        )
        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.WG, weight="other").todense(),
            GL,
            decimal=3,
        )
        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.Gsl).todense(), Lsl, decimal=3
        )

        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(
                self.DiG,
                nodelist=range(1, 1 + 6),
            ).todense(),
            DiG,
            decimal=3,
        )
        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.DiG).todense(), DiGL, decimal=3
        )
        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.DiMG).todense(), DiGL, decimal=3
        )
        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.DiWG).todense(), DiGL, decimal=3
        )
        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.DiWG, weight="other").todense(),
            DiGL,
            decimal=3,
        )
        np.testing.assert_almost_equal(
            nx.normalized_laplacian_matrix(self.DiGsl).todense(), DiLsl, decimal=3
        )
+ )
186
+
187
+
188
def test_directed_laplacian():
    """Directed Laplacian under pagerank, random, and lazy walk types."""
    # Graph used as an example in Sec. 4.1 of Langville and Meyer,
    # "Google's PageRank and Beyond". The graph contains dangling nodes, so
    # the pagerank random walk is selected by directed_laplacian
    G = nx.DiGraph()
    G.add_edges_from(
        (
            (1, 2),
            (1, 3),
            (3, 1),
            (3, 2),
            (3, 5),
            (4, 5),
            (4, 6),
            (5, 4),
            (5, 6),
            (6, 4),
        )
    )
    # fmt: off
    GL = np.array([[ 0.9833, -0.2941, -0.3882, -0.0291, -0.0231, -0.0261],
                   [-0.2941, 0.8333, -0.2339, -0.0536, -0.0589, -0.0554],
                   [-0.3882, -0.2339, 0.9833, -0.0278, -0.0896, -0.0251],
                   [-0.0291, -0.0536, -0.0278, 0.9833, -0.4878, -0.6675],
                   [-0.0231, -0.0589, -0.0896, -0.4878, 0.9833, -0.2078],
                   [-0.0261, -0.0554, -0.0251, -0.6675, -0.2078, 0.9833]])
    # fmt: on
    L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G))
    np.testing.assert_almost_equal(L, GL, decimal=3)

    # Make the graph strongly connected, so we can use a random and lazy walk
    G.add_edges_from(((2, 5), (6, 1)))
    # fmt: off
    GL = np.array([[ 1. , -0.3062, -0.4714, 0. , 0. , -0.3227],
                   [-0.3062, 1. , -0.1443, 0. , -0.3162, 0. ],
                   [-0.4714, -0.1443, 1. , 0. , -0.0913, 0. ],
                   [ 0. , 0. , 0. , 1. , -0.5 , -0.5 ],
                   [ 0. , -0.3162, -0.0913, -0.5 , 1. , -0.25 ],
                   [-0.3227, 0. , 0. , -0.5 , -0.25 , 1. ]])
    # fmt: on
    L = nx.directed_laplacian_matrix(
        G, alpha=0.9, nodelist=sorted(G), walk_type="random"
    )
    np.testing.assert_almost_equal(L, GL, decimal=3)

    # The lazy walk halves the transition probabilities, halving the matrix.
    # fmt: off
    GL = np.array([[ 0.5 , -0.1531, -0.2357, 0. , 0. , -0.1614],
                   [-0.1531, 0.5 , -0.0722, 0. , -0.1581, 0. ],
                   [-0.2357, -0.0722, 0.5 , 0. , -0.0456, 0. ],
                   [ 0. , 0. , 0. , 0.5 , -0.25 , -0.25 ],
                   [ 0. , -0.1581, -0.0456, -0.25 , 0.5 , -0.125 ],
                   [-0.1614, 0. , 0. , -0.25 , -0.125 , 0.5 ]])
    # fmt: on
    L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G), walk_type="lazy")
    np.testing.assert_almost_equal(L, GL, decimal=3)

    # Make a strongly connected periodic graph
    G = nx.DiGraph()
    G.add_edges_from(((1, 2), (2, 4), (4, 1), (1, 3), (3, 4)))
    # fmt: off
    GL = np.array([[ 0.5 , -0.176, -0.176, -0.25 ],
                   [-0.176, 0.5 , 0. , -0.176],
                   [-0.176, 0. , 0.5 , -0.176],
                   [-0.25 , -0.176, -0.176, 0.5 ]])
    # fmt: on
    L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G))
    np.testing.assert_almost_equal(L, GL, decimal=3)
256
+
257
+
258
def test_directed_combinatorial_laplacian():
    """Directed combinatorial Laplacian and its error conditions."""
    # Graph used as an example in Sec. 4.1 of Langville and Meyer,
    # "Google's PageRank and Beyond". The graph contains dangling nodes, so
    # the pagerank random walk is selected by directed_laplacian
    G = nx.DiGraph()
    G.add_edges_from(
        (
            (1, 2),
            (1, 3),
            (3, 1),
            (3, 2),
            (3, 5),
            (4, 5),
            (4, 6),
            (5, 4),
            (5, 6),
            (6, 4),
        )
    )
    # fmt: off
    GL = np.array([[ 0.0366, -0.0132, -0.0153, -0.0034, -0.0020, -0.0027],
                   [-0.0132, 0.0450, -0.0111, -0.0076, -0.0062, -0.0069],
                   [-0.0153, -0.0111, 0.0408, -0.0035, -0.0083, -0.0027],
                   [-0.0034, -0.0076, -0.0035, 0.3688, -0.1356, -0.2187],
                   [-0.0020, -0.0062, -0.0083, -0.1356, 0.2026, -0.0505],
                   [-0.0027, -0.0069, -0.0027, -0.2187, -0.0505, 0.2815]])
    # fmt: on

    L = nx.directed_combinatorial_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G))
    np.testing.assert_almost_equal(L, GL, decimal=3)

    # Make the graph strongly connected, so we can use a random and lazy walk
    G.add_edges_from(((2, 5), (6, 1)))

    # fmt: off
    GL = np.array([[ 0.1395, -0.0349, -0.0465, 0. , 0. , -0.0581],
                   [-0.0349, 0.093 , -0.0116, 0. , -0.0465, 0. ],
                   [-0.0465, -0.0116, 0.0698, 0. , -0.0116, 0. ],
                   [ 0. , 0. , 0. , 0.2326, -0.1163, -0.1163],
                   [ 0. , -0.0465, -0.0116, -0.1163, 0.2326, -0.0581],
                   [-0.0581, 0. , 0. , -0.1163, -0.0581, 0.2326]])
    # fmt: on

    L = nx.directed_combinatorial_laplacian_matrix(
        G, alpha=0.9, nodelist=sorted(G), walk_type="random"
    )
    np.testing.assert_almost_equal(L, GL, decimal=3)

    # The lazy walk halves the transition probabilities, halving the matrix.
    # fmt: off
    GL = np.array([[ 0.0698, -0.0174, -0.0233, 0. , 0. , -0.0291],
                   [-0.0174, 0.0465, -0.0058, 0. , -0.0233, 0. ],
                   [-0.0233, -0.0058, 0.0349, 0. , -0.0058, 0. ],
                   [ 0. , 0. , 0. , 0.1163, -0.0581, -0.0581],
                   [ 0. , -0.0233, -0.0058, -0.0581, 0.1163, -0.0291],
                   [-0.0291, 0. , 0. , -0.0581, -0.0291, 0.1163]])
    # fmt: on

    L = nx.directed_combinatorial_laplacian_matrix(
        G, alpha=0.9, nodelist=sorted(G), walk_type="lazy"
    )
    np.testing.assert_almost_equal(L, GL, decimal=3)

    # A small expander graph with a known exact result.
    E = nx.DiGraph(margulis_gabber_galil_graph(2))
    L = nx.directed_combinatorial_laplacian_matrix(E)
    # fmt: off
    expected = np.array(
        [[ 0.16666667, -0.08333333, -0.08333333, 0. ],
         [-0.08333333, 0.16666667, 0. , -0.08333333],
         [-0.08333333, 0. , 0.16666667, -0.08333333],
         [ 0. , -0.08333333, -0.08333333, 0.16666667]]
    )
    # fmt: on
    np.testing.assert_almost_equal(L, expected, decimal=6)

    # alpha outside (0, 1) and unknown walk types are rejected.
    with pytest.raises(nx.NetworkXError):
        nx.directed_combinatorial_laplacian_matrix(G, walk_type="pagerank", alpha=100)
    with pytest.raises(nx.NetworkXError):
        nx.directed_combinatorial_laplacian_matrix(G, walk_type="silly")
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_modularity.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ np = pytest.importorskip("numpy")
4
+ pytest.importorskip("scipy")
5
+
6
+ import networkx as nx
7
+ from networkx.generators.degree_seq import havel_hakimi_graph
8
+
9
+
10
class TestModularity:
    """Tests for the (directed) modularity matrix routines."""

    @classmethod
    def setup_class(cls):
        cls.G = havel_hakimi_graph([3, 2, 2, 1, 0])
        # Graph used as an example in Sec. 4.1 of Langville and Meyer,
        # "Google's PageRank and Beyond". (Used for test_directed_laplacian)
        cls.DG = nx.DiGraph(
            [
                (1, 2),
                (1, 3),
                (3, 1),
                (3, 2),
                (3, 5),
                (4, 5),
                (4, 6),
                (5, 4),
                (5, 6),
                (6, 4),
            ]
        )

    def test_modularity(self):
        "Modularity matrix"
        # fmt: off
        expected = np.array([[-1.125, 0.25, 0.25, 0.625, 0.],
                             [0.25, -0.5, 0.5, -0.25, 0.],
                             [0.25, 0.5, -0.5, -0.25, 0.],
                             [0.625, -0.25, -0.25, -0.125, 0.],
                             [0., 0., 0., 0., 0.]])
        # fmt: on

        perm = [4, 0, 1, 2, 3]
        np.testing.assert_equal(nx.modularity_matrix(self.G), expected)
        # A permuted nodelist must permute both rows and columns accordingly.
        np.testing.assert_equal(
            nx.modularity_matrix(self.G, nodelist=perm),
            expected[np.ix_(perm, perm)],
        )

    def test_modularity_weight(self):
        "Modularity matrix with weights"
        # fmt: off
        expected = np.array([[-1.125, 0.25, 0.25, 0.625, 0.],
                             [0.25, -0.5, 0.5, -0.25, 0.],
                             [0.25, 0.5, -0.5, -0.25, 0.],
                             [0.625, -0.25, -0.25, -0.125, 0.],
                             [0., 0., 0., 0., 0.]])
        # fmt: on

        weighted = self.G.copy()
        for n1, n2 in weighted.edges():
            weighted.edges[n1, n2]["weight"] = 0.5
        # The following test would fail in networkx 1.1
        np.testing.assert_equal(nx.modularity_matrix(weighted), expected)
        # The following test that the modularity matrix get rescaled accordingly
        np.testing.assert_equal(
            nx.modularity_matrix(weighted, weight="weight"), 0.5 * expected
        )

    def test_directed_modularity(self):
        "Directed Modularity matrix"
        # fmt: off
        expected = np.array([[-0.2, 0.6, 0.8, -0.4, -0.4, -0.4],
                             [0., 0., 0., 0., 0., 0.],
                             [0.7, 0.4, -0.3, -0.6, 0.4, -0.6],
                             [-0.2, -0.4, -0.2, -0.4, 0.6, 0.6],
                             [-0.2, -0.4, -0.2, 0.6, -0.4, 0.6],
                             [-0.1, -0.2, -0.1, 0.8, -0.2, -0.2]])
        # fmt: on
        node_perm = [5, 1, 2, 3, 4, 6]
        idx_perm = [4, 0, 1, 2, 3, 5]
        np.testing.assert_equal(
            nx.directed_modularity_matrix(self.DG, nodelist=sorted(self.DG)), expected
        )
        np.testing.assert_equal(
            nx.directed_modularity_matrix(self.DG, nodelist=node_perm),
            expected[np.ix_(idx_perm, idx_perm)],
        )
mplug_owl2/lib/python3.10/site-packages/networkx/linalg/tests/test_spectrum.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ np = pytest.importorskip("numpy")
4
+ pytest.importorskip("scipy")
5
+
6
+ import networkx as nx
7
+ from networkx.generators.degree_seq import havel_hakimi_graph
8
+
9
+
10
class TestSpectrum:
    """Tests for the graph-spectrum convenience functions."""

    @classmethod
    def setup_class(cls):
        cls.G = havel_hakimi_graph([3, 2, 2, 1, 0])
        cls.P = nx.path_graph(3)
        cls.WG = nx.Graph(
            (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges()
        )
        cls.WG.add_node(4)
        cls.DG = nx.DiGraph()
        nx.add_path(cls.DG, [0, 1, 2])

    def test_laplacian_spectrum(self):
        "Laplacian eigenvalues"
        expected = np.array([0, 0, 1, 3, 4])
        np.testing.assert_almost_equal(
            sorted(nx.laplacian_spectrum(self.G)), expected
        )
        np.testing.assert_almost_equal(
            sorted(nx.laplacian_spectrum(self.WG, weight=None)), expected
        )
        # Eigenvalues scale linearly with a uniform edge weight.
        np.testing.assert_almost_equal(
            sorted(nx.laplacian_spectrum(self.WG)), 0.5 * expected
        )
        np.testing.assert_almost_equal(
            sorted(nx.laplacian_spectrum(self.WG, weight="other")), 0.3 * expected
        )

    def test_normalized_laplacian_spectrum(self):
        "Normalized Laplacian eigenvalues"
        # Normalization makes the spectrum invariant to uniform edge weights.
        expected = np.array([0, 0, 0.7712864461218, 1.5, 1.7287135538781])
        np.testing.assert_almost_equal(
            sorted(nx.normalized_laplacian_spectrum(self.G)), expected
        )
        np.testing.assert_almost_equal(
            sorted(nx.normalized_laplacian_spectrum(self.WG, weight=None)), expected
        )
        np.testing.assert_almost_equal(
            sorted(nx.normalized_laplacian_spectrum(self.WG)), expected
        )
        np.testing.assert_almost_equal(
            sorted(nx.normalized_laplacian_spectrum(self.WG, weight="other")), expected
        )

    def test_adjacency_spectrum(self):
        "Adjacency eigenvalues"
        expected = np.array([-np.sqrt(2), 0, np.sqrt(2)])
        np.testing.assert_almost_equal(
            sorted(nx.adjacency_spectrum(self.P)), expected
        )

    def test_modularity_spectrum(self):
        "Modularity eigenvalues"
        np.testing.assert_almost_equal(
            sorted(nx.modularity_spectrum(self.P)), np.array([-1.5, 0.0, 0.0])
        )
        # Directed modularity eigenvalues
        np.testing.assert_almost_equal(
            sorted(nx.modularity_spectrum(self.DG)), np.array([-0.5, 0.0, 0.0])
        )

    def test_bethe_hessian_spectrum(self):
        "Bethe Hessian eigenvalues"
        expected = np.array([0.5 * (9 - np.sqrt(33)), 4, 0.5 * (9 + np.sqrt(33))])
        np.testing.assert_almost_equal(
            sorted(nx.bethe_hessian_spectrum(self.P, r=2)), expected
        )
        # Collapses back to Laplacian:
        np.testing.assert_almost_equal(
            sorted(nx.bethe_hessian_spectrum(self.P, r=1)),
            sorted(nx.laplacian_spectrum(self.P)),
        )
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/graphml.py ADDED
@@ -0,0 +1,1053 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ *******
3
+ GraphML
4
+ *******
5
+ Read and write graphs in GraphML format.
6
+
7
+ .. warning::
8
+
9
+ This parser uses the standard xml library present in Python, which is
10
+ insecure - see :external+python:mod:`xml` for additional information.
11
+ Only parse GraphML files you trust.
12
+
13
+ This implementation does not support mixed graphs (directed and unidirected
14
+ edges together), hyperedges, nested graphs, or ports.
15
+
16
+ "GraphML is a comprehensive and easy-to-use file format for graphs. It
17
+ consists of a language core to describe the structural properties of a
18
+ graph and a flexible extension mechanism to add application-specific
19
+ data. Its main features include support of
20
+
21
+ * directed, undirected, and mixed graphs,
22
+ * hypergraphs,
23
+ * hierarchical graphs,
24
+ * graphical representations,
25
+ * references to external data,
26
+ * application-specific attribute data, and
27
+ * light-weight parsers.
28
+
29
+ Unlike many other file formats for graphs, GraphML does not use a
30
+ custom syntax. Instead, it is based on XML and hence ideally suited as
31
+ a common denominator for all kinds of services generating, archiving,
32
+ or processing graphs."
33
+
34
+ http://graphml.graphdrawing.org/
35
+
36
+ Format
37
+ ------
38
+ GraphML is an XML format. See
39
+ http://graphml.graphdrawing.org/specification.html for the specification and
40
+ http://graphml.graphdrawing.org/primer/graphml-primer.html
41
+ for examples.
42
+ """
43
+
44
+ import warnings
45
+ from collections import defaultdict
46
+
47
+ import networkx as nx
48
+ from networkx.utils import open_file
49
+
50
+ __all__ = [
51
+ "write_graphml",
52
+ "read_graphml",
53
+ "generate_graphml",
54
+ "write_graphml_xml",
55
+ "write_graphml_lxml",
56
+ "parse_graphml",
57
+ "GraphMLWriter",
58
+ "GraphMLReader",
59
+ ]
60
+
61
+
62
@open_file(1, mode="wb")
def write_graphml_xml(
    G,
    path,
    encoding="utf-8",
    prettyprint=True,
    infer_numeric_types=False,
    named_key_ids=False,
    edge_id_from_attribute=None,
):
    """Write G in GraphML XML format to path

    Parameters
    ----------
    G : graph
        A networkx graph
    path : file or string
        File or filename to write.
        Filenames ending in .gz or .bz2 will be compressed.
    encoding : string (optional)
        Encoding for text data.
    prettyprint : bool (optional)
        If True use line breaks and indenting in output XML.
    infer_numeric_types : boolean
        Determine if numeric types should be generalized.
        For example, if edges have both int and float 'weight' attributes,
        we infer in GraphML that both are floats.
    named_key_ids : bool (optional)
        If True use attr.name as value for key elements' id attribute.
    edge_id_from_attribute : dict key (optional)
        If provided, the graphml edge id is set by looking up the corresponding
        edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data,
        the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> nx.write_graphml(G, "test.graphml")

    Notes
    -----
    This implementation does not support mixed graphs (directed
    and unidirected edges together) hyperedges, nested graphs, or ports.
    """
    # Gather the serializer configuration in one place, then delegate the
    # actual XML generation to GraphMLWriter.
    writer_options = {
        "encoding": encoding,
        "prettyprint": prettyprint,
        "infer_numeric_types": infer_numeric_types,
        "named_key_ids": named_key_ids,
        "edge_id_from_attribute": edge_id_from_attribute,
    }
    writer = GraphMLWriter(**writer_options)
    writer.add_graph_element(G)
    writer.dump(path)
115
+
116
+
117
@open_file(1, mode="wb")
def write_graphml_lxml(
    G,
    path,
    encoding="utf-8",
    prettyprint=True,
    infer_numeric_types=False,
    named_key_ids=False,
    edge_id_from_attribute=None,
):
    """Write G in GraphML XML format to path

    This function uses the LXML framework and should be faster than
    the version using the xml library.

    Parameters
    ----------
    G : graph
        A networkx graph
    path : file or string
        File or filename to write.
        Filenames ending in .gz or .bz2 will be compressed.
    encoding : string (optional)
        Encoding for text data.
    prettyprint : bool (optional)
        If True use line breaks and indenting in output XML.
    infer_numeric_types : boolean
        Determine if numeric types should be generalized.
        For example, if edges have both int and float 'weight' attributes,
        we infer in GraphML that both are floats.
    named_key_ids : bool (optional)
        If True use attr.name as value for key elements' id attribute.
    edge_id_from_attribute : dict key (optional)
        If provided, the graphml edge id is set by looking up the corresponding
        edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data,
        the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> nx.write_graphml_lxml(G, "fourpath.graphml")

    Notes
    -----
    This implementation does not support mixed graphs (directed
    and unidirected edges together) hyperedges, nested graphs, or ports.
    """
    # lxml is an optional dependency; without it, silently fall back to the
    # (slower) stdlib-based writer with identical semantics.
    try:
        import lxml.etree as lxmletree  # noqa: F401
    except ImportError:
        return write_graphml_xml(
            G,
            path,
            encoding,
            prettyprint,
            infer_numeric_types,
            named_key_ids,
            edge_id_from_attribute,
        )

    lxml_writer = GraphMLWriterLxml(
        path,
        graph=G,
        encoding=encoding,
        prettyprint=prettyprint,
        infer_numeric_types=infer_numeric_types,
        named_key_ids=named_key_ids,
        edge_id_from_attribute=edge_id_from_attribute,
    )
    lxml_writer.dump()
187
+
188
+
189
def generate_graphml(
    G,
    encoding="utf-8",
    prettyprint=True,
    named_key_ids=False,
    edge_id_from_attribute=None,
):
    """Generate GraphML lines for G

    Parameters
    ----------
    G : graph
        A networkx graph
    encoding : string (optional)
        Encoding for text data.
    prettyprint : bool (optional)
        If True use line breaks and indenting in output XML.
    named_key_ids : bool (optional)
        If True use attr.name as value for key elements' id attribute.
    edge_id_from_attribute : dict key (optional)
        If provided, the graphml edge id is set by looking up the corresponding
        edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data,
        the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> linefeed = chr(10)  # linefeed = \n
    >>> s = linefeed.join(nx.generate_graphml(G))
    >>> for line in nx.generate_graphml(G):  # doctest: +SKIP
    ...     print(line)

    Notes
    -----
    This implementation does not support mixed graphs (directed and unidirected
    edges together) hyperedges, nested graphs, or ports.
    """
    writer = GraphMLWriter(
        encoding=encoding,
        prettyprint=prettyprint,
        named_key_ids=named_key_ids,
        edge_id_from_attribute=edge_id_from_attribute,
    )
    writer.add_graph_element(G)
    # Serialize the whole document, then hand it out one line at a time.
    for line in str(writer).splitlines():
        yield line
234
+
235
+
236
@open_file(0, mode="rb")
@nx._dispatchable(graphs=None, returns_graph=True)
def read_graphml(path, node_type=str, edge_key_type=int, force_multigraph=False):
    """Read graph in GraphML format from path.

    Parameters
    ----------
    path : file or string
        File or filename to write.
        Filenames ending in .gz or .bz2 will be compressed.

    node_type: Python type (default: str)
        Convert node ids to this type

    edge_key_type: Python type (default: int)
        Convert graphml edge ids to this type. Multigraphs use id as edge key.
        Non-multigraphs add to edge attribute dict with name "id".

    force_multigraph : bool (default: False)
        If True, return a multigraph with edge keys. If False (the default)
        return a multigraph when multiedges are in the graph.

    Returns
    -------
    graph: NetworkX graph
        If parallel edges are present or `force_multigraph=True` then
        a MultiGraph or MultiDiGraph is returned. Otherwise a Graph/DiGraph.
        The returned graph is directed if the file indicates it should be.

    Notes
    -----
    Default node and edge attributes are not propagated to each node and edge.
    They can be obtained from `G.graph` and applied to node and edge attributes
    if desired using something like this:

    >>> default_color = G.graph["node_default"]["color"]  # doctest: +SKIP
    >>> for node, data in G.nodes(data=True):  # doctest: +SKIP
    ...     if "color" not in data:
    ...         data["color"] = default_color
    >>> default_color = G.graph["edge_default"]["color"]  # doctest: +SKIP
    >>> for u, v, data in G.edges(data=True):  # doctest: +SKIP
    ...     if "color" not in data:
    ...         data["color"] = default_color

    This implementation does not support mixed graphs (directed and unidirected
    edges together), hypergraphs, nested graphs, or ports.

    For multigraphs the GraphML edge "id" will be used as the edge
    key. If not specified then they "key" attribute will be used. If
    there is no "key" attribute a default NetworkX multigraph edge key
    will be provided.

    Files with the yEd "yfiles" extension can be read. The type of the node's
    shape is preserved in the `shape_type` node attribute.

    yEd compressed files ("file.graphmlz" extension) can be read by renaming
    the file to "file.graphml.gz".

    """
    reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
    # need to check for multiple graphs
    graphs = list(reader(path=path))
    if graphs:
        return graphs[0]
    # Nothing parsed: some producers omit the GraphML namespace declaration.
    # Re-read the raw bytes with a fully-qualified header patched in and retry.
    header = b'<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
    path.seek(0)
    raw = path.read()
    graphs = list(reader(string=raw.replace(b"<graphml>", header)))
    if not graphs:
        raise nx.NetworkXError("file not successfully read as graphml")
    return graphs[0]
308
+
309
+
310
@nx._dispatchable(graphs=None, returns_graph=True)
def parse_graphml(
    graphml_string, node_type=str, edge_key_type=int, force_multigraph=False
):
    """Read graph in GraphML format from string.

    Parameters
    ----------
    graphml_string : string
        String containing graphml information
        (e.g., contents of a graphml file).

    node_type: Python type (default: str)
        Convert node ids to this type

    edge_key_type: Python type (default: int)
        Convert graphml edge ids to this type. Multigraphs use id as edge key.
        Non-multigraphs add to edge attribute dict with name "id".

    force_multigraph : bool (default: False)
        If True, return a multigraph with edge keys. If False (the default)
        return a multigraph when multiedges are in the graph.


    Returns
    -------
    graph: NetworkX graph
        If no parallel edges are found a Graph or DiGraph is returned.
        Otherwise a MultiGraph or MultiDiGraph is returned.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> linefeed = chr(10)  # linefeed = \n
    >>> s = linefeed.join(nx.generate_graphml(G))
    >>> H = nx.parse_graphml(s)

    Notes
    -----
    Default node and edge attributes are not propagated to each node and edge.
    They can be obtained from `G.graph` and applied to node and edge attributes
    if desired using something like this:

    >>> default_color = G.graph["node_default"]["color"]  # doctest: +SKIP
    >>> for node, data in G.nodes(data=True):  # doctest: +SKIP
    ...     if "color" not in data:
    ...         data["color"] = default_color
    >>> default_color = G.graph["edge_default"]["color"]  # doctest: +SKIP
    >>> for u, v, data in G.edges(data=True):  # doctest: +SKIP
    ...     if "color" not in data:
    ...         data["color"] = default_color

    This implementation does not support mixed graphs (directed and unidirected
    edges together), hypergraphs, nested graphs, or ports.

    For multigraphs the GraphML edge "id" will be used as the edge
    key. If not specified then they "key" attribute will be used. If
    there is no "key" attribute a default NetworkX multigraph edge key
    will be provided.

    """
    reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
    # need to check for multiple graphs
    graphs = list(reader(string=graphml_string))
    if graphs:
        return graphs[0]
    # Nothing parsed: some producers omit the GraphML namespace declaration.
    # Patch a fully-qualified header into the string and retry once.
    header = '<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
    graphs = list(reader(string=graphml_string.replace("<graphml>", header)))
    if not graphs:
        raise nx.NetworkXError("file not successfully read as graphml")
    return graphs[0]
382
+
383
+
384
class GraphML:
    """Base class holding GraphML namespace constants and the mappings
    between Python types and GraphML ``attr.type`` names.

    Subclasses (readers and writers) call :meth:`construct_types` once to
    populate ``self.xml_type`` and ``self.python_type``.
    """

    NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
    NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
    # xmlns:y="http://www.yworks.com/xml/graphml"
    NS_Y = "http://www.yworks.com/xml/graphml"
    SCHEMALOCATION = " ".join(
        [
            "http://graphml.graphdrawing.org/xmlns",
            "http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd",
        ]
    )

    def construct_types(self):
        """Populate ``self.xml_type`` (Python type -> GraphML type name) and
        ``self.python_type`` (GraphML type name -> Python type).

        In ``xml_type`` the last entry for a given Python type wins (so
        ``int`` serializes as "long"); ``python_type`` reverses the pairs so
        every GraphML numeric name reads back as a plain Python type.
        """
        types = [
            (int, "integer"),  # for Gephi GraphML bug
            (str, "yfiles"),
            (str, "string"),
            (int, "int"),
            (int, "long"),
            (float, "float"),
            (float, "double"),
            (bool, "boolean"),
        ]

        # These additions to types allow writing numpy types
        try:
            import numpy as np
        except ImportError:
            # numpy is optional: without it only plain Python types are mapped.
            pass
        else:
            # prepend so that python types are created upon read (last entry wins)
            types = [
                (np.float64, "float"),
                (np.float32, "float"),
                (np.float16, "float"),
                (np.int_, "int"),
                (np.int8, "int"),
                (np.int16, "int"),
                (np.int32, "int"),
                (np.int64, "int"),
                (np.uint8, "int"),
                (np.uint16, "int"),
                (np.uint32, "int"),
                (np.uint64, "int"),
                (np.intc, "int"),
                (np.intp, "int"),
            ] + types

        self.xml_type = dict(types)
        self.python_type = dict(reversed(a) for a in types)

    # This page says that data types in GraphML follow Java(TM).
    # http://graphml.graphdrawing.org/primer/graphml-primer.html#AttributesDefinition
    # true and false are the only boolean literals:
    # http://en.wikibooks.org/wiki/Java_Programming/Literals#Boolean_Literals
    convert_bool = {
        # We use data.lower() in actual use.
        "true": True,
        "false": False,
        # Include integer strings for convenience.
        "0": False,
        0: False,
        "1": True,
        1: True,
    }

    def get_xml_type(self, key):
        """Wrapper around the xml_type dict that raises a more informative
        exception message when a user attempts to use data of a type not
        supported by GraphML."""
        try:
            return self.xml_type[key]
        except KeyError as err:
            raise TypeError(
                f"GraphML does not support type {key} as data values."
            ) from err
462
+
463
class GraphMLWriter(GraphML):
    """Build a GraphML document for one or more graphs using
    ``xml.etree.ElementTree`` elements.

    Attribute ``<data>`` elements are not emitted immediately: they are
    queued in ``self.attributes`` and only materialized in
    ``add_graph_element`` so that ``attr_type`` can generalize numeric types
    across all values seen for a given (name, scope) pair.
    """

    def __init__(
        self,
        graph=None,
        encoding="utf-8",
        prettyprint=True,
        infer_numeric_types=False,
        named_key_ids=False,
        edge_id_from_attribute=None,
    ):
        self.construct_types()
        from xml.etree.ElementTree import Element

        # Element factory; GraphMLWriterLxml overrides this with lxml's.
        self.myElement = Element

        self.infer_numeric_types = infer_numeric_types
        self.prettyprint = prettyprint
        self.named_key_ids = named_key_ids
        self.edge_id_from_attribute = edge_id_from_attribute
        self.encoding = encoding
        # Root <graphml> element; <key> children are inserted at index 0 and
        # <graph> children appended as graphs are added.
        self.xml = self.myElement(
            "graphml",
            {
                "xmlns": self.NS_GRAPHML,
                "xmlns:xsi": self.NS_XSI,
                "xsi:schemaLocation": self.SCHEMALOCATION,
            },
        )
        # (name, attr_type, scope) -> key id already declared in the document.
        self.keys = {}
        # XML element -> list of [name, value, scope, default] awaiting emission.
        self.attributes = defaultdict(list)
        # (name, scope) -> set of Python types seen, for numeric inference.
        self.attribute_types = defaultdict(set)

        if graph is not None:
            self.add_graph_element(graph)

    def __str__(self):
        """Return the serialized document as a string (prettyprinted in place
        first when enabled)."""
        from xml.etree.ElementTree import tostring

        if self.prettyprint:
            self.indent(self.xml)
        s = tostring(self.xml).decode(self.encoding)
        return s

    def attr_type(self, name, scope, value):
        """Infer the attribute type of data named name. Currently this only
        supports inference of numeric types.

        If self.infer_numeric_types is false, type is used. Otherwise, pick the
        most general of types found across all values with name and scope. This
        means edges with data named 'weight' are treated separately from nodes
        with data named 'weight'.
        """
        if self.infer_numeric_types:
            types = self.attribute_types[(name, scope)]

            if len(types) > 1:
                # Generalize: any string forces str, any float forces float,
                # otherwise everything was integral.
                types = {self.get_xml_type(t) for t in types}
                if "string" in types:
                    return str
                elif "float" in types or "double" in types:
                    return float
                else:
                    return int
            else:
                return list(types)[0]
        else:
            return type(value)

    def get_key(self, name, attr_type, scope, default):
        """Return the document key id for (name, attr_type, scope), declaring
        a new <key> element (with optional <default>) on first use."""
        keys_key = (name, attr_type, scope)
        try:
            return self.keys[keys_key]
        except KeyError:
            if self.named_key_ids:
                new_id = name
            else:
                # Sequential ids d0, d1, ... in declaration order.
                new_id = f"d{len(list(self.keys))}"

            self.keys[keys_key] = new_id
            key_kwargs = {
                "id": new_id,
                "for": scope,
                "attr.name": name,
                "attr.type": attr_type,
            }
            key_element = self.myElement("key", **key_kwargs)
            # add subelement for data default value if present
            if default is not None:
                default_element = self.myElement("default")
                default_element.text = str(default)
                key_element.append(default_element)
            # Keys must precede graphs, so insert at the document head.
            self.xml.insert(0, key_element)
        return new_id

    def add_data(self, name, element_type, value, scope="all", default=None):
        """
        Make a data element for an edge or a node. Keep a log of the
        type in the keys table.
        """
        if element_type not in self.xml_type:
            raise nx.NetworkXError(
                f"GraphML writer does not support {element_type} as data values."
            )
        keyid = self.get_key(name, self.get_xml_type(element_type), scope, default)
        data_element = self.myElement("data", key=keyid)
        data_element.text = str(value)
        return data_element

    def add_attributes(self, scope, xml_obj, data, default):
        """Appends attribute data to edges or nodes, and stores type information
        to be added later. See add_graph_element.
        """
        for k, v in data.items():
            self.attribute_types[(str(k), scope)].add(type(v))
            self.attributes[xml_obj].append([k, v, scope, default.get(k)])

    def add_nodes(self, G, graph_element):
        """Append a <node> element (with queued attributes) for every node."""
        default = G.graph.get("node_default", {})
        for node, data in G.nodes(data=True):
            node_element = self.myElement("node", id=str(node))
            self.add_attributes("node", node_element, data, default)
            graph_element.append(node_element)

    def add_edges(self, G, graph_element):
        """Append an <edge> element for every edge.

        The GraphML edge id comes from ``edge_id_from_attribute`` when that
        data key is present; multigraph edges otherwise use the edge key,
        and plain-graph edges get no id at all.
        """
        if G.is_multigraph():
            for u, v, key, data in G.edges(data=True, keys=True):
                edge_element = self.myElement(
                    "edge",
                    source=str(u),
                    target=str(v),
                    id=str(data.get(self.edge_id_from_attribute))
                    if self.edge_id_from_attribute
                    and self.edge_id_from_attribute in data
                    else str(key),
                )
                default = G.graph.get("edge_default", {})
                self.add_attributes("edge", edge_element, data, default)
                graph_element.append(edge_element)
        else:
            for u, v, data in G.edges(data=True):
                if self.edge_id_from_attribute and self.edge_id_from_attribute in data:
                    # select attribute to be edge id
                    edge_element = self.myElement(
                        "edge",
                        source=str(u),
                        target=str(v),
                        id=str(data.get(self.edge_id_from_attribute)),
                    )
                else:
                    # default: no edge id
                    edge_element = self.myElement("edge", source=str(u), target=str(v))
                default = G.graph.get("edge_default", {})
                self.add_attributes("edge", edge_element, data, default)
                graph_element.append(edge_element)

    def add_graph_element(self, G):
        """
        Serialize graph G in GraphML to the stream.
        """
        if G.is_directed():
            default_edge_type = "directed"
        else:
            default_edge_type = "undirected"

        # NOTE: pops "id" from G.graph, so serialization mutates the graph's
        # attribute dict.
        graphid = G.graph.pop("id", None)
        if graphid is None:
            graph_element = self.myElement("graph", edgedefault=default_edge_type)
        else:
            graph_element = self.myElement(
                "graph", edgedefault=default_edge_type, id=graphid
            )
        default = {}
        # Graph-level data, excluding the per-node/per-edge default dicts.
        data = {
            k: v
            for (k, v) in G.graph.items()
            if k not in ["node_default", "edge_default"]
        }
        self.add_attributes("graph", graph_element, data, default)
        self.add_nodes(G, graph_element)
        self.add_edges(G, graph_element)

        # self.attributes contains a mapping from XML Objects to a list of
        # data that needs to be added to them.
        # We postpone processing in order to do type inference/generalization.
        # See self.attr_type
        for xml_obj, data in self.attributes.items():
            for k, v, scope, default in data:
                xml_obj.append(
                    self.add_data(
                        str(k), self.attr_type(k, scope, v), str(v), scope, default
                    )
                )
        self.xml.append(graph_element)

    def add_graphs(self, graph_list):
        """Add many graphs to this GraphML document."""
        for G in graph_list:
            self.add_graph_element(G)

    def dump(self, stream):
        """Write the (optionally prettyprinted) document to *stream* with an
        XML declaration."""
        from xml.etree.ElementTree import ElementTree

        if self.prettyprint:
            self.indent(self.xml)
        document = ElementTree(self.xml)
        document.write(stream, encoding=self.encoding, xml_declaration=True)

    def indent(self, elem, level=0):
        # in-place prettyprint formatter
        i = "\n" + level * "  "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + "  "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            # NOTE: `elem` is deliberately rebound to each child here; after
            # the loop it refers to the last child, whose tail is de-indented.
            for elem in elem:
                self.indent(elem, level + 1)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i
686
+
687
class IncrementalElement:
    """Element-like facade over lxml's incremental ``_IncrementalWriter``.

    Only ``append`` is provided — just enough of the Element interface for
    GraphMLWriter's needs; this is intentionally not a full implementation.
    """

    def __init__(self, xml, prettyprint):
        self.prettyprint = prettyprint
        self.xml = xml

    def append(self, element):
        """Stream *element* straight out through the incremental writer."""
        self.xml.write(element, pretty_print=self.prettyprint)
700
+
701
+
702
class GraphMLWriterLxml(GraphMLWriter):
    """GraphML writer that streams output incrementally via ``lxml``.

    Unlike the base ``GraphMLWriter``, nodes and edges are written to the
    target file as they are produced instead of being accumulated in an
    in-memory tree first (see ``add_graph_element``).
    """

    def __init__(
        self,
        path,
        graph=None,
        encoding="utf-8",
        prettyprint=True,
        infer_numeric_types=False,
        named_key_ids=False,
        edge_id_from_attribute=None,
    ):
        self.construct_types()
        import lxml.etree as lxmletree

        self.myElement = lxmletree.Element

        self._encoding = encoding
        self._prettyprint = prettyprint
        self.named_key_ids = named_key_ids
        self.edge_id_from_attribute = edge_id_from_attribute
        self.infer_numeric_types = infer_numeric_types

        # Open the output file and enter the incremental-writer context;
        # the matching __exit__ calls happen in dump().
        self._xml_base = lxmletree.xmlfile(path, encoding=encoding)
        self._xml = self._xml_base.__enter__()
        self._xml.write_declaration()

        # We need to have a xml variable that support insertion. This call is
        # used for adding the keys to the document.
        # We will store those keys in a plain list, and then after the graph
        # element is closed we will add them to the main graphml element.
        self.xml = []
        self._keys = self.xml
        self._graphml = self._xml.element(
            "graphml",
            {
                "xmlns": self.NS_GRAPHML,
                "xmlns:xsi": self.NS_XSI,
                "xsi:schemaLocation": self.SCHEMALOCATION,
            },
        )
        self._graphml.__enter__()
        self.keys = {}
        self.attribute_types = defaultdict(set)

        if graph is not None:
            self.add_graph_element(graph)

    def add_graph_element(self, G):
        """
        Serialize graph G in GraphML to the stream.
        """
        if G.is_directed():
            default_edge_type = "directed"
        else:
            default_edge_type = "undirected"

        graphid = G.graph.pop("id", None)
        if graphid is None:
            graph_element = self._xml.element("graph", edgedefault=default_edge_type)
        else:
            graph_element = self._xml.element(
                "graph", edgedefault=default_edge_type, id=graphid
            )

        # gather attributes types for the whole graph
        # to find the most general numeric format needed.
        # Then pass through attributes to create key_id for each.
        graphdata = {
            k: v
            for k, v in G.graph.items()
            if k not in ("node_default", "edge_default")
        }
        node_default = G.graph.get("node_default", {})
        edge_default = G.graph.get("edge_default", {})
        # Graph attributes: first record all value types, then emit keys.
        for k, v in graphdata.items():
            self.attribute_types[(str(k), "graph")].add(type(v))
        for k, v in graphdata.items():
            element_type = self.get_xml_type(self.attr_type(k, "graph", v))
            self.get_key(str(k), element_type, "graph", None)
        # Nodes and data: two passes for the same reason as above.
        for node, d in G.nodes(data=True):
            for k, v in d.items():
                self.attribute_types[(str(k), "node")].add(type(v))
        for node, d in G.nodes(data=True):
            for k, v in d.items():
                T = self.get_xml_type(self.attr_type(k, "node", v))
                self.get_key(str(k), T, "node", node_default.get(k))
        # Edges and data
        if G.is_multigraph():
            for u, v, ekey, d in G.edges(keys=True, data=True):
                for k, v in d.items():
                    self.attribute_types[(str(k), "edge")].add(type(v))
            for u, v, ekey, d in G.edges(keys=True, data=True):
                for k, v in d.items():
                    T = self.get_xml_type(self.attr_type(k, "edge", v))
                    self.get_key(str(k), T, "edge", edge_default.get(k))
        else:
            for u, v, d in G.edges(data=True):
                for k, v in d.items():
                    self.attribute_types[(str(k), "edge")].add(type(v))
            for u, v, d in G.edges(data=True):
                for k, v in d.items():
                    T = self.get_xml_type(self.attr_type(k, "edge", v))
                    self.get_key(str(k), T, "edge", edge_default.get(k))

        # Now add attribute keys to the xml file
        for key in self.xml:
            self._xml.write(key, pretty_print=self._prettyprint)

        # The incremental_writer writes each node/edge as it is created
        incremental_writer = IncrementalElement(self._xml, self._prettyprint)
        with graph_element:
            self.add_attributes("graph", incremental_writer, graphdata, {})
            self.add_nodes(G, incremental_writer)  # adds attributes too
            self.add_edges(G, incremental_writer)  # adds attributes too

    def add_attributes(self, scope, xml_obj, data, default):
        """Appends attribute data."""
        for k, v in data.items():
            data_element = self.add_data(
                str(k), self.attr_type(str(k), scope, v), str(v), scope, default.get(k)
            )
            xml_obj.append(data_element)

    def __str__(self):
        # Avoid the (potentially huge) serialized-document repr of the base class.
        return object.__str__(self)

    def dump(self, stream=None):
        """Close the open <graphml> and file contexts, flushing the output.

        The ``stream`` argument is accepted for interface compatibility with
        the base class but is not used; output goes to the path given at
        construction time.
        """
        self._graphml.__exit__(None, None, None)
        self._xml_base.__exit__(None, None, None)
833
+
834
+
835
+ # default is lxml is present.
836
+ write_graphml = write_graphml_lxml
837
+
838
+
839
class GraphMLReader(GraphML):
    """Read a GraphML document. Produces NetworkX graph objects."""

    def __init__(self, node_type=str, edge_key_type=int, force_multigraph=False):
        self.construct_types()
        self.node_type = node_type  # callable used to cast node ids from the file
        self.edge_key_type = edge_key_type  # callable used to cast edge ids/keys
        self.multigraph = force_multigraph  # If False, test for multiedges
        self.edge_ids = {}  # dict mapping (u,v) tuples to edge id attributes

    def __call__(self, path=None, string=None):
        """Yield one NetworkX graph per <graph> element in the document.

        Exactly one of ``path`` (file or filename) or ``string`` (GraphML
        text) must be given; otherwise a ValueError is raised.
        """
        from xml.etree.ElementTree import ElementTree, fromstring

        if path is not None:
            self.xml = ElementTree(file=path)
        elif string is not None:
            self.xml = fromstring(string)
        else:
            raise ValueError("Must specify either 'path' or 'string' as kwarg")
        (keys, defaults) = self.find_graphml_keys(self.xml)
        for g in self.xml.findall(f"{{{self.NS_GRAPHML}}}graph"):
            yield self.make_graph(g, keys, defaults)

    def make_graph(self, graph_xml, graphml_keys, defaults, G=None):
        """Build a NetworkX graph from one <graph> element.

        ``G`` is passed in when recursing into yfiles group nodes so that
        nested graphs are merged into the same NetworkX graph.
        """
        # set default graph type
        edgedefault = graph_xml.get("edgedefault", None)
        if G is None:
            if edgedefault == "directed":
                G = nx.MultiDiGraph()
            else:
                G = nx.MultiGraph()
        # set defaults for graph attributes
        G.graph["node_default"] = {}
        G.graph["edge_default"] = {}
        for key_id, value in defaults.items():
            key_for = graphml_keys[key_id]["for"]
            name = graphml_keys[key_id]["name"]
            python_type = graphml_keys[key_id]["type"]
            if key_for == "node":
                G.graph["node_default"].update({name: python_type(value)})
            if key_for == "edge":
                G.graph["edge_default"].update({name: python_type(value)})
        # hyperedges are not supported
        hyperedge = graph_xml.find(f"{{{self.NS_GRAPHML}}}hyperedge")
        if hyperedge is not None:
            raise nx.NetworkXError("GraphML reader doesn't support hyperedges")
        # add nodes
        for node_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}node"):
            self.add_node(G, node_xml, graphml_keys, defaults)
        # add edges
        for edge_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}edge"):
            self.add_edge(G, edge_xml, graphml_keys)
        # add graph data
        data = self.decode_data_elements(graphml_keys, graph_xml)
        G.graph.update(data)

        # switch to Graph or DiGraph if no parallel edges were found
        if self.multigraph:
            return G

        G = nx.DiGraph(G) if G.is_directed() else nx.Graph(G)
        # add explicit edge "id" from file as attribute in NX graph.
        nx.set_edge_attributes(G, values=self.edge_ids, name="id")
        return G

    def add_node(self, G, node_xml, graphml_keys, defaults):
        """Add a node to the graph."""
        # warn on finding unsupported ports tag
        ports = node_xml.find(f"{{{self.NS_GRAPHML}}}port")
        if ports is not None:
            warnings.warn("GraphML port tag not supported.")
        # find the node by id and cast it to the appropriate type
        node_id = self.node_type(node_xml.get("id"))
        # get data/attributes for node
        data = self.decode_data_elements(graphml_keys, node_xml)
        G.add_node(node_id, **data)
        # get child nodes: a yfiles "group" node carries a nested <graph>
        # whose contents are merged into G by recursing into make_graph.
        if node_xml.attrib.get("yfiles.foldertype") == "group":
            graph_xml = node_xml.find(f"{{{self.NS_GRAPHML}}}graph")
            self.make_graph(graph_xml, graphml_keys, defaults, G)

    def add_edge(self, G, edge_element, graphml_keys):
        """Add an edge to the graph."""
        # warn on finding unsupported ports tag
        ports = edge_element.find(f"{{{self.NS_GRAPHML}}}port")
        if ports is not None:
            warnings.warn("GraphML port tag not supported.")

        # raise error if we find mixed directed and undirected edges
        directed = edge_element.get("directed")
        if G.is_directed() and directed == "false":
            msg = "directed=false edge found in directed graph."
            raise nx.NetworkXError(msg)
        if (not G.is_directed()) and directed == "true":
            msg = "directed=true edge found in undirected graph."
            raise nx.NetworkXError(msg)

        source = self.node_type(edge_element.get("source"))
        target = self.node_type(edge_element.get("target"))
        data = self.decode_data_elements(graphml_keys, edge_element)
        # GraphML stores edge ids as an attribute
        # NetworkX uses them as keys in multigraphs too if no key
        # attribute is specified
        edge_id = edge_element.get("id")
        if edge_id:
            # self.edge_ids is used by `make_graph` method for non-multigraphs
            self.edge_ids[source, target] = edge_id
            try:
                edge_id = self.edge_key_type(edge_id)
            except ValueError:  # Could not convert.
                pass
        else:
            edge_id = data.get("key")

        if G.has_edge(source, target):
            # mark this as a multigraph
            self.multigraph = True

        # Use add_edges_from to avoid error with add_edge when `'key' in data`
        # Note there is only one edge here...
        G.add_edges_from([(source, target, edge_id, data)])

    def decode_data_elements(self, graphml_keys, obj_xml):
        """Use the key information to decode the data XML if present."""
        data = {}
        for data_element in obj_xml.findall(f"{{{self.NS_GRAPHML}}}data"):
            key = data_element.get("key")
            try:
                data_name = graphml_keys[key]["name"]
                data_type = graphml_keys[key]["type"]
            except KeyError as err:
                raise nx.NetworkXError(f"Bad GraphML data: no key {key}") from err
            text = data_element.text
            # assume anything with subelements is a yfiles extension
            if text is not None and len(list(data_element)) == 0:
                if data_type == bool:
                    # Ignore cases.
                    # http://docs.oracle.com/javase/6/docs/api/java/lang/
                    # Boolean.html#parseBoolean%28java.lang.String%29
                    data[data_name] = self.convert_bool[text.lower()]
                else:
                    data[data_name] = data_type(text)
            elif len(list(data_element)) > 0:
                # Assume yfiles as subelements, try to extract node_label
                node_label = None
                # set GenericNode's configuration as shape type
                gn = data_element.find(f"{{{self.NS_Y}}}GenericNode")
                if gn is not None:
                    data["shape_type"] = gn.get("configuration")
                for node_type in ["GenericNode", "ShapeNode", "SVGNode", "ImageNode"]:
                    pref = f"{{{self.NS_Y}}}{node_type}/{{{self.NS_Y}}}"
                    geometry = data_element.find(f"{pref}Geometry")
                    if geometry is not None:
                        data["x"] = geometry.get("x")
                        data["y"] = geometry.get("y")
                    if node_label is None:
                        node_label = data_element.find(f"{pref}NodeLabel")
                    shape = data_element.find(f"{pref}Shape")
                    if shape is not None:
                        data["shape_type"] = shape.get("type")
                if node_label is not None:
                    data["label"] = node_label.text

                # check all the different types of edges available in yEd.
                for edge_type in [
                    "PolyLineEdge",
                    "SplineEdge",
                    "QuadCurveEdge",
                    "BezierEdge",
                    "ArcEdge",
                ]:
                    pref = f"{{{self.NS_Y}}}{edge_type}/{{{self.NS_Y}}}"
                    edge_label = data_element.find(f"{pref}EdgeLabel")
                    if edge_label is not None:
                        break
                if edge_label is not None:
                    data["label"] = edge_label.text
            elif text is None:
                # <data> element with neither text nor children: empty string.
                data[data_name] = ""
        return data

    def find_graphml_keys(self, graph_element):
        """Extracts all the keys and key defaults from the xml.

        Returns a ``(graphml_keys, graphml_key_defaults)`` pair of dicts,
        both indexed by the key's ``id`` attribute.
        """
        graphml_keys = {}
        graphml_key_defaults = {}
        for k in graph_element.findall(f"{{{self.NS_GRAPHML}}}key"):
            attr_id = k.get("id")
            attr_type = k.get("attr.type")
            attr_name = k.get("attr.name")
            yfiles_type = k.get("yfiles.type")
            if yfiles_type is not None:
                attr_name = yfiles_type
                attr_type = "yfiles"
            if attr_type is None:
                attr_type = "string"
                warnings.warn(f"No key type for id {attr_id}. Using string")
            if attr_name is None:
                raise nx.NetworkXError(f"Unknown key for id {attr_id}.")
            graphml_keys[attr_id] = {
                "name": attr_name,
                "type": self.python_type[attr_type],
                "for": k.get("for"),
            }
            # check for "default" sub-element of key element
            default = k.find(f"{{{self.NS_GRAPHML}}}default")
            if default is not None:
                # Handle default values identically to data element values
                python_type = graphml_keys[attr_id]["type"]
                if python_type == bool:
                    graphml_key_defaults[attr_id] = self.convert_bool[
                        default.text.lower()
                    ]
                else:
                    graphml_key_defaults[attr_id] = python_type(default.text)
        return graphml_keys, graphml_key_defaults
+ return graphml_keys, graphml_key_defaults
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/leda.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Read graphs in LEDA format.
3
+
4
+ LEDA is a C++ class library for efficient data types and algorithms.
5
+
6
+ Format
7
+ ------
8
+ See http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
9
+
10
+ """
11
+ # Original author: D. Eppstein, UC Irvine, August 12, 2003.
12
+ # The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
13
+
14
+ __all__ = ["read_leda", "parse_leda"]
15
+
16
+ import networkx as nx
17
+ from networkx.exception import NetworkXError
18
+ from networkx.utils import open_file
19
+
20
+
21
@open_file(0, mode="rb")
@nx._dispatchable(graphs=None, returns_graph=True)
def read_leda(path, encoding="UTF-8"):
    """Read graph in LEDA format from path.

    Parameters
    ----------
    path : file or string
        File or filename to read. Filenames ending in .gz or .bz2 will be
        uncompressed.
    encoding : string, optional
        Text encoding used to decode the file contents (default "UTF-8").

    Returns
    -------
    G : NetworkX graph

    Examples
    --------
    G=nx.read_leda('file.leda')

    References
    ----------
    .. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
    """
    # Decode lazily; parse_leda consumes the generator line by line.
    decoded = (raw.decode(encoding) for raw in path)
    return parse_leda(decoded)
47
+
48
+
49
@nx._dispatchable(graphs=None, returns_graph=True)
def parse_leda(lines):
    """Read graph in LEDA format from string or iterable.

    Parameters
    ----------
    lines : string or iterable
        Data in LEDA format.

    Returns
    -------
    G : NetworkX graph

    Raises
    ------
    NetworkXError
        If an edge line does not contain the four expected fields or the
        input ends before all declared edges have been read.

    Examples
    --------
    G=nx.parse_leda(string)

    References
    ----------
    .. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
    """
    if isinstance(lines, str):
        lines = iter(lines.split("\n"))
    # Drop comment lines and blank lines before parsing.
    lines = iter(
        [
            line.rstrip("\n")
            for line in lines
            if not (line.startswith(("#", "\n")) or line == "")
        ]
    )
    # Skip the LEDA.GRAPH header and the two type-parameter lines.
    for i in range(3):
        next(lines)
    # Graph
    du = int(next(lines))  # -1=directed, -2=undirected
    if du == -1:
        G = nx.DiGraph()
    else:
        G = nx.Graph()

    # Nodes
    n = int(next(lines))  # number of nodes
    node = {}
    for i in range(1, n + 1):  # LEDA counts from 1 to n
        symbol = next(lines).rstrip().strip("|{}| ")
        if symbol == "":
            symbol = str(i)  # use int if no label - could be trouble
        node[i] = symbol

    G.add_nodes_from([s for i, s in node.items()])

    # Edges
    m = int(next(lines))  # number of edges
    for i in range(m):
        try:
            s, t, reversal, label = next(lines).split()
        # Only catch what the parse can raise: ValueError from a bad
        # unpack, StopIteration on premature end of input.  The previous
        # `except BaseException` also swallowed KeyboardInterrupt/SystemExit.
        except (ValueError, StopIteration) as err:
            raise NetworkXError(f"Too few fields in LEDA.GRAPH edge {i+1}") from err
        # BEWARE: no handling of reversal edges
        G.add_edge(node[int(s)], node[int(t)], label=label[2:-2])
    return G
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/pajek.py ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ *****
3
+ Pajek
4
+ *****
5
+ Read graphs in Pajek format.
6
+
7
+ This implementation handles directed and undirected graphs including
8
+ those with self loops and parallel edges.
9
+
10
+ Format
11
+ ------
12
+ See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
13
+ for format information.
14
+
15
+ """
16
+
17
+ import warnings
18
+
19
+ import networkx as nx
20
+ from networkx.utils import open_file
21
+
22
+ __all__ = ["read_pajek", "parse_pajek", "generate_pajek", "write_pajek"]
23
+
24
+
25
+ def generate_pajek(G):
26
+ """Generate lines in Pajek graph format.
27
+
28
+ Parameters
29
+ ----------
30
+ G : graph
31
+ A Networkx graph
32
+
33
+ References
34
+ ----------
35
+ See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
36
+ for format information.
37
+ """
38
+ if G.name == "":
39
+ name = "NetworkX"
40
+ else:
41
+ name = G.name
42
+ # Apparently many Pajek format readers can't process this line
43
+ # So we'll leave it out for now.
44
+ # yield '*network %s'%name
45
+
46
+ # write nodes with attributes
47
+ yield f"*vertices {G.order()}"
48
+ nodes = list(G)
49
+ # make dictionary mapping nodes to integers
50
+ nodenumber = dict(zip(nodes, range(1, len(nodes) + 1)))
51
+ for n in nodes:
52
+ # copy node attributes and pop mandatory attributes
53
+ # to avoid duplication.
54
+ na = G.nodes.get(n, {}).copy()
55
+ x = na.pop("x", 0.0)
56
+ y = na.pop("y", 0.0)
57
+ try:
58
+ id = int(na.pop("id", nodenumber[n]))
59
+ except ValueError as err:
60
+ err.args += (
61
+ (
62
+ "Pajek format requires 'id' to be an int()."
63
+ " Refer to the 'Relabeling nodes' section."
64
+ ),
65
+ )
66
+ raise
67
+ nodenumber[n] = id
68
+ shape = na.pop("shape", "ellipse")
69
+ s = " ".join(map(make_qstr, (id, n, x, y, shape)))
70
+ # only optional attributes are left in na.
71
+ for k, v in na.items():
72
+ if isinstance(v, str) and v.strip() != "":
73
+ s += f" {make_qstr(k)} {make_qstr(v)}"
74
+ else:
75
+ warnings.warn(
76
+ f"Node attribute {k} is not processed. {('Empty attribute' if isinstance(v, str) else 'Non-string attribute')}."
77
+ )
78
+ yield s
79
+
80
+ # write edges with attributes
81
+ if G.is_directed():
82
+ yield "*arcs"
83
+ else:
84
+ yield "*edges"
85
+ for u, v, edgedata in G.edges(data=True):
86
+ d = edgedata.copy()
87
+ value = d.pop("weight", 1.0) # use 1 as default edge value
88
+ s = " ".join(map(make_qstr, (nodenumber[u], nodenumber[v], value)))
89
+ for k, v in d.items():
90
+ if isinstance(v, str) and v.strip() != "":
91
+ s += f" {make_qstr(k)} {make_qstr(v)}"
92
+ else:
93
+ warnings.warn(
94
+ f"Edge attribute {k} is not processed. {('Empty attribute' if isinstance(v, str) else 'Non-string attribute')}."
95
+ )
96
+ yield s
97
+
98
+
99
+ @open_file(1, mode="wb")
100
+ def write_pajek(G, path, encoding="UTF-8"):
101
+ """Write graph in Pajek format to path.
102
+
103
+ Parameters
104
+ ----------
105
+ G : graph
106
+ A Networkx graph
107
+ path : file or string
108
+ File or filename to write.
109
+ Filenames ending in .gz or .bz2 will be compressed.
110
+
111
+ Examples
112
+ --------
113
+ >>> G = nx.path_graph(4)
114
+ >>> nx.write_pajek(G, "test.net")
115
+
116
+ Warnings
117
+ --------
118
+ Optional node attributes and edge attributes must be non-empty strings.
119
+ Otherwise it will not be written into the file. You will need to
120
+ convert those attributes to strings if you want to keep them.
121
+
122
+ References
123
+ ----------
124
+ See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
125
+ for format information.
126
+ """
127
+ for line in generate_pajek(G):
128
+ line += "\n"
129
+ path.write(line.encode(encoding))
130
+
131
+
132
+ @open_file(0, mode="rb")
133
+ @nx._dispatchable(graphs=None, returns_graph=True)
134
+ def read_pajek(path, encoding="UTF-8"):
135
+ """Read graph in Pajek format from path.
136
+
137
+ Parameters
138
+ ----------
139
+ path : file or string
140
+ File or filename to write.
141
+ Filenames ending in .gz or .bz2 will be uncompressed.
142
+
143
+ Returns
144
+ -------
145
+ G : NetworkX MultiGraph or MultiDiGraph.
146
+
147
+ Examples
148
+ --------
149
+ >>> G = nx.path_graph(4)
150
+ >>> nx.write_pajek(G, "test.net")
151
+ >>> G = nx.read_pajek("test.net")
152
+
153
+ To create a Graph instead of a MultiGraph use
154
+
155
+ >>> G1 = nx.Graph(G)
156
+
157
+ References
158
+ ----------
159
+ See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
160
+ for format information.
161
+ """
162
+ lines = (line.decode(encoding) for line in path)
163
+ return parse_pajek(lines)
164
+
165
+
166
+ @nx._dispatchable(graphs=None, returns_graph=True)
167
+ def parse_pajek(lines):
168
+ """Parse Pajek format graph from string or iterable.
169
+
170
+ Parameters
171
+ ----------
172
+ lines : string or iterable
173
+ Data in Pajek format.
174
+
175
+ Returns
176
+ -------
177
+ G : NetworkX graph
178
+
179
+ See Also
180
+ --------
181
+ read_pajek
182
+
183
+ """
184
+ import shlex
185
+
186
+ # multigraph=False
187
+ if isinstance(lines, str):
188
+ lines = iter(lines.split("\n"))
189
+ lines = iter([line.rstrip("\n") for line in lines])
190
+ G = nx.MultiDiGraph() # are multiedges allowed in Pajek? assume yes
191
+ labels = [] # in the order of the file, needed for matrix
192
+ while lines:
193
+ try:
194
+ l = next(lines)
195
+ except: # EOF
196
+ break
197
+ if l.lower().startswith("*network"):
198
+ try:
199
+ label, name = l.split(None, 1)
200
+ except ValueError:
201
+ # Line was not of the form: *network NAME
202
+ pass
203
+ else:
204
+ G.graph["name"] = name
205
+ elif l.lower().startswith("*vertices"):
206
+ nodelabels = {}
207
+ l, nnodes = l.split()
208
+ for i in range(int(nnodes)):
209
+ l = next(lines)
210
+ try:
211
+ splitline = [
212
+ x.decode("utf-8") for x in shlex.split(str(l).encode("utf-8"))
213
+ ]
214
+ except AttributeError:
215
+ splitline = shlex.split(str(l))
216
+ id, label = splitline[0:2]
217
+ labels.append(label)
218
+ G.add_node(label)
219
+ nodelabels[id] = label
220
+ G.nodes[label]["id"] = id
221
+ try:
222
+ x, y, shape = splitline[2:5]
223
+ G.nodes[label].update(
224
+ {"x": float(x), "y": float(y), "shape": shape}
225
+ )
226
+ except:
227
+ pass
228
+ extra_attr = zip(splitline[5::2], splitline[6::2])
229
+ G.nodes[label].update(extra_attr)
230
+ elif l.lower().startswith("*edges") or l.lower().startswith("*arcs"):
231
+ if l.lower().startswith("*edge"):
232
+ # switch from multidigraph to multigraph
233
+ G = nx.MultiGraph(G)
234
+ if l.lower().startswith("*arcs"):
235
+ # switch to directed with multiple arcs for each existing edge
236
+ G = G.to_directed()
237
+ for l in lines:
238
+ try:
239
+ splitline = [
240
+ x.decode("utf-8") for x in shlex.split(str(l).encode("utf-8"))
241
+ ]
242
+ except AttributeError:
243
+ splitline = shlex.split(str(l))
244
+
245
+ if len(splitline) < 2:
246
+ continue
247
+ ui, vi = splitline[0:2]
248
+ u = nodelabels.get(ui, ui)
249
+ v = nodelabels.get(vi, vi)
250
+ # parse the data attached to this edge and put in a dictionary
251
+ edge_data = {}
252
+ try:
253
+ # there should always be a single value on the edge?
254
+ w = splitline[2:3]
255
+ edge_data.update({"weight": float(w[0])})
256
+ except:
257
+ pass
258
+ # if there isn't, just assign a 1
259
+ # edge_data.update({'value':1})
260
+ extra_attr = zip(splitline[3::2], splitline[4::2])
261
+ edge_data.update(extra_attr)
262
+ # if G.has_edge(u,v):
263
+ # multigraph=True
264
+ G.add_edge(u, v, **edge_data)
265
+ elif l.lower().startswith("*matrix"):
266
+ G = nx.DiGraph(G)
267
+ adj_list = (
268
+ (labels[row], labels[col], {"weight": int(data)})
269
+ for (row, line) in enumerate(lines)
270
+ for (col, data) in enumerate(line.split())
271
+ if int(data) != 0
272
+ )
273
+ G.add_edges_from(adj_list)
274
+
275
+ return G
276
+
277
+
278
def make_qstr(t):
    """Return ``t`` as a string, wrapped in double quotes if it has a space."""
    text = t if isinstance(t, str) else str(t)
    return f'"{text}"' if " " in text else text
+ return t
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/sparse6.py ADDED
@@ -0,0 +1,377 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Original author: D. Eppstein, UC Irvine, August 12, 2003.
2
+ # The original code at https://www.ics.uci.edu/~eppstein/PADS/ is public domain.
3
+ """Functions for reading and writing graphs in the *sparse6* format.
4
+
5
+ The *sparse6* file format is a space-efficient format for large sparse
6
+ graphs. For small graphs or large dense graphs, use the *graph6* file
7
+ format.
8
+
9
+ For more information, see the `sparse6`_ homepage.
10
+
11
+ .. _sparse6: https://users.cecs.anu.edu.au/~bdm/data/formats.html
12
+
13
+ """
14
+
15
+ import networkx as nx
16
+ from networkx.exception import NetworkXError
17
+ from networkx.readwrite.graph6 import data_to_n, n_to_data
18
+ from networkx.utils import not_implemented_for, open_file
19
+
20
+ __all__ = ["from_sparse6_bytes", "read_sparse6", "to_sparse6_bytes", "write_sparse6"]
21
+
22
+
23
def _generate_sparse6_bytes(G, nodes, header):
    """Yield bytes in the sparse6 encoding of a graph.

    `G` is an undirected simple graph. `nodes` is the list of nodes for
    which the node-induced subgraph will be encoded; if `nodes` is the
    list of all nodes in the graph, the entire graph will be
    encoded. `header` is a Boolean that specifies whether to generate
    the header ``b'>>sparse6<<'`` before the remaining data.

    This function generates `bytes` objects in the following order:

    1. the header (if requested),
    2. the encoding of the number of nodes,
    3. each character, one-at-a-time, in the encoding of the requested
       node-induced subgraph,
    4. a newline character.

    This function raises :exc:`ValueError` if the graph is too large for
    the graph6 format (that is, greater than ``2 ** 36`` nodes).

    """
    n = len(G)
    if n >= 2**36:
        raise ValueError(
            "sparse6 is only defined if number of nodes is less than 2 ** 36"
        )
    if header:
        yield b">>sparse6<<"
    yield b":"
    for d in n_to_data(n):
        yield str.encode(chr(d + 63))

    # k is the number of bits needed to represent a vertex index (0..n-1).
    k = 1
    while 1 << k < n:
        k += 1

    def enc(x):
        """Big endian k-bit encoding of x"""
        return [1 if (x & 1 << (k - 1 - i)) else 0 for i in range(k)]

    # Edges sorted by larger endpoint, so the stream walks vertices upward.
    edges = sorted((max(u, v), min(u, v)) for u, v in G.edges())
    bits = []
    curv = 0
    for v, u in edges:
        if v == curv:  # current vertex edge
            bits.append(0)
            bits.extend(enc(u))
        elif v == curv + 1:  # next vertex edge
            curv += 1
            bits.append(1)
            bits.extend(enc(u))
        else:  # skip to vertex v and then add edge to u
            curv = v
            bits.append(1)
            bits.extend(enc(v))
            bits.append(0)
            bits.extend(enc(u))
    if k < 6 and n == (1 << k) and ((-len(bits)) % 6) >= k and curv < (n - 1):
        # Padding special case: small k, n=2^k,
        # more than k bits of padding needed,
        # current vertex is not (n-1) --
        # appending 1111... would add a loop on (n-1)
        bits.append(0)
    bits.extend([1] * ((-len(bits)) % 6))

    # Pack the bit stream into 6-bit groups, one printable byte each.
    data = [
        (bits[i + 0] << 5)
        + (bits[i + 1] << 4)
        + (bits[i + 2] << 3)
        + (bits[i + 3] << 2)
        + (bits[i + 4] << 1)
        + (bits[i + 5] << 0)
        for i in range(0, len(bits), 6)
    ]

    for d in data:
        yield str.encode(chr(d + 63))
    yield b"\n"
+
104
+
105
@nx._dispatchable(graphs=None, returns_graph=True)
def from_sparse6_bytes(string):
    """Read an undirected graph in sparse6 format from string.

    Parameters
    ----------
    string : string
        Data in sparse6 format

    Returns
    -------
    G : Graph

    Raises
    ------
    NetworkXError
        If the string is unable to be parsed in sparse6 format

    Examples
    --------
    >>> G = nx.from_sparse6_bytes(b":A_")
    >>> sorted(G.edges())
    [(0, 1), (0, 1), (0, 1)]

    See Also
    --------
    read_sparse6, write_sparse6

    References
    ----------
    .. [1] Sparse6 specification
           <https://users.cecs.anu.edu.au/~bdm/data/formats.html>

    """
    if string.startswith(b">>sparse6<<"):
        string = string[11:]
    if not string.startswith(b":"):
        raise NetworkXError("Expected leading colon in sparse6")

    # Undo the printable-ASCII offset; decode n, leaving the edge data.
    chars = [c - 63 for c in string[1:]]
    n, data = data_to_n(chars)
    # k is the number of bits per vertex index in the encoding.
    k = 1
    while 1 << k < n:
        k += 1

    def parseData():
        """Returns stream of pairs b[i], x[i] for sparse6 format."""
        chunks = iter(data)
        d = None  # partial data word
        dLen = 0  # how many unparsed bits are left in d

        while 1:
            if dLen < 1:
                try:
                    d = next(chunks)
                except StopIteration:
                    return
                dLen = 6
            dLen -= 1
            b = (d >> dLen) & 1  # grab top remaining bit

            x = d & ((1 << dLen) - 1)  # partially built up value of x
            xLen = dLen  # how many bits included so far in x
            while xLen < k:  # now grab full chunks until we have enough
                try:
                    d = next(chunks)
                except StopIteration:
                    return
                dLen = 6
                x = (x << 6) + d
                xLen += 6
            x = x >> (xLen - k)  # shift back the extra bits
            dLen = xLen - k
            yield b, x

    v = 0

    G = nx.MultiGraph()
    G.add_nodes_from(range(n))

    # Track whether any parallel edge appears so we can downgrade to Graph.
    multigraph = False
    for b, x in parseData():
        if b == 1:
            v += 1
        # padding with ones can cause overlarge number here
        if x >= n or v >= n:
            break
        elif x > v:
            v = x
        else:
            if G.has_edge(x, v):
                multigraph = True
            G.add_edge(x, v)
    if not multigraph:
        G = nx.Graph(G)
    return G
+
202
+
203
def to_sparse6_bytes(G, nodes=None, header=True):
    """Convert an undirected graph to bytes in sparse6 format.

    Parameters
    ----------
    G : Graph (undirected)

    nodes: list or iterable
       Nodes are labeled 0...n-1 in the order provided. If None the ordering
       given by ``G.nodes()`` is used.

    header: bool
       If True add '>>sparse6<<' bytes to head of data.

    Raises
    ------
    NetworkXNotImplemented
        If the graph is directed.

    ValueError
        If the graph has at least ``2 ** 36`` nodes; the sparse6 format
        is only defined for graphs of order less than ``2 ** 36``.

    Examples
    --------
    >>> nx.to_sparse6_bytes(nx.path_graph(2))
    b'>>sparse6<<:An\\n'

    See Also
    --------
    to_sparse6_bytes, read_sparse6, write_sparse6_bytes

    Notes
    -----
    The returned bytes end with a newline character.

    The format does not support edge or node labels.

    References
    ----------
    .. [1] Graph6 specification
           <https://users.cecs.anu.edu.au/~bdm/data/formats.html>

    """
    # Restrict to the requested node-induced subgraph before encoding.
    if nodes is not None:
        G = G.subgraph(nodes)
    # sparse6 addresses vertices as integers 0..n-1, so relabel first.
    relabeled = nx.convert_node_labels_to_integers(G, ordering="sorted")
    return b"".join(_generate_sparse6_bytes(relabeled, nodes, header))
+ return b"".join(_generate_sparse6_bytes(G, nodes, header))
251
+
252
+
253
@open_file(0, mode="rb")
@nx._dispatchable(graphs=None, returns_graph=True)
def read_sparse6(path):
    """Read an undirected graph in sparse6 format from path.

    Parameters
    ----------
    path : file or string
       File or filename to write.

    Returns
    -------
    G : Graph/Multigraph or list of Graphs/MultiGraphs
       If the file contains multiple lines then a list of graphs is returned

    Raises
    ------
    NetworkXError
        If the string is unable to be parsed in sparse6 format

    Examples
    --------
    You can read a sparse6 file by giving the path to the file::

        >>> import tempfile
        >>> with tempfile.NamedTemporaryFile(delete=False) as f:
        ...     _ = f.write(b">>sparse6<<:An\\n")
        ...     _ = f.seek(0)
        ...     G = nx.read_sparse6(f.name)
        >>> list(G.edges())
        [(0, 1)]

    You can also read a sparse6 file by giving an open file-like object::

        >>> import tempfile
        >>> with tempfile.NamedTemporaryFile() as f:
        ...     _ = f.write(b">>sparse6<<:An\\n")
        ...     _ = f.seek(0)
        ...     G = nx.read_sparse6(f)
        >>> list(G.edges())
        [(0, 1)]

    See Also
    --------
    read_sparse6, from_sparse6_bytes

    References
    ----------
    .. [1] Sparse6 specification
           <https://users.cecs.anu.edu.au/~bdm/data/formats.html>

    """
    graphs = []
    for raw_line in path:
        stripped = raw_line.strip()
        # Blank lines carry no graph data and are skipped.
        if stripped:
            graphs.append(from_sparse6_bytes(stripped))
    # A single-record file yields a bare graph, not a one-element list.
    return graphs[0] if len(graphs) == 1 else graphs
315
+
316
+
317
@not_implemented_for("directed")
@open_file(1, mode="wb")
def write_sparse6(G, path, nodes=None, header=True):
    """Write graph G to given path in sparse6 format.

    Parameters
    ----------
    G : Graph (undirected)

    path : file or string
       File or filename to write

    nodes: list or iterable
       Nodes are labeled 0...n-1 in the order provided.  If None the ordering
       given by G.nodes() is used.

    header: bool
       If True add '>>sparse6<<' string to head of data

    Raises
    ------
    NetworkXError
        If the graph is directed

    Examples
    --------
    You can write a sparse6 file by giving the path to the file::

        >>> import tempfile
        >>> with tempfile.NamedTemporaryFile(delete=False) as f:
        ...     nx.write_sparse6(nx.path_graph(2), f.name)
        ...     print(f.read())
        b'>>sparse6<<:An\\n'

    You can also write a sparse6 file by giving an open file-like object::

        >>> with tempfile.NamedTemporaryFile() as f:
        ...     nx.write_sparse6(nx.path_graph(2), f)
        ...     _ = f.seek(0)
        ...     print(f.read())
        b'>>sparse6<<:An\\n'

    See Also
    --------
    read_sparse6, from_sparse6_bytes

    Notes
    -----
    The format does not support edge or node labels.

    References
    ----------
    .. [1] Sparse6 specification
           <https://users.cecs.anu.edu.au/~bdm/data/formats.html>

    """
    if nodes is not None:
        G = G.subgraph(nodes)
    # The sparse6 encoder requires consecutive integer labels 0..n-1.
    G = nx.convert_node_labels_to_integers(G, ordering="sorted")
    for chunk in _generate_sparse6_bytes(G, nodes, header):
        path.write(chunk)
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/__init__.py ADDED
File without changes
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-310.pyc ADDED
Binary file (8.28 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-310.pyc ADDED
Binary file (19.6 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-310.pyc ADDED
Binary file (4.81 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_adjlist.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit tests for adjlist.
3
+ """
4
+
5
+ import io
6
+
7
+ import pytest
8
+
9
+ import networkx as nx
10
+ from networkx.utils import edges_equal, graphs_equal, nodes_equal
11
+
12
+
13
class TestAdjlist:
    """Round-trip and parsing tests for the single-line adjacency-list format."""

    @classmethod
    def setup_class(cls):
        # Shared fixtures: an undirected graph with one isolated node plus
        # directed, multigraph and multidigraph variants.
        cls.G = nx.Graph(name="test")
        cls.G.add_edges_from(
            [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")]
        )
        cls.G.add_node("g")
        cls.DG = nx.DiGraph(cls.G)
        cls.XG = nx.MultiGraph()
        cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)])
        cls.XDG = nx.MultiDiGraph(cls.XG)

    def _check_roundtrip(self, G, path, **read_kwargs):
        # Write ``G`` to ``path``, read it back twice, and verify that both
        # reads are distinct graph objects matching the original.
        nx.write_adjlist(G, path)
        first = nx.read_adjlist(path, **read_kwargs)
        second = nx.read_adjlist(path, **read_kwargs)
        assert first is not second  # they should be different graphs
        assert nodes_equal(list(first), list(G))
        assert edges_equal(list(first.edges()), list(G.edges()))

    def test_read_multiline_adjlist_1(self):
        # Regression test for https://networkx.lanl.gov/trac/ticket/252
        payload = b"""# comment line
1 2
# comment line
2
3
"""
        result = nx.read_multiline_adjlist(io.BytesIO(payload))
        adj = {"1": {"3": {}, "2": {}}, "3": {"1": {}}, "2": {"1": {}}}
        assert graphs_equal(result, nx.Graph(adj))

    def test_unicode(self, tmp_path):
        G = nx.Graph()
        node_name = chr(2344) + chr(123) + chr(6543)
        attr_name = chr(5543) + chr(1543) + chr(324)
        G.add_edge(node_name, "Radiohead", **{attr_name: 3})

        target = tmp_path / "adjlist.txt"
        nx.write_multiline_adjlist(G, target)
        assert graphs_equal(G, nx.read_multiline_adjlist(target))

    def test_latin1_err(self, tmp_path):
        # Non-latin1 node/attribute names cannot be encoded as latin-1.
        G = nx.Graph()
        node_name = chr(2344) + chr(123) + chr(6543)
        attr_name = chr(5543) + chr(1543) + chr(324)
        G.add_edge(node_name, "Radiohead", **{attr_name: 3})
        with pytest.raises(UnicodeEncodeError):
            nx.write_multiline_adjlist(G, tmp_path / "adjlist.txt", encoding="latin-1")

    def test_latin1(self, tmp_path):
        G = nx.Graph()
        node_name = "Bj" + chr(246) + "rk"
        attr_name = chr(220) + "ber"
        G.add_edge(node_name, "Radiohead", **{attr_name: 3})
        target = tmp_path / "adjlist.txt"
        nx.write_multiline_adjlist(G, target, encoding="latin-1")
        assert graphs_equal(G, nx.read_multiline_adjlist(target, encoding="latin-1"))

    def test_parse_adjlist(self):
        well_formed = ["1 2 5", "2 3 4", "3 5", "4", "5"]
        nx.parse_adjlist(well_formed, nodetype=int)  # smoke test
        with pytest.raises(TypeError):
            nx.parse_adjlist(well_formed, nodetype="int")
        not_ints = ["1 2 5", "2 b", "c"]
        with pytest.raises(TypeError):
            nx.parse_adjlist(not_ints, nodetype=int)

    def test_adjlist_graph(self, tmp_path):
        self._check_roundtrip(self.G, tmp_path / "adjlist.txt")

    def test_adjlist_digraph(self, tmp_path):
        self._check_roundtrip(
            self.DG, tmp_path / "adjlist.txt", create_using=nx.DiGraph
        )

    def test_adjlist_integers(self, tmp_path):
        relabeled = nx.convert_node_labels_to_integers(self.G)
        self._check_roundtrip(relabeled, tmp_path / "adjlist.txt", nodetype=int)

    def test_adjlist_multigraph(self, tmp_path):
        self._check_roundtrip(
            self.XG, tmp_path / "adjlist.txt", nodetype=int, create_using=nx.MultiGraph
        )

    def test_adjlist_multidigraph(self, tmp_path):
        self._check_roundtrip(
            self.XDG,
            tmp_path / "adjlist.txt",
            nodetype=int,
            create_using=nx.MultiDiGraph,
        )

    def test_adjlist_delimiter(self):
        buffer = io.BytesIO()
        G = nx.path_graph(3)
        nx.write_adjlist(G, buffer, delimiter=":")
        buffer.seek(0)
        readback = nx.read_adjlist(buffer, nodetype=int, delimiter=":")
        assert nodes_equal(list(readback), list(G))
        assert edges_equal(list(readback.edges()), list(G.edges()))
135
+
136
+
137
class TestMultilineAdjlist:
    """Round-trip and parsing tests for the multiline adjacency-list format."""

    @classmethod
    def setup_class(cls):
        # Shared fixtures mirroring TestAdjlist, with two directed edges
        # removed from the DiGraph variant.
        cls.G = nx.Graph(name="test")
        cls.G.add_edges_from(
            [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")]
        )
        cls.G.add_node("g")
        cls.DG = nx.DiGraph(cls.G)
        cls.DG.remove_edge("b", "a")
        cls.DG.remove_edge("b", "c")
        cls.XG = nx.MultiGraph()
        cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)])
        cls.XDG = nx.MultiDiGraph(cls.XG)

    def _check_roundtrip(self, G, path, **read_kwargs):
        # Write ``G`` to ``path``, read it back twice, and verify that both
        # reads are distinct graph objects matching the original.
        nx.write_multiline_adjlist(G, path)
        first = nx.read_multiline_adjlist(path, **read_kwargs)
        second = nx.read_multiline_adjlist(path, **read_kwargs)
        assert first is not second  # they should be different graphs
        assert nodes_equal(list(first), list(G))
        assert edges_equal(list(first.edges()), list(G.edges()))

    def test_parse_multiline_adjlist(self):
        lines = [
            "1 2",
            "b {'weight':3, 'name': 'Frodo'}",
            "c {}",
            "d 1",
            "e {'weight':6, 'name': 'Saruman'}",
        ]
        nx.parse_multiline_adjlist(iter(lines))  # smoke test
        with pytest.raises(TypeError):
            nx.parse_multiline_adjlist(iter(lines), nodetype=int)
        nx.parse_multiline_adjlist(iter(lines), edgetype=str)  # smoke test
        with pytest.raises(TypeError):
            nx.parse_multiline_adjlist(iter(lines), nodetype=int)
        # Assorted malformed inputs must raise TypeError.
        with pytest.raises(TypeError):
            nx.parse_multiline_adjlist(iter(["1 a"]))
        with pytest.raises(TypeError):
            nx.parse_multiline_adjlist(iter(["a 2"]), nodetype=int)
        with pytest.raises(TypeError):
            nx.parse_multiline_adjlist(iter(["1 2"]))
        with pytest.raises(TypeError):
            nx.parse_multiline_adjlist(iter(["1 2", "2 {}"]))

    def test_multiline_adjlist_graph(self, tmp_path):
        self._check_roundtrip(self.G, tmp_path / "adjlist.txt")

    def test_multiline_adjlist_digraph(self, tmp_path):
        self._check_roundtrip(
            self.DG, tmp_path / "adjlist.txt", create_using=nx.DiGraph
        )

    def test_multiline_adjlist_integers(self, tmp_path):
        relabeled = nx.convert_node_labels_to_integers(self.G)
        self._check_roundtrip(relabeled, tmp_path / "adjlist.txt", nodetype=int)

    def test_multiline_adjlist_multigraph(self, tmp_path):
        self._check_roundtrip(
            self.XG, tmp_path / "adjlist.txt", nodetype=int, create_using=nx.MultiGraph
        )

    def test_multiline_adjlist_multidigraph(self, tmp_path):
        self._check_roundtrip(
            self.XDG,
            tmp_path / "adjlist.txt",
            nodetype=int,
            create_using=nx.MultiDiGraph,
        )

    def test_multiline_adjlist_delimiter(self):
        buffer = io.BytesIO()
        G = nx.path_graph(3)
        nx.write_multiline_adjlist(G, buffer, delimiter=":")
        buffer.seek(0)
        readback = nx.read_multiline_adjlist(buffer, nodetype=int, delimiter=":")
        assert nodes_equal(list(readback), list(G))
        assert edges_equal(list(readback.edges()), list(G.edges()))
242
+
243
+
244
@pytest.mark.parametrize(
    ("lines", "delim"),
    (
        (["1 2 5", "2 3 4", "3 5", "4", "5"], None),  # No extra whitespace
        (["1\t2\t5", "2\t3\t4", "3\t5", "4", "5"], "\t"),  # tab-delimited
        (
            ["1\t2\t5", "2\t3\t4", "3\t5\t", "4\t", "5"],
            "\t",
        ),  # tab-delimited, extra delims
        (
            ["1\t2\t5", "2\t3\t4", "3\t5\t\t\n", "4\t", "5"],
            "\t",
        ),  # extra delim+newlines
    ),
)
def test_adjlist_rstrip_parsing(lines, delim):
    """Regression test related to gh-7465.

    Trailing delimiters and newlines must not produce spurious nodes.
    """
    expected = nx.Graph([(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)])
    # BUG FIX: the original computed ``graphs_equal`` but never asserted it,
    # so the test could not fail; it also parsed without ``nodetype=int``,
    # producing string node labels that would never equal the integer-labeled
    # ``expected`` graph.
    G = nx.parse_adjlist(lines, delimiter=delim, nodetype=int)
    assert nx.utils.graphs_equal(G, expected)
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_edgelist.py ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit tests for edgelists.
3
+ """
4
+
5
+ import io
6
+ import textwrap
7
+
8
+ import pytest
9
+
10
+ import networkx as nx
11
+ from networkx.utils import edges_equal, graphs_equal, nodes_equal
12
+
13
# Raw edgelist fixtures shared by the read/parse tests below.  Each fixture
# serializes the same two-edge graph (1-2, 2-3) with a different flavor of
# edge data.  Comment lines inside the payloads exercise comment stripping.

# Bare node pairs with no edge data.
edges_no_data = textwrap.dedent(
    """
    # comment line
    1 2
    # comment line
    2 3
    """
)


# One numeric value per edge (read as 'weight' by read_weighted_edgelist).
edges_with_values = textwrap.dedent(
    """
    # comment line
    1 2 2.0
    # comment line
    2 3 3.0
    """
)


# An explicit attribute dict holding only 'weight'.
edges_with_weight = textwrap.dedent(
    """
    # comment line
    1 2 {'weight':2.0}
    # comment line
    2 3 {'weight':3.0}
    """
)


# An attribute dict with multiple keys.
edges_with_multiple_attrs = textwrap.dedent(
    """
    # comment line
    1 2 {'weight':2.0, 'color':'green'}
    # comment line
    2 3 {'weight':3.0, 'color':'red'}
    """
)


# Same as edges_with_multiple_attrs but comma-delimited.
edges_with_multiple_attrs_csv = textwrap.dedent(
    """
    # comment line
    1, 2, {'weight':2.0, 'color':'green'}
    # comment line
    2, 3, {'weight':3.0, 'color':'red'}
    """
)


# Expected (u, v, data) triples produced by parsing the fixtures above.
_expected_edges_weights = [(1, 2, {"weight": 2.0}), (2, 3, {"weight": 3.0})]
_expected_edges_multiattr = [
    (1, 2, {"weight": 2.0, "color": "green"}),
    (2, 3, {"weight": 3.0, "color": "red"}),
]
68
+
69
+
70
@pytest.mark.parametrize(
    ("data", "extra_kwargs"),
    (
        (edges_no_data, {}),
        (edges_with_values, {}),
        (edges_with_weight, {}),
        (edges_with_multiple_attrs, {}),
        (edges_with_multiple_attrs_csv, {"delimiter": ","}),
    ),
)
def test_read_edgelist_no_data(data, extra_kwargs):
    """With ``data=False`` every fixture reduces to the same bare edge set."""
    stream = io.BytesIO(data.encode("utf-8"))
    result = nx.read_edgelist(stream, nodetype=int, data=False, **extra_kwargs)
    assert edges_equal(result.edges(), [(1, 2), (2, 3)])
84
+
85
+
86
def test_read_weighted_edgelist():
    """The third column is interpreted as the 'weight' attribute."""
    stream = io.BytesIO(edges_with_values.encode("utf-8"))
    result = nx.read_weighted_edgelist(stream, nodetype=int)
    assert edges_equal(result.edges(data=True), _expected_edges_weights)
90
+
91
+
92
@pytest.mark.parametrize(
    ("data", "extra_kwargs", "expected"),
    (
        (edges_with_weight, {}, _expected_edges_weights),
        (edges_with_multiple_attrs, {}, _expected_edges_multiattr),
        (edges_with_multiple_attrs_csv, {"delimiter": ","}, _expected_edges_multiattr),
    ),
)
def test_read_edgelist_with_data(data, extra_kwargs, expected):
    """Attribute dicts embedded in the edgelist are parsed into edge data."""
    stream = io.BytesIO(data.encode("utf-8"))
    result = nx.read_edgelist(stream, nodetype=int, **extra_kwargs)
    assert edges_equal(result.edges(data=True), expected)
104
+
105
+
106
@pytest.fixture
def example_graph():
    """Small weighted path graph shared by the parse_edgelist tests."""
    graph = nx.Graph()
    graph.add_weighted_edges_from([(1, 2, 3.0), (2, 3, 27.0), (3, 4, 3.0)])
    return graph
111
+
112
+
113
def test_parse_edgelist_no_data(example_graph):
    """Parsing bare node pairs reproduces the fixture's topology."""
    expected = example_graph
    parsed = nx.parse_edgelist(["1 2", "2 3", "3 4"], nodetype=int)
    assert nodes_equal(expected.nodes, parsed.nodes)
    assert edges_equal(expected.edges, parsed.edges)
118
+
119
+
120
def test_parse_edgelist_with_data_dict(example_graph):
    """Edge data given as Python-dict literals is parsed into attributes."""
    expected = example_graph
    parsed = nx.parse_edgelist(
        ["1 2 {'weight': 3}", "2 3 {'weight': 27}", "3 4 {'weight': 3.0}"], nodetype=int
    )
    assert nodes_equal(expected.nodes, parsed.nodes)
    assert edges_equal(expected.edges(data=True), parsed.edges(data=True))
127
+
128
+
129
def test_parse_edgelist_with_data_list(example_graph):
    """Edge data declared via the ``data`` key/type list is parsed correctly."""
    expected = example_graph
    parsed = nx.parse_edgelist(
        ["1 2 3", "2 3 27", "3 4 3.0"], nodetype=int, data=(("weight", float),)
    )
    assert nodes_equal(expected.nodes, parsed.nodes)
    assert edges_equal(expected.edges(data=True), parsed.edges(data=True))
136
+
137
+
138
def test_parse_edgelist():
    """Malformed-input handling in parse_edgelist."""
    # Lines with fewer than two fields are silently skipped.
    parsed = nx.parse_edgelist(["1;2", "2 3", "3 4"], nodetype=int)
    assert list(parsed.edges()) == [(2, 3), (3, 4)]
    # A non-callable nodetype raises.
    with pytest.raises(TypeError, match="Failed to convert nodes"):
        nx.parse_edgelist(["1 2", "2 3", "3 4"], nodetype="nope")
    # Inconsistent numbers of data columns raise.
    with pytest.raises(TypeError, match="Failed to convert edge data"):
        nx.parse_edgelist(["1 2 3", "2 3", "3 4"], nodetype=int)
    # More declared data keys than data columns raise.
    with pytest.raises(IndexError, match="not the same length"):
        nx.parse_edgelist(
            ["1 2 3", "2 3 27", "3 4 3.0"],
            nodetype=int,
            data=(("weight", float), ("capacity", int)),
        )
    # Edge data that cannot be coerced to the declared type raises.
    with pytest.raises(TypeError, match="Failed to convert"):
        nx.parse_edgelist(
            ["1 2 't1'", "2 3 't3'", "3 4 't3'"],
            nodetype=int,
            data=(("weight", float),),
        )
161
+
162
+
163
def test_comments_None():
    """``comments=None`` disables comment stripping entirely."""
    edgelist = ["node#1 node#2", "node#2 node#3"]
    parsed = nx.parse_edgelist(edgelist, comments=None)
    # '#' must survive inside node names when comment handling is off.
    expected = nx.Graph(line.split(" ") for line in edgelist)
    assert edges_equal(parsed.edges, expected.edges)
169
+
170
+
171
class TestEdgelist:
    """Round-trip and serialization tests for the edgelist format."""

    @classmethod
    def setup_class(cls):
        # Shared fixtures: an undirected graph with one isolated node plus
        # directed, multigraph and multidigraph variants.
        cls.G = nx.Graph(name="test")
        cls.G.add_edges_from(
            [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")]
        )
        cls.G.add_node("g")
        cls.DG = nx.DiGraph(cls.G)
        cls.XG = nx.MultiGraph()
        cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)])
        cls.XDG = nx.MultiDiGraph(cls.XG)

    def _check_write(self, G, expected, **write_kwargs):
        # Serialize ``G`` and compare the raw bytes against ``expected``.
        stream = io.BytesIO()
        nx.write_edgelist(G, stream, **write_kwargs)
        stream.seek(0)
        assert stream.read() == expected

    def test_write_edgelist_1(self):
        G = nx.Graph([(1, 2), (2, 3)])
        self._check_write(G, b"1 2\n2 3\n", data=False)

    def test_write_edgelist_2(self):
        G = nx.Graph([(1, 2), (2, 3)])
        self._check_write(G, b"1 2 {}\n2 3 {}\n", data=True)

    def test_write_edgelist_3(self):
        G = nx.Graph()
        G.add_edge(1, 2, weight=2.0)
        G.add_edge(2, 3, weight=3.0)
        self._check_write(G, b"1 2 {'weight': 2.0}\n2 3 {'weight': 3.0}\n", data=True)

    def test_write_edgelist_4(self):
        G = nx.Graph()
        G.add_edge(1, 2, weight=2.0)
        G.add_edge(2, 3, weight=3.0)
        self._check_write(G, b"1 2 2.0\n2 3 3.0\n", data=["weight"])

    def test_unicode(self, tmp_path):
        G = nx.Graph()
        node_name = chr(2344) + chr(123) + chr(6543)
        attr_name = chr(5543) + chr(1543) + chr(324)
        G.add_edge(node_name, "Radiohead", **{attr_name: 3})
        target = tmp_path / "el.txt"
        nx.write_edgelist(G, target)
        assert graphs_equal(G, nx.read_edgelist(target))

    def test_latin1_issue(self, tmp_path):
        # Non-latin1 node/attribute names cannot be encoded as latin-1.
        G = nx.Graph()
        node_name = chr(2344) + chr(123) + chr(6543)
        attr_name = chr(5543) + chr(1543) + chr(324)
        G.add_edge(node_name, "Radiohead", **{attr_name: 3})
        with pytest.raises(UnicodeEncodeError):
            nx.write_edgelist(G, tmp_path / "el.txt", encoding="latin-1")

    def test_latin1(self, tmp_path):
        G = nx.Graph()
        node_name = "Bj" + chr(246) + "rk"
        attr_name = chr(220) + "ber"
        G.add_edge(node_name, "Radiohead", **{attr_name: 3})
        target = tmp_path / "el.txt"

        nx.write_edgelist(G, target, encoding="latin-1")
        assert graphs_equal(G, nx.read_edgelist(target, encoding="latin-1"))

    def test_edgelist_graph(self, tmp_path):
        G = self.G
        target = tmp_path / "el.txt"
        nx.write_edgelist(G, target)
        first = nx.read_edgelist(target)
        second = nx.read_edgelist(target)
        assert first is not second  # they should be different graphs
        G.remove_node("g")  # isolated nodes are not written in edgelist
        assert nodes_equal(list(first), list(G))
        assert edges_equal(list(first.edges()), list(G.edges()))

    def test_edgelist_digraph(self, tmp_path):
        G = self.DG
        target = tmp_path / "el.txt"
        nx.write_edgelist(G, target)
        first = nx.read_edgelist(target, create_using=nx.DiGraph)
        second = nx.read_edgelist(target, create_using=nx.DiGraph)
        assert first is not second  # they should be different graphs
        G.remove_node("g")  # isolated nodes are not written in edgelist
        assert nodes_equal(list(first), list(G))
        assert edges_equal(list(first.edges()), list(G.edges()))

    def test_edgelist_integers(self, tmp_path):
        G = nx.convert_node_labels_to_integers(self.G)
        target = tmp_path / "el.txt"
        nx.write_edgelist(G, target)
        readback = nx.read_edgelist(target, nodetype=int)
        # isolated nodes are not written in edgelist
        G.remove_nodes_from(list(nx.isolates(G)))
        assert nodes_equal(list(readback), list(G))
        assert edges_equal(list(readback.edges()), list(G.edges()))

    def test_edgelist_multigraph(self, tmp_path):
        G = self.XG
        target = tmp_path / "el.txt"
        nx.write_edgelist(G, target)
        first = nx.read_edgelist(target, nodetype=int, create_using=nx.MultiGraph)
        second = nx.read_edgelist(target, nodetype=int, create_using=nx.MultiGraph)
        assert first is not second  # they should be different graphs
        assert nodes_equal(list(first), list(G))
        assert edges_equal(list(first.edges()), list(G.edges()))

    def test_edgelist_multidigraph(self, tmp_path):
        G = self.XDG
        target = tmp_path / "el.txt"
        nx.write_edgelist(G, target)
        first = nx.read_edgelist(target, nodetype=int, create_using=nx.MultiDiGraph)
        second = nx.read_edgelist(target, nodetype=int, create_using=nx.MultiDiGraph)
        assert first is not second  # they should be different graphs
        assert nodes_equal(list(first), list(G))
        assert edges_equal(list(first.edges()), list(G.edges()))
298
+
299
+
300
def test_edgelist_consistent_strip_handling():
    """See gh-7462.

    Input when printed looks like::

        1 2 3
        2 3
        3 4 3.0

    Note the trailing \\t after the `3` in the second row, indicating an empty
    data value.
    """
    payload = io.StringIO("1\t2\t3\n2\t3\t\n3\t4\t3.0")
    parsed = nx.parse_edgelist(
        payload, delimiter="\t", nodetype=int, data=[("value", str)]
    )
    # The empty trailing field must survive as an empty-string attribute.
    assert sorted(parsed.edges(data="value")) == [
        (1, 2, "3"),
        (2, 3, ""),
        (3, 4, "3.0"),
    ]
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_gexf.py ADDED
@@ -0,0 +1,557 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import time
3
+
4
+ import pytest
5
+
6
+ import networkx as nx
7
+
8
+
9
+ class TestGEXF:
10
+ @classmethod
11
+ def setup_class(cls):
12
+ cls.simple_directed_data = """<?xml version="1.0" encoding="UTF-8"?>
13
+ <gexf xmlns="http://www.gexf.net/1.2draft" version="1.2">
14
+ <graph mode="static" defaultedgetype="directed">
15
+ <nodes>
16
+ <node id="0" label="Hello" />
17
+ <node id="1" label="Word" />
18
+ </nodes>
19
+ <edges>
20
+ <edge id="0" source="0" target="1" />
21
+ </edges>
22
+ </graph>
23
+ </gexf>
24
+ """
25
+ cls.simple_directed_graph = nx.DiGraph()
26
+ cls.simple_directed_graph.add_node("0", label="Hello")
27
+ cls.simple_directed_graph.add_node("1", label="World")
28
+ cls.simple_directed_graph.add_edge("0", "1", id="0")
29
+
30
+ cls.simple_directed_fh = io.BytesIO(cls.simple_directed_data.encode("UTF-8"))
31
+
32
+ cls.attribute_data = """<?xml version="1.0" encoding="UTF-8"?>\
33
+ <gexf xmlns="http://www.gexf.net/1.2draft" xmlns:xsi="http://www.w3.\
34
+ org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.gexf.net/\
35
+ 1.2draft http://www.gexf.net/1.2draft/gexf.xsd" version="1.2">
36
+ <meta lastmodifieddate="2009-03-20">
37
+ <creator>Gephi.org</creator>
38
+ <description>A Web network</description>
39
+ </meta>
40
+ <graph defaultedgetype="directed">
41
+ <attributes class="node">
42
+ <attribute id="0" title="url" type="string"/>
43
+ <attribute id="1" title="indegree" type="integer"/>
44
+ <attribute id="2" title="frog" type="boolean">
45
+ <default>true</default>
46
+ </attribute>
47
+ </attributes>
48
+ <nodes>
49
+ <node id="0" label="Gephi">
50
+ <attvalues>
51
+ <attvalue for="0" value="https://gephi.org"/>
52
+ <attvalue for="1" value="1"/>
53
+ <attvalue for="2" value="false"/>
54
+ </attvalues>
55
+ </node>
56
+ <node id="1" label="Webatlas">
57
+ <attvalues>
58
+ <attvalue for="0" value="http://webatlas.fr"/>
59
+ <attvalue for="1" value="2"/>
60
+ <attvalue for="2" value="false"/>
61
+ </attvalues>
62
+ </node>
63
+ <node id="2" label="RTGI">
64
+ <attvalues>
65
+ <attvalue for="0" value="http://rtgi.fr"/>
66
+ <attvalue for="1" value="1"/>
67
+ <attvalue for="2" value="true"/>
68
+ </attvalues>
69
+ </node>
70
+ <node id="3" label="BarabasiLab">
71
+ <attvalues>
72
+ <attvalue for="0" value="http://barabasilab.com"/>
73
+ <attvalue for="1" value="1"/>
74
+ <attvalue for="2" value="true"/>
75
+ </attvalues>
76
+ </node>
77
+ </nodes>
78
+ <edges>
79
+ <edge id="0" source="0" target="1" label="foo"/>
80
+ <edge id="1" source="0" target="2"/>
81
+ <edge id="2" source="1" target="0"/>
82
+ <edge id="3" source="2" target="1"/>
83
+ <edge id="4" source="0" target="3"/>
84
+ </edges>
85
+ </graph>
86
+ </gexf>
87
+ """
88
+ cls.attribute_graph = nx.DiGraph()
89
+ cls.attribute_graph.graph["node_default"] = {"frog": True}
90
+ cls.attribute_graph.add_node(
91
+ "0", label="Gephi", url="https://gephi.org", indegree=1, frog=False
92
+ )
93
+ cls.attribute_graph.add_node(
94
+ "1", label="Webatlas", url="http://webatlas.fr", indegree=2, frog=False
95
+ )
96
+ cls.attribute_graph.add_node(
97
+ "2", label="RTGI", url="http://rtgi.fr", indegree=1, frog=True
98
+ )
99
+ cls.attribute_graph.add_node(
100
+ "3",
101
+ label="BarabasiLab",
102
+ url="http://barabasilab.com",
103
+ indegree=1,
104
+ frog=True,
105
+ )
106
+ cls.attribute_graph.add_edge("0", "1", id="0", label="foo")
107
+ cls.attribute_graph.add_edge("0", "2", id="1")
108
+ cls.attribute_graph.add_edge("1", "0", id="2")
109
+ cls.attribute_graph.add_edge("2", "1", id="3")
110
+ cls.attribute_graph.add_edge("0", "3", id="4")
111
+ cls.attribute_fh = io.BytesIO(cls.attribute_data.encode("UTF-8"))
112
+
113
+ cls.simple_undirected_data = """<?xml version="1.0" encoding="UTF-8"?>
114
+ <gexf xmlns="http://www.gexf.net/1.2draft" version="1.2">
115
+ <graph mode="static" defaultedgetype="undirected">
116
+ <nodes>
117
+ <node id="0" label="Hello" />
118
+ <node id="1" label="Word" />
119
+ </nodes>
120
+ <edges>
121
+ <edge id="0" source="0" target="1" />
122
+ </edges>
123
+ </graph>
124
+ </gexf>
125
+ """
126
+ cls.simple_undirected_graph = nx.Graph()
127
+ cls.simple_undirected_graph.add_node("0", label="Hello")
128
+ cls.simple_undirected_graph.add_node("1", label="World")
129
+ cls.simple_undirected_graph.add_edge("0", "1", id="0")
130
+
131
+ cls.simple_undirected_fh = io.BytesIO(
132
+ cls.simple_undirected_data.encode("UTF-8")
133
+ )
134
+
135
+ def test_read_simple_directed_graphml(self):
136
+ G = self.simple_directed_graph
137
+ H = nx.read_gexf(self.simple_directed_fh)
138
+ assert sorted(G.nodes()) == sorted(H.nodes())
139
+ assert sorted(G.edges()) == sorted(H.edges())
140
+ assert sorted(G.edges(data=True)) == sorted(H.edges(data=True))
141
+ self.simple_directed_fh.seek(0)
142
+
143
+ def test_write_read_simple_directed_graphml(self):
144
+ G = self.simple_directed_graph
145
+ fh = io.BytesIO()
146
+ nx.write_gexf(G, fh)
147
+ fh.seek(0)
148
+ H = nx.read_gexf(fh)
149
+ assert sorted(G.nodes()) == sorted(H.nodes())
150
+ assert sorted(G.edges()) == sorted(H.edges())
151
+ assert sorted(G.edges(data=True)) == sorted(H.edges(data=True))
152
+ self.simple_directed_fh.seek(0)
153
+
154
+ def test_read_simple_undirected_graphml(self):
155
+ G = self.simple_undirected_graph
156
+ H = nx.read_gexf(self.simple_undirected_fh)
157
+ assert sorted(G.nodes()) == sorted(H.nodes())
158
+ assert sorted(sorted(e) for e in G.edges()) == sorted(
159
+ sorted(e) for e in H.edges()
160
+ )
161
+ self.simple_undirected_fh.seek(0)
162
+
163
+ def test_read_attribute_graphml(self):
164
+ G = self.attribute_graph
165
+ H = nx.read_gexf(self.attribute_fh)
166
+ assert sorted(G.nodes(True)) == sorted(H.nodes(data=True))
167
+ ge = sorted(G.edges(data=True))
168
+ he = sorted(H.edges(data=True))
169
+ for a, b in zip(ge, he):
170
+ assert a == b
171
+ self.attribute_fh.seek(0)
172
+
173
+ def test_directed_edge_in_undirected(self):
174
+ s = """<?xml version="1.0" encoding="UTF-8"?>
175
+ <gexf xmlns="http://www.gexf.net/1.2draft" version='1.2'>
176
+ <graph mode="static" defaultedgetype="undirected" name="">
177
+ <nodes>
178
+ <node id="0" label="Hello" />
179
+ <node id="1" label="Word" />
180
+ </nodes>
181
+ <edges>
182
+ <edge id="0" source="0" target="1" type="directed"/>
183
+ </edges>
184
+ </graph>
185
+ </gexf>
186
+ """
187
+ fh = io.BytesIO(s.encode("UTF-8"))
188
+ pytest.raises(nx.NetworkXError, nx.read_gexf, fh)
189
+
190
+ def test_undirected_edge_in_directed(self):
191
+ s = """<?xml version="1.0" encoding="UTF-8"?>
192
+ <gexf xmlns="http://www.gexf.net/1.2draft" version='1.2'>
193
+ <graph mode="static" defaultedgetype="directed" name="">
194
+ <nodes>
195
+ <node id="0" label="Hello" />
196
+ <node id="1" label="Word" />
197
+ </nodes>
198
+ <edges>
199
+ <edge id="0" source="0" target="1" type="undirected"/>
200
+ </edges>
201
+ </graph>
202
+ </gexf>
203
+ """
204
+ fh = io.BytesIO(s.encode("UTF-8"))
205
+ pytest.raises(nx.NetworkXError, nx.read_gexf, fh)
206
+
207
+ def test_key_raises(self):
208
+ s = """<?xml version="1.0" encoding="UTF-8"?>
209
+ <gexf xmlns="http://www.gexf.net/1.2draft" version='1.2'>
210
+ <graph mode="static" defaultedgetype="directed" name="">
211
+ <nodes>
212
+ <node id="0" label="Hello">
213
+ <attvalues>
214
+ <attvalue for='0' value='1'/>
215
+ </attvalues>
216
+ </node>
217
+ <node id="1" label="Word" />
218
+ </nodes>
219
+ <edges>
220
+ <edge id="0" source="0" target="1" type="undirected"/>
221
+ </edges>
222
+ </graph>
223
+ </gexf>
224
+ """
225
+ fh = io.BytesIO(s.encode("UTF-8"))
226
+ pytest.raises(nx.NetworkXError, nx.read_gexf, fh)
227
+
228
+ def test_relabel(self):
229
+ s = """<?xml version="1.0" encoding="UTF-8"?>
230
+ <gexf xmlns="http://www.gexf.net/1.2draft" version='1.2'>
231
+ <graph mode="static" defaultedgetype="directed" name="">
232
+ <nodes>
233
+ <node id="0" label="Hello" />
234
+ <node id="1" label="Word" />
235
+ </nodes>
236
+ <edges>
237
+ <edge id="0" source="0" target="1"/>
238
+ </edges>
239
+ </graph>
240
+ </gexf>
241
+ """
242
+ fh = io.BytesIO(s.encode("UTF-8"))
243
+ G = nx.read_gexf(fh, relabel=True)
244
+ assert sorted(G.nodes()) == ["Hello", "Word"]
245
+
246
+ def test_default_attribute(self):
247
+ G = nx.Graph()
248
+ G.add_node(1, label="1", color="green")
249
+ nx.add_path(G, [0, 1, 2, 3])
250
+ G.add_edge(1, 2, foo=3)
251
+ G.graph["node_default"] = {"color": "yellow"}
252
+ G.graph["edge_default"] = {"foo": 7}
253
+ fh = io.BytesIO()
254
+ nx.write_gexf(G, fh)
255
+ fh.seek(0)
256
+ H = nx.read_gexf(fh, node_type=int)
257
+ assert sorted(G.nodes()) == sorted(H.nodes())
258
+ assert sorted(sorted(e) for e in G.edges()) == sorted(
259
+ sorted(e) for e in H.edges()
260
+ )
261
+ # Reading a gexf graph always sets mode attribute to either
262
+ # 'static' or 'dynamic'. Remove the mode attribute from the
263
+ # read graph for the sake of comparing remaining attributes.
264
+ del H.graph["mode"]
265
+ assert G.graph == H.graph
266
+
267
+ def test_serialize_ints_to_strings(self):
268
+ G = nx.Graph()
269
+ G.add_node(1, id=7, label=77)
270
+ fh = io.BytesIO()
271
+ nx.write_gexf(G, fh)
272
+ fh.seek(0)
273
+ H = nx.read_gexf(fh, node_type=int)
274
+ assert list(H) == [7]
275
+ assert H.nodes[7]["label"] == "77"
276
+
277
+ def test_write_with_node_attributes(self):
278
+ # Addresses #673.
279
+ G = nx.Graph()
280
+ G.add_edges_from([(0, 1), (1, 2), (2, 3)])
281
+ for i in range(4):
282
+ G.nodes[i]["id"] = i
283
+ G.nodes[i]["label"] = i
284
+ G.nodes[i]["pid"] = i
285
+ G.nodes[i]["start"] = i
286
+ G.nodes[i]["end"] = i + 1
287
+
288
+ expected = f"""<gexf xmlns="http://www.gexf.net/1.2draft" xmlns:xsi\
289
+ ="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation=\
290
+ "http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/\
291
+ gexf.xsd" version="1.2">
292
+ <meta lastmodifieddate="{time.strftime('%Y-%m-%d')}">
293
+ <creator>NetworkX {nx.__version__}</creator>
294
+ </meta>
295
+ <graph defaultedgetype="undirected" mode="dynamic" name="" timeformat="long">
296
+ <nodes>
297
+ <node id="0" label="0" pid="0" start="0" end="1" />
298
+ <node id="1" label="1" pid="1" start="1" end="2" />
299
+ <node id="2" label="2" pid="2" start="2" end="3" />
300
+ <node id="3" label="3" pid="3" start="3" end="4" />
301
+ </nodes>
302
+ <edges>
303
+ <edge source="0" target="1" id="0" />
304
+ <edge source="1" target="2" id="1" />
305
+ <edge source="2" target="3" id="2" />
306
+ </edges>
307
+ </graph>
308
+ </gexf>"""
309
+ obtained = "\n".join(nx.generate_gexf(G))
310
+ assert expected == obtained
311
+
312
+ def test_edge_id_construct(self):
313
+ G = nx.Graph()
314
+ G.add_edges_from([(0, 1, {"id": 0}), (1, 2, {"id": 2}), (2, 3)])
315
+
316
+ expected = f"""<gexf xmlns="http://www.gexf.net/1.2draft" xmlns:xsi\
317
+ ="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.\
318
+ gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd" version="1.2">
319
+ <meta lastmodifieddate="{time.strftime('%Y-%m-%d')}">
320
+ <creator>NetworkX {nx.__version__}</creator>
321
+ </meta>
322
+ <graph defaultedgetype="undirected" mode="static" name="">
323
+ <nodes>
324
+ <node id="0" label="0" />
325
+ <node id="1" label="1" />
326
+ <node id="2" label="2" />
327
+ <node id="3" label="3" />
328
+ </nodes>
329
+ <edges>
330
+ <edge source="0" target="1" id="0" />
331
+ <edge source="1" target="2" id="2" />
332
+ <edge source="2" target="3" id="1" />
333
+ </edges>
334
+ </graph>
335
+ </gexf>"""
336
+
337
+ obtained = "\n".join(nx.generate_gexf(G))
338
+ assert expected == obtained
339
+
340
+ def test_numpy_type(self):
341
+ np = pytest.importorskip("numpy")
342
+ G = nx.path_graph(4)
343
+ nx.set_node_attributes(G, {n: n for n in np.arange(4)}, "number")
344
+ G[0][1]["edge-number"] = np.float64(1.1)
345
+
346
+ expected = f"""<gexf xmlns="http://www.gexf.net/1.2draft"\
347
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation\
348
+ ="http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd"\
349
+ version="1.2">
350
+ <meta lastmodifieddate="{time.strftime('%Y-%m-%d')}">
351
+ <creator>NetworkX {nx.__version__}</creator>
352
+ </meta>
353
+ <graph defaultedgetype="undirected" mode="static" name="">
354
+ <attributes mode="static" class="edge">
355
+ <attribute id="1" title="edge-number" type="float" />
356
+ </attributes>
357
+ <attributes mode="static" class="node">
358
+ <attribute id="0" title="number" type="int" />
359
+ </attributes>
360
+ <nodes>
361
+ <node id="0" label="0">
362
+ <attvalues>
363
+ <attvalue for="0" value="0" />
364
+ </attvalues>
365
+ </node>
366
+ <node id="1" label="1">
367
+ <attvalues>
368
+ <attvalue for="0" value="1" />
369
+ </attvalues>
370
+ </node>
371
+ <node id="2" label="2">
372
+ <attvalues>
373
+ <attvalue for="0" value="2" />
374
+ </attvalues>
375
+ </node>
376
+ <node id="3" label="3">
377
+ <attvalues>
378
+ <attvalue for="0" value="3" />
379
+ </attvalues>
380
+ </node>
381
+ </nodes>
382
+ <edges>
383
+ <edge source="0" target="1" id="0">
384
+ <attvalues>
385
+ <attvalue for="1" value="1.1" />
386
+ </attvalues>
387
+ </edge>
388
+ <edge source="1" target="2" id="1" />
389
+ <edge source="2" target="3" id="2" />
390
+ </edges>
391
+ </graph>
392
+ </gexf>"""
393
+ obtained = "\n".join(nx.generate_gexf(G))
394
+ assert expected == obtained
395
+
396
+ def test_bool(self):
397
+ G = nx.Graph()
398
+ G.add_node(1, testattr=True)
399
+ fh = io.BytesIO()
400
+ nx.write_gexf(G, fh)
401
+ fh.seek(0)
402
+ H = nx.read_gexf(fh, node_type=int)
403
+ assert H.nodes[1]["testattr"]
404
+
405
+ # Test for NaN, INF and -INF
406
+ def test_specials(self):
407
+ from math import isnan
408
+
409
+ inf, nan = float("inf"), float("nan")
410
+ G = nx.Graph()
411
+ G.add_node(1, testattr=inf, strdata="inf", key="a")
412
+ G.add_node(2, testattr=nan, strdata="nan", key="b")
413
+ G.add_node(3, testattr=-inf, strdata="-inf", key="c")
414
+
415
+ fh = io.BytesIO()
416
+ nx.write_gexf(G, fh)
417
+ fh.seek(0)
418
+ filetext = fh.read()
419
+ fh.seek(0)
420
+ H = nx.read_gexf(fh, node_type=int)
421
+
422
+ assert b"INF" in filetext
423
+ assert b"NaN" in filetext
424
+ assert b"-INF" in filetext
425
+
426
+ assert H.nodes[1]["testattr"] == inf
427
+ assert isnan(H.nodes[2]["testattr"])
428
+ assert H.nodes[3]["testattr"] == -inf
429
+
430
+ assert H.nodes[1]["strdata"] == "inf"
431
+ assert H.nodes[2]["strdata"] == "nan"
432
+ assert H.nodes[3]["strdata"] == "-inf"
433
+
434
+ assert H.nodes[1]["networkx_key"] == "a"
435
+ assert H.nodes[2]["networkx_key"] == "b"
436
+ assert H.nodes[3]["networkx_key"] == "c"
437
+
438
+ def test_simple_list(self):
439
+ G = nx.Graph()
440
+ list_value = [(1, 2, 3), (9, 1, 2)]
441
+ G.add_node(1, key=list_value)
442
+ fh = io.BytesIO()
443
+ nx.write_gexf(G, fh)
444
+ fh.seek(0)
445
+ H = nx.read_gexf(fh, node_type=int)
446
+ assert H.nodes[1]["networkx_key"] == list_value
447
+
448
+ def test_dynamic_mode(self):
449
+ G = nx.Graph()
450
+ G.add_node(1, label="1", color="green")
451
+ G.graph["mode"] = "dynamic"
452
+ fh = io.BytesIO()
453
+ nx.write_gexf(G, fh)
454
+ fh.seek(0)
455
+ H = nx.read_gexf(fh, node_type=int)
456
+ assert sorted(G.nodes()) == sorted(H.nodes())
457
+ assert sorted(sorted(e) for e in G.edges()) == sorted(
458
+ sorted(e) for e in H.edges()
459
+ )
460
+
461
+ def test_multigraph_with_missing_attributes(self):
462
+ G = nx.MultiGraph()
463
+ G.add_node(0, label="1", color="green")
464
+ G.add_node(1, label="2", color="green")
465
+ G.add_edge(0, 1, id="0", weight=3, type="undirected", start=0, end=1)
466
+ G.add_edge(0, 1, id="1", label="foo", start=0, end=1)
467
+ G.add_edge(0, 1)
468
+ fh = io.BytesIO()
469
+ nx.write_gexf(G, fh)
470
+ fh.seek(0)
471
+ H = nx.read_gexf(fh, node_type=int)
472
+ assert sorted(G.nodes()) == sorted(H.nodes())
473
+ assert sorted(sorted(e) for e in G.edges()) == sorted(
474
+ sorted(e) for e in H.edges()
475
+ )
476
+
477
+ def test_missing_viz_attributes(self):
478
+ G = nx.Graph()
479
+ G.add_node(0, label="1", color="green")
480
+ G.nodes[0]["viz"] = {"size": 54}
481
+ G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1, "z": 0}
482
+ G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256}
483
+ G.nodes[0]["viz"]["shape"] = "http://random.url"
484
+ G.nodes[0]["viz"]["thickness"] = 2
485
+ fh = io.BytesIO()
486
+ nx.write_gexf(G, fh, version="1.1draft")
487
+ fh.seek(0)
488
+ H = nx.read_gexf(fh, node_type=int)
489
+ assert sorted(G.nodes()) == sorted(H.nodes())
490
+ assert sorted(sorted(e) for e in G.edges()) == sorted(
491
+ sorted(e) for e in H.edges()
492
+ )
493
+
494
+ # Test missing alpha value for version >draft1.1 - set default alpha value
495
+ # to 1.0 instead of `None` when writing for better general compatibility
496
+ fh = io.BytesIO()
497
+ # G.nodes[0]["viz"]["color"] does not have an alpha value explicitly defined
498
+ # so the default is used instead
499
+ nx.write_gexf(G, fh, version="1.2draft")
500
+ fh.seek(0)
501
+ H = nx.read_gexf(fh, node_type=int)
502
+ assert H.nodes[0]["viz"]["color"]["a"] == 1.0
503
+
504
+ # Second graph for the other branch
505
+ G = nx.Graph()
506
+ G.add_node(0, label="1", color="green")
507
+ G.nodes[0]["viz"] = {"size": 54}
508
+ G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1, "z": 0}
509
+ G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256, "a": 0.5}
510
+ G.nodes[0]["viz"]["shape"] = "ftp://random.url"
511
+ G.nodes[0]["viz"]["thickness"] = 2
512
+ fh = io.BytesIO()
513
+ nx.write_gexf(G, fh)
514
+ fh.seek(0)
515
+ H = nx.read_gexf(fh, node_type=int)
516
+ assert sorted(G.nodes()) == sorted(H.nodes())
517
+ assert sorted(sorted(e) for e in G.edges()) == sorted(
518
+ sorted(e) for e in H.edges()
519
+ )
520
+
521
+ def test_slice_and_spell(self):
522
+ # Test spell first, so version = 1.2
523
+ G = nx.Graph()
524
+ G.add_node(0, label="1", color="green")
525
+ G.nodes[0]["spells"] = [(1, 2)]
526
+ fh = io.BytesIO()
527
+ nx.write_gexf(G, fh)
528
+ fh.seek(0)
529
+ H = nx.read_gexf(fh, node_type=int)
530
+ assert sorted(G.nodes()) == sorted(H.nodes())
531
+ assert sorted(sorted(e) for e in G.edges()) == sorted(
532
+ sorted(e) for e in H.edges()
533
+ )
534
+
535
+ G = nx.Graph()
536
+ G.add_node(0, label="1", color="green")
537
+ G.nodes[0]["slices"] = [(1, 2)]
538
+ fh = io.BytesIO()
539
+ nx.write_gexf(G, fh, version="1.1draft")
540
+ fh.seek(0)
541
+ H = nx.read_gexf(fh, node_type=int)
542
+ assert sorted(G.nodes()) == sorted(H.nodes())
543
+ assert sorted(sorted(e) for e in G.edges()) == sorted(
544
+ sorted(e) for e in H.edges()
545
+ )
546
+
547
+ def test_add_parent(self):
548
+ G = nx.Graph()
549
+ G.add_node(0, label="1", color="green", parents=[1, 2])
550
+ fh = io.BytesIO()
551
+ nx.write_gexf(G, fh)
552
+ fh.seek(0)
553
+ H = nx.read_gexf(fh, node_type=int)
554
+ assert sorted(G.nodes()) == sorted(H.nodes())
555
+ assert sorted(sorted(e) for e in G.edges()) == sorted(
556
+ sorted(e) for e in H.edges()
557
+ )
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_gml.py ADDED
@@ -0,0 +1,744 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import codecs
2
+ import io
3
+ import math
4
+ from ast import literal_eval
5
+ from contextlib import contextmanager
6
+ from textwrap import dedent
7
+
8
+ import pytest
9
+
10
+ import networkx as nx
11
+ from networkx.readwrite.gml import literal_destringizer, literal_stringizer
12
+
13
+
14
+ class TestGraph:
15
+ @classmethod
16
+ def setup_class(cls):
17
+ cls.simple_data = """Creator "me"
18
+ Version "xx"
19
+ graph [
20
+ comment "This is a sample graph"
21
+ directed 1
22
+ IsPlanar 1
23
+ pos [ x 0 y 1 ]
24
+ node [
25
+ id 1
26
+ label "Node 1"
27
+ pos [ x 1 y 1 ]
28
+ ]
29
+ node [
30
+ id 2
31
+ pos [ x 1 y 2 ]
32
+ label "Node 2"
33
+ ]
34
+ node [
35
+ id 3
36
+ label "Node 3"
37
+ pos [ x 1 y 3 ]
38
+ ]
39
+ edge [
40
+ source 1
41
+ target 2
42
+ label "Edge from node 1 to node 2"
43
+ color [line "blue" thickness 3]
44
+
45
+ ]
46
+ edge [
47
+ source 2
48
+ target 3
49
+ label "Edge from node 2 to node 3"
50
+ ]
51
+ edge [
52
+ source 3
53
+ target 1
54
+ label "Edge from node 3 to node 1"
55
+ ]
56
+ ]
57
+ """
58
+
59
+ def test_parse_gml_cytoscape_bug(self):
60
+ # example from issue #321, originally #324 in trac
61
+ cytoscape_example = """
62
+ Creator "Cytoscape"
63
+ Version 1.0
64
+ graph [
65
+ node [
66
+ root_index -3
67
+ id -3
68
+ graphics [
69
+ x -96.0
70
+ y -67.0
71
+ w 40.0
72
+ h 40.0
73
+ fill "#ff9999"
74
+ type "ellipse"
75
+ outline "#666666"
76
+ outline_width 1.5
77
+ ]
78
+ label "node2"
79
+ ]
80
+ node [
81
+ root_index -2
82
+ id -2
83
+ graphics [
84
+ x 63.0
85
+ y 37.0
86
+ w 40.0
87
+ h 40.0
88
+ fill "#ff9999"
89
+ type "ellipse"
90
+ outline "#666666"
91
+ outline_width 1.5
92
+ ]
93
+ label "node1"
94
+ ]
95
+ node [
96
+ root_index -1
97
+ id -1
98
+ graphics [
99
+ x -31.0
100
+ y -17.0
101
+ w 40.0
102
+ h 40.0
103
+ fill "#ff9999"
104
+ type "ellipse"
105
+ outline "#666666"
106
+ outline_width 1.5
107
+ ]
108
+ label "node0"
109
+ ]
110
+ edge [
111
+ root_index -2
112
+ target -2
113
+ source -1
114
+ graphics [
115
+ width 1.5
116
+ fill "#0000ff"
117
+ type "line"
118
+ Line [
119
+ ]
120
+ source_arrow 0
121
+ target_arrow 3
122
+ ]
123
+ label "DirectedEdge"
124
+ ]
125
+ edge [
126
+ root_index -1
127
+ target -1
128
+ source -3
129
+ graphics [
130
+ width 1.5
131
+ fill "#0000ff"
132
+ type "line"
133
+ Line [
134
+ ]
135
+ source_arrow 0
136
+ target_arrow 3
137
+ ]
138
+ label "DirectedEdge"
139
+ ]
140
+ ]
141
+ """
142
+ nx.parse_gml(cytoscape_example)
143
+
144
+ def test_parse_gml(self):
145
+ G = nx.parse_gml(self.simple_data, label="label")
146
+ assert sorted(G.nodes()) == ["Node 1", "Node 2", "Node 3"]
147
+ assert sorted(G.edges()) == [
148
+ ("Node 1", "Node 2"),
149
+ ("Node 2", "Node 3"),
150
+ ("Node 3", "Node 1"),
151
+ ]
152
+
153
+ assert sorted(G.edges(data=True)) == [
154
+ (
155
+ "Node 1",
156
+ "Node 2",
157
+ {
158
+ "color": {"line": "blue", "thickness": 3},
159
+ "label": "Edge from node 1 to node 2",
160
+ },
161
+ ),
162
+ ("Node 2", "Node 3", {"label": "Edge from node 2 to node 3"}),
163
+ ("Node 3", "Node 1", {"label": "Edge from node 3 to node 1"}),
164
+ ]
165
+
166
+ def test_read_gml(self, tmp_path):
167
+ fname = tmp_path / "test.gml"
168
+ with open(fname, "w") as fh:
169
+ fh.write(self.simple_data)
170
+ Gin = nx.read_gml(fname, label="label")
171
+ G = nx.parse_gml(self.simple_data, label="label")
172
+ assert sorted(G.nodes(data=True)) == sorted(Gin.nodes(data=True))
173
+ assert sorted(G.edges(data=True)) == sorted(Gin.edges(data=True))
174
+
175
+ def test_labels_are_strings(self):
176
+ # GML requires labels to be strings (i.e., in quotes)
177
+ answer = """graph [
178
+ node [
179
+ id 0
180
+ label "1203"
181
+ ]
182
+ ]"""
183
+ G = nx.Graph()
184
+ G.add_node(1203)
185
+ data = "\n".join(nx.generate_gml(G, stringizer=literal_stringizer))
186
+ assert data == answer
187
+
188
+ def test_relabel_duplicate(self):
189
+ data = """
190
+ graph
191
+ [
192
+ label ""
193
+ directed 1
194
+ node
195
+ [
196
+ id 0
197
+ label "same"
198
+ ]
199
+ node
200
+ [
201
+ id 1
202
+ label "same"
203
+ ]
204
+ ]
205
+ """
206
+ fh = io.BytesIO(data.encode("UTF-8"))
207
+ fh.seek(0)
208
+ pytest.raises(nx.NetworkXError, nx.read_gml, fh, label="label")
209
+
210
+ @pytest.mark.parametrize("stringizer", (None, literal_stringizer))
211
+ def test_tuplelabels(self, stringizer):
212
+ # https://github.com/networkx/networkx/pull/1048
213
+ # Writing tuple labels to GML failed.
214
+ G = nx.Graph()
215
+ G.add_edge((0, 1), (1, 0))
216
+ data = "\n".join(nx.generate_gml(G, stringizer=stringizer))
217
+ answer = """graph [
218
+ node [
219
+ id 0
220
+ label "(0,1)"
221
+ ]
222
+ node [
223
+ id 1
224
+ label "(1,0)"
225
+ ]
226
+ edge [
227
+ source 0
228
+ target 1
229
+ ]
230
+ ]"""
231
+ assert data == answer
232
+
233
+ def test_quotes(self, tmp_path):
234
+ # https://github.com/networkx/networkx/issues/1061
235
+ # Encoding quotes as HTML entities.
236
+ G = nx.path_graph(1)
237
+ G.name = "path_graph(1)"
238
+ attr = 'This is "quoted" and this is a copyright: ' + chr(169)
239
+ G.nodes[0]["demo"] = attr
240
+ with open(tmp_path / "test.gml", "w+b") as fobj:
241
+ nx.write_gml(G, fobj)
242
+ fobj.seek(0)
243
+ # Should be bytes in 2.x and 3.x
244
+ data = fobj.read().strip().decode("ascii")
245
+ answer = """graph [
246
+ name "path_graph(1)"
247
+ node [
248
+ id 0
249
+ label "0"
250
+ demo "This is &#34;quoted&#34; and this is a copyright: &#169;"
251
+ ]
252
+ ]"""
253
+ assert data == answer
254
+
255
+ def test_unicode_node(self, tmp_path):
256
+ node = "node" + chr(169)
257
+ G = nx.Graph()
258
+ G.add_node(node)
259
+ with open(tmp_path / "test.gml", "w+b") as fobj:
260
+ nx.write_gml(G, fobj)
261
+ fobj.seek(0)
262
+ # Should be bytes in 2.x and 3.x
263
+ data = fobj.read().strip().decode("ascii")
264
+ answer = """graph [
265
+ node [
266
+ id 0
267
+ label "node&#169;"
268
+ ]
269
+ ]"""
270
+ assert data == answer
271
+
272
+ def test_float_label(self, tmp_path):
273
+ node = 1.0
274
+ G = nx.Graph()
275
+ G.add_node(node)
276
+ with open(tmp_path / "test.gml", "w+b") as fobj:
277
+ nx.write_gml(G, fobj)
278
+ fobj.seek(0)
279
+ # Should be bytes in 2.x and 3.x
280
+ data = fobj.read().strip().decode("ascii")
281
+ answer = """graph [
282
+ node [
283
+ id 0
284
+ label "1.0"
285
+ ]
286
+ ]"""
287
+ assert data == answer
288
+
289
+ def test_special_float_label(self, tmp_path):
290
+ special_floats = [float("nan"), float("+inf"), float("-inf")]
291
+ try:
292
+ import numpy as np
293
+
294
+ special_floats += [np.nan, np.inf, np.inf * -1]
295
+ except ImportError:
296
+ special_floats += special_floats
297
+
298
+ G = nx.cycle_graph(len(special_floats))
299
+ attrs = dict(enumerate(special_floats))
300
+ nx.set_node_attributes(G, attrs, "nodefloat")
301
+ edges = list(G.edges)
302
+ attrs = {edges[i]: value for i, value in enumerate(special_floats)}
303
+ nx.set_edge_attributes(G, attrs, "edgefloat")
304
+
305
+ with open(tmp_path / "test.gml", "w+b") as fobj:
306
+ nx.write_gml(G, fobj)
307
+ fobj.seek(0)
308
+ # Should be bytes in 2.x and 3.x
309
+ data = fobj.read().strip().decode("ascii")
310
+ answer = """graph [
311
+ node [
312
+ id 0
313
+ label "0"
314
+ nodefloat NAN
315
+ ]
316
+ node [
317
+ id 1
318
+ label "1"
319
+ nodefloat +INF
320
+ ]
321
+ node [
322
+ id 2
323
+ label "2"
324
+ nodefloat -INF
325
+ ]
326
+ node [
327
+ id 3
328
+ label "3"
329
+ nodefloat NAN
330
+ ]
331
+ node [
332
+ id 4
333
+ label "4"
334
+ nodefloat +INF
335
+ ]
336
+ node [
337
+ id 5
338
+ label "5"
339
+ nodefloat -INF
340
+ ]
341
+ edge [
342
+ source 0
343
+ target 1
344
+ edgefloat NAN
345
+ ]
346
+ edge [
347
+ source 0
348
+ target 5
349
+ edgefloat +INF
350
+ ]
351
+ edge [
352
+ source 1
353
+ target 2
354
+ edgefloat -INF
355
+ ]
356
+ edge [
357
+ source 2
358
+ target 3
359
+ edgefloat NAN
360
+ ]
361
+ edge [
362
+ source 3
363
+ target 4
364
+ edgefloat +INF
365
+ ]
366
+ edge [
367
+ source 4
368
+ target 5
369
+ edgefloat -INF
370
+ ]
371
+ ]"""
372
+ assert data == answer
373
+
374
+ fobj.seek(0)
375
+ graph = nx.read_gml(fobj)
376
+ for indx, value in enumerate(special_floats):
377
+ node_value = graph.nodes[str(indx)]["nodefloat"]
378
+ if math.isnan(value):
379
+ assert math.isnan(node_value)
380
+ else:
381
+ assert node_value == value
382
+
383
+ edge = edges[indx]
384
+ string_edge = (str(edge[0]), str(edge[1]))
385
+ edge_value = graph.edges[string_edge]["edgefloat"]
386
+ if math.isnan(value):
387
+ assert math.isnan(edge_value)
388
+ else:
389
+ assert edge_value == value
390
+
391
+ def test_name(self):
392
+ G = nx.parse_gml('graph [ name "x" node [ id 0 label "x" ] ]')
393
+ assert "x" == G.graph["name"]
394
+ G = nx.parse_gml('graph [ node [ id 0 label "x" ] ]')
395
+ assert "" == G.name
396
+ assert "name" not in G.graph
397
+
398
+ def test_graph_types(self):
399
+ for directed in [None, False, True]:
400
+ for multigraph in [None, False, True]:
401
+ gml = "graph ["
402
+ if directed is not None:
403
+ gml += " directed " + str(int(directed))
404
+ if multigraph is not None:
405
+ gml += " multigraph " + str(int(multigraph))
406
+ gml += ' node [ id 0 label "0" ]'
407
+ gml += " edge [ source 0 target 0 ]"
408
+ gml += " ]"
409
+ G = nx.parse_gml(gml)
410
+ assert bool(directed) == G.is_directed()
411
+ assert bool(multigraph) == G.is_multigraph()
412
+ gml = "graph [\n"
413
+ if directed is True:
414
+ gml += " directed 1\n"
415
+ if multigraph is True:
416
+ gml += " multigraph 1\n"
417
+ gml += """ node [
418
+ id 0
419
+ label "0"
420
+ ]
421
+ edge [
422
+ source 0
423
+ target 0
424
+ """
425
+ if multigraph:
426
+ gml += " key 0\n"
427
+ gml += " ]\n]"
428
+ assert gml == "\n".join(nx.generate_gml(G))
429
+
430
+ def test_data_types(self):
431
+ data = [
432
+ True,
433
+ False,
434
+ 10**20,
435
+ -2e33,
436
+ "'",
437
+ '"&&amp;&&#34;"',
438
+ [{(b"\xfd",): "\x7f", chr(0x4444): (1, 2)}, (2, "3")],
439
+ ]
440
+ data.append(chr(0x14444))
441
+ data.append(literal_eval("{2.3j, 1 - 2.3j, ()}"))
442
+ G = nx.Graph()
443
+ G.name = data
444
+ G.graph["data"] = data
445
+ G.add_node(0, int=-1, data={"data": data})
446
+ G.add_edge(0, 0, float=-2.5, data=data)
447
+ gml = "\n".join(nx.generate_gml(G, stringizer=literal_stringizer))
448
+ G = nx.parse_gml(gml, destringizer=literal_destringizer)
449
+ assert data == G.name
450
+ assert {"name": data, "data": data} == G.graph
451
+ assert list(G.nodes(data=True)) == [(0, {"int": -1, "data": {"data": data}})]
452
+ assert list(G.edges(data=True)) == [(0, 0, {"float": -2.5, "data": data})]
453
+ G = nx.Graph()
454
+ G.graph["data"] = "frozenset([1, 2, 3])"
455
+ G = nx.parse_gml(nx.generate_gml(G), destringizer=literal_eval)
456
+ assert G.graph["data"] == "frozenset([1, 2, 3])"
457
+
458
+ def test_escape_unescape(self):
459
+ gml = """graph [
460
+ name "&amp;&#34;&#xf;&#x4444;&#1234567890;&#x1234567890abcdef;&unknown;"
461
+ ]"""
462
+ G = nx.parse_gml(gml)
463
+ assert (
464
+ '&"\x0f' + chr(0x4444) + "&#1234567890;&#x1234567890abcdef;&unknown;"
465
+ == G.name
466
+ )
467
+ gml = "\n".join(nx.generate_gml(G))
468
+ alnu = "#1234567890;&#38;#x1234567890abcdef"
469
+ answer = (
470
+ """graph [
471
+ name "&#38;&#34;&#15;&#17476;&#38;"""
472
+ + alnu
473
+ + """;&#38;unknown;"
474
+ ]"""
475
+ )
476
+ assert answer == gml
477
+
478
+ def test_exceptions(self, tmp_path):
479
+ pytest.raises(ValueError, literal_destringizer, "(")
480
+ pytest.raises(ValueError, literal_destringizer, "frozenset([1, 2, 3])")
481
+ pytest.raises(ValueError, literal_destringizer, literal_destringizer)
482
+ pytest.raises(ValueError, literal_stringizer, frozenset([1, 2, 3]))
483
+ pytest.raises(ValueError, literal_stringizer, literal_stringizer)
484
+ with open(tmp_path / "test.gml", "w+b") as f:
485
+ f.write(codecs.BOM_UTF8 + b"graph[]")
486
+ f.seek(0)
487
+ pytest.raises(nx.NetworkXError, nx.read_gml, f)
488
+
489
+ def assert_parse_error(gml):
490
+ pytest.raises(nx.NetworkXError, nx.parse_gml, gml)
491
+
492
+ assert_parse_error(["graph [\n\n", "]"])
493
+ assert_parse_error("")
494
+ assert_parse_error('Creator ""')
495
+ assert_parse_error("0")
496
+ assert_parse_error("graph ]")
497
+ assert_parse_error("graph [ 1 ]")
498
+ assert_parse_error("graph [ 1.E+2 ]")
499
+ assert_parse_error('graph [ "A" ]')
500
+ assert_parse_error("graph [ ] graph ]")
501
+ assert_parse_error("graph [ ] graph [ ]")
502
+ assert_parse_error("graph [ data [1, 2, 3] ]")
503
+ assert_parse_error("graph [ node [ ] ]")
504
+ assert_parse_error("graph [ node [ id 0 ] ]")
505
+ nx.parse_gml('graph [ node [ id "a" ] ]', label="id")
506
+ assert_parse_error("graph [ node [ id 0 label 0 ] node [ id 0 label 1 ] ]")
507
+ assert_parse_error("graph [ node [ id 0 label 0 ] node [ id 1 label 0 ] ]")
508
+ assert_parse_error("graph [ node [ id 0 label 0 ] edge [ ] ]")
509
+ assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 0 ] ]")
510
+ nx.parse_gml("graph [edge [ source 0 target 0 ] node [ id 0 label 0 ] ]")
511
+ assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 1 target 0 ] ]")
512
+ assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 0 target 1 ] ]")
513
+ assert_parse_error(
514
+ "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] "
515
+ "edge [ source 0 target 1 ] edge [ source 1 target 0 ] ]"
516
+ )
517
+ nx.parse_gml(
518
+ "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] "
519
+ "edge [ source 0 target 1 ] edge [ source 1 target 0 ] "
520
+ "directed 1 ]"
521
+ )
522
+ nx.parse_gml(
523
+ "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] "
524
+ "edge [ source 0 target 1 ] edge [ source 0 target 1 ]"
525
+ "multigraph 1 ]"
526
+ )
527
+ nx.parse_gml(
528
+ "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] "
529
+ "edge [ source 0 target 1 key 0 ] edge [ source 0 target 1 ]"
530
+ "multigraph 1 ]"
531
+ )
532
+ assert_parse_error(
533
+ "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] "
534
+ "edge [ source 0 target 1 key 0 ] edge [ source 0 target 1 key 0 ]"
535
+ "multigraph 1 ]"
536
+ )
537
+ nx.parse_gml(
538
+ "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] "
539
+ "edge [ source 0 target 1 key 0 ] edge [ source 1 target 0 key 0 ]"
540
+ "directed 1 multigraph 1 ]"
541
+ )
542
+
543
+ # Tests for string convertible alphanumeric id and label values
544
+ nx.parse_gml("graph [edge [ source a target a ] node [ id a label b ] ]")
545
+ nx.parse_gml(
546
+ "graph [ node [ id n42 label 0 ] node [ id x43 label 1 ]"
547
+ "edge [ source n42 target x43 key 0 ]"
548
+ "edge [ source x43 target n42 key 0 ]"
549
+ "directed 1 multigraph 1 ]"
550
+ )
551
+ assert_parse_error(
552
+ "graph [edge [ source '\u4200' target '\u4200' ] "
553
+ + "node [ id '\u4200' label b ] ]"
554
+ )
555
+
556
+ def assert_generate_error(*args, **kwargs):
557
+ pytest.raises(
558
+ nx.NetworkXError, lambda: list(nx.generate_gml(*args, **kwargs))
559
+ )
560
+
561
+ G = nx.Graph()
562
+ G.graph[3] = 3
563
+ assert_generate_error(G)
564
+ G = nx.Graph()
565
+ G.graph["3"] = 3
566
+ assert_generate_error(G)
567
+ G = nx.Graph()
568
+ G.graph["data"] = frozenset([1, 2, 3])
569
+ assert_generate_error(G, stringizer=literal_stringizer)
570
+
571
+ def test_label_kwarg(self):
572
+ G = nx.parse_gml(self.simple_data, label="id")
573
+ assert sorted(G.nodes) == [1, 2, 3]
574
+ labels = [G.nodes[n]["label"] for n in sorted(G.nodes)]
575
+ assert labels == ["Node 1", "Node 2", "Node 3"]
576
+
577
+ G = nx.parse_gml(self.simple_data, label=None)
578
+ assert sorted(G.nodes) == [1, 2, 3]
579
+ labels = [G.nodes[n]["label"] for n in sorted(G.nodes)]
580
+ assert labels == ["Node 1", "Node 2", "Node 3"]
581
+
582
+ def test_outofrange_integers(self, tmp_path):
583
+ # GML restricts integers to 32 signed bits.
584
+ # Check that we honor this restriction on export
585
+ G = nx.Graph()
586
+ # Test export for numbers that barely fit or don't fit into 32 bits,
587
+ # and 3 numbers in the middle
588
+ numbers = {
589
+ "toosmall": (-(2**31)) - 1,
590
+ "small": -(2**31),
591
+ "med1": -4,
592
+ "med2": 0,
593
+ "med3": 17,
594
+ "big": (2**31) - 1,
595
+ "toobig": 2**31,
596
+ }
597
+ G.add_node("Node", **numbers)
598
+
599
+ fname = tmp_path / "test.gml"
600
+ nx.write_gml(G, fname)
601
+ # Check that the export wrote the nonfitting numbers as strings
602
+ G2 = nx.read_gml(fname)
603
+ for attr, value in G2.nodes["Node"].items():
604
+ if attr == "toosmall" or attr == "toobig":
605
+ assert type(value) == str
606
+ else:
607
+ assert type(value) == int
608
+
609
+ def test_multiline(self):
610
+ # example from issue #6836
611
+ multiline_example = """
612
+ graph
613
+ [
614
+ node
615
+ [
616
+ id 0
617
+ label "multiline node"
618
+ label2 "multiline1
619
+ multiline2
620
+ multiline3"
621
+ alt_name "id 0"
622
+ ]
623
+ ]
624
+ """
625
+ G = nx.parse_gml(multiline_example)
626
+ assert G.nodes["multiline node"] == {
627
+ "label2": "multiline1 multiline2 multiline3",
628
+ "alt_name": "id 0",
629
+ }
630
+
631
+
632
+ @contextmanager
633
+ def byte_file():
634
+ _file_handle = io.BytesIO()
635
+ yield _file_handle
636
+ _file_handle.seek(0)
637
+
638
+
639
+ class TestPropertyLists:
640
+ def test_writing_graph_with_multi_element_property_list(self):
641
+ g = nx.Graph()
642
+ g.add_node("n1", properties=["element", 0, 1, 2.5, True, False])
643
+ with byte_file() as f:
644
+ nx.write_gml(g, f)
645
+ result = f.read().decode()
646
+
647
+ assert result == dedent(
648
+ """\
649
+ graph [
650
+ node [
651
+ id 0
652
+ label "n1"
653
+ properties "element"
654
+ properties 0
655
+ properties 1
656
+ properties 2.5
657
+ properties 1
658
+ properties 0
659
+ ]
660
+ ]
661
+ """
662
+ )
663
+
664
+ def test_writing_graph_with_one_element_property_list(self):
665
+ g = nx.Graph()
666
+ g.add_node("n1", properties=["element"])
667
+ with byte_file() as f:
668
+ nx.write_gml(g, f)
669
+ result = f.read().decode()
670
+
671
+ assert result == dedent(
672
+ """\
673
+ graph [
674
+ node [
675
+ id 0
676
+ label "n1"
677
+ properties "_networkx_list_start"
678
+ properties "element"
679
+ ]
680
+ ]
681
+ """
682
+ )
683
+
684
+ def test_reading_graph_with_list_property(self):
685
+ with byte_file() as f:
686
+ f.write(
687
+ dedent(
688
+ """
689
+ graph [
690
+ node [
691
+ id 0
692
+ label "n1"
693
+ properties "element"
694
+ properties 0
695
+ properties 1
696
+ properties 2.5
697
+ ]
698
+ ]
699
+ """
700
+ ).encode("ascii")
701
+ )
702
+ f.seek(0)
703
+ graph = nx.read_gml(f)
704
+ assert graph.nodes(data=True)["n1"] == {"properties": ["element", 0, 1, 2.5]}
705
+
706
+ def test_reading_graph_with_single_element_list_property(self):
707
+ with byte_file() as f:
708
+ f.write(
709
+ dedent(
710
+ """
711
+ graph [
712
+ node [
713
+ id 0
714
+ label "n1"
715
+ properties "_networkx_list_start"
716
+ properties "element"
717
+ ]
718
+ ]
719
+ """
720
+ ).encode("ascii")
721
+ )
722
+ f.seek(0)
723
+ graph = nx.read_gml(f)
724
+ assert graph.nodes(data=True)["n1"] == {"properties": ["element"]}
725
+
726
+
727
+ @pytest.mark.parametrize("coll", ([], ()))
728
+ def test_stringize_empty_list_tuple(coll):
729
+ G = nx.path_graph(2)
730
+ G.nodes[0]["test"] = coll # test serializing an empty collection
731
+ f = io.BytesIO()
732
+ nx.write_gml(G, f) # Smoke test - should not raise
733
+ f.seek(0)
734
+ H = nx.read_gml(f)
735
+ assert H.nodes["0"]["test"] == coll # Check empty list round-trips properly
736
+ # Check full round-tripping. Note that nodes are loaded as strings by
737
+ # default, so there needs to be some remapping prior to comparison
738
+ H = nx.relabel_nodes(H, {"0": 0, "1": 1})
739
+ assert nx.utils.graphs_equal(G, H)
740
+ # Same as above, but use destringizer for node remapping. Should have no
741
+ # effect on node attr
742
+ f.seek(0)
743
+ H = nx.read_gml(f, destringizer=int)
744
+ assert nx.utils.graphs_equal(G, H)
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_graph6.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import BytesIO
2
+
3
+ import pytest
4
+
5
+ import networkx as nx
6
+ import networkx.readwrite.graph6 as g6
7
+ from networkx.utils import edges_equal, nodes_equal
8
+
9
+
10
+ class TestGraph6Utils:
11
+ def test_n_data_n_conversion(self):
12
+ for i in [0, 1, 42, 62, 63, 64, 258047, 258048, 7744773, 68719476735]:
13
+ assert g6.data_to_n(g6.n_to_data(i))[0] == i
14
+ assert g6.data_to_n(g6.n_to_data(i))[1] == []
15
+ assert g6.data_to_n(g6.n_to_data(i) + [42, 43])[1] == [42, 43]
16
+
17
+
18
+ class TestFromGraph6Bytes:
19
+ def test_from_graph6_bytes(self):
20
+ data = b"DF{"
21
+ G = nx.from_graph6_bytes(data)
22
+ assert nodes_equal(G.nodes(), [0, 1, 2, 3, 4])
23
+ assert edges_equal(
24
+ G.edges(), [(0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
25
+ )
26
+
27
+ def test_read_equals_from_bytes(self):
28
+ data = b"DF{"
29
+ G = nx.from_graph6_bytes(data)
30
+ fh = BytesIO(data)
31
+ Gin = nx.read_graph6(fh)
32
+ assert nodes_equal(G.nodes(), Gin.nodes())
33
+ assert edges_equal(G.edges(), Gin.edges())
34
+
35
+
36
+ class TestReadGraph6:
37
+ def test_read_many_graph6(self):
38
+ """Test for reading many graphs from a file into a list."""
39
+ data = b"DF{\nD`{\nDqK\nD~{\n"
40
+ fh = BytesIO(data)
41
+ glist = nx.read_graph6(fh)
42
+ assert len(glist) == 4
43
+ for G in glist:
44
+ assert sorted(G) == list(range(5))
45
+
46
+
47
+ class TestWriteGraph6:
48
+ """Unit tests for writing a graph to a file in graph6 format."""
49
+
50
+ def test_null_graph(self):
51
+ result = BytesIO()
52
+ nx.write_graph6(nx.null_graph(), result)
53
+ assert result.getvalue() == b">>graph6<<?\n"
54
+
55
+ def test_trivial_graph(self):
56
+ result = BytesIO()
57
+ nx.write_graph6(nx.trivial_graph(), result)
58
+ assert result.getvalue() == b">>graph6<<@\n"
59
+
60
+ def test_complete_graph(self):
61
+ result = BytesIO()
62
+ nx.write_graph6(nx.complete_graph(4), result)
63
+ assert result.getvalue() == b">>graph6<<C~\n"
64
+
65
+ def test_large_complete_graph(self):
66
+ result = BytesIO()
67
+ nx.write_graph6(nx.complete_graph(67), result, header=False)
68
+ assert result.getvalue() == b"~?@B" + b"~" * 368 + b"w\n"
69
+
70
+ def test_no_header(self):
71
+ result = BytesIO()
72
+ nx.write_graph6(nx.complete_graph(4), result, header=False)
73
+ assert result.getvalue() == b"C~\n"
74
+
75
+ def test_complete_bipartite_graph(self):
76
+ result = BytesIO()
77
+ G = nx.complete_bipartite_graph(6, 9)
78
+ nx.write_graph6(G, result, header=False)
79
+ # The expected encoding here was verified by Sage.
80
+ assert result.getvalue() == b"N??F~z{~Fw^_~?~?^_?\n"
81
+
82
+ @pytest.mark.parametrize("G", (nx.MultiGraph(), nx.DiGraph()))
83
+ def test_no_directed_or_multi_graphs(self, G):
84
+ with pytest.raises(nx.NetworkXNotImplemented):
85
+ nx.write_graph6(G, BytesIO())
86
+
87
+ def test_length(self):
88
+ for i in list(range(13)) + [31, 47, 62, 63, 64, 72]:
89
+ g = nx.random_graphs.gnm_random_graph(i, i * i // 4, seed=i)
90
+ gstr = BytesIO()
91
+ nx.write_graph6(g, gstr, header=False)
92
+ # Strip the trailing newline.
93
+ gstr = gstr.getvalue().rstrip()
94
+ assert len(gstr) == ((i - 1) * i // 2 + 5) // 6 + (1 if i < 63 else 4)
95
+
96
+ def test_roundtrip(self):
97
+ for i in list(range(13)) + [31, 47, 62, 63, 64, 72]:
98
+ G = nx.random_graphs.gnm_random_graph(i, i * i // 4, seed=i)
99
+ f = BytesIO()
100
+ nx.write_graph6(G, f)
101
+ f.seek(0)
102
+ H = nx.read_graph6(f)
103
+ assert nodes_equal(G.nodes(), H.nodes())
104
+ assert edges_equal(G.edges(), H.edges())
105
+
106
+ def test_write_path(self, tmp_path):
107
+ with open(tmp_path / "test.g6", "w+b") as f:
108
+ g6.write_graph6_file(nx.null_graph(), f)
109
+ f.seek(0)
110
+ assert f.read() == b">>graph6<<?\n"
111
+
112
+ @pytest.mark.parametrize("edge", ((0, 1), (1, 2), (1, 42)))
113
+ def test_relabeling(self, edge):
114
+ G = nx.Graph([edge])
115
+ f = BytesIO()
116
+ nx.write_graph6(G, f)
117
+ f.seek(0)
118
+ assert f.read() == b">>graph6<<A_\n"
119
+
120
+
121
+ class TestToGraph6Bytes:
122
+ def test_null_graph(self):
123
+ G = nx.null_graph()
124
+ assert g6.to_graph6_bytes(G) == b">>graph6<<?\n"
125
+
126
+ def test_trivial_graph(self):
127
+ G = nx.trivial_graph()
128
+ assert g6.to_graph6_bytes(G) == b">>graph6<<@\n"
129
+
130
+ def test_complete_graph(self):
131
+ assert g6.to_graph6_bytes(nx.complete_graph(4)) == b">>graph6<<C~\n"
132
+
133
+ def test_large_complete_graph(self):
134
+ G = nx.complete_graph(67)
135
+ assert g6.to_graph6_bytes(G, header=False) == b"~?@B" + b"~" * 368 + b"w\n"
136
+
137
+ def test_no_header(self):
138
+ G = nx.complete_graph(4)
139
+ assert g6.to_graph6_bytes(G, header=False) == b"C~\n"
140
+
141
+ def test_complete_bipartite_graph(self):
142
+ G = nx.complete_bipartite_graph(6, 9)
143
+ assert g6.to_graph6_bytes(G, header=False) == b"N??F~z{~Fw^_~?~?^_?\n"
144
+
145
+ @pytest.mark.parametrize("G", (nx.MultiGraph(), nx.DiGraph()))
146
+ def test_no_directed_or_multi_graphs(self, G):
147
+ with pytest.raises(nx.NetworkXNotImplemented):
148
+ g6.to_graph6_bytes(G)
149
+
150
+ def test_length(self):
151
+ for i in list(range(13)) + [31, 47, 62, 63, 64, 72]:
152
+ G = nx.random_graphs.gnm_random_graph(i, i * i // 4, seed=i)
153
+ # Strip the trailing newline.
154
+ gstr = g6.to_graph6_bytes(G, header=False).rstrip()
155
+ assert len(gstr) == ((i - 1) * i // 2 + 5) // 6 + (1 if i < 63 else 4)
156
+
157
+ def test_roundtrip(self):
158
+ for i in list(range(13)) + [31, 47, 62, 63, 64, 72]:
159
+ G = nx.random_graphs.gnm_random_graph(i, i * i // 4, seed=i)
160
+ data = g6.to_graph6_bytes(G)
161
+ H = nx.from_graph6_bytes(data.rstrip())
162
+ assert nodes_equal(G.nodes(), H.nodes())
163
+ assert edges_equal(G.edges(), H.edges())
164
+
165
+ @pytest.mark.parametrize("edge", ((0, 1), (1, 2), (1, 42)))
166
+ def test_relabeling(self, edge):
167
+ G = nx.Graph([edge])
168
+ assert g6.to_graph6_bytes(G) == b">>graph6<<A_\n"
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_graphml.py ADDED
@@ -0,0 +1,1531 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+
3
+ import pytest
4
+
5
+ import networkx as nx
6
+ from networkx.readwrite.graphml import GraphMLWriter
7
+ from networkx.utils import edges_equal, nodes_equal
8
+
9
+
10
+ class BaseGraphML:
11
+ @classmethod
12
+ def setup_class(cls):
13
+ cls.simple_directed_data = """<?xml version="1.0" encoding="UTF-8"?>
14
+ <!-- This file was written by the JAVA GraphML Library.-->
15
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
16
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
17
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
18
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
19
+ <graph id="G" edgedefault="directed">
20
+ <node id="n0"/>
21
+ <node id="n1"/>
22
+ <node id="n2"/>
23
+ <node id="n3"/>
24
+ <node id="n4"/>
25
+ <node id="n5"/>
26
+ <node id="n6"/>
27
+ <node id="n7"/>
28
+ <node id="n8"/>
29
+ <node id="n9"/>
30
+ <node id="n10"/>
31
+ <edge id="foo" source="n0" target="n2"/>
32
+ <edge source="n1" target="n2"/>
33
+ <edge source="n2" target="n3"/>
34
+ <edge source="n3" target="n5"/>
35
+ <edge source="n3" target="n4"/>
36
+ <edge source="n4" target="n6"/>
37
+ <edge source="n6" target="n5"/>
38
+ <edge source="n5" target="n7"/>
39
+ <edge source="n6" target="n8"/>
40
+ <edge source="n8" target="n7"/>
41
+ <edge source="n8" target="n9"/>
42
+ </graph>
43
+ </graphml>"""
44
+ cls.simple_directed_graph = nx.DiGraph()
45
+ cls.simple_directed_graph.add_node("n10")
46
+ cls.simple_directed_graph.add_edge("n0", "n2", id="foo")
47
+ cls.simple_directed_graph.add_edge("n0", "n2")
48
+ cls.simple_directed_graph.add_edges_from(
49
+ [
50
+ ("n1", "n2"),
51
+ ("n2", "n3"),
52
+ ("n3", "n5"),
53
+ ("n3", "n4"),
54
+ ("n4", "n6"),
55
+ ("n6", "n5"),
56
+ ("n5", "n7"),
57
+ ("n6", "n8"),
58
+ ("n8", "n7"),
59
+ ("n8", "n9"),
60
+ ]
61
+ )
62
+ cls.simple_directed_fh = io.BytesIO(cls.simple_directed_data.encode("UTF-8"))
63
+
64
+ cls.attribute_data = """<?xml version="1.0" encoding="UTF-8"?>
65
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
66
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
67
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
68
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
69
+ <key id="d0" for="node" attr.name="color" attr.type="string">
70
+ <default>yellow</default>
71
+ </key>
72
+ <key id="d1" for="edge" attr.name="weight" attr.type="double"/>
73
+ <graph id="G" edgedefault="directed">
74
+ <node id="n0">
75
+ <data key="d0">green</data>
76
+ </node>
77
+ <node id="n1"/>
78
+ <node id="n2">
79
+ <data key="d0">blue</data>
80
+ </node>
81
+ <node id="n3">
82
+ <data key="d0">red</data>
83
+ </node>
84
+ <node id="n4"/>
85
+ <node id="n5">
86
+ <data key="d0">turquoise</data>
87
+ </node>
88
+ <edge id="e0" source="n0" target="n2">
89
+ <data key="d1">1.0</data>
90
+ </edge>
91
+ <edge id="e1" source="n0" target="n1">
92
+ <data key="d1">1.0</data>
93
+ </edge>
94
+ <edge id="e2" source="n1" target="n3">
95
+ <data key="d1">2.0</data>
96
+ </edge>
97
+ <edge id="e3" source="n3" target="n2"/>
98
+ <edge id="e4" source="n2" target="n4"/>
99
+ <edge id="e5" source="n3" target="n5"/>
100
+ <edge id="e6" source="n5" target="n4">
101
+ <data key="d1">1.1</data>
102
+ </edge>
103
+ </graph>
104
+ </graphml>
105
+ """
106
+ cls.attribute_graph = nx.DiGraph(id="G")
107
+ cls.attribute_graph.graph["node_default"] = {"color": "yellow"}
108
+ cls.attribute_graph.add_node("n0", color="green")
109
+ cls.attribute_graph.add_node("n2", color="blue")
110
+ cls.attribute_graph.add_node("n3", color="red")
111
+ cls.attribute_graph.add_node("n4")
112
+ cls.attribute_graph.add_node("n5", color="turquoise")
113
+ cls.attribute_graph.add_edge("n0", "n2", id="e0", weight=1.0)
114
+ cls.attribute_graph.add_edge("n0", "n1", id="e1", weight=1.0)
115
+ cls.attribute_graph.add_edge("n1", "n3", id="e2", weight=2.0)
116
+ cls.attribute_graph.add_edge("n3", "n2", id="e3")
117
+ cls.attribute_graph.add_edge("n2", "n4", id="e4")
118
+ cls.attribute_graph.add_edge("n3", "n5", id="e5")
119
+ cls.attribute_graph.add_edge("n5", "n4", id="e6", weight=1.1)
120
+ cls.attribute_fh = io.BytesIO(cls.attribute_data.encode("UTF-8"))
121
+
122
+ cls.node_attribute_default_data = """<?xml version="1.0" encoding="UTF-8"?>
123
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
124
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
125
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
126
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
127
+ <key id="d0" for="node" attr.name="boolean_attribute" attr.type="boolean"><default>false</default></key>
128
+ <key id="d1" for="node" attr.name="int_attribute" attr.type="int"><default>0</default></key>
129
+ <key id="d2" for="node" attr.name="long_attribute" attr.type="long"><default>0</default></key>
130
+ <key id="d3" for="node" attr.name="float_attribute" attr.type="float"><default>0.0</default></key>
131
+ <key id="d4" for="node" attr.name="double_attribute" attr.type="double"><default>0.0</default></key>
132
+ <key id="d5" for="node" attr.name="string_attribute" attr.type="string"><default>Foo</default></key>
133
+ <graph id="G" edgedefault="directed">
134
+ <node id="n0"/>
135
+ <node id="n1"/>
136
+ <edge id="e0" source="n0" target="n1"/>
137
+ </graph>
138
+ </graphml>
139
+ """
140
+ cls.node_attribute_default_graph = nx.DiGraph(id="G")
141
+ cls.node_attribute_default_graph.graph["node_default"] = {
142
+ "boolean_attribute": False,
143
+ "int_attribute": 0,
144
+ "long_attribute": 0,
145
+ "float_attribute": 0.0,
146
+ "double_attribute": 0.0,
147
+ "string_attribute": "Foo",
148
+ }
149
+ cls.node_attribute_default_graph.add_node("n0")
150
+ cls.node_attribute_default_graph.add_node("n1")
151
+ cls.node_attribute_default_graph.add_edge("n0", "n1", id="e0")
152
+ cls.node_attribute_default_fh = io.BytesIO(
153
+ cls.node_attribute_default_data.encode("UTF-8")
154
+ )
155
+
156
+ cls.attribute_named_key_ids_data = """<?xml version='1.0' encoding='utf-8'?>
157
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
158
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
159
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
160
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
161
+ <key id="edge_prop" for="edge" attr.name="edge_prop" attr.type="string"/>
162
+ <key id="prop2" for="node" attr.name="prop2" attr.type="string"/>
163
+ <key id="prop1" for="node" attr.name="prop1" attr.type="string"/>
164
+ <graph edgedefault="directed">
165
+ <node id="0">
166
+ <data key="prop1">val1</data>
167
+ <data key="prop2">val2</data>
168
+ </node>
169
+ <node id="1">
170
+ <data key="prop1">val_one</data>
171
+ <data key="prop2">val2</data>
172
+ </node>
173
+ <edge source="0" target="1">
174
+ <data key="edge_prop">edge_value</data>
175
+ </edge>
176
+ </graph>
177
+ </graphml>
178
+ """
179
+ cls.attribute_named_key_ids_graph = nx.DiGraph()
180
+ cls.attribute_named_key_ids_graph.add_node("0", prop1="val1", prop2="val2")
181
+ cls.attribute_named_key_ids_graph.add_node("1", prop1="val_one", prop2="val2")
182
+ cls.attribute_named_key_ids_graph.add_edge("0", "1", edge_prop="edge_value")
183
+ fh = io.BytesIO(cls.attribute_named_key_ids_data.encode("UTF-8"))
184
+ cls.attribute_named_key_ids_fh = fh
185
+
186
+ cls.attribute_numeric_type_data = """<?xml version='1.0' encoding='utf-8'?>
187
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
188
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
189
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
190
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
191
+ <key attr.name="weight" attr.type="double" for="node" id="d1" />
192
+ <key attr.name="weight" attr.type="double" for="edge" id="d0" />
193
+ <graph edgedefault="directed">
194
+ <node id="n0">
195
+ <data key="d1">1</data>
196
+ </node>
197
+ <node id="n1">
198
+ <data key="d1">2.0</data>
199
+ </node>
200
+ <edge source="n0" target="n1">
201
+ <data key="d0">1</data>
202
+ </edge>
203
+ <edge source="n1" target="n0">
204
+ <data key="d0">k</data>
205
+ </edge>
206
+ <edge source="n1" target="n1">
207
+ <data key="d0">1.0</data>
208
+ </edge>
209
+ </graph>
210
+ </graphml>
211
+ """
212
+ cls.attribute_numeric_type_graph = nx.DiGraph()
213
+ cls.attribute_numeric_type_graph.add_node("n0", weight=1)
214
+ cls.attribute_numeric_type_graph.add_node("n1", weight=2.0)
215
+ cls.attribute_numeric_type_graph.add_edge("n0", "n1", weight=1)
216
+ cls.attribute_numeric_type_graph.add_edge("n1", "n1", weight=1.0)
217
+ fh = io.BytesIO(cls.attribute_numeric_type_data.encode("UTF-8"))
218
+ cls.attribute_numeric_type_fh = fh
219
+
220
+ cls.simple_undirected_data = """<?xml version="1.0" encoding="UTF-8"?>
221
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
222
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
223
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
224
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
225
+ <graph id="G">
226
+ <node id="n0"/>
227
+ <node id="n1"/>
228
+ <node id="n2"/>
229
+ <node id="n10"/>
230
+ <edge id="foo" source="n0" target="n2"/>
231
+ <edge source="n1" target="n2"/>
232
+ <edge source="n2" target="n3"/>
233
+ </graph>
234
+ </graphml>"""
235
+ # <edge source="n8" target="n10" directed="false"/>
236
+ cls.simple_undirected_graph = nx.Graph()
237
+ cls.simple_undirected_graph.add_node("n10")
238
+ cls.simple_undirected_graph.add_edge("n0", "n2", id="foo")
239
+ cls.simple_undirected_graph.add_edges_from([("n1", "n2"), ("n2", "n3")])
240
+ fh = io.BytesIO(cls.simple_undirected_data.encode("UTF-8"))
241
+ cls.simple_undirected_fh = fh
242
+
243
+ cls.undirected_multigraph_data = """<?xml version="1.0" encoding="UTF-8"?>
244
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
245
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
246
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
247
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
248
+ <graph id="G">
249
+ <node id="n0"/>
250
+ <node id="n1"/>
251
+ <node id="n2"/>
252
+ <node id="n10"/>
253
+ <edge id="e0" source="n0" target="n2"/>
254
+ <edge id="e1" source="n1" target="n2"/>
255
+ <edge id="e2" source="n2" target="n1"/>
256
+ </graph>
257
+ </graphml>"""
258
+ cls.undirected_multigraph = nx.MultiGraph()
259
+ cls.undirected_multigraph.add_node("n10")
260
+ cls.undirected_multigraph.add_edge("n0", "n2", id="e0")
261
+ cls.undirected_multigraph.add_edge("n1", "n2", id="e1")
262
+ cls.undirected_multigraph.add_edge("n2", "n1", id="e2")
263
+ fh = io.BytesIO(cls.undirected_multigraph_data.encode("UTF-8"))
264
+ cls.undirected_multigraph_fh = fh
265
+
266
+ cls.undirected_multigraph_no_multiedge_data = """<?xml version="1.0" encoding="UTF-8"?>
267
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
268
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
269
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
270
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
271
+ <graph id="G">
272
+ <node id="n0"/>
273
+ <node id="n1"/>
274
+ <node id="n2"/>
275
+ <node id="n10"/>
276
+ <edge id="e0" source="n0" target="n2"/>
277
+ <edge id="e1" source="n1" target="n2"/>
278
+ <edge id="e2" source="n2" target="n3"/>
279
+ </graph>
280
+ </graphml>"""
281
+ cls.undirected_multigraph_no_multiedge = nx.MultiGraph()
282
+ cls.undirected_multigraph_no_multiedge.add_node("n10")
283
+ cls.undirected_multigraph_no_multiedge.add_edge("n0", "n2", id="e0")
284
+ cls.undirected_multigraph_no_multiedge.add_edge("n1", "n2", id="e1")
285
+ cls.undirected_multigraph_no_multiedge.add_edge("n2", "n3", id="e2")
286
+ fh = io.BytesIO(cls.undirected_multigraph_no_multiedge_data.encode("UTF-8"))
287
+ cls.undirected_multigraph_no_multiedge_fh = fh
288
+
289
+ cls.multigraph_only_ids_for_multiedges_data = """<?xml version="1.0" encoding="UTF-8"?>
290
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
291
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
292
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
293
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
294
+ <graph id="G">
295
+ <node id="n0"/>
296
+ <node id="n1"/>
297
+ <node id="n2"/>
298
+ <node id="n10"/>
299
+ <edge source="n0" target="n2"/>
300
+ <edge id="e1" source="n1" target="n2"/>
301
+ <edge id="e2" source="n2" target="n1"/>
302
+ </graph>
303
+ </graphml>"""
304
+ cls.multigraph_only_ids_for_multiedges = nx.MultiGraph()
305
+ cls.multigraph_only_ids_for_multiedges.add_node("n10")
306
+ cls.multigraph_only_ids_for_multiedges.add_edge("n0", "n2")
307
+ cls.multigraph_only_ids_for_multiedges.add_edge("n1", "n2", id="e1")
308
+ cls.multigraph_only_ids_for_multiedges.add_edge("n2", "n1", id="e2")
309
+ fh = io.BytesIO(cls.multigraph_only_ids_for_multiedges_data.encode("UTF-8"))
310
+ cls.multigraph_only_ids_for_multiedges_fh = fh
311
+
312
+
313
+ class TestReadGraphML(BaseGraphML):
314
+ def test_read_simple_directed_graphml(self):
315
+ G = self.simple_directed_graph
316
+ H = nx.read_graphml(self.simple_directed_fh)
317
+ assert sorted(G.nodes()) == sorted(H.nodes())
318
+ assert sorted(G.edges()) == sorted(H.edges())
319
+ assert sorted(G.edges(data=True)) == sorted(H.edges(data=True))
320
+ self.simple_directed_fh.seek(0)
321
+
322
+ PG = nx.parse_graphml(self.simple_directed_data)
323
+ assert sorted(G.nodes()) == sorted(PG.nodes())
324
+ assert sorted(G.edges()) == sorted(PG.edges())
325
+ assert sorted(G.edges(data=True)) == sorted(PG.edges(data=True))
326
+
327
+ def test_read_simple_undirected_graphml(self):
328
+ G = self.simple_undirected_graph
329
+ H = nx.read_graphml(self.simple_undirected_fh)
330
+ assert nodes_equal(G.nodes(), H.nodes())
331
+ assert edges_equal(G.edges(), H.edges())
332
+ self.simple_undirected_fh.seek(0)
333
+
334
+ PG = nx.parse_graphml(self.simple_undirected_data)
335
+ assert nodes_equal(G.nodes(), PG.nodes())
336
+ assert edges_equal(G.edges(), PG.edges())
337
+
338
+ def test_read_undirected_multigraph_graphml(self):
339
+ G = self.undirected_multigraph
340
+ H = nx.read_graphml(self.undirected_multigraph_fh)
341
+ assert nodes_equal(G.nodes(), H.nodes())
342
+ assert edges_equal(G.edges(), H.edges())
343
+ self.undirected_multigraph_fh.seek(0)
344
+
345
+ PG = nx.parse_graphml(self.undirected_multigraph_data)
346
+ assert nodes_equal(G.nodes(), PG.nodes())
347
+ assert edges_equal(G.edges(), PG.edges())
348
+
349
+ def test_read_undirected_multigraph_no_multiedge_graphml(self):
350
+ G = self.undirected_multigraph_no_multiedge
351
+ H = nx.read_graphml(self.undirected_multigraph_no_multiedge_fh)
352
+ assert nodes_equal(G.nodes(), H.nodes())
353
+ assert edges_equal(G.edges(), H.edges())
354
+ self.undirected_multigraph_no_multiedge_fh.seek(0)
355
+
356
+ PG = nx.parse_graphml(self.undirected_multigraph_no_multiedge_data)
357
+ assert nodes_equal(G.nodes(), PG.nodes())
358
+ assert edges_equal(G.edges(), PG.edges())
359
+
360
+ def test_read_undirected_multigraph_only_ids_for_multiedges_graphml(self):
361
+ G = self.multigraph_only_ids_for_multiedges
362
+ H = nx.read_graphml(self.multigraph_only_ids_for_multiedges_fh)
363
+ assert nodes_equal(G.nodes(), H.nodes())
364
+ assert edges_equal(G.edges(), H.edges())
365
+ self.multigraph_only_ids_for_multiedges_fh.seek(0)
366
+
367
+ PG = nx.parse_graphml(self.multigraph_only_ids_for_multiedges_data)
368
+ assert nodes_equal(G.nodes(), PG.nodes())
369
+ assert edges_equal(G.edges(), PG.edges())
370
+
371
+ def test_read_attribute_graphml(self):
372
+ G = self.attribute_graph
373
+ H = nx.read_graphml(self.attribute_fh)
374
+ assert nodes_equal(G.nodes(True), sorted(H.nodes(data=True)))
375
+ ge = sorted(G.edges(data=True))
376
+ he = sorted(H.edges(data=True))
377
+ for a, b in zip(ge, he):
378
+ assert a == b
379
+ self.attribute_fh.seek(0)
380
+
381
+ PG = nx.parse_graphml(self.attribute_data)
382
+ assert sorted(G.nodes(True)) == sorted(PG.nodes(data=True))
383
+ ge = sorted(G.edges(data=True))
384
+ he = sorted(PG.edges(data=True))
385
+ for a, b in zip(ge, he):
386
+ assert a == b
387
+
388
+ def test_node_default_attribute_graphml(self):
389
+ G = self.node_attribute_default_graph
390
+ H = nx.read_graphml(self.node_attribute_default_fh)
391
+ assert G.graph["node_default"] == H.graph["node_default"]
392
+
393
+ def test_directed_edge_in_undirected(self):
394
+ s = """<?xml version="1.0" encoding="UTF-8"?>
395
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
396
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
397
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
398
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
399
+ <graph id="G">
400
+ <node id="n0"/>
401
+ <node id="n1"/>
402
+ <node id="n2"/>
403
+ <edge source="n0" target="n1"/>
404
+ <edge source="n1" target="n2" directed='true'/>
405
+ </graph>
406
+ </graphml>"""
407
+ fh = io.BytesIO(s.encode("UTF-8"))
408
+ pytest.raises(nx.NetworkXError, nx.read_graphml, fh)
409
+ pytest.raises(nx.NetworkXError, nx.parse_graphml, s)
410
+
411
+ def test_undirected_edge_in_directed(self):
412
+ s = """<?xml version="1.0" encoding="UTF-8"?>
413
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
414
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
415
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
416
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
417
+ <graph id="G" edgedefault='directed'>
418
+ <node id="n0"/>
419
+ <node id="n1"/>
420
+ <node id="n2"/>
421
+ <edge source="n0" target="n1"/>
422
+ <edge source="n1" target="n2" directed='false'/>
423
+ </graph>
424
+ </graphml>"""
425
+ fh = io.BytesIO(s.encode("UTF-8"))
426
+ pytest.raises(nx.NetworkXError, nx.read_graphml, fh)
427
+ pytest.raises(nx.NetworkXError, nx.parse_graphml, s)
428
+
429
+ def test_key_raise(self):
430
+ s = """<?xml version="1.0" encoding="UTF-8"?>
431
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
432
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
433
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
434
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
435
+ <key id="d0" for="node" attr.name="color" attr.type="string">
436
+ <default>yellow</default>
437
+ </key>
438
+ <key id="d1" for="edge" attr.name="weight" attr.type="double"/>
439
+ <graph id="G" edgedefault="directed">
440
+ <node id="n0">
441
+ <data key="d0">green</data>
442
+ </node>
443
+ <node id="n1"/>
444
+ <node id="n2">
445
+ <data key="d0">blue</data>
446
+ </node>
447
+ <edge id="e0" source="n0" target="n2">
448
+ <data key="d2">1.0</data>
449
+ </edge>
450
+ </graph>
451
+ </graphml>
452
+ """
453
+ fh = io.BytesIO(s.encode("UTF-8"))
454
+ pytest.raises(nx.NetworkXError, nx.read_graphml, fh)
455
+ pytest.raises(nx.NetworkXError, nx.parse_graphml, s)
456
+
457
+ def test_hyperedge_raise(self):
458
+ s = """<?xml version="1.0" encoding="UTF-8"?>
459
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
460
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
461
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
462
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
463
+ <key id="d0" for="node" attr.name="color" attr.type="string">
464
+ <default>yellow</default>
465
+ </key>
466
+ <key id="d1" for="edge" attr.name="weight" attr.type="double"/>
467
+ <graph id="G" edgedefault="directed">
468
+ <node id="n0">
469
+ <data key="d0">green</data>
470
+ </node>
471
+ <node id="n1"/>
472
+ <node id="n2">
473
+ <data key="d0">blue</data>
474
+ </node>
475
+ <hyperedge id="e0" source="n0" target="n2">
476
+ <endpoint node="n0"/>
477
+ <endpoint node="n1"/>
478
+ <endpoint node="n2"/>
479
+ </hyperedge>
480
+ </graph>
481
+ </graphml>
482
+ """
483
+ fh = io.BytesIO(s.encode("UTF-8"))
484
+ pytest.raises(nx.NetworkXError, nx.read_graphml, fh)
485
+ pytest.raises(nx.NetworkXError, nx.parse_graphml, s)
486
+
487
+ def test_multigraph_keys(self):
488
+ # Test that reading multigraphs uses edge id attributes as keys
489
+ s = """<?xml version="1.0" encoding="UTF-8"?>
490
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
491
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
492
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
493
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
494
+ <graph id="G" edgedefault="directed">
495
+ <node id="n0"/>
496
+ <node id="n1"/>
497
+ <edge id="e0" source="n0" target="n1"/>
498
+ <edge id="e1" source="n0" target="n1"/>
499
+ </graph>
500
+ </graphml>
501
+ """
502
+ fh = io.BytesIO(s.encode("UTF-8"))
503
+ G = nx.read_graphml(fh)
504
+ expected = [("n0", "n1", "e0"), ("n0", "n1", "e1")]
505
+ assert sorted(G.edges(keys=True)) == expected
506
+ fh.seek(0)
507
+ H = nx.parse_graphml(s)
508
+ assert sorted(H.edges(keys=True)) == expected
509
+
510
+ def test_preserve_multi_edge_data(self):
511
+ """
512
+ Test that data and keys of edges are preserved on consequent
513
+ write and reads
514
+ """
515
+ G = nx.MultiGraph()
516
+ G.add_node(1)
517
+ G.add_node(2)
518
+ G.add_edges_from(
519
+ [
520
+ # edges with no data, no keys:
521
+ (1, 2),
522
+ # edges with only data:
523
+ (1, 2, {"key": "data_key1"}),
524
+ (1, 2, {"id": "data_id2"}),
525
+ (1, 2, {"key": "data_key3", "id": "data_id3"}),
526
+ # edges with both data and keys:
527
+ (1, 2, 103, {"key": "data_key4"}),
528
+ (1, 2, 104, {"id": "data_id5"}),
529
+ (1, 2, 105, {"key": "data_key6", "id": "data_id7"}),
530
+ ]
531
+ )
532
+ fh = io.BytesIO()
533
+ nx.write_graphml(G, fh)
534
+ fh.seek(0)
535
+ H = nx.read_graphml(fh, node_type=int)
536
+ assert edges_equal(G.edges(data=True, keys=True), H.edges(data=True, keys=True))
537
+ assert G._adj == H._adj
538
+
539
+ Gadj = {
540
+ str(node): {
541
+ str(nbr): {str(ekey): dd for ekey, dd in key_dict.items()}
542
+ for nbr, key_dict in nbr_dict.items()
543
+ }
544
+ for node, nbr_dict in G._adj.items()
545
+ }
546
+ fh.seek(0)
547
+ HH = nx.read_graphml(fh, node_type=str, edge_key_type=str)
548
+ assert Gadj == HH._adj
549
+
550
+ fh.seek(0)
551
+ string_fh = fh.read()
552
+ HH = nx.parse_graphml(string_fh, node_type=str, edge_key_type=str)
553
+ assert Gadj == HH._adj
554
+
555
    def test_yfiles_extension(self):
        """yFiles-flavored GraphML is readable: node labels, shape types, edge
        ids, and CDATA descriptions are extracted despite the yfiles.* keys.

        The fixture below was produced by yFiles for Java 2.7 (see the XML
        comment inside it).
        """
        data = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:y="http://www.yworks.com/xml/graphml"
xmlns:yed="http://www.yworks.com/xml/yed/3"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<!--Created by yFiles for Java 2.7-->
<key for="graphml" id="d0" yfiles.type="resources"/>
<key attr.name="url" attr.type="string" for="node" id="d1"/>
<key attr.name="description" attr.type="string" for="node" id="d2"/>
<key for="node" id="d3" yfiles.type="nodegraphics"/>
<key attr.name="Description" attr.type="string" for="graph" id="d4">
<default/>
</key>
<key attr.name="url" attr.type="string" for="edge" id="d5"/>
<key attr.name="description" attr.type="string" for="edge" id="d6"/>
<key for="edge" id="d7" yfiles.type="edgegraphics"/>
<graph edgedefault="directed" id="G">
<node id="n0">
<data key="d3">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="125.0" y="100.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content"
borderDistance="0.0" fontFamily="Dialog" fontSize="13"
fontStyle="plain" hasBackgroundColor="false" hasLineColor="false"
height="19.1328125" modelName="internal" modelPosition="c"
textColor="#000000" visible="true" width="12.27099609375"
x="8.864501953125" y="5.43359375">1</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1">
<data key="d3">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="183.0" y="205.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content"
borderDistance="0.0" fontFamily="Dialog" fontSize="13"
fontStyle="plain" hasBackgroundColor="false" hasLineColor="false"
height="19.1328125" modelName="internal" modelPosition="c"
textColor="#000000" visible="true" width="12.27099609375"
x="8.864501953125" y="5.43359375">2</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n2">
<data key="d6" xml:space="preserve"><![CDATA[description
line1
line2]]></data>
<data key="d3">
<y:GenericNode configuration="com.yworks.flowchart.terminator">
<y:Geometry height="40.0" width="80.0" x="950.0" y="286.0"/>
<y:Fill color="#E8EEF7" color2="#B7C9E3" transparent="false"/>
<y:BorderStyle color="#000000" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content"
fontFamily="Dialog" fontSize="12" fontStyle="plain"
hasBackgroundColor="false" hasLineColor="false" height="17.96875"
horizontalTextPosition="center" iconTextGap="4" modelName="custom"
textColor="#000000" verticalTextPosition="bottom" visible="true"
width="67.984375" x="6.0078125" xml:space="preserve"
y="11.015625">3<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/></y:LabelModel>
<y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0"
labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0"
offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
</y:GenericNode>
</data>
</node>
<edge id="e0" source="n0" target="n1">
<data key="d7">
<y:PolyLineEdge>
<y:Path sx="0.0" sy="0.0" tx="0.0" ty="0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
</graph>
<data key="d0">
<y:Resources/>
</data>
</graphml>
"""
        # force_multigraph=True: the edge id becomes the multigraph key.
        fh = io.BytesIO(data.encode("UTF-8"))
        G = nx.read_graphml(fh, force_multigraph=True)
        assert list(G.edges()) == [("n0", "n1")]
        assert G.has_edge("n0", "n1", key="e0")
        assert G.nodes["n0"]["label"] == "1"
        assert G.nodes["n1"]["label"] == "2"
        assert G.nodes["n2"]["label"] == "3"
        assert G.nodes["n0"]["shape_type"] == "rectangle"
        assert G.nodes["n1"]["shape_type"] == "rectangle"
        assert G.nodes["n2"]["shape_type"] == "com.yworks.flowchart.terminator"
        assert G.nodes["n2"]["description"] == "description\nline1\nline2"
        # Plain (non-multigraph) read: the edge id lands in the data dict.
        fh.seek(0)
        G = nx.read_graphml(fh)
        assert list(G.edges()) == [("n0", "n1")]
        assert G["n0"]["n1"]["id"] == "e0"
        assert G.nodes["n0"]["label"] == "1"
        assert G.nodes["n1"]["label"] == "2"
        assert G.nodes["n2"]["label"] == "3"
        assert G.nodes["n0"]["shape_type"] == "rectangle"
        assert G.nodes["n1"]["shape_type"] == "rectangle"
        assert G.nodes["n2"]["shape_type"] == "com.yworks.flowchart.terminator"
        assert G.nodes["n2"]["description"] == "description\nline1\nline2"

        # Same checks through the string-based entry point.
        H = nx.parse_graphml(data, force_multigraph=True)
        assert list(H.edges()) == [("n0", "n1")]
        assert H.has_edge("n0", "n1", key="e0")
        assert H.nodes["n0"]["label"] == "1"
        assert H.nodes["n1"]["label"] == "2"
        assert H.nodes["n2"]["label"] == "3"

        H = nx.parse_graphml(data)
        assert list(H.edges()) == [("n0", "n1")]
        assert H["n0"]["n1"]["id"] == "e0"
        assert H.nodes["n0"]["label"] == "1"
        assert H.nodes["n1"]["label"] == "2"
        assert H.nodes["n2"]["label"] == "3"
682
+
683
+ def test_bool(self):
684
+ s = """<?xml version="1.0" encoding="UTF-8"?>
685
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
686
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
687
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
688
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
689
+ <key id="d0" for="node" attr.name="test" attr.type="boolean">
690
+ <default>false</default>
691
+ </key>
692
+ <graph id="G" edgedefault="directed">
693
+ <node id="n0">
694
+ <data key="d0">true</data>
695
+ </node>
696
+ <node id="n1"/>
697
+ <node id="n2">
698
+ <data key="d0">false</data>
699
+ </node>
700
+ <node id="n3">
701
+ <data key="d0">FaLsE</data>
702
+ </node>
703
+ <node id="n4">
704
+ <data key="d0">True</data>
705
+ </node>
706
+ <node id="n5">
707
+ <data key="d0">0</data>
708
+ </node>
709
+ <node id="n6">
710
+ <data key="d0">1</data>
711
+ </node>
712
+ </graph>
713
+ </graphml>
714
+ """
715
+ fh = io.BytesIO(s.encode("UTF-8"))
716
+ G = nx.read_graphml(fh)
717
+ H = nx.parse_graphml(s)
718
+ for graph in [G, H]:
719
+ assert graph.nodes["n0"]["test"]
720
+ assert not graph.nodes["n2"]["test"]
721
+ assert not graph.nodes["n3"]["test"]
722
+ assert graph.nodes["n4"]["test"]
723
+ assert not graph.nodes["n5"]["test"]
724
+ assert graph.nodes["n6"]["test"]
725
+
726
+ def test_graphml_header_line(self):
727
+ good = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
728
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
729
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
730
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
731
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
732
+ <key id="d0" for="node" attr.name="test" attr.type="boolean">
733
+ <default>false</default>
734
+ </key>
735
+ <graph id="G">
736
+ <node id="n0">
737
+ <data key="d0">true</data>
738
+ </node>
739
+ </graph>
740
+ </graphml>
741
+ """
742
+ bad = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
743
+ <graphml>
744
+ <key id="d0" for="node" attr.name="test" attr.type="boolean">
745
+ <default>false</default>
746
+ </key>
747
+ <graph id="G">
748
+ <node id="n0">
749
+ <data key="d0">true</data>
750
+ </node>
751
+ </graph>
752
+ </graphml>
753
+ """
754
+ ugly = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
755
+ <graphml xmlns="https://ghghgh">
756
+ <key id="d0" for="node" attr.name="test" attr.type="boolean">
757
+ <default>false</default>
758
+ </key>
759
+ <graph id="G">
760
+ <node id="n0">
761
+ <data key="d0">true</data>
762
+ </node>
763
+ </graph>
764
+ </graphml>
765
+ """
766
+ for s in (good, bad):
767
+ fh = io.BytesIO(s.encode("UTF-8"))
768
+ G = nx.read_graphml(fh)
769
+ H = nx.parse_graphml(s)
770
+ for graph in [G, H]:
771
+ assert graph.nodes["n0"]["test"]
772
+
773
+ fh = io.BytesIO(ugly.encode("UTF-8"))
774
+ pytest.raises(nx.NetworkXError, nx.read_graphml, fh)
775
+ pytest.raises(nx.NetworkXError, nx.parse_graphml, ugly)
776
+
777
    def test_read_attributes_with_groups(self):
        """Nodes nested inside yEd group/folder nodes are read with their
        attributes intact (9 leaf/group nodes, each carrying CustomProperty).

        Fixture produced by yEd 3.17 (see the XML comment inside it).
        """
        data = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:java="http://www.yworks.com/xml/yfiles-common/1.0/java" xmlns:sys="http://www.yworks.com/xml/yfiles-common/markup/primitives/2.0" xmlns:x="http://www.yworks.com/xml/yfiles-common/markup/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd">
<!--Created by yEd 3.17-->
<key attr.name="Description" attr.type="string" for="graph" id="d0"/>
<key for="port" id="d1" yfiles.type="portgraphics"/>
<key for="port" id="d2" yfiles.type="portgeometry"/>
<key for="port" id="d3" yfiles.type="portuserdata"/>
<key attr.name="CustomProperty" attr.type="string" for="node" id="d4">
<default/>
</key>
<key attr.name="url" attr.type="string" for="node" id="d5"/>
<key attr.name="description" attr.type="string" for="node" id="d6"/>
<key for="node" id="d7" yfiles.type="nodegraphics"/>
<key for="graphml" id="d8" yfiles.type="resources"/>
<key attr.name="url" attr.type="string" for="edge" id="d9"/>
<key attr.name="description" attr.type="string" for="edge" id="d10"/>
<key for="edge" id="d11" yfiles.type="edgegraphics"/>
<graph edgedefault="directed" id="G">
<data key="d0"/>
<node id="n0">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="125.0" y="-255.4611111111111"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">2<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1" yfiles.foldertype="group">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d5"/>
<data key="d6"/>
<data key="d7">
<y:ProxyAutoBoundsNode>
<y:Realizers active="0">
<y:GroupNode>
<y:Geometry height="250.38333333333333" width="140.0" x="-30.0" y="-330.3833333333333"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="140.0" x="0.0" y="0.0">Group 3</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="false" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="15" bottomF="15.0" left="15" leftF="15.0" right="15" rightF="15.0" top="15" topF="15.0"/>
<y:BorderInsets bottom="1" bottomF="1.0" left="0" leftF="0.0" right="0" rightF="0.0" top="1" topF="1.0001736111111086"/>
</y:GroupNode>
<y:GroupNode>
<y:Geometry height="50.0" width="50.0" x="0.0" y="60.0"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="65.201171875" x="-7.6005859375" y="0.0">Folder 3</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="true" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="5" bottomF="5.0" left="5" leftF="5.0" right="5" rightF="5.0" top="5" topF="5.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
</y:Realizers>
</y:ProxyAutoBoundsNode>
</data>
<graph edgedefault="directed" id="n1:">
<node id="n1::n0" yfiles.foldertype="group">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d5"/>
<data key="d6"/>
<data key="d7">
<y:ProxyAutoBoundsNode>
<y:Realizers active="0">
<y:GroupNode>
<y:Geometry height="83.46111111111111" width="110.0" x="-15.0" y="-292.9222222222222"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="110.0" x="0.0" y="0.0">Group 1</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="false" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="15" bottomF="15.0" left="15" leftF="15.0" right="15" rightF="15.0" top="15" topF="15.0"/>
<y:BorderInsets bottom="1" bottomF="1.0" left="0" leftF="0.0" right="0" rightF="0.0" top="1" topF="1.0001736111111086"/>
</y:GroupNode>
<y:GroupNode>
<y:Geometry height="50.0" width="50.0" x="0.0" y="60.0"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="65.201171875" x="-7.6005859375" y="0.0">Folder 1</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="true" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="5" bottomF="5.0" left="5" leftF="5.0" right="5" rightF="5.0" top="5" topF="5.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
</y:Realizers>
</y:ProxyAutoBoundsNode>
</data>
<graph edgedefault="directed" id="n1::n0:">
<node id="n1::n0::n0">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="50.0" y="-255.4611111111111"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">1<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1::n0::n1">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="0.0" y="-255.4611111111111"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">3<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
</graph>
</node>
<node id="n1::n1" yfiles.foldertype="group">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d5"/>
<data key="d6"/>
<data key="d7">
<y:ProxyAutoBoundsNode>
<y:Realizers active="0">
<y:GroupNode>
<y:Geometry height="83.46111111111111" width="110.0" x="-15.0" y="-179.4611111111111"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="110.0" x="0.0" y="0.0">Group 2</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="false" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="15" bottomF="15.0" left="15" leftF="15.0" right="15" rightF="15.0" top="15" topF="15.0"/>
<y:BorderInsets bottom="1" bottomF="1.0" left="0" leftF="0.0" right="0" rightF="0.0" top="1" topF="1.0001736111111086"/>
</y:GroupNode>
<y:GroupNode>
<y:Geometry height="50.0" width="50.0" x="0.0" y="60.0"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="65.201171875" x="-7.6005859375" y="0.0">Folder 2</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="true" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="5" bottomF="5.0" left="5" leftF="5.0" right="5" rightF="5.0" top="5" topF="5.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
</y:Realizers>
</y:ProxyAutoBoundsNode>
</data>
<graph edgedefault="directed" id="n1::n1:">
<node id="n1::n1::n0">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="0.0" y="-142.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">5<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1::n1::n1">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="50.0" y="-142.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">6<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
</graph>
</node>
</graph>
</node>
<node id="n2">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="125.0" y="-142.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">9<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<edge id="n1::n1::e0" source="n1::n1::n0" target="n1::n1::n1">
<data key="d10"/>
<data key="d11">
<y:PolyLineEdge>
<y:Path sx="15.0" sy="-0.0" tx="-15.0" ty="-0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="n1::n0::e0" source="n1::n0::n1" target="n1::n0::n0">
<data key="d10"/>
<data key="d11">
<y:PolyLineEdge>
<y:Path sx="15.0" sy="-0.0" tx="-15.0" ty="-0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e0" source="n1::n0::n0" target="n0">
<data key="d10"/>
<data key="d11">
<y:PolyLineEdge>
<y:Path sx="15.0" sy="-0.0" tx="-15.0" ty="-0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e1" source="n1::n1::n1" target="n2">
<data key="d10"/>
<data key="d11">
<y:PolyLineEdge>
<y:Path sx="15.0" sy="-0.0" tx="-15.0" ty="-0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
</graph>
<data key="d8">
<y:Resources/>
</data>
</graphml>
"""
        # verify that nodes / attributes are correctly read when part of a group
        fh = io.BytesIO(data.encode("UTF-8"))
        G = nx.read_graphml(fh)
        # NOTE(review): `data` is rebound here, shadowing the XML string above.
        data = [x for _, x in G.nodes(data=True)]
        assert len(data) == 9
        for node_data in data:
            assert node_data["CustomProperty"] != ""
1068
+
1069
+ def test_long_attribute_type(self):
1070
+ # test that graphs with attr.type="long" (as produced by botch and
1071
+ # dose3) can be parsed
1072
+ s = """<?xml version='1.0' encoding='utf-8'?>
1073
+ <graphml xmlns="http://graphml.graphdrawing.org/xmlns"
1074
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
1075
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
1076
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
1077
+ <key attr.name="cudfversion" attr.type="long" for="node" id="d6" />
1078
+ <graph edgedefault="directed">
1079
+ <node id="n1">
1080
+ <data key="d6">4284</data>
1081
+ </node>
1082
+ </graph>
1083
+ </graphml>"""
1084
+ fh = io.BytesIO(s.encode("UTF-8"))
1085
+ G = nx.read_graphml(fh)
1086
+ expected = [("n1", {"cudfversion": 4284})]
1087
+ assert sorted(G.nodes(data=True)) == expected
1088
+ fh.seek(0)
1089
+ H = nx.parse_graphml(s)
1090
+ assert sorted(H.nodes(data=True)) == expected
1091
+
1092
+
1093
+ class TestWriteGraphML(BaseGraphML):
1094
+ writer = staticmethod(nx.write_graphml_lxml)
1095
+
1096
+ @classmethod
1097
+ def setup_class(cls):
1098
+ BaseGraphML.setup_class()
1099
+ _ = pytest.importorskip("lxml.etree")
1100
+
1101
+ def test_write_interface(self):
1102
+ try:
1103
+ import lxml.etree
1104
+
1105
+ assert nx.write_graphml == nx.write_graphml_lxml
1106
+ except ImportError:
1107
+ assert nx.write_graphml == nx.write_graphml_xml
1108
+
1109
+ def test_write_read_simple_directed_graphml(self):
1110
+ G = self.simple_directed_graph
1111
+ G.graph["hi"] = "there"
1112
+ fh = io.BytesIO()
1113
+ self.writer(G, fh)
1114
+ fh.seek(0)
1115
+ H = nx.read_graphml(fh)
1116
+ assert sorted(G.nodes()) == sorted(H.nodes())
1117
+ assert sorted(G.edges()) == sorted(H.edges())
1118
+ assert sorted(G.edges(data=True)) == sorted(H.edges(data=True))
1119
+ self.simple_directed_fh.seek(0)
1120
+
1121
+ def test_GraphMLWriter_add_graphs(self):
1122
+ gmlw = GraphMLWriter()
1123
+ G = self.simple_directed_graph
1124
+ H = G.copy()
1125
+ gmlw.add_graphs([G, H])
1126
+
1127
+ def test_write_read_simple_no_prettyprint(self):
1128
+ G = self.simple_directed_graph
1129
+ G.graph["hi"] = "there"
1130
+ G.graph["id"] = "1"
1131
+ fh = io.BytesIO()
1132
+ self.writer(G, fh, prettyprint=False)
1133
+ fh.seek(0)
1134
+ H = nx.read_graphml(fh)
1135
+ assert sorted(G.nodes()) == sorted(H.nodes())
1136
+ assert sorted(G.edges()) == sorted(H.edges())
1137
+ assert sorted(G.edges(data=True)) == sorted(H.edges(data=True))
1138
+ self.simple_directed_fh.seek(0)
1139
+
1140
+ def test_write_read_attribute_named_key_ids_graphml(self):
1141
+ from xml.etree.ElementTree import parse
1142
+
1143
+ G = self.attribute_named_key_ids_graph
1144
+ fh = io.BytesIO()
1145
+ self.writer(G, fh, named_key_ids=True)
1146
+ fh.seek(0)
1147
+ H = nx.read_graphml(fh)
1148
+ fh.seek(0)
1149
+
1150
+ assert nodes_equal(G.nodes(), H.nodes())
1151
+ assert edges_equal(G.edges(), H.edges())
1152
+ assert edges_equal(G.edges(data=True), H.edges(data=True))
1153
+ self.attribute_named_key_ids_fh.seek(0)
1154
+
1155
+ xml = parse(fh)
1156
+ # Children are the key elements, and the graph element
1157
+ children = list(xml.getroot())
1158
+ assert len(children) == 4
1159
+
1160
+ keys = [child.items() for child in children[:3]]
1161
+
1162
+ assert len(keys) == 3
1163
+ assert ("id", "edge_prop") in keys[0]
1164
+ assert ("attr.name", "edge_prop") in keys[0]
1165
+ assert ("id", "prop2") in keys[1]
1166
+ assert ("attr.name", "prop2") in keys[1]
1167
+ assert ("id", "prop1") in keys[2]
1168
+ assert ("attr.name", "prop1") in keys[2]
1169
+
1170
+ # Confirm the read graph nodes/edge are identical when compared to
1171
+ # default writing behavior.
1172
+ default_behavior_fh = io.BytesIO()
1173
+ nx.write_graphml(G, default_behavior_fh)
1174
+ default_behavior_fh.seek(0)
1175
+ H = nx.read_graphml(default_behavior_fh)
1176
+
1177
+ named_key_ids_behavior_fh = io.BytesIO()
1178
+ nx.write_graphml(G, named_key_ids_behavior_fh, named_key_ids=True)
1179
+ named_key_ids_behavior_fh.seek(0)
1180
+ J = nx.read_graphml(named_key_ids_behavior_fh)
1181
+
1182
+ assert all(n1 == n2 for (n1, n2) in zip(H.nodes, J.nodes))
1183
+ assert all(e1 == e2 for (e1, e2) in zip(H.edges, J.edges))
1184
+
1185
+ def test_write_read_attribute_numeric_type_graphml(self):
1186
+ from xml.etree.ElementTree import parse
1187
+
1188
+ G = self.attribute_numeric_type_graph
1189
+ fh = io.BytesIO()
1190
+ self.writer(G, fh, infer_numeric_types=True)
1191
+ fh.seek(0)
1192
+ H = nx.read_graphml(fh)
1193
+ fh.seek(0)
1194
+
1195
+ assert nodes_equal(G.nodes(), H.nodes())
1196
+ assert edges_equal(G.edges(), H.edges())
1197
+ assert edges_equal(G.edges(data=True), H.edges(data=True))
1198
+ self.attribute_numeric_type_fh.seek(0)
1199
+
1200
+ xml = parse(fh)
1201
+ # Children are the key elements, and the graph element
1202
+ children = list(xml.getroot())
1203
+ assert len(children) == 3
1204
+
1205
+ keys = [child.items() for child in children[:2]]
1206
+
1207
+ assert len(keys) == 2
1208
+ assert ("attr.type", "double") in keys[0]
1209
+ assert ("attr.type", "double") in keys[1]
1210
+
1211
+ def test_more_multigraph_keys(self, tmp_path):
1212
+ """Writing keys as edge id attributes means keys become strings.
1213
+ The original keys are stored as data, so read them back in
1214
+ if `str(key) == edge_id`
1215
+ This allows the adjacency to remain the same.
1216
+ """
1217
+ G = nx.MultiGraph()
1218
+ G.add_edges_from([("a", "b", 2), ("a", "b", 3)])
1219
+ fname = tmp_path / "test.graphml"
1220
+ self.writer(G, fname)
1221
+ H = nx.read_graphml(fname)
1222
+ assert H.is_multigraph()
1223
+ assert edges_equal(G.edges(keys=True), H.edges(keys=True))
1224
+ assert G._adj == H._adj
1225
+
1226
+ def test_default_attribute(self):
1227
+ G = nx.Graph(name="Fred")
1228
+ G.add_node(1, label=1, color="green")
1229
+ nx.add_path(G, [0, 1, 2, 3])
1230
+ G.add_edge(1, 2, weight=3)
1231
+ G.graph["node_default"] = {"color": "yellow"}
1232
+ G.graph["edge_default"] = {"weight": 7}
1233
+ fh = io.BytesIO()
1234
+ self.writer(G, fh)
1235
+ fh.seek(0)
1236
+ H = nx.read_graphml(fh, node_type=int)
1237
+ assert nodes_equal(G.nodes(), H.nodes())
1238
+ assert edges_equal(G.edges(), H.edges())
1239
+ assert G.graph == H.graph
1240
+
1241
+ def test_mixed_type_attributes(self):
1242
+ G = nx.MultiGraph()
1243
+ G.add_node("n0", special=False)
1244
+ G.add_node("n1", special=0)
1245
+ G.add_edge("n0", "n1", special=False)
1246
+ G.add_edge("n0", "n1", special=0)
1247
+ fh = io.BytesIO()
1248
+ self.writer(G, fh)
1249
+ fh.seek(0)
1250
+ H = nx.read_graphml(fh)
1251
+ assert not H.nodes["n0"]["special"]
1252
+ assert H.nodes["n1"]["special"] == 0
1253
+ assert not H.edges["n0", "n1", 0]["special"]
1254
+ assert H.edges["n0", "n1", 1]["special"] == 0
1255
+
1256
+ def test_str_number_mixed_type_attributes(self):
1257
+ G = nx.MultiGraph()
1258
+ G.add_node("n0", special="hello")
1259
+ G.add_node("n1", special=0)
1260
+ G.add_edge("n0", "n1", special="hello")
1261
+ G.add_edge("n0", "n1", special=0)
1262
+ fh = io.BytesIO()
1263
+ self.writer(G, fh)
1264
+ fh.seek(0)
1265
+ H = nx.read_graphml(fh)
1266
+ assert H.nodes["n0"]["special"] == "hello"
1267
+ assert H.nodes["n1"]["special"] == 0
1268
+ assert H.edges["n0", "n1", 0]["special"] == "hello"
1269
+ assert H.edges["n0", "n1", 1]["special"] == 0
1270
+
1271
+ def test_mixed_int_type_number_attributes(self):
1272
+ np = pytest.importorskip("numpy")
1273
+ G = nx.MultiGraph()
1274
+ G.add_node("n0", special=np.int64(0))
1275
+ G.add_node("n1", special=1)
1276
+ G.add_edge("n0", "n1", special=np.int64(2))
1277
+ G.add_edge("n0", "n1", special=3)
1278
+ fh = io.BytesIO()
1279
+ self.writer(G, fh)
1280
+ fh.seek(0)
1281
+ H = nx.read_graphml(fh)
1282
+ assert H.nodes["n0"]["special"] == 0
1283
+ assert H.nodes["n1"]["special"] == 1
1284
+ assert H.edges["n0", "n1", 0]["special"] == 2
1285
+ assert H.edges["n0", "n1", 1]["special"] == 3
1286
+
1287
+ def test_multigraph_to_graph(self, tmp_path):
1288
+ # test converting multigraph to graph if no parallel edges found
1289
+ G = nx.MultiGraph()
1290
+ G.add_edges_from([("a", "b", 2), ("b", "c", 3)]) # no multiedges
1291
+ fname = tmp_path / "test.graphml"
1292
+ self.writer(G, fname)
1293
+ H = nx.read_graphml(fname)
1294
+ assert not H.is_multigraph()
1295
+ H = nx.read_graphml(fname, force_multigraph=True)
1296
+ assert H.is_multigraph()
1297
+
1298
+ # add a multiedge
1299
+ G.add_edge("a", "b", "e-id")
1300
+ fname = tmp_path / "test.graphml"
1301
+ self.writer(G, fname)
1302
+ H = nx.read_graphml(fname)
1303
+ assert H.is_multigraph()
1304
+ H = nx.read_graphml(fname, force_multigraph=True)
1305
+ assert H.is_multigraph()
1306
+
1307
+ def test_write_generate_edge_id_from_attribute(self, tmp_path):
1308
+ from xml.etree.ElementTree import parse
1309
+
1310
+ G = nx.Graph()
1311
+ G.add_edges_from([("a", "b"), ("b", "c"), ("a", "c")])
1312
+ edge_attributes = {e: str(e) for e in G.edges}
1313
+ nx.set_edge_attributes(G, edge_attributes, "eid")
1314
+ fname = tmp_path / "test.graphml"
1315
+ # set edge_id_from_attribute e.g. "eid" for write_graphml()
1316
+ self.writer(G, fname, edge_id_from_attribute="eid")
1317
+ # set edge_id_from_attribute e.g. "eid" for generate_graphml()
1318
+ generator = nx.generate_graphml(G, edge_id_from_attribute="eid")
1319
+
1320
+ H = nx.read_graphml(fname)
1321
+ assert nodes_equal(G.nodes(), H.nodes())
1322
+ assert edges_equal(G.edges(), H.edges())
1323
+ # NetworkX adds explicit edge "id" from file as attribute
1324
+ nx.set_edge_attributes(G, edge_attributes, "id")
1325
+ assert edges_equal(G.edges(data=True), H.edges(data=True))
1326
+
1327
+ tree = parse(fname)
1328
+ children = list(tree.getroot())
1329
+ assert len(children) == 2
1330
+ edge_ids = [
1331
+ edge.attrib["id"]
1332
+ for edge in tree.getroot().findall(
1333
+ ".//{http://graphml.graphdrawing.org/xmlns}edge"
1334
+ )
1335
+ ]
1336
+ # verify edge id value is equal to specified attribute value
1337
+ assert sorted(edge_ids) == sorted(edge_attributes.values())
1338
+
1339
+ # check graphml generated from generate_graphml()
1340
+ data = "".join(generator)
1341
+ J = nx.parse_graphml(data)
1342
+ assert sorted(G.nodes()) == sorted(J.nodes())
1343
+ assert sorted(G.edges()) == sorted(J.edges())
1344
+ # NetworkX adds explicit edge "id" from file as attribute
1345
+ nx.set_edge_attributes(G, edge_attributes, "id")
1346
+ assert edges_equal(G.edges(data=True), J.edges(data=True))
1347
+
1348
+ def test_multigraph_write_generate_edge_id_from_attribute(self, tmp_path):
1349
+ from xml.etree.ElementTree import parse
1350
+
1351
+ G = nx.MultiGraph()
1352
+ G.add_edges_from([("a", "b"), ("b", "c"), ("a", "c"), ("a", "b")])
1353
+ edge_attributes = {e: str(e) for e in G.edges}
1354
+ nx.set_edge_attributes(G, edge_attributes, "eid")
1355
+ fname = tmp_path / "test.graphml"
1356
+ # set edge_id_from_attribute e.g. "eid" for write_graphml()
1357
+ self.writer(G, fname, edge_id_from_attribute="eid")
1358
+ # set edge_id_from_attribute e.g. "eid" for generate_graphml()
1359
+ generator = nx.generate_graphml(G, edge_id_from_attribute="eid")
1360
+
1361
+ H = nx.read_graphml(fname)
1362
+ assert H.is_multigraph()
1363
+ H = nx.read_graphml(fname, force_multigraph=True)
1364
+ assert H.is_multigraph()
1365
+
1366
+ assert nodes_equal(G.nodes(), H.nodes())
1367
+ assert edges_equal(G.edges(), H.edges())
1368
+ assert sorted(data.get("eid") for u, v, data in H.edges(data=True)) == sorted(
1369
+ edge_attributes.values()
1370
+ )
1371
+ # NetworkX uses edge_ids as keys in multigraphs if no key
1372
+ assert sorted(key for u, v, key in H.edges(keys=True)) == sorted(
1373
+ edge_attributes.values()
1374
+ )
1375
+
1376
+ tree = parse(fname)
1377
+ children = list(tree.getroot())
1378
+ assert len(children) == 2
1379
+ edge_ids = [
1380
+ edge.attrib["id"]
1381
+ for edge in tree.getroot().findall(
1382
+ ".//{http://graphml.graphdrawing.org/xmlns}edge"
1383
+ )
1384
+ ]
1385
+ # verify edge id value is equal to specified attribute value
1386
+ assert sorted(edge_ids) == sorted(edge_attributes.values())
1387
+
1388
+ # check graphml generated from generate_graphml()
1389
+ graphml_data = "".join(generator)
1390
+ J = nx.parse_graphml(graphml_data)
1391
+ assert J.is_multigraph()
1392
+
1393
+ assert nodes_equal(G.nodes(), J.nodes())
1394
+ assert edges_equal(G.edges(), J.edges())
1395
+ assert sorted(data.get("eid") for u, v, data in J.edges(data=True)) == sorted(
1396
+ edge_attributes.values()
1397
+ )
1398
+ # NetworkX uses edge_ids as keys in multigraphs if no key
1399
+ assert sorted(key for u, v, key in J.edges(keys=True)) == sorted(
1400
+ edge_attributes.values()
1401
+ )
1402
+
1403
+ def test_numpy_float64(self, tmp_path):
1404
+ np = pytest.importorskip("numpy")
1405
+ wt = np.float64(3.4)
1406
+ G = nx.Graph([(1, 2, {"weight": wt})])
1407
+ fname = tmp_path / "test.graphml"
1408
+ self.writer(G, fname)
1409
+ H = nx.read_graphml(fname, node_type=int)
1410
+ assert G.edges == H.edges
1411
+ wtG = G[1][2]["weight"]
1412
+ wtH = H[1][2]["weight"]
1413
+ assert wtG == pytest.approx(wtH, abs=1e-6)
1414
+ assert type(wtG) == np.float64
1415
+ assert type(wtH) == float
1416
+
1417
+ def test_numpy_float32(self, tmp_path):
1418
+ np = pytest.importorskip("numpy")
1419
+ wt = np.float32(3.4)
1420
+ G = nx.Graph([(1, 2, {"weight": wt})])
1421
+ fname = tmp_path / "test.graphml"
1422
+ self.writer(G, fname)
1423
+ H = nx.read_graphml(fname, node_type=int)
1424
+ assert G.edges == H.edges
1425
+ wtG = G[1][2]["weight"]
1426
+ wtH = H[1][2]["weight"]
1427
+ assert wtG == pytest.approx(wtH, abs=1e-6)
1428
+ assert type(wtG) == np.float32
1429
+ assert type(wtH) == float
1430
+
1431
+ def test_numpy_float64_inference(self, tmp_path):
1432
+ np = pytest.importorskip("numpy")
1433
+ G = self.attribute_numeric_type_graph
1434
+ G.edges[("n1", "n1")]["weight"] = np.float64(1.1)
1435
+ fname = tmp_path / "test.graphml"
1436
+ self.writer(G, fname, infer_numeric_types=True)
1437
+ H = nx.read_graphml(fname)
1438
+ assert G._adj == H._adj
1439
+
1440
+ def test_unicode_attributes(self, tmp_path):
1441
+ G = nx.Graph()
1442
+ name1 = chr(2344) + chr(123) + chr(6543)
1443
+ name2 = chr(5543) + chr(1543) + chr(324)
1444
+ node_type = str
1445
+ G.add_edge(name1, "Radiohead", foo=name2)
1446
+ fname = tmp_path / "test.graphml"
1447
+ self.writer(G, fname)
1448
+ H = nx.read_graphml(fname, node_type=node_type)
1449
+ assert G._adj == H._adj
1450
+
1451
+ def test_unicode_escape(self):
1452
+ # test for handling json escaped strings in python 2 Issue #1880
1453
+ import json
1454
+
1455
+ a = {"a": '{"a": "123"}'} # an object with many chars to escape
1456
+ sa = json.dumps(a)
1457
+ G = nx.Graph()
1458
+ G.graph["test"] = sa
1459
+ fh = io.BytesIO()
1460
+ self.writer(G, fh)
1461
+ fh.seek(0)
1462
+ H = nx.read_graphml(fh)
1463
+ assert G.graph["test"] == H.graph["test"]
1464
+
1465
+
1466
+ class TestXMLGraphML(TestWriteGraphML):
1467
+ writer = staticmethod(nx.write_graphml_xml)
1468
+
1469
+ @classmethod
1470
+ def setup_class(cls):
1471
+ TestWriteGraphML.setup_class()
1472
+
1473
+
1474
+ def test_exception_for_unsupported_datatype_node_attr():
1475
+ """Test that a detailed exception is raised when an attribute is of a type
1476
+ not supported by GraphML, e.g. a list"""
1477
+ pytest.importorskip("lxml.etree")
1478
+ # node attribute
1479
+ G = nx.Graph()
1480
+ G.add_node(0, my_list_attribute=[0, 1, 2])
1481
+ fh = io.BytesIO()
1482
+ with pytest.raises(TypeError, match="GraphML does not support"):
1483
+ nx.write_graphml(G, fh)
1484
+
1485
+
1486
+ def test_exception_for_unsupported_datatype_edge_attr():
1487
+ """Test that a detailed exception is raised when an attribute is of a type
1488
+ not supported by GraphML, e.g. a list"""
1489
+ pytest.importorskip("lxml.etree")
1490
+ # edge attribute
1491
+ G = nx.Graph()
1492
+ G.add_edge(0, 1, my_list_attribute=[0, 1, 2])
1493
+ fh = io.BytesIO()
1494
+ with pytest.raises(TypeError, match="GraphML does not support"):
1495
+ nx.write_graphml(G, fh)
1496
+
1497
+
1498
+ def test_exception_for_unsupported_datatype_graph_attr():
1499
+ """Test that a detailed exception is raised when an attribute is of a type
1500
+ not supported by GraphML, e.g. a list"""
1501
+ pytest.importorskip("lxml.etree")
1502
+ # graph attribute
1503
+ G = nx.Graph()
1504
+ G.graph["my_list_attribute"] = [0, 1, 2]
1505
+ fh = io.BytesIO()
1506
+ with pytest.raises(TypeError, match="GraphML does not support"):
1507
+ nx.write_graphml(G, fh)
1508
+
1509
+
1510
+ def test_empty_attribute():
1511
+ """Tests that a GraphML string with an empty attribute can be parsed
1512
+ correctly."""
1513
+ s = """<?xml version='1.0' encoding='utf-8'?>
1514
+ <graphml>
1515
+ <key id="d1" for="node" attr.name="foo" attr.type="string"/>
1516
+ <key id="d2" for="node" attr.name="bar" attr.type="string"/>
1517
+ <graph>
1518
+ <node id="0">
1519
+ <data key="d1">aaa</data>
1520
+ <data key="d2">bbb</data>
1521
+ </node>
1522
+ <node id="1">
1523
+ <data key="d1">ccc</data>
1524
+ <data key="d2"></data>
1525
+ </node>
1526
+ </graph>
1527
+ </graphml>"""
1528
+ fh = io.BytesIO(s.encode("UTF-8"))
1529
+ G = nx.read_graphml(fh)
1530
+ assert G.nodes["0"] == {"foo": "aaa", "bar": "bbb"}
1531
+ assert G.nodes["1"] == {"foo": "ccc", "bar": ""}
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_leda.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+
3
+ import networkx as nx
4
+
5
+
6
+ class TestLEDA:
7
+ def test_parse_leda(self):
8
+ data = """#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|"""
9
+ G = nx.parse_leda(data)
10
+ G = nx.parse_leda(data.split("\n"))
11
+ assert sorted(G.nodes()) == ["v1", "v2", "v3", "v4", "v5"]
12
+ assert sorted(G.edges(data=True)) == [
13
+ ("v1", "v2", {"label": "4"}),
14
+ ("v1", "v3", {"label": "3"}),
15
+ ("v2", "v3", {"label": "2"}),
16
+ ("v3", "v4", {"label": "3"}),
17
+ ("v3", "v5", {"label": "7"}),
18
+ ("v4", "v5", {"label": "6"}),
19
+ ("v5", "v1", {"label": "foo"}),
20
+ ]
21
+
22
+ def test_read_LEDA(self):
23
+ fh = io.BytesIO()
24
+ data = """#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|"""
25
+ G = nx.parse_leda(data)
26
+ fh.write(data.encode("UTF-8"))
27
+ fh.seek(0)
28
+ Gin = nx.read_leda(fh)
29
+ assert sorted(G.nodes()) == sorted(Gin.nodes())
30
+ assert sorted(G.edges()) == sorted(Gin.edges())
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_p2g.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+
3
+ import networkx as nx
4
+ from networkx.readwrite.p2g import read_p2g, write_p2g
5
+ from networkx.utils import edges_equal
6
+
7
+
8
+ class TestP2G:
9
+ @classmethod
10
+ def setup_class(cls):
11
+ cls.G = nx.Graph(name="test")
12
+ e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")]
13
+ cls.G.add_edges_from(e)
14
+ cls.G.add_node("g")
15
+ cls.DG = nx.DiGraph(cls.G)
16
+
17
+ def test_read_p2g(self):
18
+ s = b"""\
19
+ name
20
+ 3 4
21
+ a
22
+ 1 2
23
+ b
24
+
25
+ c
26
+ 0 2
27
+ """
28
+ bytesIO = io.BytesIO(s)
29
+ G = read_p2g(bytesIO)
30
+ assert G.name == "name"
31
+ assert sorted(G) == ["a", "b", "c"]
32
+ edges = [(str(u), str(v)) for u, v in G.edges()]
33
+ assert edges_equal(G.edges(), [("a", "c"), ("a", "b"), ("c", "a"), ("c", "c")])
34
+
35
+ def test_write_p2g(self):
36
+ s = b"""foo
37
+ 3 2
38
+ 1
39
+ 1
40
+ 2
41
+ 2
42
+ 3
43
+
44
+ """
45
+ fh = io.BytesIO()
46
+ G = nx.DiGraph()
47
+ G.name = "foo"
48
+ G.add_edges_from([(1, 2), (2, 3)])
49
+ write_p2g(G, fh)
50
+ fh.seek(0)
51
+ r = fh.read()
52
+ assert r == s
53
+
54
+ def test_write_read_p2g(self):
55
+ fh = io.BytesIO()
56
+ G = nx.DiGraph()
57
+ G.name = "foo"
58
+ G.add_edges_from([("a", "b"), ("b", "c")])
59
+ write_p2g(G, fh)
60
+ fh.seek(0)
61
+ H = read_p2g(fh)
62
+ assert edges_equal(G.edges(), H.edges())
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_pajek.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pajek tests
3
+ """
4
+
5
+ import networkx as nx
6
+ from networkx.utils import edges_equal, nodes_equal
7
+
8
+
9
+ class TestPajek:
10
+ @classmethod
11
+ def setup_class(cls):
12
+ cls.data = """*network Tralala\n*vertices 4\n 1 "A1" 0.0938 0.0896 ellipse x_fact 1 y_fact 1\n 2 "Bb" 0.8188 0.2458 ellipse x_fact 1 y_fact 1\n 3 "C" 0.3688 0.7792 ellipse x_fact 1\n 4 "D2" 0.9583 0.8563 ellipse x_fact 1\n*arcs\n1 1 1 h2 0 w 3 c Blue s 3 a1 -130 k1 0.6 a2 -130 k2 0.6 ap 0.5 l "Bezier loop" lc BlueViolet fos 20 lr 58 lp 0.3 la 360\n2 1 1 h2 0 a1 120 k1 1.3 a2 -120 k2 0.3 ap 25 l "Bezier arc" lphi 270 la 180 lr 19 lp 0.5\n1 2 1 h2 0 a1 40 k1 2.8 a2 30 k2 0.8 ap 25 l "Bezier arc" lphi 90 la 0 lp 0.65\n4 2 -1 h2 0 w 1 k1 -2 k2 250 ap 25 l "Circular arc" c Red lc OrangeRed\n3 4 1 p Dashed h2 0 w 2 c OliveGreen ap 25 l "Straight arc" lc PineGreen\n1 3 1 p Dashed h2 0 w 5 k1 -1 k2 -20 ap 25 l "Oval arc" c Brown lc Black\n3 3 -1 h1 6 w 1 h2 12 k1 -2 k2 -15 ap 0.5 l "Circular loop" c Red lc OrangeRed lphi 270 la 180"""
13
+ cls.G = nx.MultiDiGraph()
14
+ cls.G.add_nodes_from(["A1", "Bb", "C", "D2"])
15
+ cls.G.add_edges_from(
16
+ [
17
+ ("A1", "A1"),
18
+ ("A1", "Bb"),
19
+ ("A1", "C"),
20
+ ("Bb", "A1"),
21
+ ("C", "C"),
22
+ ("C", "D2"),
23
+ ("D2", "Bb"),
24
+ ]
25
+ )
26
+
27
+ cls.G.graph["name"] = "Tralala"
28
+
29
+ def test_parse_pajek_simple(self):
30
+ # Example without node positions or shape
31
+ data = """*Vertices 2\n1 "1"\n2 "2"\n*Edges\n1 2\n2 1"""
32
+ G = nx.parse_pajek(data)
33
+ assert sorted(G.nodes()) == ["1", "2"]
34
+ assert edges_equal(G.edges(), [("1", "2"), ("1", "2")])
35
+
36
+ def test_parse_pajek(self):
37
+ G = nx.parse_pajek(self.data)
38
+ assert sorted(G.nodes()) == ["A1", "Bb", "C", "D2"]
39
+ assert edges_equal(
40
+ G.edges(),
41
+ [
42
+ ("A1", "A1"),
43
+ ("A1", "Bb"),
44
+ ("A1", "C"),
45
+ ("Bb", "A1"),
46
+ ("C", "C"),
47
+ ("C", "D2"),
48
+ ("D2", "Bb"),
49
+ ],
50
+ )
51
+
52
+ def test_parse_pajet_mat(self):
53
+ data = """*Vertices 3\n1 "one"\n2 "two"\n3 "three"\n*Matrix\n1 1 0\n0 1 0\n0 1 0\n"""
54
+ G = nx.parse_pajek(data)
55
+ assert set(G.nodes()) == {"one", "two", "three"}
56
+ assert G.nodes["two"] == {"id": "2"}
57
+ assert edges_equal(
58
+ set(G.edges()),
59
+ {("one", "one"), ("two", "one"), ("two", "two"), ("two", "three")},
60
+ )
61
+
62
+ def test_read_pajek(self, tmp_path):
63
+ G = nx.parse_pajek(self.data)
64
+ # Read data from file
65
+ fname = tmp_path / "test.pjk"
66
+ with open(fname, "wb") as fh:
67
+ fh.write(self.data.encode("UTF-8"))
68
+
69
+ Gin = nx.read_pajek(fname)
70
+ assert sorted(G.nodes()) == sorted(Gin.nodes())
71
+ assert edges_equal(G.edges(), Gin.edges())
72
+ assert self.G.graph == Gin.graph
73
+ for n in G:
74
+ assert G.nodes[n] == Gin.nodes[n]
75
+
76
+ def test_write_pajek(self):
77
+ import io
78
+
79
+ G = nx.parse_pajek(self.data)
80
+ fh = io.BytesIO()
81
+ nx.write_pajek(G, fh)
82
+ fh.seek(0)
83
+ H = nx.read_pajek(fh)
84
+ assert nodes_equal(list(G), list(H))
85
+ assert edges_equal(list(G.edges()), list(H.edges()))
86
+ # Graph name is left out for now, therefore it is not tested.
87
+ # assert_equal(G.graph, H.graph)
88
+
89
+ def test_ignored_attribute(self):
90
+ import io
91
+
92
+ G = nx.Graph()
93
+ fh = io.BytesIO()
94
+ G.add_node(1, int_attr=1)
95
+ G.add_node(2, empty_attr=" ")
96
+ G.add_edge(1, 2, int_attr=2)
97
+ G.add_edge(2, 3, empty_attr=" ")
98
+
99
+ import warnings
100
+
101
+ with warnings.catch_warnings(record=True) as w:
102
+ nx.write_pajek(G, fh)
103
+ assert len(w) == 4
104
+
105
+ def test_noname(self):
106
+ # Make sure we can parse a line such as: *network
107
+ # Issue #952
108
+ line = "*network\n"
109
+ other_lines = self.data.split("\n")[1:]
110
+ data = line + "\n".join(other_lines)
111
+ G = nx.parse_pajek(data)
112
+
113
+ def test_unicode(self):
114
+ import io
115
+
116
+ G = nx.Graph()
117
+ name1 = chr(2344) + chr(123) + chr(6543)
118
+ name2 = chr(5543) + chr(1543) + chr(324)
119
+ G.add_edge(name1, "Radiohead", foo=name2)
120
+ fh = io.BytesIO()
121
+ nx.write_pajek(G, fh)
122
+ fh.seek(0)
123
+ H = nx.read_pajek(fh)
124
+ assert nodes_equal(list(G), list(H))
125
+ assert edges_equal(list(G.edges()), list(H.edges()))
126
+ assert G.graph == H.graph
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_sparse6.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import BytesIO
2
+
3
+ import pytest
4
+
5
+ import networkx as nx
6
+ from networkx.utils import edges_equal, nodes_equal
7
+
8
+
9
+ class TestSparseGraph6:
10
+ def test_from_sparse6_bytes(self):
11
+ data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM"
12
+ G = nx.from_sparse6_bytes(data)
13
+ assert nodes_equal(
14
+ sorted(G.nodes()),
15
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
16
+ )
17
+ assert edges_equal(
18
+ G.edges(),
19
+ [
20
+ (0, 1),
21
+ (0, 2),
22
+ (0, 3),
23
+ (1, 12),
24
+ (1, 14),
25
+ (2, 13),
26
+ (2, 15),
27
+ (3, 16),
28
+ (3, 17),
29
+ (4, 7),
30
+ (4, 9),
31
+ (4, 11),
32
+ (5, 6),
33
+ (5, 8),
34
+ (5, 9),
35
+ (6, 10),
36
+ (6, 11),
37
+ (7, 8),
38
+ (7, 10),
39
+ (8, 12),
40
+ (9, 15),
41
+ (10, 14),
42
+ (11, 13),
43
+ (12, 16),
44
+ (13, 17),
45
+ (14, 17),
46
+ (15, 16),
47
+ ],
48
+ )
49
+
50
+ def test_from_bytes_multigraph_graph(self):
51
+ graph_data = b":An"
52
+ G = nx.from_sparse6_bytes(graph_data)
53
+ assert type(G) == nx.Graph
54
+ multigraph_data = b":Ab"
55
+ M = nx.from_sparse6_bytes(multigraph_data)
56
+ assert type(M) == nx.MultiGraph
57
+
58
+ def test_read_sparse6(self):
59
+ data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM"
60
+ G = nx.from_sparse6_bytes(data)
61
+ fh = BytesIO(data)
62
+ Gin = nx.read_sparse6(fh)
63
+ assert nodes_equal(G.nodes(), Gin.nodes())
64
+ assert edges_equal(G.edges(), Gin.edges())
65
+
66
+ def test_read_many_graph6(self):
67
+ # Read many graphs into list
68
+ data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM\n" b":Q___dCfDEdcEgcbEGbFIaJ`JaHN`IM"
69
+ fh = BytesIO(data)
70
+ glist = nx.read_sparse6(fh)
71
+ assert len(glist) == 2
72
+ for G in glist:
73
+ assert nodes_equal(
74
+ G.nodes(),
75
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
76
+ )
77
+
78
+
79
+ class TestWriteSparse6:
80
+ """Unit tests for writing graphs in the sparse6 format.
81
+
82
+ Most of the test cases were checked against the sparse6 encoder in Sage.
83
+
84
+ """
85
+
86
+ def test_null_graph(self):
87
+ G = nx.null_graph()
88
+ result = BytesIO()
89
+ nx.write_sparse6(G, result)
90
+ assert result.getvalue() == b">>sparse6<<:?\n"
91
+
92
+ def test_trivial_graph(self):
93
+ G = nx.trivial_graph()
94
+ result = BytesIO()
95
+ nx.write_sparse6(G, result)
96
+ assert result.getvalue() == b">>sparse6<<:@\n"
97
+
98
+ def test_empty_graph(self):
99
+ G = nx.empty_graph(5)
100
+ result = BytesIO()
101
+ nx.write_sparse6(G, result)
102
+ assert result.getvalue() == b">>sparse6<<:D\n"
103
+
104
+ def test_large_empty_graph(self):
105
+ G = nx.empty_graph(68)
106
+ result = BytesIO()
107
+ nx.write_sparse6(G, result)
108
+ assert result.getvalue() == b">>sparse6<<:~?@C\n"
109
+
110
+ def test_very_large_empty_graph(self):
111
+ G = nx.empty_graph(258049)
112
+ result = BytesIO()
113
+ nx.write_sparse6(G, result)
114
+ assert result.getvalue() == b">>sparse6<<:~~???~?@\n"
115
+
116
+ def test_complete_graph(self):
117
+ G = nx.complete_graph(4)
118
+ result = BytesIO()
119
+ nx.write_sparse6(G, result)
120
+ assert result.getvalue() == b">>sparse6<<:CcKI\n"
121
+
122
+ def test_no_header(self):
123
+ G = nx.complete_graph(4)
124
+ result = BytesIO()
125
+ nx.write_sparse6(G, result, header=False)
126
+ assert result.getvalue() == b":CcKI\n"
127
+
128
+ def test_padding(self):
129
+ codes = (b":Cdv", b":DaYn", b":EaYnN", b":FaYnL", b":GaYnLz")
130
+ for n, code in enumerate(codes, start=4):
131
+ G = nx.path_graph(n)
132
+ result = BytesIO()
133
+ nx.write_sparse6(G, result, header=False)
134
+ assert result.getvalue() == code + b"\n"
135
+
136
+ def test_complete_bipartite(self):
137
+ G = nx.complete_bipartite_graph(6, 9)
138
+ result = BytesIO()
139
+ nx.write_sparse6(G, result)
140
+ # Compared with sage
141
+ expected = b">>sparse6<<:Nk" + b"?G`cJ" * 9 + b"\n"
142
+ assert result.getvalue() == expected
143
+
144
+ def test_read_write_inverse(self):
145
+ for i in list(range(13)) + [31, 47, 62, 63, 64, 72]:
146
+ m = min(2 * i, i * i // 2)
147
+ g = nx.random_graphs.gnm_random_graph(i, m, seed=i)
148
+ gstr = BytesIO()
149
+ nx.write_sparse6(g, gstr, header=False)
150
+ # Strip the trailing newline.
151
+ gstr = gstr.getvalue().rstrip()
152
+ g2 = nx.from_sparse6_bytes(gstr)
153
+ assert g2.order() == g.order()
154
+ assert edges_equal(g2.edges(), g.edges())
155
+
156
+ def test_no_directed_graphs(self):
157
+ with pytest.raises(nx.NetworkXNotImplemented):
158
+ nx.write_sparse6(nx.DiGraph(), BytesIO())
159
+
160
+ def test_write_path(self, tmp_path):
161
+ # Get a valid temporary file name
162
+ fullfilename = str(tmp_path / "test.s6")
163
+ # file should be closed now, so write_sparse6 can open it
164
+ nx.write_sparse6(nx.null_graph(), fullfilename)
165
+ with open(fullfilename, mode="rb") as fh:
166
+ assert fh.read() == b">>sparse6<<:?\n"
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/tests/test_text.py ADDED
@@ -0,0 +1,1742 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ from itertools import product
3
+ from textwrap import dedent
4
+
5
+ import pytest
6
+
7
+ import networkx as nx
8
+
9
+
10
+ def test_generate_network_text_forest_directed():
11
+ # Create a directed forest with labels
12
+ graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
13
+ for node in graph.nodes:
14
+ graph.nodes[node]["label"] = "node_" + chr(ord("a") + node)
15
+
16
+ node_target = dedent(
17
+ """
18
+ ╙── 0
19
+ ├─╼ 1
20
+ │ ├─╼ 3
21
+ │ └─╼ 4
22
+ └─╼ 2
23
+ ├─╼ 5
24
+ └─╼ 6
25
+ """
26
+ ).strip()
27
+
28
+ label_target = dedent(
29
+ """
30
+ ╙── node_a
31
+ ├─╼ node_b
32
+ │ ├─╼ node_d
33
+ │ └─╼ node_e
34
+ └─╼ node_c
35
+ ├─╼ node_f
36
+ └─╼ node_g
37
+ """
38
+ ).strip()
39
+
40
+ # Basic node case
41
+ ret = nx.generate_network_text(graph, with_labels=False)
42
+ assert "\n".join(ret) == node_target
43
+
44
+ # Basic label case
45
+ ret = nx.generate_network_text(graph, with_labels=True)
46
+ assert "\n".join(ret) == label_target
47
+
48
+
49
+ def test_write_network_text_empty_graph():
50
+ def _graph_str(g, **kw):
51
+ printbuf = []
52
+ nx.write_network_text(g, printbuf.append, end="", **kw)
53
+ return "\n".join(printbuf)
54
+
55
+ assert _graph_str(nx.DiGraph()) == "╙"
56
+ assert _graph_str(nx.Graph()) == "╙"
57
+ assert _graph_str(nx.DiGraph(), ascii_only=True) == "+"
58
+ assert _graph_str(nx.Graph(), ascii_only=True) == "+"
59
+
60
+
61
+ def test_write_network_text_within_forest_glyph():
62
+ g = nx.DiGraph()
63
+ g.add_nodes_from([1, 2, 3, 4])
64
+ g.add_edge(2, 4)
65
+ lines = []
66
+ write = lines.append
67
+ nx.write_network_text(g, path=write, end="")
68
+ nx.write_network_text(g, path=write, ascii_only=True, end="")
69
+ text = "\n".join(lines)
70
+ target = dedent(
71
+ """
72
+ ╟── 1
73
+ ╟── 2
74
+ ╎ └─╼ 4
75
+ ╙── 3
76
+ +-- 1
77
+ +-- 2
78
+ : L-> 4
79
+ +-- 3
80
+ """
81
+ ).strip()
82
+ assert text == target
83
+
84
+
85
+ def test_generate_network_text_directed_multi_tree():
86
+ tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
87
+ tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
88
+ forest = nx.disjoint_union_all([tree1, tree2])
89
+ ret = "\n".join(nx.generate_network_text(forest))
90
+
91
+ target = dedent(
92
+ """
93
+ ╟── 0
94
+ ╎ ├─╼ 1
95
+ ╎ │ ├─╼ 3
96
+ ╎ │ └─╼ 4
97
+ ╎ └─╼ 2
98
+ ╎ ├─╼ 5
99
+ ╎ └─╼ 6
100
+ ╙── 7
101
+ ├─╼ 8
102
+ │ ├─╼ 10
103
+ │ └─╼ 11
104
+ └─╼ 9
105
+ ├─╼ 12
106
+ └─╼ 13
107
+ """
108
+ ).strip()
109
+ assert ret == target
110
+
111
+ tree3 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
112
+ forest = nx.disjoint_union_all([tree1, tree2, tree3])
113
+ ret = "\n".join(nx.generate_network_text(forest, sources=[0, 14, 7]))
114
+
115
+ target = dedent(
116
+ """
117
+ ╟── 0
118
+ ╎ ├─╼ 1
119
+ ╎ │ ├─╼ 3
120
+ ╎ │ └─╼ 4
121
+ ╎ └─╼ 2
122
+ ╎ ├─╼ 5
123
+ ╎ └─╼ 6
124
+ ╟── 14
125
+ ╎ ├─╼ 15
126
+ ╎ │ ├─╼ 17
127
+ ╎ │ └─╼ 18
128
+ ╎ └─╼ 16
129
+ ╎ ├─╼ 19
130
+ ╎ └─╼ 20
131
+ ╙── 7
132
+ ├─╼ 8
133
+ │ ├─╼ 10
134
+ │ └─╼ 11
135
+ └─╼ 9
136
+ ├─╼ 12
137
+ └─╼ 13
138
+ """
139
+ ).strip()
140
+ assert ret == target
141
+
142
+ ret = "\n".join(
143
+ nx.generate_network_text(forest, sources=[0, 14, 7], ascii_only=True)
144
+ )
145
+
146
+ target = dedent(
147
+ """
148
+ +-- 0
149
+ : |-> 1
150
+ : | |-> 3
151
+ : | L-> 4
152
+ : L-> 2
153
+ : |-> 5
154
+ : L-> 6
155
+ +-- 14
156
+ : |-> 15
157
+ : | |-> 17
158
+ : | L-> 18
159
+ : L-> 16
160
+ : |-> 19
161
+ : L-> 20
162
+ +-- 7
163
+ |-> 8
164
+ | |-> 10
165
+ | L-> 11
166
+ L-> 9
167
+ |-> 12
168
+ L-> 13
169
+ """
170
+ ).strip()
171
+ assert ret == target
172
+
173
+
174
+ def test_generate_network_text_undirected_multi_tree():
175
+ tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph)
176
+ tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph)
177
+ tree2 = nx.relabel_nodes(tree2, {n: n + len(tree1) for n in tree2.nodes})
178
+ forest = nx.union(tree1, tree2)
179
+ ret = "\n".join(nx.generate_network_text(forest, sources=[0, 7]))
180
+
181
+ target = dedent(
182
+ """
183
+ ╟── 0
184
+ ╎ ├── 1
185
+ ╎ │ ├── 3
186
+ ╎ │ └── 4
187
+ ╎ └── 2
188
+ ╎ ├── 5
189
+ ╎ └── 6
190
+ ╙── 7
191
+ ├── 8
192
+ │ ├── 10
193
+ │ └── 11
194
+ └── 9
195
+ ├── 12
196
+ └── 13
197
+ """
198
+ ).strip()
199
+ assert ret == target
200
+
201
+ ret = "\n".join(nx.generate_network_text(forest, sources=[0, 7], ascii_only=True))
202
+
203
+ target = dedent(
204
+ """
205
+ +-- 0
206
+ : |-- 1
207
+ : | |-- 3
208
+ : | L-- 4
209
+ : L-- 2
210
+ : |-- 5
211
+ : L-- 6
212
+ +-- 7
213
+ |-- 8
214
+ | |-- 10
215
+ | L-- 11
216
+ L-- 9
217
+ |-- 12
218
+ L-- 13
219
+ """
220
+ ).strip()
221
+ assert ret == target
222
+
223
+
224
+ def test_generate_network_text_forest_undirected():
225
+ # Create a directed forest
226
+ graph = nx.balanced_tree(r=2, h=2, create_using=nx.Graph)
227
+
228
+ node_target0 = dedent(
229
+ """
230
+ ╙── 0
231
+ ├── 1
232
+ │ ├── 3
233
+ │ └── 4
234
+ └── 2
235
+ ├── 5
236
+ └── 6
237
+ """
238
+ ).strip()
239
+
240
+ # defined starting point
241
+ ret = "\n".join(nx.generate_network_text(graph, sources=[0]))
242
+ assert ret == node_target0
243
+
244
+ # defined starting point
245
+ node_target2 = dedent(
246
+ """
247
+ ╙── 2
248
+ ├── 0
249
+ │ └── 1
250
+ │ ├── 3
251
+ │ └── 4
252
+ ├── 5
253
+ └── 6
254
+ """
255
+ ).strip()
256
+ ret = "\n".join(nx.generate_network_text(graph, sources=[2]))
257
+ assert ret == node_target2
258
+
259
+
260
+ def test_generate_network_text_overspecified_sources():
261
+ """
262
+ When sources are directly specified, we won't be able to determine when we
263
+ are in the last component, so there will always be a trailing, leftmost
264
+ pipe.
265
+ """
266
+ graph = nx.disjoint_union_all(
267
+ [
268
+ nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph),
269
+ nx.balanced_tree(r=1, h=2, create_using=nx.DiGraph),
270
+ nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph),
271
+ ]
272
+ )
273
+
274
+ # defined starting point
275
+ target1 = dedent(
276
+ """
277
+ ╟── 0
278
+ ╎ ├─╼ 1
279
+ ╎ └─╼ 2
280
+ ╟── 3
281
+ ╎ └─╼ 4
282
+ ╎ └─╼ 5
283
+ ╟── 6
284
+ ╎ ├─╼ 7
285
+ ╎ └─╼ 8
286
+ """
287
+ ).strip()
288
+
289
+ target2 = dedent(
290
+ """
291
+ ╟── 0
292
+ ╎ ├─╼ 1
293
+ ╎ └─╼ 2
294
+ ╟── 3
295
+ ╎ └─╼ 4
296
+ ╎ └─╼ 5
297
+ ╙── 6
298
+ ├─╼ 7
299
+ └─╼ 8
300
+ """
301
+ ).strip()
302
+
303
+ got1 = "\n".join(nx.generate_network_text(graph, sources=graph.nodes))
304
+ got2 = "\n".join(nx.generate_network_text(graph))
305
+ assert got1 == target1
306
+ assert got2 == target2
307
+
308
+
309
+ def test_write_network_text_iterative_add_directed_edges():
310
+ """
311
+ Walk through the cases going from a disconnected to fully connected graph
312
+ """
313
+ graph = nx.DiGraph()
314
+ graph.add_nodes_from([1, 2, 3, 4])
315
+ lines = []
316
+ write = lines.append
317
+ write("--- initial state ---")
318
+ nx.write_network_text(graph, path=write, end="")
319
+ for i, j in product(graph.nodes, graph.nodes):
320
+ write(f"--- add_edge({i}, {j}) ---")
321
+ graph.add_edge(i, j)
322
+ nx.write_network_text(graph, path=write, end="")
323
+ text = "\n".join(lines)
324
+ # defined starting point
325
+ target = dedent(
326
+ """
327
+ --- initial state ---
328
+ ╟── 1
329
+ ╟── 2
330
+ ╟── 3
331
+ ╙── 4
332
+ --- add_edge(1, 1) ---
333
+ ╟── 1 ╾ 1
334
+ ╎ └─╼ ...
335
+ ╟── 2
336
+ ╟── 3
337
+ ╙── 4
338
+ --- add_edge(1, 2) ---
339
+ ╟── 1 ╾ 1
340
+ ╎ ├─╼ 2
341
+ ╎ └─╼ ...
342
+ ╟── 3
343
+ ╙── 4
344
+ --- add_edge(1, 3) ---
345
+ ╟── 1 ╾ 1
346
+ ╎ ├─╼ 2
347
+ ╎ ├─╼ 3
348
+ ╎ └─╼ ...
349
+ ╙── 4
350
+ --- add_edge(1, 4) ---
351
+ ╙── 1 ╾ 1
352
+ ├─╼ 2
353
+ ├─╼ 3
354
+ ├─╼ 4
355
+ └─╼ ...
356
+ --- add_edge(2, 1) ---
357
+ ╙── 2 ╾ 1
358
+ └─╼ 1 ╾ 1
359
+ ├─╼ 3
360
+ ├─╼ 4
361
+ └─╼ ...
362
+ --- add_edge(2, 2) ---
363
+ ╙── 1 ╾ 1, 2
364
+ ├─╼ 2 ╾ 2
365
+ │ └─╼ ...
366
+ ├─╼ 3
367
+ ├─╼ 4
368
+ └─╼ ...
369
+ --- add_edge(2, 3) ---
370
+ ╙── 1 ╾ 1, 2
371
+ ├─╼ 2 ╾ 2
372
+ │ ├─╼ 3 ╾ 1
373
+ │ └─╼ ...
374
+ ├─╼ 4
375
+ └─╼ ...
376
+ --- add_edge(2, 4) ---
377
+ ╙── 1 ╾ 1, 2
378
+ ├─╼ 2 ╾ 2
379
+ │ ├─╼ 3 ╾ 1
380
+ │ ├─╼ 4 ╾ 1
381
+ │ └─╼ ...
382
+ └─╼ ...
383
+ --- add_edge(3, 1) ---
384
+ ╙── 2 ╾ 1, 2
385
+ ├─╼ 1 ╾ 1, 3
386
+ │ ├─╼ 3 ╾ 2
387
+ │ │ └─╼ ...
388
+ │ ├─╼ 4 ╾ 2
389
+ │ └─╼ ...
390
+ └─╼ ...
391
+ --- add_edge(3, 2) ---
392
+ ╙── 3 ╾ 1, 2
393
+ ├─╼ 1 ╾ 1, 2
394
+ │ ├─╼ 2 ╾ 2, 3
395
+ │ │ ├─╼ 4 ╾ 1
396
+ │ │ └─╼ ...
397
+ │ └─╼ ...
398
+ └─╼ ...
399
+ --- add_edge(3, 3) ---
400
+ ╙── 1 ╾ 1, 2, 3
401
+ ├─╼ 2 ╾ 2, 3
402
+ │ ├─╼ 3 ╾ 1, 3
403
+ │ │ └─╼ ...
404
+ │ ├─╼ 4 ╾ 1
405
+ │ └─╼ ...
406
+ └─╼ ...
407
+ --- add_edge(3, 4) ---
408
+ ╙── 1 ╾ 1, 2, 3
409
+ ├─╼ 2 ╾ 2, 3
410
+ │ ├─╼ 3 ╾ 1, 3
411
+ │ │ ├─╼ 4 ╾ 1, 2
412
+ │ │ └─╼ ...
413
+ │ └─╼ ...
414
+ └─╼ ...
415
+ --- add_edge(4, 1) ---
416
+ ╙── 2 ╾ 1, 2, 3
417
+ ├─╼ 1 ╾ 1, 3, 4
418
+ │ ├─╼ 3 ╾ 2, 3
419
+ │ │ ├─╼ 4 ╾ 1, 2
420
+ │ │ │ └─╼ ...
421
+ │ │ └─╼ ...
422
+ │ └─╼ ...
423
+ └─╼ ...
424
+ --- add_edge(4, 2) ---
425
+ ╙── 3 ╾ 1, 2, 3
426
+ ├─╼ 1 ╾ 1, 2, 4
427
+ │ ├─╼ 2 ╾ 2, 3, 4
428
+ │ │ ├─╼ 4 ╾ 1, 3
429
+ │ │ │ └─╼ ...
430
+ │ │ └─╼ ...
431
+ │ └─╼ ...
432
+ └─╼ ...
433
+ --- add_edge(4, 3) ---
434
+ ╙── 4 ╾ 1, 2, 3
435
+ ├─╼ 1 ╾ 1, 2, 3
436
+ │ ├─╼ 2 ╾ 2, 3, 4
437
+ │ │ ├─╼ 3 ╾ 1, 3, 4
438
+ │ │ │ └─╼ ...
439
+ │ │ └─╼ ...
440
+ │ └─╼ ...
441
+ └─╼ ...
442
+ --- add_edge(4, 4) ---
443
+ ╙── 1 ╾ 1, 2, 3, 4
444
+ ├─╼ 2 ╾ 2, 3, 4
445
+ │ ├─╼ 3 ╾ 1, 3, 4
446
+ │ │ ├─╼ 4 ╾ 1, 2, 4
447
+ │ │ │ └─╼ ...
448
+ │ │ └─╼ ...
449
+ │ └─╼ ...
450
+ └─╼ ...
451
+ """
452
+ ).strip()
453
+ assert target == text
454
+
455
+
456
+ def test_write_network_text_iterative_add_undirected_edges():
457
+ """
458
+ Walk through the cases going from a disconnected to fully connected graph
459
+ """
460
+ graph = nx.Graph()
461
+ graph.add_nodes_from([1, 2, 3, 4])
462
+ lines = []
463
+ write = lines.append
464
+ write("--- initial state ---")
465
+ nx.write_network_text(graph, path=write, end="")
466
+ for i, j in product(graph.nodes, graph.nodes):
467
+ if i == j:
468
+ continue
469
+ write(f"--- add_edge({i}, {j}) ---")
470
+ graph.add_edge(i, j)
471
+ nx.write_network_text(graph, path=write, end="")
472
+ text = "\n".join(lines)
473
+ target = dedent(
474
+ """
475
+ --- initial state ---
476
+ ╟── 1
477
+ ╟── 2
478
+ ╟── 3
479
+ ╙── 4
480
+ --- add_edge(1, 2) ---
481
+ ╟── 3
482
+ ╟── 4
483
+ ╙── 1
484
+ └── 2
485
+ --- add_edge(1, 3) ---
486
+ ╟── 4
487
+ ╙── 2
488
+ └── 1
489
+ └── 3
490
+ --- add_edge(1, 4) ---
491
+ ╙── 2
492
+ └── 1
493
+ ├── 3
494
+ └── 4
495
+ --- add_edge(2, 1) ---
496
+ ╙── 2
497
+ └── 1
498
+ ├── 3
499
+ └── 4
500
+ --- add_edge(2, 3) ---
501
+ ╙── 4
502
+ └── 1
503
+ ├── 2
504
+ │ └── 3 ─ 1
505
+ └── ...
506
+ --- add_edge(2, 4) ---
507
+ ╙── 3
508
+ ├── 1
509
+ │ ├── 2 ─ 3
510
+ │ │ └── 4 ─ 1
511
+ │ └── ...
512
+ └── ...
513
+ --- add_edge(3, 1) ---
514
+ ╙── 3
515
+ ├── 1
516
+ │ ├── 2 ─ 3
517
+ │ │ └── 4 ─ 1
518
+ │ └── ...
519
+ └── ...
520
+ --- add_edge(3, 2) ---
521
+ ╙── 3
522
+ ├── 1
523
+ │ ├── 2 ─ 3
524
+ │ │ └── 4 ─ 1
525
+ │ └── ...
526
+ └── ...
527
+ --- add_edge(3, 4) ---
528
+ ╙── 1
529
+ ├── 2
530
+ │ ├── 3 ─ 1
531
+ │ │ └── 4 ─ 1, 2
532
+ │ └── ...
533
+ └── ...
534
+ --- add_edge(4, 1) ---
535
+ ╙── 1
536
+ ├── 2
537
+ │ ├── 3 ─ 1
538
+ │ │ └── 4 ─ 1, 2
539
+ │ └── ...
540
+ └── ...
541
+ --- add_edge(4, 2) ---
542
+ ╙── 1
543
+ ├── 2
544
+ │ ├── 3 ─ 1
545
+ │ │ └── 4 ─ 1, 2
546
+ │ └── ...
547
+ └── ...
548
+ --- add_edge(4, 3) ---
549
+ ╙── 1
550
+ ├── 2
551
+ │ ├── 3 ─ 1
552
+ │ │ └── 4 ─ 1, 2
553
+ │ └── ...
554
+ └── ...
555
+ """
556
+ ).strip()
557
+ assert target == text
558
+
559
+
560
+ def test_write_network_text_iterative_add_random_directed_edges():
561
+ """
562
+ Walk through the cases going from a disconnected to fully connected graph
563
+ """
564
+
565
+ rng = random.Random(724466096)
566
+ graph = nx.DiGraph()
567
+ graph.add_nodes_from([1, 2, 3, 4, 5])
568
+ possible_edges = list(product(graph.nodes, graph.nodes))
569
+ rng.shuffle(possible_edges)
570
+ graph.add_edges_from(possible_edges[0:8])
571
+ lines = []
572
+ write = lines.append
573
+ write("--- initial state ---")
574
+ nx.write_network_text(graph, path=write, end="")
575
+ for i, j in possible_edges[8:12]:
576
+ write(f"--- add_edge({i}, {j}) ---")
577
+ graph.add_edge(i, j)
578
+ nx.write_network_text(graph, path=write, end="")
579
+ text = "\n".join(lines)
580
+ target = dedent(
581
+ """
582
+ --- initial state ---
583
+ ╙── 3 ╾ 5
584
+ └─╼ 2 ╾ 2
585
+ ├─╼ 4 ╾ 4
586
+ │ ├─╼ 5
587
+ │ │ ├─╼ 1 ╾ 1
588
+ │ │ │ └─╼ ...
589
+ │ │ └─╼ ...
590
+ │ └─╼ ...
591
+ └─╼ ...
592
+ --- add_edge(4, 1) ---
593
+ ╙── 3 ╾ 5
594
+ └─╼ 2 ╾ 2
595
+ ├─╼ 4 ╾ 4
596
+ │ ├─╼ 5
597
+ │ │ ├─╼ 1 ╾ 1, 4
598
+ │ │ │ └─╼ ...
599
+ │ │ └─╼ ...
600
+ │ └─╼ ...
601
+ └─╼ ...
602
+ --- add_edge(2, 1) ---
603
+ ╙── 3 ╾ 5
604
+ └─╼ 2 ╾ 2
605
+ ├─╼ 4 ╾ 4
606
+ │ ├─╼ 5
607
+ │ │ ├─╼ 1 ╾ 1, 4, 2
608
+ │ │ │ └─╼ ...
609
+ │ │ └─╼ ...
610
+ │ └─╼ ...
611
+ └─╼ ...
612
+ --- add_edge(5, 2) ---
613
+ ╙── 3 ╾ 5
614
+ └─╼ 2 ╾ 2, 5
615
+ ├─╼ 4 ╾ 4
616
+ │ ├─╼ 5
617
+ │ │ ├─╼ 1 ╾ 1, 4, 2
618
+ │ │ │ └─╼ ...
619
+ │ │ └─╼ ...
620
+ │ └─╼ ...
621
+ └─╼ ...
622
+ --- add_edge(1, 5) ---
623
+ ╙── 3 ╾ 5
624
+ └─╼ 2 ╾ 2, 5
625
+ ├─╼ 4 ╾ 4
626
+ │ ├─╼ 5 ╾ 1
627
+ │ │ ├─╼ 1 ╾ 1, 4, 2
628
+ │ │ │ └─╼ ...
629
+ │ │ └─╼ ...
630
+ │ └─╼ ...
631
+ └─╼ ...
632
+
633
+ """
634
+ ).strip()
635
+ assert target == text
636
+
637
+
638
+ def test_write_network_text_nearly_forest():
639
+ g = nx.DiGraph()
640
+ g.add_edge(1, 2)
641
+ g.add_edge(1, 5)
642
+ g.add_edge(2, 3)
643
+ g.add_edge(3, 4)
644
+ g.add_edge(5, 6)
645
+ g.add_edge(6, 7)
646
+ g.add_edge(6, 8)
647
+ orig = g.copy()
648
+ g.add_edge(1, 8) # forward edge
649
+ g.add_edge(4, 2) # back edge
650
+ g.add_edge(6, 3) # cross edge
651
+ lines = []
652
+ write = lines.append
653
+ write("--- directed case ---")
654
+ nx.write_network_text(orig, path=write, end="")
655
+ write("--- add (1, 8), (4, 2), (6, 3) ---")
656
+ nx.write_network_text(g, path=write, end="")
657
+ write("--- undirected case ---")
658
+ nx.write_network_text(orig.to_undirected(), path=write, sources=[1], end="")
659
+ write("--- add (1, 8), (4, 2), (6, 3) ---")
660
+ nx.write_network_text(g.to_undirected(), path=write, sources=[1], end="")
661
+ text = "\n".join(lines)
662
+ target = dedent(
663
+ """
664
+ --- directed case ---
665
+ ╙── 1
666
+ ├─╼ 2
667
+ │ └─╼ 3
668
+ │ └─╼ 4
669
+ └─╼ 5
670
+ └─╼ 6
671
+ ├─╼ 7
672
+ └─╼ 8
673
+ --- add (1, 8), (4, 2), (6, 3) ---
674
+ ╙── 1
675
+ ├─╼ 2 ╾ 4
676
+ │ └─╼ 3 ╾ 6
677
+ │ └─╼ 4
678
+ │ └─╼ ...
679
+ ├─╼ 5
680
+ │ └─╼ 6
681
+ │ ├─╼ 7
682
+ │ ├─╼ 8 ╾ 1
683
+ │ └─╼ ...
684
+ └─╼ ...
685
+ --- undirected case ---
686
+ ╙── 1
687
+ ├── 2
688
+ │ └── 3
689
+ │ └── 4
690
+ └── 5
691
+ └── 6
692
+ ├── 7
693
+ └── 8
694
+ --- add (1, 8), (4, 2), (6, 3) ---
695
+ ╙── 1
696
+ ├── 2
697
+ │ ├── 3
698
+ │ │ ├── 4 ─ 2
699
+ │ │ └── 6
700
+ │ │ ├── 5 ─ 1
701
+ │ │ ├── 7
702
+ │ │ └── 8 ─ 1
703
+ │ └── ...
704
+ └── ...
705
+ """
706
+ ).strip()
707
+ assert target == text
708
+
709
+
710
+ def test_write_network_text_complete_graph_ascii_only():
711
+ graph = nx.generators.complete_graph(5, create_using=nx.DiGraph)
712
+ lines = []
713
+ write = lines.append
714
+ write("--- directed case ---")
715
+ nx.write_network_text(graph, path=write, ascii_only=True, end="")
716
+ write("--- undirected case ---")
717
+ nx.write_network_text(graph.to_undirected(), path=write, ascii_only=True, end="")
718
+ text = "\n".join(lines)
719
+ target = dedent(
720
+ """
721
+ --- directed case ---
722
+ +-- 0 <- 1, 2, 3, 4
723
+ |-> 1 <- 2, 3, 4
724
+ | |-> 2 <- 0, 3, 4
725
+ | | |-> 3 <- 0, 1, 4
726
+ | | | |-> 4 <- 0, 1, 2
727
+ | | | | L-> ...
728
+ | | | L-> ...
729
+ | | L-> ...
730
+ | L-> ...
731
+ L-> ...
732
+ --- undirected case ---
733
+ +-- 0
734
+ |-- 1
735
+ | |-- 2 - 0
736
+ | | |-- 3 - 0, 1
737
+ | | | L-- 4 - 0, 1, 2
738
+ | | L-- ...
739
+ | L-- ...
740
+ L-- ...
741
+ """
742
+ ).strip()
743
+ assert target == text
744
+
745
+
746
+ def test_write_network_text_with_labels():
747
+ graph = nx.generators.complete_graph(5, create_using=nx.DiGraph)
748
+ for n in graph.nodes:
749
+ graph.nodes[n]["label"] = f"Node(n={n})"
750
+ lines = []
751
+ write = lines.append
752
+ nx.write_network_text(graph, path=write, with_labels=True, ascii_only=False, end="")
753
+ text = "\n".join(lines)
754
+ # Non trees with labels can get somewhat out of hand with network text
755
+ # because we need to immediately show every non-tree edge to the right
756
+ target = dedent(
757
+ """
758
+ ╙── Node(n=0) ╾ Node(n=1), Node(n=2), Node(n=3), Node(n=4)
759
+ ├─╼ Node(n=1) ╾ Node(n=2), Node(n=3), Node(n=4)
760
+ │ ├─╼ Node(n=2) ╾ Node(n=0), Node(n=3), Node(n=4)
761
+ │ │ ├─╼ Node(n=3) ╾ Node(n=0), Node(n=1), Node(n=4)
762
+ │ │ │ ├─╼ Node(n=4) ╾ Node(n=0), Node(n=1), Node(n=2)
763
+ │ │ │ │ └─╼ ...
764
+ │ │ │ └─╼ ...
765
+ │ │ └─╼ ...
766
+ │ └─╼ ...
767
+ └─╼ ...
768
+ """
769
+ ).strip()
770
+ assert target == text
771
+
772
+
773
+ def test_write_network_text_complete_graphs():
774
+ lines = []
775
+ write = lines.append
776
+ for k in [0, 1, 2, 3, 4, 5]:
777
+ g = nx.generators.complete_graph(k)
778
+ write(f"--- undirected k={k} ---")
779
+ nx.write_network_text(g, path=write, end="")
780
+
781
+ for k in [0, 1, 2, 3, 4, 5]:
782
+ g = nx.generators.complete_graph(k, nx.DiGraph)
783
+ write(f"--- directed k={k} ---")
784
+ nx.write_network_text(g, path=write, end="")
785
+ text = "\n".join(lines)
786
+ target = dedent(
787
+ """
788
+ --- undirected k=0 ---
789
+
790
+ --- undirected k=1 ---
791
+ ╙── 0
792
+ --- undirected k=2 ---
793
+ ╙── 0
794
+ └── 1
795
+ --- undirected k=3 ---
796
+ ╙── 0
797
+ ├── 1
798
+ │ └── 2 ─ 0
799
+ └── ...
800
+ --- undirected k=4 ---
801
+ ╙── 0
802
+ ├── 1
803
+ │ ├── 2 ─ 0
804
+ │ │ └── 3 ─ 0, 1
805
+ │ └── ...
806
+ └── ...
807
+ --- undirected k=5 ---
808
+ ╙── 0
809
+ ├── 1
810
+ │ ├── 2 ─ 0
811
+ │ │ ├── 3 ─ 0, 1
812
+ │ │ │ └── 4 ─ 0, 1, 2
813
+ │ │ └── ...
814
+ │ └── ...
815
+ └── ...
816
+ --- directed k=0 ---
817
+
818
+ --- directed k=1 ---
819
+ ╙── 0
820
+ --- directed k=2 ---
821
+ ╙── 0 ╾ 1
822
+ └─╼ 1
823
+ └─╼ ...
824
+ --- directed k=3 ---
825
+ ╙── 0 ╾ 1, 2
826
+ ├─╼ 1 ╾ 2
827
+ │ ├─╼ 2 ╾ 0
828
+ │ │ └─╼ ...
829
+ │ └─╼ ...
830
+ └─╼ ...
831
+ --- directed k=4 ---
832
+ ╙── 0 ╾ 1, 2, 3
833
+ ├─╼ 1 ╾ 2, 3
834
+ │ ├─╼ 2 ╾ 0, 3
835
+ │ │ ├─╼ 3 ╾ 0, 1
836
+ │ │ │ └─╼ ...
837
+ │ │ └─╼ ...
838
+ │ └─╼ ...
839
+ └─╼ ...
840
+ --- directed k=5 ---
841
+ ╙── 0 ╾ 1, 2, 3, 4
842
+ ├─╼ 1 ╾ 2, 3, 4
843
+ │ ├─╼ 2 ╾ 0, 3, 4
844
+ │ │ ├─╼ 3 ╾ 0, 1, 4
845
+ │ │ │ ├─╼ 4 ╾ 0, 1, 2
846
+ │ │ │ │ └─╼ ...
847
+ │ │ │ └─╼ ...
848
+ │ │ └─╼ ...
849
+ │ └─╼ ...
850
+ └─╼ ...
851
+ """
852
+ ).strip()
853
+ assert target == text
854
+
855
+
856
+ def test_write_network_text_multiple_sources():
857
+ g = nx.DiGraph()
858
+ g.add_edge(1, 2)
859
+ g.add_edge(1, 3)
860
+ g.add_edge(2, 4)
861
+ g.add_edge(3, 5)
862
+ g.add_edge(3, 6)
863
+ g.add_edge(5, 4)
864
+ g.add_edge(4, 1)
865
+ g.add_edge(1, 5)
866
+ lines = []
867
+ write = lines.append
868
+ # Use each node as the starting point to demonstrate how the representation
869
+ # changes.
870
+ nodes = sorted(g.nodes())
871
+ for n in nodes:
872
+ write(f"--- source node: {n} ---")
873
+ nx.write_network_text(g, path=write, sources=[n], end="")
874
+ text = "\n".join(lines)
875
+ target = dedent(
876
+ """
877
+ --- source node: 1 ---
878
+ ╙── 1 ╾ 4
879
+ ├─╼ 2
880
+ │ └─╼ 4 ╾ 5
881
+ │ └─╼ ...
882
+ ├─╼ 3
883
+ │ ├─╼ 5 ╾ 1
884
+ │ │ └─╼ ...
885
+ │ └─╼ 6
886
+ └─╼ ...
887
+ --- source node: 2 ---
888
+ ╙── 2 ╾ 1
889
+ └─╼ 4 ╾ 5
890
+ └─╼ 1
891
+ ├─╼ 3
892
+ │ ├─╼ 5 ╾ 1
893
+ │ │ └─╼ ...
894
+ │ └─╼ 6
895
+ └─╼ ...
896
+ --- source node: 3 ---
897
+ ╙── 3 ╾ 1
898
+ ├─╼ 5 ╾ 1
899
+ │ └─╼ 4 ╾ 2
900
+ │ └─╼ 1
901
+ │ ├─╼ 2
902
+ │ │ └─╼ ...
903
+ │ └─╼ ...
904
+ └─╼ 6
905
+ --- source node: 4 ---
906
+ ╙── 4 ╾ 2, 5
907
+ └─╼ 1
908
+ ├─╼ 2
909
+ │ └─╼ ...
910
+ ├─╼ 3
911
+ │ ├─╼ 5 ╾ 1
912
+ │ │ └─╼ ...
913
+ │ └─╼ 6
914
+ └─╼ ...
915
+ --- source node: 5 ---
916
+ ╙── 5 ╾ 3, 1
917
+ └─╼ 4 ╾ 2
918
+ └─╼ 1
919
+ ├─╼ 2
920
+ │ └─╼ ...
921
+ ├─╼ 3
922
+ │ ├─╼ 6
923
+ │ └─╼ ...
924
+ └─╼ ...
925
+ --- source node: 6 ---
926
+ ╙── 6 ╾ 3
927
+ """
928
+ ).strip()
929
+ assert target == text
930
+
931
+
932
+ def test_write_network_text_star_graph():
933
+ graph = nx.star_graph(5, create_using=nx.Graph)
934
+ lines = []
935
+ write = lines.append
936
+ nx.write_network_text(graph, path=write, end="")
937
+ text = "\n".join(lines)
938
+ target = dedent(
939
+ """
940
+ ╙── 1
941
+ └── 0
942
+ ├── 2
943
+ ├── 3
944
+ ├── 4
945
+ └── 5
946
+ """
947
+ ).strip()
948
+ assert target == text
949
+
950
+
951
+ def test_write_network_text_path_graph():
952
+ graph = nx.path_graph(3, create_using=nx.Graph)
953
+ lines = []
954
+ write = lines.append
955
+ nx.write_network_text(graph, path=write, end="")
956
+ text = "\n".join(lines)
957
+ target = dedent(
958
+ """
959
+ ╙── 0
960
+ └── 1
961
+ └── 2
962
+ """
963
+ ).strip()
964
+ assert target == text
965
+
966
+
967
+ def test_write_network_text_lollipop_graph():
968
+ graph = nx.lollipop_graph(4, 2, create_using=nx.Graph)
969
+ lines = []
970
+ write = lines.append
971
+ nx.write_network_text(graph, path=write, end="")
972
+ text = "\n".join(lines)
973
+ target = dedent(
974
+ """
975
+ ╙── 5
976
+ └── 4
977
+ └── 3
978
+ ├── 0
979
+ │ ├── 1 ─ 3
980
+ │ │ └── 2 ─ 0, 3
981
+ │ └── ...
982
+ └── ...
983
+ """
984
+ ).strip()
985
+ assert target == text
986
+
987
+
988
+ def test_write_network_text_wheel_graph():
989
+ graph = nx.wheel_graph(7, create_using=nx.Graph)
990
+ lines = []
991
+ write = lines.append
992
+ nx.write_network_text(graph, path=write, end="")
993
+ text = "\n".join(lines)
994
+ target = dedent(
995
+ """
996
+ ╙── 1
997
+ ├── 0
998
+ │ ├── 2 ─ 1
999
+ │ │ └── 3 ─ 0
1000
+ │ │ └── 4 ─ 0
1001
+ │ │ └── 5 ─ 0
1002
+ │ │ └── 6 ─ 0, 1
1003
+ │ └── ...
1004
+ └── ...
1005
+ """
1006
+ ).strip()
1007
+ assert target == text
1008
+
1009
+
1010
+ def test_write_network_text_circular_ladder_graph():
1011
+ graph = nx.circular_ladder_graph(4, create_using=nx.Graph)
1012
+ lines = []
1013
+ write = lines.append
1014
+ nx.write_network_text(graph, path=write, end="")
1015
+ text = "\n".join(lines)
1016
+ target = dedent(
1017
+ """
1018
+ ╙── 0
1019
+ ├── 1
1020
+ │ ├── 2
1021
+ │ │ ├── 3 ─ 0
1022
+ │ │ │ └── 7
1023
+ │ │ │ ├── 6 ─ 2
1024
+ │ │ │ │ └── 5 ─ 1
1025
+ │ │ │ │ └── 4 ─ 0, 7
1026
+ │ │ │ └── ...
1027
+ │ │ └── ...
1028
+ │ └── ...
1029
+ └── ...
1030
+ """
1031
+ ).strip()
1032
+ assert target == text
1033
+
1034
+
1035
+ def test_write_network_text_dorogovtsev_goltsev_mendes_graph():
1036
+ graph = nx.dorogovtsev_goltsev_mendes_graph(4, create_using=nx.Graph)
1037
+ lines = []
1038
+ write = lines.append
1039
+ nx.write_network_text(graph, path=write, end="")
1040
+ text = "\n".join(lines)
1041
+ target = dedent(
1042
+ """
1043
+ ╙── 15
1044
+ ├── 0
1045
+ │ ├── 1 ─ 15
1046
+ │ │ ├── 2 ─ 0
1047
+ │ │ │ ├── 4 ─ 0
1048
+ │ │ │ │ ├── 9 ─ 0
1049
+ │ │ │ │ │ ├── 22 ─ 0
1050
+ │ │ │ │ │ └── 38 ─ 4
1051
+ │ │ │ │ ├── 13 ─ 2
1052
+ │ │ │ │ │ ├── 34 ─ 2
1053
+ │ │ │ │ │ └── 39 ─ 4
1054
+ │ │ │ │ ├── 18 ─ 0
1055
+ │ │ │ │ ├── 30 ─ 2
1056
+ │ │ │ │ └── ...
1057
+ │ │ │ ├── 5 ─ 1
1058
+ │ │ │ │ ├── 12 ─ 1
1059
+ │ │ │ │ │ ├── 29 ─ 1
1060
+ │ │ │ │ │ └── 40 ─ 5
1061
+ │ │ │ │ ├── 14 ─ 2
1062
+ │ │ │ │ │ ├── 35 ─ 2
1063
+ │ │ │ │ │ └── 41 ─ 5
1064
+ │ │ │ │ ├── 25 ─ 1
1065
+ │ │ │ │ ├── 31 ─ 2
1066
+ │ │ │ │ └── ...
1067
+ │ │ │ ├── 7 ─ 0
1068
+ │ │ │ │ ├── 20 ─ 0
1069
+ │ │ │ │ └── 32 ─ 2
1070
+ │ │ │ ├── 10 ─ 1
1071
+ │ │ │ │ ├── 27 ─ 1
1072
+ │ │ │ │ └── 33 ─ 2
1073
+ │ │ │ ├── 16 ─ 0
1074
+ │ │ │ ├── 23 ─ 1
1075
+ │ │ │ └── ...
1076
+ │ │ ├── 3 ─ 0
1077
+ │ │ │ ├── 8 ─ 0
1078
+ │ │ │ │ ├── 21 ─ 0
1079
+ │ │ │ │ └── 36 ─ 3
1080
+ │ │ │ ├── 11 ─ 1
1081
+ │ │ │ │ ├── 28 ─ 1
1082
+ │ │ │ │ └── 37 ─ 3
1083
+ │ │ │ ├── 17 ─ 0
1084
+ │ │ │ ├── 24 ─ 1
1085
+ │ │ │ └── ...
1086
+ │ │ ├── 6 ─ 0
1087
+ │ │ │ ├── 19 ─ 0
1088
+ │ │ │ └── 26 ─ 1
1089
+ │ │ └── ...
1090
+ │ └── ...
1091
+ └── ...
1092
+ """
1093
+ ).strip()
1094
+ assert target == text
1095
+
1096
+
1097
+ def test_write_network_text_tree_max_depth():
1098
+ orig = nx.balanced_tree(r=1, h=3, create_using=nx.DiGraph)
1099
+ lines = []
1100
+ write = lines.append
1101
+ write("--- directed case, max_depth=0 ---")
1102
+ nx.write_network_text(orig, path=write, end="", max_depth=0)
1103
+ write("--- directed case, max_depth=1 ---")
1104
+ nx.write_network_text(orig, path=write, end="", max_depth=1)
1105
+ write("--- directed case, max_depth=2 ---")
1106
+ nx.write_network_text(orig, path=write, end="", max_depth=2)
1107
+ write("--- directed case, max_depth=3 ---")
1108
+ nx.write_network_text(orig, path=write, end="", max_depth=3)
1109
+ write("--- directed case, max_depth=4 ---")
1110
+ nx.write_network_text(orig, path=write, end="", max_depth=4)
1111
+ write("--- undirected case, max_depth=0 ---")
1112
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0)
1113
+ write("--- undirected case, max_depth=1 ---")
1114
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1)
1115
+ write("--- undirected case, max_depth=2 ---")
1116
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2)
1117
+ write("--- undirected case, max_depth=3 ---")
1118
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3)
1119
+ write("--- undirected case, max_depth=4 ---")
1120
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=4)
1121
+ text = "\n".join(lines)
1122
+ target = dedent(
1123
+ """
1124
+ --- directed case, max_depth=0 ---
1125
+ ╙ ...
1126
+ --- directed case, max_depth=1 ---
1127
+ ╙── 0
1128
+ └─╼ ...
1129
+ --- directed case, max_depth=2 ---
1130
+ ╙── 0
1131
+ └─╼ 1
1132
+ └─╼ ...
1133
+ --- directed case, max_depth=3 ---
1134
+ ╙── 0
1135
+ └─╼ 1
1136
+ └─╼ 2
1137
+ └─╼ ...
1138
+ --- directed case, max_depth=4 ---
1139
+ ╙── 0
1140
+ └─╼ 1
1141
+ └─╼ 2
1142
+ └─╼ 3
1143
+ --- undirected case, max_depth=0 ---
1144
+ ╙ ...
1145
+ --- undirected case, max_depth=1 ---
1146
+ ╙── 0 ─ 1
1147
+ └── ...
1148
+ --- undirected case, max_depth=2 ---
1149
+ ╙── 0
1150
+ └── 1 ─ 2
1151
+ └── ...
1152
+ --- undirected case, max_depth=3 ---
1153
+ ╙── 0
1154
+ └── 1
1155
+ └── 2 ─ 3
1156
+ └── ...
1157
+ --- undirected case, max_depth=4 ---
1158
+ ╙── 0
1159
+ └── 1
1160
+ └── 2
1161
+ └── 3
1162
+ """
1163
+ ).strip()
1164
+ assert target == text
1165
+
1166
+
1167
+ def test_write_network_text_graph_max_depth():
1168
+ orig = nx.erdos_renyi_graph(10, 0.15, directed=True, seed=40392)
1169
+ lines = []
1170
+ write = lines.append
1171
+ write("--- directed case, max_depth=None ---")
1172
+ nx.write_network_text(orig, path=write, end="", max_depth=None)
1173
+ write("--- directed case, max_depth=0 ---")
1174
+ nx.write_network_text(orig, path=write, end="", max_depth=0)
1175
+ write("--- directed case, max_depth=1 ---")
1176
+ nx.write_network_text(orig, path=write, end="", max_depth=1)
1177
+ write("--- directed case, max_depth=2 ---")
1178
+ nx.write_network_text(orig, path=write, end="", max_depth=2)
1179
+ write("--- directed case, max_depth=3 ---")
1180
+ nx.write_network_text(orig, path=write, end="", max_depth=3)
1181
+ write("--- undirected case, max_depth=None ---")
1182
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=None)
1183
+ write("--- undirected case, max_depth=0 ---")
1184
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0)
1185
+ write("--- undirected case, max_depth=1 ---")
1186
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1)
1187
+ write("--- undirected case, max_depth=2 ---")
1188
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2)
1189
+ write("--- undirected case, max_depth=3 ---")
1190
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3)
1191
+ text = "\n".join(lines)
1192
+ target = dedent(
1193
+ """
1194
+ --- directed case, max_depth=None ---
1195
+ ╟── 4
1196
+ ╎ ├─╼ 0 ╾ 3
1197
+ ╎ ├─╼ 5 ╾ 7
1198
+ ╎ │ └─╼ 3
1199
+ ╎ │ ├─╼ 1 ╾ 9
1200
+ ╎ │ │ └─╼ 9 ╾ 6
1201
+ ╎ │ │ ├─╼ 6
1202
+ ╎ │ │ │ └─╼ ...
1203
+ ╎ │ │ ├─╼ 7 ╾ 4
1204
+ ╎ │ │ │ ├─╼ 2
1205
+ ╎ │ │ │ └─╼ ...
1206
+ ╎ │ │ └─╼ ...
1207
+ ╎ │ └─╼ ...
1208
+ ╎ └─╼ ...
1209
+ ╙── 8
1210
+ --- directed case, max_depth=0 ---
1211
+ ╙ ...
1212
+ --- directed case, max_depth=1 ---
1213
+ ╟── 4
1214
+ ╎ └─╼ ...
1215
+ ╙── 8
1216
+ --- directed case, max_depth=2 ---
1217
+ ╟── 4
1218
+ ╎ ├─╼ 0 ╾ 3
1219
+ ╎ ├─╼ 5 ╾ 7
1220
+ ╎ │ └─╼ ...
1221
+ ╎ └─╼ 7 ╾ 9
1222
+ ╎ └─╼ ...
1223
+ ╙── 8
1224
+ --- directed case, max_depth=3 ---
1225
+ ╟── 4
1226
+ ╎ ├─╼ 0 ╾ 3
1227
+ ╎ ├─╼ 5 ╾ 7
1228
+ ╎ │ └─╼ 3
1229
+ ╎ │ └─╼ ...
1230
+ ╎ └─╼ 7 ╾ 9
1231
+ ╎ ├─╼ 2
1232
+ ╎ └─╼ ...
1233
+ ╙── 8
1234
+ --- undirected case, max_depth=None ---
1235
+ ╟── 8
1236
+ ╙── 2
1237
+ └── 7
1238
+ ├── 4
1239
+ │ ├── 0
1240
+ │ │ └── 3
1241
+ │ │ ├── 1
1242
+ │ │ │ └── 9 ─ 7
1243
+ │ │ │ └── 6
1244
+ │ │ └── 5 ─ 4, 7
1245
+ │ └── ...
1246
+ └── ...
1247
+ --- undirected case, max_depth=0 ---
1248
+ ╙ ...
1249
+ --- undirected case, max_depth=1 ---
1250
+ ╟── 8
1251
+ ╙── 2 ─ 7
1252
+ └── ...
1253
+ --- undirected case, max_depth=2 ---
1254
+ ╟── 8
1255
+ ╙── 2
1256
+ └── 7 ─ 4, 5, 9
1257
+ └── ...
1258
+ --- undirected case, max_depth=3 ---
1259
+ ╟── 8
1260
+ ╙── 2
1261
+ └── 7
1262
+ ├── 4 ─ 0, 5
1263
+ │ └── ...
1264
+ ├── 5 ─ 4, 3
1265
+ │ └── ...
1266
+ └── 9 ─ 1, 6
1267
+ └── ...
1268
+ """
1269
+ ).strip()
1270
+ assert target == text
1271
+
1272
+
1273
+ def test_write_network_text_clique_max_depth():
1274
+ orig = nx.complete_graph(5, nx.DiGraph)
1275
+ lines = []
1276
+ write = lines.append
1277
+ write("--- directed case, max_depth=None ---")
1278
+ nx.write_network_text(orig, path=write, end="", max_depth=None)
1279
+ write("--- directed case, max_depth=0 ---")
1280
+ nx.write_network_text(orig, path=write, end="", max_depth=0)
1281
+ write("--- directed case, max_depth=1 ---")
1282
+ nx.write_network_text(orig, path=write, end="", max_depth=1)
1283
+ write("--- directed case, max_depth=2 ---")
1284
+ nx.write_network_text(orig, path=write, end="", max_depth=2)
1285
+ write("--- directed case, max_depth=3 ---")
1286
+ nx.write_network_text(orig, path=write, end="", max_depth=3)
1287
+ write("--- undirected case, max_depth=None ---")
1288
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=None)
1289
+ write("--- undirected case, max_depth=0 ---")
1290
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0)
1291
+ write("--- undirected case, max_depth=1 ---")
1292
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1)
1293
+ write("--- undirected case, max_depth=2 ---")
1294
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2)
1295
+ write("--- undirected case, max_depth=3 ---")
1296
+ nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3)
1297
+ text = "\n".join(lines)
1298
+ target = dedent(
1299
+ """
1300
+ --- directed case, max_depth=None ---
1301
+ ╙── 0 ╾ 1, 2, 3, 4
1302
+ ├─╼ 1 ╾ 2, 3, 4
1303
+ │ ├─╼ 2 ╾ 0, 3, 4
1304
+ │ │ ├─╼ 3 ╾ 0, 1, 4
1305
+ │ │ │ ├─╼ 4 ╾ 0, 1, 2
1306
+ │ │ │ │ └─╼ ...
1307
+ │ │ │ └─╼ ...
1308
+ │ │ └─╼ ...
1309
+ │ └─╼ ...
1310
+ └─╼ ...
1311
+ --- directed case, max_depth=0 ---
1312
+ ╙ ...
1313
+ --- directed case, max_depth=1 ---
1314
+ ╙── 0 ╾ 1, 2, 3, 4
1315
+ └─╼ ...
1316
+ --- directed case, max_depth=2 ---
1317
+ ╙── 0 ╾ 1, 2, 3, 4
1318
+ ├─╼ 1 ╾ 2, 3, 4
1319
+ │ └─╼ ...
1320
+ ├─╼ 2 ╾ 1, 3, 4
1321
+ │ └─╼ ...
1322
+ ├─╼ 3 ╾ 1, 2, 4
1323
+ │ └─╼ ...
1324
+ └─╼ 4 ╾ 1, 2, 3
1325
+ └─╼ ...
1326
+ --- directed case, max_depth=3 ---
1327
+ ╙── 0 ╾ 1, 2, 3, 4
1328
+ ├─╼ 1 ╾ 2, 3, 4
1329
+ │ ├─╼ 2 ╾ 0, 3, 4
1330
+ │ │ └─╼ ...
1331
+ │ ├─╼ 3 ╾ 0, 2, 4
1332
+ │ │ └─╼ ...
1333
+ │ ├─╼ 4 ╾ 0, 2, 3
1334
+ │ │ └─╼ ...
1335
+ │ └─╼ ...
1336
+ └─╼ ...
1337
+ --- undirected case, max_depth=None ---
1338
+ ╙── 0
1339
+ ├── 1
1340
+ │ ├── 2 ─ 0
1341
+ │ │ ├── 3 ─ 0, 1
1342
+ │ │ │ └── 4 ─ 0, 1, 2
1343
+ │ │ └── ...
1344
+ │ └── ...
1345
+ └── ...
1346
+ --- undirected case, max_depth=0 ---
1347
+ ╙ ...
1348
+ --- undirected case, max_depth=1 ---
1349
+ ╙── 0 ─ 1, 2, 3, 4
1350
+ └── ...
1351
+ --- undirected case, max_depth=2 ---
1352
+ ╙── 0
1353
+ ├── 1 ─ 2, 3, 4
1354
+ │ └── ...
1355
+ ├── 2 ─ 1, 3, 4
1356
+ │ └── ...
1357
+ ├── 3 ─ 1, 2, 4
1358
+ │ └── ...
1359
+ └── 4 ─ 1, 2, 3
1360
+ --- undirected case, max_depth=3 ---
1361
+ ╙── 0
1362
+ ├── 1
1363
+ │ ├── 2 ─ 0, 3, 4
1364
+ │ │ └── ...
1365
+ │ ├── 3 ─ 0, 2, 4
1366
+ │ │ └── ...
1367
+ │ └── 4 ─ 0, 2, 3
1368
+ └── ...
1369
+ """
1370
+ ).strip()
1371
+ assert target == text
1372
+
1373
+
1374
+ def test_write_network_text_custom_label():
1375
+ # Create a directed forest with labels
1376
+ graph = nx.erdos_renyi_graph(5, 0.4, directed=True, seed=359222358)
1377
+ for node in graph.nodes:
1378
+ graph.nodes[node]["label"] = f"Node({node})"
1379
+ graph.nodes[node]["chr"] = chr(node + ord("a") - 1)
1380
+ if node % 2 == 0:
1381
+ graph.nodes[node]["part"] = chr(node + ord("a"))
1382
+
1383
+ lines = []
1384
+ write = lines.append
1385
+ write("--- when with_labels=True, uses the 'label' attr ---")
1386
+ nx.write_network_text(graph, path=write, with_labels=True, end="", max_depth=None)
1387
+ write("--- when with_labels=False, uses str(node) value ---")
1388
+ nx.write_network_text(graph, path=write, with_labels=False, end="", max_depth=None)
1389
+ write("--- when with_labels is a string, use that attr ---")
1390
+ nx.write_network_text(graph, path=write, with_labels="chr", end="", max_depth=None)
1391
+ write("--- fallback to str(node) when the attr does not exist ---")
1392
+ nx.write_network_text(graph, path=write, with_labels="part", end="", max_depth=None)
1393
+
1394
+ text = "\n".join(lines)
1395
+ target = dedent(
1396
+ """
1397
+ --- when with_labels=True, uses the 'label' attr ---
1398
+ ╙── Node(1)
1399
+ └─╼ Node(3) ╾ Node(2)
1400
+ ├─╼ Node(0)
1401
+ │ ├─╼ Node(2) ╾ Node(3), Node(4)
1402
+ │ │ └─╼ ...
1403
+ │ └─╼ Node(4)
1404
+ │ └─╼ ...
1405
+ └─╼ ...
1406
+ --- when with_labels=False, uses str(node) value ---
1407
+ ╙── 1
1408
+ └─╼ 3 ╾ 2
1409
+ ├─╼ 0
1410
+ │ ├─╼ 2 ╾ 3, 4
1411
+ │ │ └─╼ ...
1412
+ │ └─╼ 4
1413
+ │ └─╼ ...
1414
+ └─╼ ...
1415
+ --- when with_labels is a string, use that attr ---
1416
+ ╙── a
1417
+ └─╼ c ╾ b
1418
+ ├─╼ `
1419
+ │ ├─╼ b ╾ c, d
1420
+ │ │ └─╼ ...
1421
+ │ └─╼ d
1422
+ │ └─╼ ...
1423
+ └─╼ ...
1424
+ --- fallback to str(node) when the attr does not exist ---
1425
+ ╙── 1
1426
+ └─╼ 3 ╾ c
1427
+ ├─╼ a
1428
+ │ ├─╼ c ╾ 3, e
1429
+ │ │ └─╼ ...
1430
+ │ └─╼ e
1431
+ │ └─╼ ...
1432
+ └─╼ ...
1433
+ """
1434
+ ).strip()
1435
+ assert target == text
1436
+
1437
+
1438
+ def test_write_network_text_vertical_chains():
1439
+ graph1 = nx.lollipop_graph(4, 2, create_using=nx.Graph)
1440
+ graph1.add_edge(0, -1)
1441
+ graph1.add_edge(-1, -2)
1442
+ graph1.add_edge(-2, -3)
1443
+
1444
+ graph2 = graph1.to_directed()
1445
+ graph2.remove_edges_from([(u, v) for u, v in graph2.edges if v > u])
1446
+
1447
+ lines = []
1448
+ write = lines.append
1449
+ write("--- Undirected UTF ---")
1450
+ nx.write_network_text(graph1, path=write, end="", vertical_chains=True)
1451
+ write("--- Undirected ASCI ---")
1452
+ nx.write_network_text(
1453
+ graph1, path=write, end="", vertical_chains=True, ascii_only=True
1454
+ )
1455
+ write("--- Directed UTF ---")
1456
+ nx.write_network_text(graph2, path=write, end="", vertical_chains=True)
1457
+ write("--- Directed ASCI ---")
1458
+ nx.write_network_text(
1459
+ graph2, path=write, end="", vertical_chains=True, ascii_only=True
1460
+ )
1461
+
1462
+ text = "\n".join(lines)
1463
+ target = dedent(
1464
+ """
1465
+ --- Undirected UTF ---
1466
+ ╙── 5
1467
+
1468
+ 4
1469
+
1470
+ 3
1471
+ ├── 0
1472
+ │ ├── 1 ─ 3
1473
+ │ │ │
1474
+ │ │ 2 ─ 0, 3
1475
+ │ ├── -1
1476
+ │ │ │
1477
+ │ │ -2
1478
+ │ │ │
1479
+ │ │ -3
1480
+ │ └── ...
1481
+ └── ...
1482
+ --- Undirected ASCI ---
1483
+ +-- 5
1484
+ |
1485
+ 4
1486
+ |
1487
+ 3
1488
+ |-- 0
1489
+ | |-- 1 - 3
1490
+ | | |
1491
+ | | 2 - 0, 3
1492
+ | |-- -1
1493
+ | | |
1494
+ | | -2
1495
+ | | |
1496
+ | | -3
1497
+ | L-- ...
1498
+ L-- ...
1499
+ --- Directed UTF ---
1500
+ ╙── 5
1501
+
1502
+ 4
1503
+
1504
+ 3
1505
+ ├─╼ 0 ╾ 1, 2
1506
+ │ ╽
1507
+ │ -1
1508
+ │ ╽
1509
+ │ -2
1510
+ │ ╽
1511
+ │ -3
1512
+ ├─╼ 1 ╾ 2
1513
+ │ └─╼ ...
1514
+ └─╼ 2
1515
+ └─╼ ...
1516
+ --- Directed ASCI ---
1517
+ +-- 5
1518
+ !
1519
+ 4
1520
+ !
1521
+ 3
1522
+ |-> 0 <- 1, 2
1523
+ | !
1524
+ | -1
1525
+ | !
1526
+ | -2
1527
+ | !
1528
+ | -3
1529
+ |-> 1 <- 2
1530
+ | L-> ...
1531
+ L-> 2
1532
+ L-> ...
1533
+ """
1534
+ ).strip()
1535
+ assert target == text
1536
+
1537
+
1538
+ def test_collapse_directed():
1539
+ graph = nx.balanced_tree(r=2, h=3, create_using=nx.DiGraph)
1540
+ lines = []
1541
+ write = lines.append
1542
+ write("--- Original ---")
1543
+ nx.write_network_text(graph, path=write, end="")
1544
+ graph.nodes[1]["collapse"] = True
1545
+ write("--- Collapse Node 1 ---")
1546
+ nx.write_network_text(graph, path=write, end="")
1547
+ write("--- Add alternate path (5, 3) to collapsed zone")
1548
+ graph.add_edge(5, 3)
1549
+ nx.write_network_text(graph, path=write, end="")
1550
+ write("--- Collapse Node 0 ---")
1551
+ graph.nodes[0]["collapse"] = True
1552
+ nx.write_network_text(graph, path=write, end="")
1553
+ text = "\n".join(lines)
1554
+ target = dedent(
1555
+ """
1556
+ --- Original ---
1557
+ ╙── 0
1558
+ ├─╼ 1
1559
+ │ ├─╼ 3
1560
+ │ │ ├─╼ 7
1561
+ │ │ └─╼ 8
1562
+ │ └─╼ 4
1563
+ │ ├─╼ 9
1564
+ │ └─╼ 10
1565
+ └─╼ 2
1566
+ ├─╼ 5
1567
+ │ ├─╼ 11
1568
+ │ └─╼ 12
1569
+ └─╼ 6
1570
+ ├─╼ 13
1571
+ └─╼ 14
1572
+ --- Collapse Node 1 ---
1573
+ ╙── 0
1574
+ ├─╼ 1
1575
+ │ └─╼ ...
1576
+ └─╼ 2
1577
+ ├─╼ 5
1578
+ │ ├─╼ 11
1579
+ │ └─╼ 12
1580
+ └─╼ 6
1581
+ ├─╼ 13
1582
+ └─╼ 14
1583
+ --- Add alternate path (5, 3) to collapsed zone
1584
+ ╙── 0
1585
+ ├─╼ 1
1586
+ │ └─╼ ...
1587
+ └─╼ 2
1588
+ ├─╼ 5
1589
+ │ ├─╼ 11
1590
+ │ ├─╼ 12
1591
+ │ └─╼ 3 ╾ 1
1592
+ │ ├─╼ 7
1593
+ │ └─╼ 8
1594
+ └─╼ 6
1595
+ ├─╼ 13
1596
+ └─╼ 14
1597
+ --- Collapse Node 0 ---
1598
+ ╙── 0
1599
+ └─╼ ...
1600
+ """
1601
+ ).strip()
1602
+ assert target == text
1603
+
1604
+
1605
+ def test_collapse_undirected():
1606
+ graph = nx.balanced_tree(r=2, h=3, create_using=nx.Graph)
1607
+ lines = []
1608
+ write = lines.append
1609
+ write("--- Original ---")
1610
+ nx.write_network_text(graph, path=write, end="", sources=[0])
1611
+ graph.nodes[1]["collapse"] = True
1612
+ write("--- Collapse Node 1 ---")
1613
+ nx.write_network_text(graph, path=write, end="", sources=[0])
1614
+ write("--- Add alternate path (5, 3) to collapsed zone")
1615
+ graph.add_edge(5, 3)
1616
+ nx.write_network_text(graph, path=write, end="", sources=[0])
1617
+ write("--- Collapse Node 0 ---")
1618
+ graph.nodes[0]["collapse"] = True
1619
+ nx.write_network_text(graph, path=write, end="", sources=[0])
1620
+ text = "\n".join(lines)
1621
+ target = dedent(
1622
+ """
1623
+ --- Original ---
1624
+ ╙── 0
1625
+ ├── 1
1626
+ │ ├── 3
1627
+ │ │ ├── 7
1628
+ │ │ └── 8
1629
+ │ └── 4
1630
+ │ ├── 9
1631
+ │ └── 10
1632
+ └── 2
1633
+ ├── 5
1634
+ │ ├── 11
1635
+ │ └── 12
1636
+ └── 6
1637
+ ├── 13
1638
+ └── 14
1639
+ --- Collapse Node 1 ---
1640
+ ╙── 0
1641
+ ├── 1 ─ 3, 4
1642
+ │ └── ...
1643
+ └── 2
1644
+ ├── 5
1645
+ │ ├── 11
1646
+ │ └── 12
1647
+ └── 6
1648
+ ├── 13
1649
+ └── 14
1650
+ --- Add alternate path (5, 3) to collapsed zone
1651
+ ╙── 0
1652
+ ├── 1 ─ 3, 4
1653
+ │ └── ...
1654
+ └── 2
1655
+ ├── 5
1656
+ │ ├── 11
1657
+ │ ├── 12
1658
+ │ └── 3 ─ 1
1659
+ │ ├── 7
1660
+ │ └── 8
1661
+ └── 6
1662
+ ├── 13
1663
+ └── 14
1664
+ --- Collapse Node 0 ---
1665
+ ╙── 0 ─ 1, 2
1666
+ └── ...
1667
+ """
1668
+ ).strip()
1669
+ assert target == text
1670
+
1671
+
1672
+ def generate_test_graphs():
1673
+ """
1674
+ Generate a gauntlet of different test graphs with different properties
1675
+ """
1676
+ import random
1677
+
1678
+ rng = random.Random(976689776)
1679
+ num_randomized = 3
1680
+
1681
+ for directed in [0, 1]:
1682
+ cls = nx.DiGraph if directed else nx.Graph
1683
+
1684
+ for num_nodes in range(17):
1685
+ # Disconnected graph
1686
+ graph = cls()
1687
+ graph.add_nodes_from(range(num_nodes))
1688
+ yield graph
1689
+
1690
+ # Randomize graphs
1691
+ if num_nodes > 0:
1692
+ for p in [0.1, 0.3, 0.5, 0.7, 0.9]:
1693
+ for seed in range(num_randomized):
1694
+ graph = nx.erdos_renyi_graph(
1695
+ num_nodes, p, directed=directed, seed=rng
1696
+ )
1697
+ yield graph
1698
+
1699
+ yield nx.complete_graph(num_nodes, cls)
1700
+
1701
+ yield nx.path_graph(3, create_using=cls)
1702
+ yield nx.balanced_tree(r=1, h=3, create_using=cls)
1703
+ if not directed:
1704
+ yield nx.circular_ladder_graph(4, create_using=cls)
1705
+ yield nx.star_graph(5, create_using=cls)
1706
+ yield nx.lollipop_graph(4, 2, create_using=cls)
1707
+ yield nx.wheel_graph(7, create_using=cls)
1708
+ yield nx.dorogovtsev_goltsev_mendes_graph(4, create_using=cls)
1709
+
1710
+
1711
+ @pytest.mark.parametrize(
1712
+ ("vertical_chains", "ascii_only"),
1713
+ tuple(
1714
+ [
1715
+ (vertical_chains, ascii_only)
1716
+ for vertical_chains in [0, 1]
1717
+ for ascii_only in [0, 1]
1718
+ ]
1719
+ ),
1720
+ )
1721
+ def test_network_text_round_trip(vertical_chains, ascii_only):
1722
+ """
1723
+ Write the graph to network text format, then parse it back in, assert it is
1724
+ the same as the original graph. Passing this test is strong validation of
1725
+ both the format generator and parser.
1726
+ """
1727
+ from networkx.readwrite.text import _parse_network_text
1728
+
1729
+ for graph in generate_test_graphs():
1730
+ graph = nx.relabel_nodes(graph, {n: str(n) for n in graph.nodes})
1731
+ lines = list(
1732
+ nx.generate_network_text(
1733
+ graph, vertical_chains=vertical_chains, ascii_only=ascii_only
1734
+ )
1735
+ )
1736
+ new = _parse_network_text(lines)
1737
+ try:
1738
+ assert new.nodes == graph.nodes
1739
+ assert new.edges == graph.edges
1740
+ except Exception:
1741
+ nx.write_network_text(graph)
1742
+ raise
mplug_owl2/lib/python3.10/site-packages/networkx/readwrite/text.py ADDED
@@ -0,0 +1,852 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Text-based visual representations of graphs
3
+ """
4
+
5
+ import sys
6
+ import warnings
7
+ from collections import defaultdict
8
+
9
+ import networkx as nx
10
+ from networkx.utils import open_file
11
+
12
+ __all__ = ["generate_network_text", "write_network_text"]
13
+
14
+
15
+ class BaseGlyphs:
16
+ @classmethod
17
+ def as_dict(cls):
18
+ return {
19
+ a: getattr(cls, a)
20
+ for a in dir(cls)
21
+ if not a.startswith("_") and a != "as_dict"
22
+ }
23
+
24
+
25
+ class AsciiBaseGlyphs(BaseGlyphs):
26
+ empty: str = "+"
27
+ newtree_last: str = "+-- "
28
+ newtree_mid: str = "+-- "
29
+ endof_forest: str = " "
30
+ within_forest: str = ": "
31
+ within_tree: str = "| "
32
+
33
+
34
+ class AsciiDirectedGlyphs(AsciiBaseGlyphs):
35
+ last: str = "L-> "
36
+ mid: str = "|-> "
37
+ backedge: str = "<-"
38
+ vertical_edge: str = "!"
39
+
40
+
41
+ class AsciiUndirectedGlyphs(AsciiBaseGlyphs):
42
+ last: str = "L-- "
43
+ mid: str = "|-- "
44
+ backedge: str = "-"
45
+ vertical_edge: str = "|"
46
+
47
+
48
+ class UtfBaseGlyphs(BaseGlyphs):
49
+ # Notes on available box and arrow characters
50
+ # https://en.wikipedia.org/wiki/Box-drawing_character
51
+ # https://stackoverflow.com/questions/2701192/triangle-arrow
52
+ empty: str = "╙"
53
+ newtree_last: str = "╙── "
54
+ newtree_mid: str = "╟── "
55
+ endof_forest: str = " "
56
+ within_forest: str = "╎ "
57
+ within_tree: str = "│ "
58
+
59
+
60
+ class UtfDirectedGlyphs(UtfBaseGlyphs):
61
+ last: str = "└─╼ "
62
+ mid: str = "├─╼ "
63
+ backedge: str = "╾"
64
+ vertical_edge: str = "╽"
65
+
66
+
67
+ class UtfUndirectedGlyphs(UtfBaseGlyphs):
68
+ last: str = "└── "
69
+ mid: str = "├── "
70
+ backedge: str = "─"
71
+ vertical_edge: str = "│"
72
+
73
+
74
+ def generate_network_text(
75
+ graph,
76
+ with_labels=True,
77
+ sources=None,
78
+ max_depth=None,
79
+ ascii_only=False,
80
+ vertical_chains=False,
81
+ ):
82
+ """Generate lines in the "network text" format
83
+
84
+ This works via a depth-first traversal of the graph and writing a line for
85
+ each unique node encountered. Non-tree edges are written to the right of
86
+ each node, and connection to a non-tree edge is indicated with an ellipsis.
87
+ This representation works best when the input graph is a forest, but any
88
+ graph can be represented.
89
+
90
+ This notation is original to networkx, although it is simple enough that it
91
+ may be known in existing literature. See #5602 for details. The procedure
92
+ is summarized as follows:
93
+
94
+ 1. Given a set of source nodes (which can be specified, or automatically
95
+ discovered via finding the (strongly) connected components and choosing one
96
+ node with minimum degree from each), we traverse the graph in depth first
97
+ order.
98
+
99
+ 2. Each reachable node will be printed exactly once on it's own line.
100
+
101
+ 3. Edges are indicated in one of four ways:
102
+
103
+ a. a parent "L-style" connection on the upper left. This corresponds to
104
+ a traversal in the directed DFS tree.
105
+
106
+ b. a backref "<-style" connection shown directly on the right. For
107
+ directed graphs, these are drawn for any incoming edges to a node that
108
+ is not a parent edge. For undirected graphs, these are drawn for only
109
+ the non-parent edges that have already been represented (The edges that
110
+ have not been represented will be handled in the recursive case).
111
+
112
+ c. a child "L-style" connection on the lower right. Drawing of the
113
+ children are handled recursively.
114
+
115
+ d. if ``vertical_chains`` is true, and a parent node only has one child
116
+ a "vertical-style" edge is drawn between them.
117
+
118
+ 4. The children of each node (wrt the directed DFS tree) are drawn
119
+ underneath and to the right of it. In the case that a child node has already
120
+ been drawn the connection is replaced with an ellipsis ("...") to indicate
121
+ that there is one or more connections represented elsewhere.
122
+
123
+ 5. If a maximum depth is specified, an edge to nodes past this maximum
124
+ depth will be represented by an ellipsis.
125
+
126
+ 6. If a node has a truthy "collapse" value, then we do not traverse past
127
+ that node.
128
+
129
+ Parameters
130
+ ----------
131
+ graph : nx.DiGraph | nx.Graph
132
+ Graph to represent
133
+
134
+ with_labels : bool | str
135
+ If True will use the "label" attribute of a node to display if it
136
+ exists otherwise it will use the node value itself. If given as a
137
+ string, then that attribute name will be used instead of "label".
138
+ Defaults to True.
139
+
140
+ sources : List
141
+ Specifies which nodes to start traversal from. Note: nodes that are not
142
+ reachable from one of these sources may not be shown. If unspecified,
143
+ the minimal set of nodes needed to reach all others will be used.
144
+
145
+ max_depth : int | None
146
+ The maximum depth to traverse before stopping. Defaults to None.
147
+
148
+ ascii_only : Boolean
149
+ If True only ASCII characters are used to construct the visualization
150
+
151
+ vertical_chains : Boolean
152
+ If True, chains of nodes will be drawn vertically when possible.
153
+
154
+ Yields
155
+ ------
156
+ str : a line of generated text
157
+
158
+ Examples
159
+ --------
160
+ >>> graph = nx.path_graph(10)
161
+ >>> graph.add_node("A")
162
+ >>> graph.add_node("B")
163
+ >>> graph.add_node("C")
164
+ >>> graph.add_node("D")
165
+ >>> graph.add_edge(9, "A")
166
+ >>> graph.add_edge(9, "B")
167
+ >>> graph.add_edge(9, "C")
168
+ >>> graph.add_edge("C", "D")
169
+ >>> graph.add_edge("C", "E")
170
+ >>> graph.add_edge("C", "F")
171
+ >>> nx.write_network_text(graph)
172
+ ╙── 0
173
+ └── 1
174
+ └── 2
175
+ └── 3
176
+ └── 4
177
+ └── 5
178
+ └── 6
179
+ └── 7
180
+ └── 8
181
+ └── 9
182
+ ├── A
183
+ ├── B
184
+ └── C
185
+ ├── D
186
+ ├── E
187
+ └── F
188
+ >>> nx.write_network_text(graph, vertical_chains=True)
189
+ ╙── 0
190
+
191
+ 1
192
+
193
+ 2
194
+
195
+ 3
196
+
197
+ 4
198
+
199
+ 5
200
+
201
+ 6
202
+
203
+ 7
204
+
205
+ 8
206
+
207
+ 9
208
+ ├── A
209
+ ├── B
210
+ └── C
211
+ ├── D
212
+ ├── E
213
+ └── F
214
+ """
215
+ from typing import Any, NamedTuple
216
+
217
+ class StackFrame(NamedTuple):
218
+ parent: Any
219
+ node: Any
220
+ indents: list
221
+ this_islast: bool
222
+ this_vertical: bool
223
+
224
+ collapse_attr = "collapse"
225
+
226
+ is_directed = graph.is_directed()
227
+
228
+ if is_directed:
229
+ glyphs = AsciiDirectedGlyphs if ascii_only else UtfDirectedGlyphs
230
+ succ = graph.succ
231
+ pred = graph.pred
232
+ else:
233
+ glyphs = AsciiUndirectedGlyphs if ascii_only else UtfUndirectedGlyphs
234
+ succ = graph.adj
235
+ pred = graph.adj
236
+
237
+ if isinstance(with_labels, str):
238
+ label_attr = with_labels
239
+ elif with_labels:
240
+ label_attr = "label"
241
+ else:
242
+ label_attr = None
243
+
244
+ if max_depth == 0:
245
+ yield glyphs.empty + " ..."
246
+ elif len(graph.nodes) == 0:
247
+ yield glyphs.empty
248
+ else:
249
+ # If the nodes to traverse are unspecified, find the minimal set of
250
+ # nodes that will reach the entire graph
251
+ if sources is None:
252
+ sources = _find_sources(graph)
253
+
254
+ # Populate the stack with each:
255
+ # 1. parent node in the DFS tree (or None for root nodes),
256
+ # 2. the current node in the DFS tree
257
+ # 2. a list of indentations indicating depth
258
+ # 3. a flag indicating if the node is the final one to be written.
259
+ # Reverse the stack so sources are popped in the correct order.
260
+ last_idx = len(sources) - 1
261
+ stack = [
262
+ StackFrame(None, node, [], (idx == last_idx), False)
263
+ for idx, node in enumerate(sources)
264
+ ][::-1]
265
+
266
+ num_skipped_children = defaultdict(lambda: 0)
267
+ seen_nodes = set()
268
+ while stack:
269
+ parent, node, indents, this_islast, this_vertical = stack.pop()
270
+
271
+ if node is not Ellipsis:
272
+ skip = node in seen_nodes
273
+ if skip:
274
+ # Mark that we skipped a parent's child
275
+ num_skipped_children[parent] += 1
276
+
277
+ if this_islast:
278
+ # If we reached the last child of a parent, and we skipped
279
+ # any of that parents children, then we should emit an
280
+ # ellipsis at the end after this.
281
+ if num_skipped_children[parent] and parent is not None:
282
+ # Append the ellipsis to be emitted last
283
+ next_islast = True
284
+ try_frame = StackFrame(
285
+ node, Ellipsis, indents, next_islast, False
286
+ )
287
+ stack.append(try_frame)
288
+
289
+ # Redo this frame, but not as a last object
290
+ next_islast = False
291
+ try_frame = StackFrame(
292
+ parent, node, indents, next_islast, this_vertical
293
+ )
294
+ stack.append(try_frame)
295
+ continue
296
+
297
+ if skip:
298
+ continue
299
+ seen_nodes.add(node)
300
+
301
+ if not indents:
302
+ # Top level items (i.e. trees in the forest) get different
303
+ # glyphs to indicate they are not actually connected
304
+ if this_islast:
305
+ this_vertical = False
306
+ this_prefix = indents + [glyphs.newtree_last]
307
+ next_prefix = indents + [glyphs.endof_forest]
308
+ else:
309
+ this_prefix = indents + [glyphs.newtree_mid]
310
+ next_prefix = indents + [glyphs.within_forest]
311
+
312
+ else:
313
+ # Non-top-level items
314
+ if this_vertical:
315
+ this_prefix = indents
316
+ next_prefix = indents
317
+ else:
318
+ if this_islast:
319
+ this_prefix = indents + [glyphs.last]
320
+ next_prefix = indents + [glyphs.endof_forest]
321
+ else:
322
+ this_prefix = indents + [glyphs.mid]
323
+ next_prefix = indents + [glyphs.within_tree]
324
+
325
+ if node is Ellipsis:
326
+ label = " ..."
327
+ suffix = ""
328
+ children = []
329
+ else:
330
+ if label_attr is not None:
331
+ label = str(graph.nodes[node].get(label_attr, node))
332
+ else:
333
+ label = str(node)
334
+
335
+ # Determine if we want to show the children of this node.
336
+ if collapse_attr is not None:
337
+ collapse = graph.nodes[node].get(collapse_attr, False)
338
+ else:
339
+ collapse = False
340
+
341
+ # Determine:
342
+ # (1) children to traverse into after showing this node.
343
+ # (2) parents to immediately show to the right of this node.
344
+ if is_directed:
345
+ # In the directed case we must show every successor node
346
+ # note: it may be skipped later, but we don't have that
347
+ # information here.
348
+ children = list(succ[node])
349
+ # In the directed case we must show every predecessor
350
+ # except for parent we directly traversed from.
351
+ handled_parents = {parent}
352
+ else:
353
+ # Showing only the unseen children results in a more
354
+ # concise representation for the undirected case.
355
+ children = [
356
+ child for child in succ[node] if child not in seen_nodes
357
+ ]
358
+
359
+ # In the undirected case, parents are also children, so we
360
+ # only need to immediately show the ones we can no longer
361
+ # traverse
362
+ handled_parents = {*children, parent}
363
+
364
+ if max_depth is not None and len(indents) == max_depth - 1:
365
+ # Use ellipsis to indicate we have reached maximum depth
366
+ if children:
367
+ children = [Ellipsis]
368
+ handled_parents = {parent}
369
+
370
+ if collapse:
371
+ # Collapsing a node is the same as reaching maximum depth
372
+ if children:
373
+ children = [Ellipsis]
374
+ handled_parents = {parent}
375
+
376
+ # The other parents are other predecessors of this node that
377
+ # are not handled elsewhere.
378
+ other_parents = [p for p in pred[node] if p not in handled_parents]
379
+ if other_parents:
380
+ if label_attr is not None:
381
+ other_parents_labels = ", ".join(
382
+ [
383
+ str(graph.nodes[p].get(label_attr, p))
384
+ for p in other_parents
385
+ ]
386
+ )
387
+ else:
388
+ other_parents_labels = ", ".join(
389
+ [str(p) for p in other_parents]
390
+ )
391
+ suffix = " ".join(["", glyphs.backedge, other_parents_labels])
392
+ else:
393
+ suffix = ""
394
+
395
+ # Emit the line for this node, this will be called for each node
396
+ # exactly once.
397
+ if this_vertical:
398
+ yield "".join(this_prefix + [glyphs.vertical_edge])
399
+
400
+ yield "".join(this_prefix + [label, suffix])
401
+
402
+ if vertical_chains:
403
+ if is_directed:
404
+ num_children = len(set(children))
405
+ else:
406
+ num_children = len(set(children) - {parent})
407
+ # The next node can be drawn vertically if it is the only
408
+ # remaining child of this node.
409
+ next_is_vertical = num_children == 1
410
+ else:
411
+ next_is_vertical = False
412
+
413
+ # Push children on the stack in reverse order so they are popped in
414
+ # the original order.
415
+ for idx, child in enumerate(children[::-1]):
416
+ next_islast = idx == 0
417
+ try_frame = StackFrame(
418
+ node, child, next_prefix, next_islast, next_is_vertical
419
+ )
420
+ stack.append(try_frame)
421
+
422
+
423
+ @open_file(1, "w")
424
+ def write_network_text(
425
+ graph,
426
+ path=None,
427
+ with_labels=True,
428
+ sources=None,
429
+ max_depth=None,
430
+ ascii_only=False,
431
+ end="\n",
432
+ vertical_chains=False,
433
+ ):
434
+ """Creates a nice text representation of a graph
435
+
436
+ This works via a depth-first traversal of the graph and writing a line for
437
+ each unique node encountered. Non-tree edges are written to the right of
438
+ each node, and connection to a non-tree edge is indicated with an ellipsis.
439
+ This representation works best when the input graph is a forest, but any
440
+ graph can be represented.
441
+
442
+ Parameters
443
+ ----------
444
+ graph : nx.DiGraph | nx.Graph
445
+ Graph to represent
446
+
447
+ path : string or file or callable or None
448
+ Filename or file handle for data output.
449
+ if a function, then it will be called for each generated line.
450
+ if None, this will default to "sys.stdout.write"
451
+
452
+ with_labels : bool | str
453
+ If True will use the "label" attribute of a node to display if it
454
+ exists otherwise it will use the node value itself. If given as a
455
+ string, then that attribute name will be used instead of "label".
456
+ Defaults to True.
457
+
458
+ sources : List
459
+ Specifies which nodes to start traversal from. Note: nodes that are not
460
+ reachable from one of these sources may not be shown. If unspecified,
461
+ the minimal set of nodes needed to reach all others will be used.
462
+
463
+ max_depth : int | None
464
+ The maximum depth to traverse before stopping. Defaults to None.
465
+
466
+ ascii_only : Boolean
467
+ If True only ASCII characters are used to construct the visualization
468
+
469
+ end : string
470
+ The line ending character
471
+
472
+ vertical_chains : Boolean
473
+ If True, chains of nodes will be drawn vertically when possible.
474
+
475
+ Examples
476
+ --------
477
+ >>> graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
478
+ >>> nx.write_network_text(graph)
479
+ ╙── 0
480
+ ├─╼ 1
481
+ │ ├─╼ 3
482
+ │ └─╼ 4
483
+ └─╼ 2
484
+ ├─╼ 5
485
+ └─╼ 6
486
+
487
+ >>> # A near tree with one non-tree edge
488
+ >>> graph.add_edge(5, 1)
489
+ >>> nx.write_network_text(graph)
490
+ ╙── 0
491
+ ├─╼ 1 ╾ 5
492
+ │ ├─╼ 3
493
+ │ └─╼ 4
494
+ └─╼ 2
495
+ ├─╼ 5
496
+ │ └─╼ ...
497
+ └─╼ 6
498
+
499
+ >>> graph = nx.cycle_graph(5)
500
+ >>> nx.write_network_text(graph)
501
+ ╙── 0
502
+ ├── 1
503
+ │ └── 2
504
+ │ └── 3
505
+ │ └── 4 ─ 0
506
+ └── ...
507
+
508
+ >>> graph = nx.cycle_graph(5, nx.DiGraph)
509
+ >>> nx.write_network_text(graph, vertical_chains=True)
510
+ ╙── 0 ╾ 4
511
+
512
+ 1
513
+
514
+ 2
515
+
516
+ 3
517
+
518
+ 4
519
+ └─╼ ...
520
+
521
+ >>> nx.write_network_text(graph, vertical_chains=True, ascii_only=True)
522
+ +-- 0 <- 4
523
+ !
524
+ 1
525
+ !
526
+ 2
527
+ !
528
+ 3
529
+ !
530
+ 4
531
+ L-> ...
532
+
533
+ >>> graph = nx.generators.barbell_graph(4, 2)
534
+ >>> nx.write_network_text(graph, vertical_chains=False)
535
+ ╙── 4
536
+ ├── 5
537
+ │ └── 6
538
+ │ ├── 7
539
+ │ │ ├── 8 ─ 6
540
+ │ │ │ └── 9 ─ 6, 7
541
+ │ │ └── ...
542
+ │ └── ...
543
+ └── 3
544
+ ├── 0
545
+ │ ├── 1 ─ 3
546
+ │ │ └── 2 ─ 0, 3
547
+ │ └── ...
548
+ └── ...
549
+ >>> nx.write_network_text(graph, vertical_chains=True)
550
+ ╙── 4
551
+ ├── 5
552
+ │ │
553
+ │ 6
554
+ │ ├── 7
555
+ │ │ ├── 8 ─ 6
556
+ │ │ │ │
557
+ │ │ │ 9 ─ 6, 7
558
+ │ │ └── ...
559
+ │ └── ...
560
+ └── 3
561
+ ├── 0
562
+ │ ├── 1 ─ 3
563
+ │ │ │
564
+ │ │ 2 ─ 0, 3
565
+ │ └── ...
566
+ └── ...
567
+
568
+ >>> graph = nx.complete_graph(5, create_using=nx.Graph)
569
+ >>> nx.write_network_text(graph)
570
+ ╙── 0
571
+ ├── 1
572
+ │ ├── 2 ─ 0
573
+ │ │ ├── 3 ─ 0, 1
574
+ │ │ │ └── 4 ─ 0, 1, 2
575
+ │ │ └── ...
576
+ │ └── ...
577
+ └── ...
578
+
579
+ >>> graph = nx.complete_graph(3, create_using=nx.DiGraph)
580
+ >>> nx.write_network_text(graph)
581
+ ╙── 0 ╾ 1, 2
582
+ ├─╼ 1 ╾ 2
583
+ │ ├─╼ 2 ╾ 0
584
+ │ │ └─╼ ...
585
+ │ └─╼ ...
586
+ └─╼ ...
587
+ """
588
+ if path is None:
589
+ # The path is unspecified, write to stdout
590
+ _write = sys.stdout.write
591
+ elif hasattr(path, "write"):
592
+ # The path is already an open file
593
+ _write = path.write
594
+ elif callable(path):
595
+ # The path is a custom callable
596
+ _write = path
597
+ else:
598
+ raise TypeError(type(path))
599
+
600
+ for line in generate_network_text(
601
+ graph,
602
+ with_labels=with_labels,
603
+ sources=sources,
604
+ max_depth=max_depth,
605
+ ascii_only=ascii_only,
606
+ vertical_chains=vertical_chains,
607
+ ):
608
+ _write(line + end)
609
+
610
+
611
+ def _find_sources(graph):
612
+ """
613
+ Determine a minimal set of nodes such that the entire graph is reachable
614
+ """
615
+ # For each connected part of the graph, choose at least
616
+ # one node as a starting point, preferably without a parent
617
+ if graph.is_directed():
618
+ # Choose one node from each SCC with minimum in_degree
619
+ sccs = list(nx.strongly_connected_components(graph))
620
+ # condensing the SCCs forms a dag, the nodes in this graph with
621
+ # 0 in-degree correspond to the SCCs from which the minimum set
622
+ # of nodes from which all other nodes can be reached.
623
+ scc_graph = nx.condensation(graph, sccs)
624
+ supernode_to_nodes = {sn: [] for sn in scc_graph.nodes()}
625
+ # Note: the order of mapping differs between pypy and cpython
626
+ # so we have to loop over graph nodes for consistency
627
+ mapping = scc_graph.graph["mapping"]
628
+ for n in graph.nodes:
629
+ sn = mapping[n]
630
+ supernode_to_nodes[sn].append(n)
631
+ sources = []
632
+ for sn in scc_graph.nodes():
633
+ if scc_graph.in_degree[sn] == 0:
634
+ scc = supernode_to_nodes[sn]
635
+ node = min(scc, key=lambda n: graph.in_degree[n])
636
+ sources.append(node)
637
+ else:
638
+ # For undirected graph, the entire graph will be reachable as
639
+ # long as we consider one node from every connected component
640
+ sources = [
641
+ min(cc, key=lambda n: graph.degree[n])
642
+ for cc in nx.connected_components(graph)
643
+ ]
644
+ sources = sorted(sources, key=lambda n: graph.degree[n])
645
+ return sources
646
+
647
+
648
+ def _parse_network_text(lines):
649
+ """Reconstructs a graph from a network text representation.
650
+
651
+ This is mainly used for testing. Network text is for display, not
652
+ serialization, as such this cannot parse all network text representations
653
+ because node labels can be ambiguous with the glyphs and indentation used
654
+ to represent edge structure. Additionally, there is no way to determine if
655
+ disconnected graphs were originally directed or undirected.
656
+
657
+ Parameters
658
+ ----------
659
+ lines : list or iterator of strings
660
+ Input data in network text format
661
+
662
+ Returns
663
+ -------
664
+ G: NetworkX graph
665
+ The graph corresponding to the lines in network text format.
666
+ """
667
+ from itertools import chain
668
+ from typing import Any, NamedTuple, Union
669
+
670
+ class ParseStackFrame(NamedTuple):
671
+ node: Any
672
+ indent: int
673
+ has_vertical_child: int | None
674
+
675
+ initial_line_iter = iter(lines)
676
+
677
+ is_ascii = None
678
+ is_directed = None
679
+
680
+ ##############
681
+ # Initial Pass
682
+ ##############
683
+
684
+ # Do an initial pass over the lines to determine what type of graph it is.
685
+ # Remember what these lines were, so we can reiterate over them in the
686
+ # parsing pass.
687
+ initial_lines = []
688
+ try:
689
+ first_line = next(initial_line_iter)
690
+ except StopIteration:
691
+ ...
692
+ else:
693
+ initial_lines.append(first_line)
694
+ # The first character indicates if it is an ASCII or UTF graph
695
+ first_char = first_line[0]
696
+ if first_char in {
697
+ UtfBaseGlyphs.empty,
698
+ UtfBaseGlyphs.newtree_mid[0],
699
+ UtfBaseGlyphs.newtree_last[0],
700
+ }:
701
+ is_ascii = False
702
+ elif first_char in {
703
+ AsciiBaseGlyphs.empty,
704
+ AsciiBaseGlyphs.newtree_mid[0],
705
+ AsciiBaseGlyphs.newtree_last[0],
706
+ }:
707
+ is_ascii = True
708
+ else:
709
+ raise AssertionError(f"Unexpected first character: {first_char}")
710
+
711
+ if is_ascii:
712
+ directed_glyphs = AsciiDirectedGlyphs.as_dict()
713
+ undirected_glyphs = AsciiUndirectedGlyphs.as_dict()
714
+ else:
715
+ directed_glyphs = UtfDirectedGlyphs.as_dict()
716
+ undirected_glyphs = UtfUndirectedGlyphs.as_dict()
717
+
718
+ # For both directed / undirected glyphs, determine which glyphs never
719
+ # appear as substrings in the other undirected / directed glyphs. Glyphs
720
+ # with this property unambiguously indicates if a graph is directed /
721
+ # undirected.
722
+ directed_items = set(directed_glyphs.values())
723
+ undirected_items = set(undirected_glyphs.values())
724
+ unambiguous_directed_items = []
725
+ for item in directed_items:
726
+ other_items = undirected_items
727
+ other_supersets = [other for other in other_items if item in other]
728
+ if not other_supersets:
729
+ unambiguous_directed_items.append(item)
730
+ unambiguous_undirected_items = []
731
+ for item in undirected_items:
732
+ other_items = directed_items
733
+ other_supersets = [other for other in other_items if item in other]
734
+ if not other_supersets:
735
+ unambiguous_undirected_items.append(item)
736
+
737
+ for line in initial_line_iter:
738
+ initial_lines.append(line)
739
+ if any(item in line for item in unambiguous_undirected_items):
740
+ is_directed = False
741
+ break
742
+ elif any(item in line for item in unambiguous_directed_items):
743
+ is_directed = True
744
+ break
745
+
746
+ if is_directed is None:
747
+ # Not enough information to determine, choose undirected by default
748
+ is_directed = False
749
+
750
+ glyphs = directed_glyphs if is_directed else undirected_glyphs
751
+
752
+ # the backedge symbol by itself can be ambiguous, but with spaces around it
753
+ # becomes unambiguous.
754
+ backedge_symbol = " " + glyphs["backedge"] + " "
755
+
756
+ # Reconstruct an iterator over all of the lines.
757
+ parsing_line_iter = chain(initial_lines, initial_line_iter)
758
+
759
+ ##############
760
+ # Parsing Pass
761
+ ##############
762
+
763
+ edges = []
764
+ nodes = []
765
+ is_empty = None
766
+
767
+ noparent = object() # sentinel value
768
+
769
+ # keep a stack of previous nodes that could be parents of subsequent nodes
770
+ stack = [ParseStackFrame(noparent, -1, None)]
771
+
772
+ for line in parsing_line_iter:
773
+ if line == glyphs["empty"]:
774
+ # If the line is the empty glyph, we are done.
775
+ # There shouldn't be anything else after this.
776
+ is_empty = True
777
+ continue
778
+
779
+ if backedge_symbol in line:
780
+ # This line has one or more backedges, separate those out
781
+ node_part, backedge_part = line.split(backedge_symbol)
782
+ backedge_nodes = [u.strip() for u in backedge_part.split(", ")]
783
+ # Now the node can be parsed
784
+ node_part = node_part.rstrip()
785
+ prefix, node = node_part.rsplit(" ", 1)
786
+ node = node.strip()
787
+ # Add the backedges to the edge list
788
+ edges.extend([(u, node) for u in backedge_nodes])
789
+ else:
790
+ # No backedge, the tail of this line is the node
791
+ prefix, node = line.rsplit(" ", 1)
792
+ node = node.strip()
793
+
794
+ prev = stack.pop()
795
+
796
+ if node in glyphs["vertical_edge"]:
797
+ # Previous node is still the previous node, but we know it will
798
+ # have exactly one child, which will need to have its nesting level
799
+ # adjusted.
800
+ modified_prev = ParseStackFrame(
801
+ prev.node,
802
+ prev.indent,
803
+ True,
804
+ )
805
+ stack.append(modified_prev)
806
+ continue
807
+
808
+ # The length of the string before the node characters give us a hint
809
+ # about our nesting level. The only case where this doesn't work is
810
+ # when there are vertical chains, which is handled explicitly.
811
+ indent = len(prefix)
812
+ curr = ParseStackFrame(node, indent, None)
813
+
814
+ if prev.has_vertical_child:
815
+ # In this case we know prev must be the parent of our current line,
816
+ # so we don't have to search the stack. (which is good because the
817
+ # indentation check wouldn't work in this case).
818
+ ...
819
+ else:
820
+ # If the previous node nesting-level is greater than the current
821
+ # nodes nesting-level than the previous node was the end of a path,
822
+ # and is not our parent. We can safely pop nodes off the stack
823
+ # until we find one with a comparable nesting-level, which is our
824
+ # parent.
825
+ while curr.indent <= prev.indent:
826
+ prev = stack.pop()
827
+
828
+ if node == "...":
829
+ # The current previous node is no longer a valid parent,
830
+ # keep it popped from the stack.
831
+ stack.append(prev)
832
+ else:
833
+ # The previous and current nodes may still be parents, so add them
834
+ # back onto the stack.
835
+ stack.append(prev)
836
+ stack.append(curr)
837
+
838
+ # Add the node and the edge to its parent to the node / edge lists.
839
+ nodes.append(curr.node)
840
+ if prev.node is not noparent:
841
+ edges.append((prev.node, curr.node))
842
+
843
+ if is_empty:
844
+ # Sanity check
845
+ assert len(nodes) == 0
846
+
847
+ # Reconstruct the graph
848
+ cls = nx.DiGraph if is_directed else nx.Graph
849
+ new = cls()
850
+ new.add_nodes_from(nodes)
851
+ new.add_edges_from(edges)
852
+ return new
pllava/lib/python3.10/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
2
+ #error You should not include this header directly
3
+ #endif
4
+ /*
5
+ * Private API (here for inline)
6
+ */
7
+ static inline int
8
+ _PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
9
+
10
+ /*
11
+ * Update to next item of the iterator
12
+ *
13
+ * Note: this simply increment the coordinates vector, last dimension
14
+ * incremented first , i.e, for dimension 3
15
+ * ...
16
+ * -1, -1, -1
17
+ * -1, -1, 0
18
+ * -1, -1, 1
19
+ * ....
20
+ * -1, 0, -1
21
+ * -1, 0, 0
22
+ * ....
23
+ * 0, -1, -1
24
+ * 0, -1, 0
25
+ * ....
26
+ */
27
+ #define _UPDATE_COORD_ITER(c) \
28
+ wb = iter->coordinates[c] < iter->bounds[c][1]; \
29
+ if (wb) { \
30
+ iter->coordinates[c] += 1; \
31
+ return 0; \
32
+ } \
33
+ else { \
34
+ iter->coordinates[c] = iter->bounds[c][0]; \
35
+ }
36
+
37
+ static inline int
38
+ _PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
39
+ {
40
+ npy_intp i, wb;
41
+
42
+ for (i = iter->nd - 1; i >= 0; --i) {
43
+ _UPDATE_COORD_ITER(i)
44
+ }
45
+
46
+ return 0;
47
+ }
48
+
49
+ /*
50
+ * Version optimized for 2d arrays, manual loop unrolling
51
+ */
52
+ static inline int
53
+ _PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
54
+ {
55
+ npy_intp wb;
56
+
57
+ _UPDATE_COORD_ITER(1)
58
+ _UPDATE_COORD_ITER(0)
59
+
60
+ return 0;
61
+ }
62
+ #undef _UPDATE_COORD_ITER
63
+
64
+ /*
65
+ * Advance to the next neighbour
66
+ */
67
+ static inline int
68
+ PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
69
+ {
70
+ _PyArrayNeighborhoodIter_IncrCoord (iter);
71
+ iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
72
+
73
+ return 0;
74
+ }
75
+
76
+ /*
77
+ * Reset functions
78
+ */
79
+ static inline int
80
+ PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
81
+ {
82
+ npy_intp i;
83
+
84
+ for (i = 0; i < iter->nd; ++i) {
85
+ iter->coordinates[i] = iter->bounds[i][0];
86
+ }
87
+ iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
88
+
89
+ return 0;
90
+ }
pllava/lib/python3.10/site-packages/numpy/core/include/numpy/npy_3kcompat.h ADDED
@@ -0,0 +1,595 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * This is a convenience header file providing compatibility utilities
3
+ * for supporting different minor versions of Python 3.
4
+ * It was originally used to support the transition from Python 2,
5
+ * hence the "3k" naming.
6
+ *
7
+ * If you want to use this for your own projects, it's recommended to make a
8
+ * copy of it. Although the stuff below is unlikely to change, we don't provide
9
+ * strong backwards compatibility guarantees at the moment.
10
+ */
11
+
12
+ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
13
+ #define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
14
+
15
+ #include <Python.h>
16
+ #include <stdio.h>
17
+
18
+ #ifndef NPY_PY3K
19
+ #define NPY_PY3K 1
20
+ #endif
21
+
22
+ #include "numpy/npy_common.h"
23
+ #include "numpy/ndarrayobject.h"
24
+
25
+ #ifdef __cplusplus
26
+ extern "C" {
27
+ #endif
28
+
29
+ /*
30
+ * PyInt -> PyLong
31
+ */
32
+
33
+
34
+ /*
35
+ * This is a renamed copy of the Python non-limited API function _PyLong_AsInt. It is
36
+ * included here because it is missing from the PyPy API. It completes the PyLong_As*
37
+ * group of functions and can be useful in replacing PyInt_Check.
38
+ */
39
+ static inline int
40
+ Npy__PyLong_AsInt(PyObject *obj)
41
+ {
42
+ int overflow;
43
+ long result = PyLong_AsLongAndOverflow(obj, &overflow);
44
+
45
+ /* INT_MAX and INT_MIN are defined in Python.h */
46
+ if (overflow || result > INT_MAX || result < INT_MIN) {
47
+ /* XXX: could be cute and give a different
48
+ message for overflow == -1 */
49
+ PyErr_SetString(PyExc_OverflowError,
50
+ "Python int too large to convert to C int");
51
+ return -1;
52
+ }
53
+ return (int)result;
54
+ }
55
+
56
+
57
+ #if defined(NPY_PY3K)
58
+ /* Return True only if the long fits in a C long */
59
+ static inline int PyInt_Check(PyObject *op) {
60
+ int overflow = 0;
61
+ if (!PyLong_Check(op)) {
62
+ return 0;
63
+ }
64
+ PyLong_AsLongAndOverflow(op, &overflow);
65
+ return (overflow == 0);
66
+ }
67
+
68
+
69
+ #define PyInt_FromLong PyLong_FromLong
70
+ #define PyInt_AsLong PyLong_AsLong
71
+ #define PyInt_AS_LONG PyLong_AsLong
72
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
73
+ #define PyNumber_Int PyNumber_Long
74
+
75
+ /* NOTE:
76
+ *
77
+ * Since the PyLong type is very different from the fixed-range PyInt,
78
+ * we don't define PyInt_Type -> PyLong_Type.
79
+ */
80
+ #endif /* NPY_PY3K */
81
+
82
+ /* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */
83
+ #ifdef NPY_PY3K
84
+ # define NpySlice_GetIndicesEx PySlice_GetIndicesEx
85
+ #else
86
+ # define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \
87
+ PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength)
88
+ #endif
89
+
90
+ #if PY_VERSION_HEX < 0x030900a4
91
+ /* Introduced in https://github.com/python/cpython/commit/d2ec81a8c99796b51fb8c49b77a7fe369863226f */
92
+ #define Py_SET_TYPE(obj, type) ((Py_TYPE(obj) = (type)), (void)0)
93
+ /* Introduced in https://github.com/python/cpython/commit/b10dc3e7a11fcdb97e285882eba6da92594f90f9 */
94
+ #define Py_SET_SIZE(obj, size) ((Py_SIZE(obj) = (size)), (void)0)
95
+ /* Introduced in https://github.com/python/cpython/commit/c86a11221df7e37da389f9c6ce6e47ea22dc44ff */
96
+ #define Py_SET_REFCNT(obj, refcnt) ((Py_REFCNT(obj) = (refcnt)), (void)0)
97
+ #endif
98
+
99
+
100
+ #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
101
+
102
+ /*
103
+ * PyString -> PyBytes
104
+ */
105
+
106
+ #if defined(NPY_PY3K)
107
+
108
+ #define PyString_Type PyBytes_Type
109
+ #define PyString_Check PyBytes_Check
110
+ #define PyStringObject PyBytesObject
111
+ #define PyString_FromString PyBytes_FromString
112
+ #define PyString_FromStringAndSize PyBytes_FromStringAndSize
113
+ #define PyString_AS_STRING PyBytes_AS_STRING
114
+ #define PyString_AsStringAndSize PyBytes_AsStringAndSize
115
+ #define PyString_FromFormat PyBytes_FromFormat
116
+ #define PyString_Concat PyBytes_Concat
117
+ #define PyString_ConcatAndDel PyBytes_ConcatAndDel
118
+ #define PyString_AsString PyBytes_AsString
119
+ #define PyString_GET_SIZE PyBytes_GET_SIZE
120
+ #define PyString_Size PyBytes_Size
121
+
122
+ #define PyUString_Type PyUnicode_Type
123
+ #define PyUString_Check PyUnicode_Check
124
+ #define PyUStringObject PyUnicodeObject
125
+ #define PyUString_FromString PyUnicode_FromString
126
+ #define PyUString_FromStringAndSize PyUnicode_FromStringAndSize
127
+ #define PyUString_FromFormat PyUnicode_FromFormat
128
+ #define PyUString_Concat PyUnicode_Concat2
129
+ #define PyUString_ConcatAndDel PyUnicode_ConcatAndDel
130
+ #define PyUString_GET_SIZE PyUnicode_GET_SIZE
131
+ #define PyUString_Size PyUnicode_Size
132
+ #define PyUString_InternFromString PyUnicode_InternFromString
133
+ #define PyUString_Format PyUnicode_Format
134
+
135
+ #define PyBaseString_Check(obj) (PyUnicode_Check(obj))
136
+
137
+ #else
138
+
139
+ #define PyBytes_Type PyString_Type
140
+ #define PyBytes_Check PyString_Check
141
+ #define PyBytesObject PyStringObject
142
+ #define PyBytes_FromString PyString_FromString
143
+ #define PyBytes_FromStringAndSize PyString_FromStringAndSize
144
+ #define PyBytes_AS_STRING PyString_AS_STRING
145
+ #define PyBytes_AsStringAndSize PyString_AsStringAndSize
146
+ #define PyBytes_FromFormat PyString_FromFormat
147
+ #define PyBytes_Concat PyString_Concat
148
+ #define PyBytes_ConcatAndDel PyString_ConcatAndDel
149
+ #define PyBytes_AsString PyString_AsString
150
+ #define PyBytes_GET_SIZE PyString_GET_SIZE
151
+ #define PyBytes_Size PyString_Size
152
+
153
+ #define PyUString_Type PyString_Type
154
+ #define PyUString_Check PyString_Check
155
+ #define PyUStringObject PyStringObject
156
+ #define PyUString_FromString PyString_FromString
157
+ #define PyUString_FromStringAndSize PyString_FromStringAndSize
158
+ #define PyUString_FromFormat PyString_FromFormat
159
+ #define PyUString_Concat PyString_Concat
160
+ #define PyUString_ConcatAndDel PyString_ConcatAndDel
161
+ #define PyUString_GET_SIZE PyString_GET_SIZE
162
+ #define PyUString_Size PyString_Size
163
+ #define PyUString_InternFromString PyString_InternFromString
164
+ #define PyUString_Format PyString_Format
165
+
166
+ #define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj))
167
+
168
+ #endif /* NPY_PY3K */
169
+
170
+ /*
171
+ * Macros to protect CRT calls against instant termination when passed an
172
+ * invalid parameter (https://bugs.python.org/issue23524).
173
+ */
174
+ #if defined _MSC_VER && _MSC_VER >= 1900
175
+
176
+ #include <stdlib.h>
177
+
178
+ extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;
179
+ #define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \
180
+ _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler);
181
+ #define NPY_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); }
182
+
183
+ #else
184
+
185
+ #define NPY_BEGIN_SUPPRESS_IPH
186
+ #define NPY_END_SUPPRESS_IPH
187
+
188
+ #endif /* _MSC_VER >= 1900 */
189
+
190
+
191
+ static inline void
192
+ PyUnicode_ConcatAndDel(PyObject **left, PyObject *right)
193
+ {
194
+ Py_SETREF(*left, PyUnicode_Concat(*left, right));
195
+ Py_DECREF(right);
196
+ }
197
+
198
+ static inline void
199
+ PyUnicode_Concat2(PyObject **left, PyObject *right)
200
+ {
201
+ Py_SETREF(*left, PyUnicode_Concat(*left, right));
202
+ }
203
+
204
+ /*
205
+ * PyFile_* compatibility
206
+ */
207
+
208
+ /*
209
+ * Get a FILE* handle to the file represented by the Python object
210
+ */
211
+ static inline FILE*
212
+ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
213
+ {
214
+ int fd, fd2, unbuf;
215
+ Py_ssize_t fd2_tmp;
216
+ PyObject *ret, *os, *io, *io_raw;
217
+ npy_off_t pos;
218
+ FILE *handle;
219
+
220
+ /* For Python 2 PyFileObject, use PyFile_AsFile */
221
+ #if !defined(NPY_PY3K)
222
+ if (PyFile_Check(file)) {
223
+ return PyFile_AsFile(file);
224
+ }
225
+ #endif
226
+
227
+ /* Flush first to ensure things end up in the file in the correct order */
228
+ ret = PyObject_CallMethod(file, "flush", "");
229
+ if (ret == NULL) {
230
+ return NULL;
231
+ }
232
+ Py_DECREF(ret);
233
+ fd = PyObject_AsFileDescriptor(file);
234
+ if (fd == -1) {
235
+ return NULL;
236
+ }
237
+
238
+ /*
239
+ * The handle needs to be dup'd because we have to call fclose
240
+ * at the end
241
+ */
242
+ os = PyImport_ImportModule("os");
243
+ if (os == NULL) {
244
+ return NULL;
245
+ }
246
+ ret = PyObject_CallMethod(os, "dup", "i", fd);
247
+ Py_DECREF(os);
248
+ if (ret == NULL) {
249
+ return NULL;
250
+ }
251
+ fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError);
252
+ Py_DECREF(ret);
253
+ if (fd2_tmp == -1 && PyErr_Occurred()) {
254
+ return NULL;
255
+ }
256
+ if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) {
257
+ PyErr_SetString(PyExc_IOError,
258
+ "Getting an 'int' from os.dup() failed");
259
+ return NULL;
260
+ }
261
+ fd2 = (int)fd2_tmp;
262
+
263
+ /* Convert to FILE* handle */
264
+ #ifdef _WIN32
265
+ NPY_BEGIN_SUPPRESS_IPH
266
+ handle = _fdopen(fd2, mode);
267
+ NPY_END_SUPPRESS_IPH
268
+ #else
269
+ handle = fdopen(fd2, mode);
270
+ #endif
271
+ if (handle == NULL) {
272
+ PyErr_SetString(PyExc_IOError,
273
+ "Getting a FILE* from a Python file object via "
274
+ "_fdopen failed. If you built NumPy, you probably "
275
+ "linked with the wrong debug/release runtime");
276
+ return NULL;
277
+ }
278
+
279
+ /* Record the original raw file handle position */
280
+ *orig_pos = npy_ftell(handle);
281
+ if (*orig_pos == -1) {
282
+ /* The io module is needed to determine if buffering is used */
283
+ io = PyImport_ImportModule("io");
284
+ if (io == NULL) {
285
+ fclose(handle);
286
+ return NULL;
287
+ }
288
+ /* File object instances of RawIOBase are unbuffered */
289
+ io_raw = PyObject_GetAttrString(io, "RawIOBase");
290
+ Py_DECREF(io);
291
+ if (io_raw == NULL) {
292
+ fclose(handle);
293
+ return NULL;
294
+ }
295
+ unbuf = PyObject_IsInstance(file, io_raw);
296
+ Py_DECREF(io_raw);
297
+ if (unbuf == 1) {
298
+ /* Succeed if the IO is unbuffered */
299
+ return handle;
300
+ }
301
+ else {
302
+ PyErr_SetString(PyExc_IOError, "obtaining file position failed");
303
+ fclose(handle);
304
+ return NULL;
305
+ }
306
+ }
307
+
308
+ /* Seek raw handle to the Python-side position */
309
+ ret = PyObject_CallMethod(file, "tell", "");
310
+ if (ret == NULL) {
311
+ fclose(handle);
312
+ return NULL;
313
+ }
314
+ pos = PyLong_AsLongLong(ret);
315
+ Py_DECREF(ret);
316
+ if (PyErr_Occurred()) {
317
+ fclose(handle);
318
+ return NULL;
319
+ }
320
+ if (npy_fseek(handle, pos, SEEK_SET) == -1) {
321
+ PyErr_SetString(PyExc_IOError, "seeking file failed");
322
+ fclose(handle);
323
+ return NULL;
324
+ }
325
+ return handle;
326
+ }
327
+
328
+ /*
329
+ * Close the dup-ed file handle, and seek the Python one to the current position
330
+ */
331
+ static inline int
332
+ npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)
333
+ {
334
+ int fd, unbuf;
335
+ PyObject *ret, *io, *io_raw;
336
+ npy_off_t position;
337
+
338
+ /* For Python 2 PyFileObject, do nothing */
339
+ #if !defined(NPY_PY3K)
340
+ if (PyFile_Check(file)) {
341
+ return 0;
342
+ }
343
+ #endif
344
+
345
+ position = npy_ftell(handle);
346
+
347
+ /* Close the FILE* handle */
348
+ fclose(handle);
349
+
350
+ /*
351
+ * Restore original file handle position, in order to not confuse
352
+ * Python-side data structures
353
+ */
354
+ fd = PyObject_AsFileDescriptor(file);
355
+ if (fd == -1) {
356
+ return -1;
357
+ }
358
+
359
+ if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) {
360
+
361
+ /* The io module is needed to determine if buffering is used */
362
+ io = PyImport_ImportModule("io");
363
+ if (io == NULL) {
364
+ return -1;
365
+ }
366
+ /* File object instances of RawIOBase are unbuffered */
367
+ io_raw = PyObject_GetAttrString(io, "RawIOBase");
368
+ Py_DECREF(io);
369
+ if (io_raw == NULL) {
370
+ return -1;
371
+ }
372
+ unbuf = PyObject_IsInstance(file, io_raw);
373
+ Py_DECREF(io_raw);
374
+ if (unbuf == 1) {
375
+ /* Succeed if the IO is unbuffered */
376
+ return 0;
377
+ }
378
+ else {
379
+ PyErr_SetString(PyExc_IOError, "seeking file failed");
380
+ return -1;
381
+ }
382
+ }
383
+
384
+ if (position == -1) {
385
+ PyErr_SetString(PyExc_IOError, "obtaining file position failed");
386
+ return -1;
387
+ }
388
+
389
+ /* Seek Python-side handle to the FILE* handle position */
390
+ ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0);
391
+ if (ret == NULL) {
392
+ return -1;
393
+ }
394
+ Py_DECREF(ret);
395
+ return 0;
396
+ }
397
+
398
+ static inline int
399
+ npy_PyFile_Check(PyObject *file)
400
+ {
401
+ int fd;
402
+ /* For Python 2, check if it is a PyFileObject */
403
+ #if !defined(NPY_PY3K)
404
+ if (PyFile_Check(file)) {
405
+ return 1;
406
+ }
407
+ #endif
408
+ fd = PyObject_AsFileDescriptor(file);
409
+ if (fd == -1) {
410
+ PyErr_Clear();
411
+ return 0;
412
+ }
413
+ return 1;
414
+ }
415
+
416
+ static inline PyObject*
417
+ npy_PyFile_OpenFile(PyObject *filename, const char *mode)
418
+ {
419
+ PyObject *open;
420
+ open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");
421
+ if (open == NULL) {
422
+ return NULL;
423
+ }
424
+ return PyObject_CallFunction(open, "Os", filename, mode);
425
+ }
426
+
427
+ static inline int
428
+ npy_PyFile_CloseFile(PyObject *file)
429
+ {
430
+ PyObject *ret;
431
+
432
+ ret = PyObject_CallMethod(file, "close", NULL);
433
+ if (ret == NULL) {
434
+ return -1;
435
+ }
436
+ Py_DECREF(ret);
437
+ return 0;
438
+ }
439
+
440
+
441
+ /* This is a copy of _PyErr_ChainExceptions
442
+ */
443
+ static inline void
444
+ npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
445
+ {
446
+ if (exc == NULL)
447
+ return;
448
+
449
+ if (PyErr_Occurred()) {
450
+ /* only py3 supports this anyway */
451
+ #ifdef NPY_PY3K
452
+ PyObject *exc2, *val2, *tb2;
453
+ PyErr_Fetch(&exc2, &val2, &tb2);
454
+ PyErr_NormalizeException(&exc, &val, &tb);
455
+ if (tb != NULL) {
456
+ PyException_SetTraceback(val, tb);
457
+ Py_DECREF(tb);
458
+ }
459
+ Py_DECREF(exc);
460
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
461
+ PyException_SetContext(val2, val);
462
+ PyErr_Restore(exc2, val2, tb2);
463
+ #endif
464
+ }
465
+ else {
466
+ PyErr_Restore(exc, val, tb);
467
+ }
468
+ }
469
+
470
+
471
+ /* This is a copy of _PyErr_ChainExceptions, with:
472
+ * - a minimal implementation for python 2
473
+ * - __cause__ used instead of __context__
474
+ */
475
+ static inline void
476
+ npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
477
+ {
478
+ if (exc == NULL)
479
+ return;
480
+
481
+ if (PyErr_Occurred()) {
482
+ /* only py3 supports this anyway */
483
+ #ifdef NPY_PY3K
484
+ PyObject *exc2, *val2, *tb2;
485
+ PyErr_Fetch(&exc2, &val2, &tb2);
486
+ PyErr_NormalizeException(&exc, &val, &tb);
487
+ if (tb != NULL) {
488
+ PyException_SetTraceback(val, tb);
489
+ Py_DECREF(tb);
490
+ }
491
+ Py_DECREF(exc);
492
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
493
+ PyException_SetCause(val2, val);
494
+ PyErr_Restore(exc2, val2, tb2);
495
+ #endif
496
+ }
497
+ else {
498
+ PyErr_Restore(exc, val, tb);
499
+ }
500
+ }
501
+
502
+ /*
503
+ * PyObject_Cmp
504
+ */
505
+ #if defined(NPY_PY3K)
506
+ static inline int
507
+ PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
508
+ {
509
+ int v;
510
+ v = PyObject_RichCompareBool(i1, i2, Py_LT);
511
+ if (v == 1) {
512
+ *cmp = -1;
513
+ return 1;
514
+ }
515
+ else if (v == -1) {
516
+ return -1;
517
+ }
518
+
519
+ v = PyObject_RichCompareBool(i1, i2, Py_GT);
520
+ if (v == 1) {
521
+ *cmp = 1;
522
+ return 1;
523
+ }
524
+ else if (v == -1) {
525
+ return -1;
526
+ }
527
+
528
+ v = PyObject_RichCompareBool(i1, i2, Py_EQ);
529
+ if (v == 1) {
530
+ *cmp = 0;
531
+ return 1;
532
+ }
533
+ else {
534
+ *cmp = 0;
535
+ return -1;
536
+ }
537
+ }
538
+ #endif
539
+
540
+ /*
541
+ * PyCObject functions adapted to PyCapsules.
542
+ *
543
+ * The main job here is to get rid of the improved error handling
544
+ * of PyCapsules. It's a shame...
545
+ */
546
+ static inline PyObject *
547
+ NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
548
+ {
549
+ PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
550
+ if (ret == NULL) {
551
+ PyErr_Clear();
552
+ }
553
+ return ret;
554
+ }
555
+
556
+ static inline PyObject *
557
+ NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))
558
+ {
559
+ PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);
560
+ if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) {
561
+ PyErr_Clear();
562
+ Py_DECREF(ret);
563
+ ret = NULL;
564
+ }
565
+ return ret;
566
+ }
567
+
568
+ static inline void *
569
+ NpyCapsule_AsVoidPtr(PyObject *obj)
570
+ {
571
+ void *ret = PyCapsule_GetPointer(obj, NULL);
572
+ if (ret == NULL) {
573
+ PyErr_Clear();
574
+ }
575
+ return ret;
576
+ }
577
+
578
+ static inline void *
579
+ NpyCapsule_GetDesc(PyObject *obj)
580
+ {
581
+ return PyCapsule_GetContext(obj);
582
+ }
583
+
584
+ static inline int
585
+ NpyCapsule_Check(PyObject *ptr)
586
+ {
587
+ return PyCapsule_CheckExact(ptr);
588
+ }
589
+
590
+ #ifdef __cplusplus
591
+ }
592
+ #endif
593
+
594
+
595
+ #endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */
pllava/lib/python3.10/site-packages/numpy/core/include/numpy/npy_cpu.h ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * This set (target) cpu specific macros:
3
+ * - Possible values:
4
+ * NPY_CPU_X86
5
+ * NPY_CPU_AMD64
6
+ * NPY_CPU_PPC
7
+ * NPY_CPU_PPC64
8
+ * NPY_CPU_PPC64LE
9
+ * NPY_CPU_SPARC
10
+ * NPY_CPU_S390
11
+ * NPY_CPU_IA64
12
+ * NPY_CPU_HPPA
13
+ * NPY_CPU_ALPHA
14
+ * NPY_CPU_ARMEL
15
+ * NPY_CPU_ARMEB
16
+ * NPY_CPU_SH_LE
17
+ * NPY_CPU_SH_BE
18
+ * NPY_CPU_ARCEL
19
+ * NPY_CPU_ARCEB
20
+ * NPY_CPU_RISCV64
21
+ * NPY_CPU_LOONGARCH
22
+ * NPY_CPU_WASM
23
+ */
24
+ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
25
+ #define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
26
+
27
+ #include "numpyconfig.h"
28
+
29
+ #if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
30
+ /*
31
+ * __i386__ is defined by gcc and Intel compiler on Linux,
32
+ * _M_IX86 by VS compiler,
33
+ * i386 by Sun compilers on opensolaris at least
34
+ */
35
+ #define NPY_CPU_X86
36
+ #elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)
37
+ /*
38
+ * both __x86_64__ and __amd64__ are defined by gcc
39
+ * __x86_64 defined by sun compiler on opensolaris at least
40
+ * _M_AMD64 defined by MS compiler
41
+ */
42
+ #define NPY_CPU_AMD64
43
+ #elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
44
+ #define NPY_CPU_PPC64LE
45
+ #elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
46
+ #define NPY_CPU_PPC64
47
+ #elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
48
+ /*
49
+ * __ppc__ is defined by gcc, I remember having seen __powerpc__ once,
50
+ * but can't find it ATM
51
+ * _ARCH_PPC is used by at least gcc on AIX
52
+ * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check
53
+ * for those specifically first before defaulting to ppc
54
+ */
55
+ #define NPY_CPU_PPC
56
+ #elif defined(__sparc__) || defined(__sparc)
57
+ /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
58
+ #define NPY_CPU_SPARC
59
+ #elif defined(__s390__)
60
+ #define NPY_CPU_S390
61
+ #elif defined(__ia64)
62
+ #define NPY_CPU_IA64
63
+ #elif defined(__hppa)
64
+ #define NPY_CPU_HPPA
65
+ #elif defined(__alpha__)
66
+ #define NPY_CPU_ALPHA
67
+ #elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64)
68
+ /* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */
69
+ #if defined(__ARMEB__) || defined(__AARCH64EB__)
70
+ #if defined(__ARM_32BIT_STATE)
71
+ #define NPY_CPU_ARMEB_AARCH32
72
+ #elif defined(__ARM_64BIT_STATE)
73
+ #define NPY_CPU_ARMEB_AARCH64
74
+ #else
75
+ #define NPY_CPU_ARMEB
76
+ #endif
77
+ #elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
78
+ #if defined(__ARM_32BIT_STATE)
79
+ #define NPY_CPU_ARMEL_AARCH32
80
+ #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)
81
+ #define NPY_CPU_ARMEL_AARCH64
82
+ #else
83
+ #define NPY_CPU_ARMEL
84
+ #endif
85
+ #else
86
+ # error Unknown ARM CPU, please report this to numpy maintainers with \
87
+ information about your platform (OS, CPU and compiler)
88
+ #endif
89
+ #elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
90
+ #define NPY_CPU_SH_LE
91
+ #elif defined(__sh__) && defined(__BIG_ENDIAN__)
92
+ #define NPY_CPU_SH_BE
93
+ #elif defined(__MIPSEL__)
94
+ #define NPY_CPU_MIPSEL
95
+ #elif defined(__MIPSEB__)
96
+ #define NPY_CPU_MIPSEB
97
+ #elif defined(__or1k__)
98
+ #define NPY_CPU_OR1K
99
+ #elif defined(__mc68000__)
100
+ #define NPY_CPU_M68K
101
+ #elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
102
+ #define NPY_CPU_ARCEL
103
+ #elif defined(__arc__) && defined(__BIG_ENDIAN__)
104
+ #define NPY_CPU_ARCEB
105
+ #elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
106
+ #define NPY_CPU_RISCV64
107
+ #elif defined(__loongarch__)
108
+ #define NPY_CPU_LOONGARCH
109
+ #elif defined(__EMSCRIPTEN__)
110
+ /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
111
+ #define NPY_CPU_WASM
112
+ #else
113
+ #error Unknown CPU, please report this to numpy maintainers with \
114
+ information about your platform (OS, CPU and compiler)
115
+ #endif
116
+
117
+ /*
118
+ * Except for the following architectures, memory access is limited to the natural
119
+ * alignment of data types otherwise it may lead to bus error or performance regression.
120
+ * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt.
121
+ */
122
+ #if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__)
123
+ #define NPY_ALIGNMENT_REQUIRED 0
124
+ #endif
125
+ #ifndef NPY_ALIGNMENT_REQUIRED
126
+ #define NPY_ALIGNMENT_REQUIRED 1
127
+ #endif
128
+
129
+ #endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */
pllava/lib/python3.10/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * This include file is provided for inclusion in Cython *.pyd files where
3
+ * one would like to define the NPY_NO_DEPRECATED_API macro. It can be
4
+ * included by
5
+ *
6
+ * cdef extern from "npy_no_deprecated_api.h": pass
7
+ *
8
+ */
9
+ #ifndef NPY_NO_DEPRECATED_API
10
+
11
+ /* put this check here since there may be multiple includes in C extensions. */
12
+ #if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \
13
+ defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \
14
+ defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_)
15
+ #error "npy_no_deprecated_api.h" must be first among numpy includes.
16
+ #else
17
+ #define NPY_NO_DEPRECATED_API NPY_API_VERSION
18
+ #endif
19
+
20
+ #endif /* NPY_NO_DEPRECATED_API */
pllava/lib/python3.10/site-packages/numpy/core/include/numpy/npy_os.h ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_
2
+ #define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_
3
+
4
+ #if defined(linux) || defined(__linux) || defined(__linux__)
5
+ #define NPY_OS_LINUX
6
+ #elif defined(__FreeBSD__) || defined(__NetBSD__) || \
7
+ defined(__OpenBSD__) || defined(__DragonFly__)
8
+ #define NPY_OS_BSD
9
+ #ifdef __FreeBSD__
10
+ #define NPY_OS_FREEBSD
11
+ #elif defined(__NetBSD__)
12
+ #define NPY_OS_NETBSD
13
+ #elif defined(__OpenBSD__)
14
+ #define NPY_OS_OPENBSD
15
+ #elif defined(__DragonFly__)
16
+ #define NPY_OS_DRAGONFLY
17
+ #endif
18
+ #elif defined(sun) || defined(__sun)
19
+ #define NPY_OS_SOLARIS
20
+ #elif defined(__CYGWIN__)
21
+ #define NPY_OS_CYGWIN
22
+ /* We are on Windows.*/
23
+ #elif defined(_WIN32)
24
+ /* We are using MinGW (64-bit or 32-bit)*/
25
+ #if defined(__MINGW32__) || defined(__MINGW64__)
26
+ #define NPY_OS_MINGW
27
+ /* Otherwise, if _WIN64 is defined, we are targeting 64-bit Windows*/
28
+ #elif defined(_WIN64)
29
+ #define NPY_OS_WIN64
30
+ /* Otherwise assume we are targeting 32-bit Windows*/
31
+ #else
32
+ #define NPY_OS_WIN32
33
+ #endif
34
+ #elif defined(__APPLE__)
35
+ #define NPY_OS_DARWIN
36
+ #elif defined(__HAIKU__)
37
+ #define NPY_OS_HAIKU
38
+ #else
39
+ #define NPY_OS_UNKNOWN
40
+ #endif
41
+
42
+ #endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */
pllava/lib/python3.10/site-packages/numpy/core/include/numpy/random/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ zlib License
2
+ ------------
3
+
4
+ Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
5
+ Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
6
+
7
+ This software is provided 'as-is', without any express or implied
8
+ warranty. In no event will the authors be held liable for any damages
9
+ arising from the use of this software.
10
+
11
+ Permission is granted to anyone to use this software for any purpose,
12
+ including commercial applications, and to alter it and redistribute it
13
+ freely, subject to the following restrictions:
14
+
15
+ 1. The origin of this software must not be misrepresented; you must not
16
+ claim that you wrote the original software. If you use this software
17
+ in a product, an acknowledgment in the product documentation would be
18
+ appreciated but is not required.
19
+ 2. Altered source versions must be plainly marked as such, and must not be
20
+ misrepresented as being the original software.
21
+ 3. This notice may not be removed or altered from any source distribution.
pllava/lib/python3.10/site-packages/numpy/core/include/numpy/random/bitgen.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
2
+ #define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
3
+
4
+ #pragma once
5
+ #include <stddef.h>
6
+ #include <stdbool.h>
7
+ #include <stdint.h>
8
+
9
+ /* Must match the declaration in numpy/random/<any>.pxd */
10
+
11
+ typedef struct bitgen {
12
+ void *state;
13
+ uint64_t (*next_uint64)(void *st);
14
+ uint32_t (*next_uint32)(void *st);
15
+ double (*next_double)(void *st);
16
+ uint64_t (*next_raw)(void *st);
17
+ } bitgen_t;
18
+
19
+
20
+ #endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ */
pllava/lib/python3.10/site-packages/numpy/core/include/numpy/random/libdivide.h ADDED
@@ -0,0 +1,2079 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // libdivide.h - Optimized integer division
2
+ // https://libdivide.com
3
+ //
4
+ // Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
5
+ // Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
6
+ //
7
+ // libdivide is dual-licensed under the Boost or zlib licenses.
8
+ // You may use libdivide under the terms of either of these.
9
+ // See LICENSE.txt for more details.
10
+
11
+ #ifndef NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
12
+ #define NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
13
+
14
+ #define LIBDIVIDE_VERSION "3.0"
15
+ #define LIBDIVIDE_VERSION_MAJOR 3
16
+ #define LIBDIVIDE_VERSION_MINOR 0
17
+
18
+ #include <stdint.h>
19
+
20
+ #if defined(__cplusplus)
21
+ #include <cstdlib>
22
+ #include <cstdio>
23
+ #include <type_traits>
24
+ #else
25
+ #include <stdlib.h>
26
+ #include <stdio.h>
27
+ #endif
28
+
29
+ #if defined(LIBDIVIDE_AVX512)
30
+ #include <immintrin.h>
31
+ #elif defined(LIBDIVIDE_AVX2)
32
+ #include <immintrin.h>
33
+ #elif defined(LIBDIVIDE_SSE2)
34
+ #include <emmintrin.h>
35
+ #endif
36
+
37
+ #if defined(_MSC_VER)
38
+ #include <intrin.h>
39
+ // disable warning C4146: unary minus operator applied
40
+ // to unsigned type, result still unsigned
41
+ #pragma warning(disable: 4146)
42
+ #define LIBDIVIDE_VC
43
+ #endif
44
+
45
+ #if !defined(__has_builtin)
46
+ #define __has_builtin(x) 0
47
+ #endif
48
+
49
+ #if defined(__SIZEOF_INT128__)
50
+ #define HAS_INT128_T
51
+ // clang-cl on Windows does not yet support 128-bit division
52
+ #if !(defined(__clang__) && defined(LIBDIVIDE_VC))
53
+ #define HAS_INT128_DIV
54
+ #endif
55
+ #endif
56
+
57
+ #if defined(__x86_64__) || defined(_M_X64)
58
+ #define LIBDIVIDE_X86_64
59
+ #endif
60
+
61
+ #if defined(__i386__)
62
+ #define LIBDIVIDE_i386
63
+ #endif
64
+
65
+ #if defined(__GNUC__) || defined(__clang__)
66
+ #define LIBDIVIDE_GCC_STYLE_ASM
67
+ #endif
68
+
69
+ #if defined(__cplusplus) || defined(LIBDIVIDE_VC)
70
+ #define LIBDIVIDE_FUNCTION __FUNCTION__
71
+ #else
72
+ #define LIBDIVIDE_FUNCTION __func__
73
+ #endif
74
+
75
+ #define LIBDIVIDE_ERROR(msg) \
76
+ do { \
77
+ fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \
78
+ __LINE__, LIBDIVIDE_FUNCTION, msg); \
79
+ abort(); \
80
+ } while (0)
81
+
82
+ #if defined(LIBDIVIDE_ASSERTIONS_ON)
83
+ #define LIBDIVIDE_ASSERT(x) \
84
+ do { \
85
+ if (!(x)) { \
86
+ fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \
87
+ __LINE__, LIBDIVIDE_FUNCTION, #x); \
88
+ abort(); \
89
+ } \
90
+ } while (0)
91
+ #else
92
+ #define LIBDIVIDE_ASSERT(x)
93
+ #endif
94
+
95
+ #ifdef __cplusplus
96
+ namespace libdivide {
97
+ #endif
98
+
99
+ // pack divider structs to prevent compilers from padding.
100
+ // This reduces memory usage by up to 43% when using a large
101
+ // array of libdivide dividers and improves performance
102
+ // by up to 10% because of reduced memory bandwidth.
103
+ #pragma pack(push, 1)
104
+
105
+ struct libdivide_u32_t {
106
+ uint32_t magic;
107
+ uint8_t more;
108
+ };
109
+
110
+ struct libdivide_s32_t {
111
+ int32_t magic;
112
+ uint8_t more;
113
+ };
114
+
115
+ struct libdivide_u64_t {
116
+ uint64_t magic;
117
+ uint8_t more;
118
+ };
119
+
120
+ struct libdivide_s64_t {
121
+ int64_t magic;
122
+ uint8_t more;
123
+ };
124
+
125
+ struct libdivide_u32_branchfree_t {
126
+ uint32_t magic;
127
+ uint8_t more;
128
+ };
129
+
130
+ struct libdivide_s32_branchfree_t {
131
+ int32_t magic;
132
+ uint8_t more;
133
+ };
134
+
135
+ struct libdivide_u64_branchfree_t {
136
+ uint64_t magic;
137
+ uint8_t more;
138
+ };
139
+
140
+ struct libdivide_s64_branchfree_t {
141
+ int64_t magic;
142
+ uint8_t more;
143
+ };
144
+
145
+ #pragma pack(pop)
146
+
147
+ // Explanation of the "more" field:
148
+ //
149
+ // * Bits 0-5 is the shift value (for shift path or mult path).
150
+ // * Bit 6 is the add indicator for mult path.
151
+ // * Bit 7 is set if the divisor is negative. We use bit 7 as the negative
152
+ // divisor indicator so that we can efficiently use sign extension to
153
+ // create a bitmask with all bits set to 1 (if the divisor is negative)
154
+ // or 0 (if the divisor is positive).
155
+ //
156
+ // u32: [0-4] shift value
157
+ // [5] ignored
158
+ // [6] add indicator
159
+ // magic number of 0 indicates shift path
160
+ //
161
+ // s32: [0-4] shift value
162
+ // [5] ignored
163
+ // [6] add indicator
164
+ // [7] indicates negative divisor
165
+ // magic number of 0 indicates shift path
166
+ //
167
+ // u64: [0-5] shift value
168
+ // [6] add indicator
169
+ // magic number of 0 indicates shift path
170
+ //
171
+ // s64: [0-5] shift value
172
+ // [6] add indicator
173
+ // [7] indicates negative divisor
174
+ // magic number of 0 indicates shift path
175
+ //
176
+ // In s32 and s64 branchfree modes, the magic number is negated according to
177
+ // whether the divisor is negated. In branchfree strategy, it is not negated.
178
+
179
+ enum {
180
+ LIBDIVIDE_32_SHIFT_MASK = 0x1F,
181
+ LIBDIVIDE_64_SHIFT_MASK = 0x3F,
182
+ LIBDIVIDE_ADD_MARKER = 0x40,
183
+ LIBDIVIDE_NEGATIVE_DIVISOR = 0x80
184
+ };
185
+
186
+ static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d);
187
+ static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d);
188
+ static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d);
189
+ static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d);
190
+
191
+ static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d);
192
+ static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d);
193
+ static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d);
194
+ static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d);
195
+
196
+ static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom);
197
+ static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom);
198
+ static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom);
199
+ static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom);
200
+
201
+ static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom);
202
+ static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom);
203
+ static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom);
204
+ static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom);
205
+
206
+ static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom);
207
+ static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom);
208
+ static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom);
209
+ static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom);
210
+
211
+ static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom);
212
+ static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom);
213
+ static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom);
214
+ static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom);
215
+
216
+ //////// Internal Utility Functions
217
+
218
+ static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) {
219
+ uint64_t xl = x, yl = y;
220
+ uint64_t rl = xl * yl;
221
+ return (uint32_t)(rl >> 32);
222
+ }
223
+
224
+ static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) {
225
+ int64_t xl = x, yl = y;
226
+ int64_t rl = xl * yl;
227
+ // needs to be arithmetic shift
228
+ return (int32_t)(rl >> 32);
229
+ }
230
+
231
+ static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) {
232
+ #if defined(LIBDIVIDE_VC) && \
233
+ defined(LIBDIVIDE_X86_64)
234
+ return __umulh(x, y);
235
+ #elif defined(HAS_INT128_T)
236
+ __uint128_t xl = x, yl = y;
237
+ __uint128_t rl = xl * yl;
238
+ return (uint64_t)(rl >> 64);
239
+ #else
240
+ // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
241
+ uint32_t mask = 0xFFFFFFFF;
242
+ uint32_t x0 = (uint32_t)(x & mask);
243
+ uint32_t x1 = (uint32_t)(x >> 32);
244
+ uint32_t y0 = (uint32_t)(y & mask);
245
+ uint32_t y1 = (uint32_t)(y >> 32);
246
+ uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0);
247
+ uint64_t x0y1 = x0 * (uint64_t)y1;
248
+ uint64_t x1y0 = x1 * (uint64_t)y0;
249
+ uint64_t x1y1 = x1 * (uint64_t)y1;
250
+ uint64_t temp = x1y0 + x0y0_hi;
251
+ uint64_t temp_lo = temp & mask;
252
+ uint64_t temp_hi = temp >> 32;
253
+
254
+ return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32);
255
+ #endif
256
+ }
257
+
258
+ static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) {
259
+ #if defined(LIBDIVIDE_VC) && \
260
+ defined(LIBDIVIDE_X86_64)
261
+ return __mulh(x, y);
262
+ #elif defined(HAS_INT128_T)
263
+ __int128_t xl = x, yl = y;
264
+ __int128_t rl = xl * yl;
265
+ return (int64_t)(rl >> 64);
266
+ #else
267
+ // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
268
+ uint32_t mask = 0xFFFFFFFF;
269
+ uint32_t x0 = (uint32_t)(x & mask);
270
+ uint32_t y0 = (uint32_t)(y & mask);
271
+ int32_t x1 = (int32_t)(x >> 32);
272
+ int32_t y1 = (int32_t)(y >> 32);
273
+ uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0);
274
+ int64_t t = x1 * (int64_t)y0 + x0y0_hi;
275
+ int64_t w1 = x0 * (int64_t)y1 + (t & mask);
276
+
277
+ return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32);
278
+ #endif
279
+ }
280
+
281
+ static inline int32_t libdivide_count_leading_zeros32(uint32_t val) {
282
+ #if defined(__GNUC__) || \
283
+ __has_builtin(__builtin_clz)
284
+ // Fast way to count leading zeros
285
+ return __builtin_clz(val);
286
+ #elif defined(LIBDIVIDE_VC)
287
+ unsigned long result;
288
+ if (_BitScanReverse(&result, val)) {
289
+ return 31 - result;
290
+ }
291
+ return 0;
292
+ #else
293
+ if (val == 0)
294
+ return 32;
295
+ int32_t result = 8;
296
+ uint32_t hi = 0xFFU << 24;
297
+ while ((val & hi) == 0) {
298
+ hi >>= 8;
299
+ result += 8;
300
+ }
301
+ while (val & hi) {
302
+ result -= 1;
303
+ hi <<= 1;
304
+ }
305
+ return result;
306
+ #endif
307
+ }
308
+
309
+ static inline int32_t libdivide_count_leading_zeros64(uint64_t val) {
310
+ #if defined(__GNUC__) || \
311
+ __has_builtin(__builtin_clzll)
312
+ // Fast way to count leading zeros
313
+ return __builtin_clzll(val);
314
+ #elif defined(LIBDIVIDE_VC) && defined(_WIN64)
315
+ unsigned long result;
316
+ if (_BitScanReverse64(&result, val)) {
317
+ return 63 - result;
318
+ }
319
+ return 0;
320
+ #else
321
+ uint32_t hi = val >> 32;
322
+ uint32_t lo = val & 0xFFFFFFFF;
323
+ if (hi != 0) return libdivide_count_leading_zeros32(hi);
324
+ return 32 + libdivide_count_leading_zeros32(lo);
325
+ #endif
326
+ }
327
+
328
+ // libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit
329
+ // uint {v}. The result must fit in 32 bits.
330
+ // Returns the quotient directly and the remainder in *r
331
+ static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) {
332
+ #if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \
333
+ defined(LIBDIVIDE_GCC_STYLE_ASM)
334
+ uint32_t result;
335
+ __asm__("divl %[v]"
336
+ : "=a"(result), "=d"(*r)
337
+ : [v] "r"(v), "a"(u0), "d"(u1)
338
+ );
339
+ return result;
340
+ #else
341
+ uint64_t n = ((uint64_t)u1 << 32) | u0;
342
+ uint32_t result = (uint32_t)(n / v);
343
+ *r = (uint32_t)(n - result * (uint64_t)v);
344
+ return result;
345
+ #endif
346
+ }
347
+
348
+ // libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit
349
+ // uint {v}. The result must fit in 64 bits.
350
+ // Returns the quotient directly and the remainder in *r
351
+ static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) {
352
+ #if defined(LIBDIVIDE_X86_64) && \
353
+ defined(LIBDIVIDE_GCC_STYLE_ASM)
354
+ uint64_t result;
355
+ __asm__("divq %[v]"
356
+ : "=a"(result), "=d"(*r)
357
+ : [v] "r"(v), "a"(u0), "d"(u1)
358
+ );
359
+ return result;
360
+ #elif defined(HAS_INT128_T) && \
361
+ defined(HAS_INT128_DIV)
362
+ __uint128_t n = ((__uint128_t)u1 << 64) | u0;
363
+ uint64_t result = (uint64_t)(n / v);
364
+ *r = (uint64_t)(n - result * (__uint128_t)v);
365
+ return result;
366
+ #else
367
+ // Code taken from Hacker's Delight:
368
+ // http://www.hackersdelight.org/HDcode/divlu.c.
369
+ // License permits inclusion here per:
370
+ // http://www.hackersdelight.org/permissions.htm
371
+
372
+ const uint64_t b = (1ULL << 32); // Number base (32 bits)
373
+ uint64_t un1, un0; // Norm. dividend LSD's
374
+ uint64_t vn1, vn0; // Norm. divisor digits
375
+ uint64_t q1, q0; // Quotient digits
376
+ uint64_t un64, un21, un10; // Dividend digit pairs
377
+ uint64_t rhat; // A remainder
378
+ int32_t s; // Shift amount for norm
379
+
380
+ // If overflow, set rem. to an impossible value,
381
+ // and return the largest possible quotient
382
+ if (u1 >= v) {
383
+ *r = (uint64_t) -1;
384
+ return (uint64_t) -1;
385
+ }
386
+
387
+ // count leading zeros
388
+ s = libdivide_count_leading_zeros64(v);
389
+ if (s > 0) {
390
+ // Normalize divisor
391
+ v = v << s;
392
+ un64 = (u1 << s) | (u0 >> (64 - s));
393
+ un10 = u0 << s; // Shift dividend left
394
+ } else {
395
+ // Avoid undefined behavior of (u0 >> 64).
396
+ // The behavior is undefined if the right operand is
397
+ // negative, or greater than or equal to the length
398
+ // in bits of the promoted left operand.
399
+ un64 = u1;
400
+ un10 = u0;
401
+ }
402
+
403
+ // Break divisor up into two 32-bit digits
404
+ vn1 = v >> 32;
405
+ vn0 = v & 0xFFFFFFFF;
406
+
407
+ // Break right half of dividend into two digits
408
+ un1 = un10 >> 32;
409
+ un0 = un10 & 0xFFFFFFFF;
410
+
411
+ // Compute the first quotient digit, q1
412
+ q1 = un64 / vn1;
413
+ rhat = un64 - q1 * vn1;
414
+
415
+ while (q1 >= b || q1 * vn0 > b * rhat + un1) {
416
+ q1 = q1 - 1;
417
+ rhat = rhat + vn1;
418
+ if (rhat >= b)
419
+ break;
420
+ }
421
+
422
+ // Multiply and subtract
423
+ un21 = un64 * b + un1 - q1 * v;
424
+
425
+ // Compute the second quotient digit
426
+ q0 = un21 / vn1;
427
+ rhat = un21 - q0 * vn1;
428
+
429
+ while (q0 >= b || q0 * vn0 > b * rhat + un0) {
430
+ q0 = q0 - 1;
431
+ rhat = rhat + vn1;
432
+ if (rhat >= b)
433
+ break;
434
+ }
435
+
436
+ *r = (un21 * b + un0 - q0 * v) >> s;
437
+ return q1 * b + q0;
438
+ #endif
439
+ }
440
+
441
+ // Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0)
442
+ static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) {
443
+ if (signed_shift > 0) {
444
+ uint32_t shift = signed_shift;
445
+ *u1 <<= shift;
446
+ *u1 |= *u0 >> (64 - shift);
447
+ *u0 <<= shift;
448
+ }
449
+ else if (signed_shift < 0) {
450
+ uint32_t shift = -signed_shift;
451
+ *u0 >>= shift;
452
+ *u0 |= *u1 << (64 - shift);
453
+ *u1 >>= shift;
454
+ }
455
+ }
456
+
457
+ // Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder.
458
+ static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) {
459
+ #if defined(HAS_INT128_T) && \
460
+ defined(HAS_INT128_DIV)
461
+ __uint128_t ufull = u_hi;
462
+ __uint128_t vfull = v_hi;
463
+ ufull = (ufull << 64) | u_lo;
464
+ vfull = (vfull << 64) | v_lo;
465
+ uint64_t res = (uint64_t)(ufull / vfull);
466
+ __uint128_t remainder = ufull - (vfull * res);
467
+ *r_lo = (uint64_t)remainder;
468
+ *r_hi = (uint64_t)(remainder >> 64);
469
+ return res;
470
+ #else
471
+ // Adapted from "Unsigned Doubleword Division" in Hacker's Delight
472
+ // We want to compute u / v
473
+ typedef struct { uint64_t hi; uint64_t lo; } u128_t;
474
+ u128_t u = {u_hi, u_lo};
475
+ u128_t v = {v_hi, v_lo};
476
+
477
+ if (v.hi == 0) {
478
+ // divisor v is a 64 bit value, so we just need one 128/64 division
479
+ // Note that we are simpler than Hacker's Delight here, because we know
480
+ // the quotient fits in 64 bits whereas Hacker's Delight demands a full
481
+ // 128 bit quotient
482
+ *r_hi = 0;
483
+ return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo);
484
+ }
485
+ // Here v >= 2**64
486
+ // We know that v.hi != 0, so count leading zeros is OK
487
+ // We have 0 <= n <= 63
488
+ uint32_t n = libdivide_count_leading_zeros64(v.hi);
489
+
490
+ // Normalize the divisor so its MSB is 1
491
+ u128_t v1t = v;
492
+ libdivide_u128_shift(&v1t.hi, &v1t.lo, n);
493
+ uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64
494
+
495
+ // To ensure no overflow
496
+ u128_t u1 = u;
497
+ libdivide_u128_shift(&u1.hi, &u1.lo, -1);
498
+
499
+ // Get quotient from divide unsigned insn.
500
+ uint64_t rem_ignored;
501
+ uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored);
502
+
503
+ // Undo normalization and division of u by 2.
504
+ u128_t q0 = {0, q1};
505
+ libdivide_u128_shift(&q0.hi, &q0.lo, n);
506
+ libdivide_u128_shift(&q0.hi, &q0.lo, -63);
507
+
508
+ // Make q0 correct or too small by 1
509
+ // Equivalent to `if (q0 != 0) q0 = q0 - 1;`
510
+ if (q0.hi != 0 || q0.lo != 0) {
511
+ q0.hi -= (q0.lo == 0); // borrow
512
+ q0.lo -= 1;
513
+ }
514
+
515
+ // Now q0 is correct.
516
+ // Compute q0 * v as q0v
517
+ // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo)
518
+ // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) +
519
+ // (q0.lo * v.hi << 64) + q0.lo * v.lo)
520
+ // Each term is 128 bit
521
+ // High half of full product (upper 128 bits!) are dropped
522
+ u128_t q0v = {0, 0};
523
+ q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo);
524
+ q0v.lo = q0.lo*v.lo;
525
+
526
+ // Compute u - q0v as u_q0v
527
+ // This is the remainder
528
+ u128_t u_q0v = u;
529
+ u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow
530
+ u_q0v.lo -= q0v.lo;
531
+
532
+ // Check if u_q0v >= v
533
+ // This checks if our remainder is larger than the divisor
534
+ if ((u_q0v.hi > v.hi) ||
535
+ (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) {
536
+ // Increment q0
537
+ q0.lo += 1;
538
+ q0.hi += (q0.lo == 0); // carry
539
+
540
+ // Subtract v from remainder
541
+ u_q0v.hi -= v.hi + (u_q0v.lo < v.lo);
542
+ u_q0v.lo -= v.lo;
543
+ }
544
+
545
+ *r_hi = u_q0v.hi;
546
+ *r_lo = u_q0v.lo;
547
+
548
+ LIBDIVIDE_ASSERT(q0.hi == 0);
549
+ return q0.lo;
550
+ #endif
551
+ }
552
+
553
+ ////////// UINT32
554
+
555
+ static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) {
556
+ if (d == 0) {
557
+ LIBDIVIDE_ERROR("divider must be != 0");
558
+ }
559
+
560
+ struct libdivide_u32_t result;
561
+ uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d);
562
+
563
+ // Power of 2
564
+ if ((d & (d - 1)) == 0) {
565
+ // We need to subtract 1 from the shift value in case of an unsigned
566
+ // branchfree divider because there is a hardcoded right shift by 1
567
+ // in its division algorithm. Because of this we also need to add back
568
+ // 1 in its recovery algorithm.
569
+ result.magic = 0;
570
+ result.more = (uint8_t)(floor_log_2_d - (branchfree != 0));
571
+ } else {
572
+ uint8_t more;
573
+ uint32_t rem, proposed_m;
574
+ proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem);
575
+
576
+ LIBDIVIDE_ASSERT(rem > 0 && rem < d);
577
+ const uint32_t e = d - rem;
578
+
579
+ // This power works if e < 2**floor_log_2_d.
580
+ if (!branchfree && (e < (1U << floor_log_2_d))) {
581
+ // This power works
582
+ more = floor_log_2_d;
583
+ } else {
584
+ // We have to use the general 33-bit algorithm. We need to compute
585
+ // (2**power) / d. However, we already have (2**(power-1))/d and
586
+ // its remainder. By doubling both, and then correcting the
587
+ // remainder, we can compute the larger division.
588
+ // don't care about overflow here - in fact, we expect it
589
+ proposed_m += proposed_m;
590
+ const uint32_t twice_rem = rem + rem;
591
+ if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
592
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
593
+ }
594
+ result.magic = 1 + proposed_m;
595
+ result.more = more;
596
+ // result.more's shift should in general be ceil_log_2_d. But if we
597
+ // used the smaller power, we subtract one from the shift because we're
598
+ // using the smaller power. If we're using the larger power, we
599
+ // subtract one from the shift because it's taken care of by the add
600
+ // indicator. So floor_log_2_d happens to be correct in both cases.
601
+ }
602
+ return result;
603
+ }
604
+
605
+ struct libdivide_u32_t libdivide_u32_gen(uint32_t d) {
606
+ return libdivide_internal_u32_gen(d, 0);
607
+ }
608
+
609
+ struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) {
610
+ if (d == 1) {
611
+ LIBDIVIDE_ERROR("branchfree divider must be != 1");
612
+ }
613
+ struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1);
614
+ struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)};
615
+ return ret;
616
+ }
617
+
618
+ uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) {
619
+ uint8_t more = denom->more;
620
+ if (!denom->magic) {
621
+ return numer >> more;
622
+ }
623
+ else {
624
+ uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
625
+ if (more & LIBDIVIDE_ADD_MARKER) {
626
+ uint32_t t = ((numer - q) >> 1) + q;
627
+ return t >> (more & LIBDIVIDE_32_SHIFT_MASK);
628
+ }
629
+ else {
630
+ // All upper bits are 0,
631
+ // don't need to mask them off.
632
+ return q >> more;
633
+ }
634
+ }
635
+ }
636
+
637
+ uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) {
638
+ uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
639
+ uint32_t t = ((numer - q) >> 1) + q;
640
+ return t >> denom->more;
641
+ }
642
+
643
+ uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) {
644
+ uint8_t more = denom->more;
645
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
646
+
647
+ if (!denom->magic) {
648
+ return 1U << shift;
649
+ } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
650
+ // We compute q = n/d = n*m / 2^(32 + shift)
651
+ // Therefore we have d = 2^(32 + shift) / m
652
+ // We need to ceil it.
653
+ // We know d is not a power of 2, so m is not a power of 2,
654
+ // so we can just add 1 to the floor
655
+ uint32_t hi_dividend = 1U << shift;
656
+ uint32_t rem_ignored;
657
+ return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored);
658
+ } else {
659
+ // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
660
+ // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
661
+ // Also note that shift may be as high as 31, so shift + 1 will
662
+ // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
663
+ // then double the quotient and remainder.
664
+ uint64_t half_n = 1ULL << (32 + shift);
665
+ uint64_t d = (1ULL << 32) | denom->magic;
666
+ // Note that the quotient is guaranteed <= 32 bits, but the remainder
667
+ // may need 33!
668
+ uint32_t half_q = (uint32_t)(half_n / d);
669
+ uint64_t rem = half_n % d;
670
+ // We computed 2^(32+shift)/(m+2^32)
671
+ // Need to double it, and then add 1 to the quotient if doubling th
672
+ // remainder would increase the quotient.
673
+ // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
674
+ uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
675
+
676
+ // We rounded down in gen (hence +1)
677
+ return full_q + 1;
678
+ }
679
+ }
680
+
681
+ uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) {
682
+ uint8_t more = denom->more;
683
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
684
+
685
+ if (!denom->magic) {
686
+ return 1U << (shift + 1);
687
+ } else {
688
+ // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
689
+ // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
690
+ // Also note that shift may be as high as 31, so shift + 1 will
691
+ // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
692
+ // then double the quotient and remainder.
693
+ uint64_t half_n = 1ULL << (32 + shift);
694
+ uint64_t d = (1ULL << 32) | denom->magic;
695
+ // Note that the quotient is guaranteed <= 32 bits, but the remainder
696
+ // may need 33!
697
+ uint32_t half_q = (uint32_t)(half_n / d);
698
+ uint64_t rem = half_n % d;
699
+ // We computed 2^(32+shift)/(m+2^32)
700
+ // Need to double it, and then add 1 to the quotient if doubling th
701
+ // remainder would increase the quotient.
702
+ // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
703
+ uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
704
+
705
+ // We rounded down in gen (hence +1)
706
+ return full_q + 1;
707
+ }
708
+ }
709
+
710
+ /////////// UINT64
711
+
712
+ static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) {
713
+ if (d == 0) {
714
+ LIBDIVIDE_ERROR("divider must be != 0");
715
+ }
716
+
717
+ struct libdivide_u64_t result;
718
+ uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d);
719
+
720
+ // Power of 2
721
+ if ((d & (d - 1)) == 0) {
722
+ // We need to subtract 1 from the shift value in case of an unsigned
723
+ // branchfree divider because there is a hardcoded right shift by 1
724
+ // in its division algorithm. Because of this we also need to add back
725
+ // 1 in its recovery algorithm.
726
+ result.magic = 0;
727
+ result.more = (uint8_t)(floor_log_2_d - (branchfree != 0));
728
+ } else {
729
+ uint64_t proposed_m, rem;
730
+ uint8_t more;
731
+ // (1 << (64 + floor_log_2_d)) / d
732
+ proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem);
733
+
734
+ LIBDIVIDE_ASSERT(rem > 0 && rem < d);
735
+ const uint64_t e = d - rem;
736
+
737
+ // This power works if e < 2**floor_log_2_d.
738
+ if (!branchfree && e < (1ULL << floor_log_2_d)) {
739
+ // This power works
740
+ more = floor_log_2_d;
741
+ } else {
742
+ // We have to use the general 65-bit algorithm. We need to compute
743
+ // (2**power) / d. However, we already have (2**(power-1))/d and
744
+ // its remainder. By doubling both, and then correcting the
745
+ // remainder, we can compute the larger division.
746
+ // don't care about overflow here - in fact, we expect it
747
+ proposed_m += proposed_m;
748
+ const uint64_t twice_rem = rem + rem;
749
+ if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
750
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
751
+ }
752
+ result.magic = 1 + proposed_m;
753
+ result.more = more;
754
+ // result.more's shift should in general be ceil_log_2_d. But if we
755
+ // used the smaller power, we subtract one from the shift because we're
756
+ // using the smaller power. If we're using the larger power, we
757
+ // subtract one from the shift because it's taken care of by the add
758
+ // indicator. So floor_log_2_d happens to be correct in both cases,
759
+ // which is why we do it outside of the if statement.
760
+ }
761
+ return result;
762
+ }
763
+
764
+ struct libdivide_u64_t libdivide_u64_gen(uint64_t d) {
765
+ return libdivide_internal_u64_gen(d, 0);
766
+ }
767
+
768
+ struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) {
769
+ if (d == 1) {
770
+ LIBDIVIDE_ERROR("branchfree divider must be != 1");
771
+ }
772
+ struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1);
773
+ struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)};
774
+ return ret;
775
+ }
776
+
777
+ uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) {
778
+ uint8_t more = denom->more;
779
+ if (!denom->magic) {
780
+ return numer >> more;
781
+ }
782
+ else {
783
+ uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
784
+ if (more & LIBDIVIDE_ADD_MARKER) {
785
+ uint64_t t = ((numer - q) >> 1) + q;
786
+ return t >> (more & LIBDIVIDE_64_SHIFT_MASK);
787
+ }
788
+ else {
789
+ // All upper bits are 0,
790
+ // don't need to mask them off.
791
+ return q >> more;
792
+ }
793
+ }
794
+ }
795
+
796
+ uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) {
797
+ uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
798
+ uint64_t t = ((numer - q) >> 1) + q;
799
+ return t >> denom->more;
800
+ }
801
+
802
+ uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) {
803
+ uint8_t more = denom->more;
804
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
805
+
806
+ if (!denom->magic) {
807
+ return 1ULL << shift;
808
+ } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
809
+ // We compute q = n/d = n*m / 2^(64 + shift)
810
+ // Therefore we have d = 2^(64 + shift) / m
811
+ // We need to ceil it.
812
+ // We know d is not a power of 2, so m is not a power of 2,
813
+ // so we can just add 1 to the floor
814
+ uint64_t hi_dividend = 1ULL << shift;
815
+ uint64_t rem_ignored;
816
+ return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored);
817
+ } else {
818
+ // Here we wish to compute d = 2^(64+shift+1)/(m+2^64).
819
+ // Notice (m + 2^64) is a 65 bit number. This gets hairy. See
820
+ // libdivide_u32_recover for more on what we do here.
821
+ // TODO: do something better than 128 bit math
822
+
823
+ // Full n is a (potentially) 129 bit value
824
+ // half_n is a 128 bit value
825
+ // Compute the hi half of half_n. Low half is 0.
826
+ uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0;
827
+ // d is a 65 bit value. The high bit is always set to 1.
828
+ const uint64_t d_hi = 1, d_lo = denom->magic;
829
+ // Note that the quotient is guaranteed <= 64 bits,
830
+ // but the remainder may need 65!
831
+ uint64_t r_hi, r_lo;
832
+ uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo);
833
+ // We computed 2^(64+shift)/(m+2^64)
834
+ // Double the remainder ('dr') and check if that is larger than d
835
+ // Note that d is a 65 bit value, so r1 is small and so r1 + r1
836
+ // cannot overflow
837
+ uint64_t dr_lo = r_lo + r_lo;
838
+ uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry
839
+ int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo);
840
+ uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0);
841
+ return full_q + 1;
842
+ }
843
+ }
844
+
845
+ uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) {
846
+ uint8_t more = denom->more;
847
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
848
+
849
+ if (!denom->magic) {
850
+ return 1ULL << (shift + 1);
851
+ } else {
852
+ // Here we wish to compute d = 2^(64+shift+1)/(m+2^64).
853
+ // Notice (m + 2^64) is a 65 bit number. This gets hairy. See
854
+ // libdivide_u32_recover for more on what we do here.
855
+ // TODO: do something better than 128 bit math
856
+
857
+ // Full n is a (potentially) 129 bit value
858
+ // half_n is a 128 bit value
859
+ // Compute the hi half of half_n. Low half is 0.
860
+ uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0;
861
+ // d is a 65 bit value. The high bit is always set to 1.
862
+ const uint64_t d_hi = 1, d_lo = denom->magic;
863
+ // Note that the quotient is guaranteed <= 64 bits,
864
+ // but the remainder may need 65!
865
+ uint64_t r_hi, r_lo;
866
+ uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo);
867
+ // We computed 2^(64+shift)/(m+2^64)
868
+ // Double the remainder ('dr') and check if that is larger than d
869
+ // Note that d is a 65 bit value, so r1 is small and so r1 + r1
870
+ // cannot overflow
871
+ uint64_t dr_lo = r_lo + r_lo;
872
+ uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry
873
+ int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo);
874
+ uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0);
875
+ return full_q + 1;
876
+ }
877
+ }
878
+
879
+ /////////// SINT32
880
+
881
+ static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) {
882
+ if (d == 0) {
883
+ LIBDIVIDE_ERROR("divider must be != 0");
884
+ }
885
+
886
+ struct libdivide_s32_t result;
887
+
888
+ // If d is a power of 2, or negative a power of 2, we have to use a shift.
889
+ // This is especially important because the magic algorithm fails for -1.
890
+ // To check if d is a power of 2 or its inverse, it suffices to check
891
+ // whether its absolute value has exactly one bit set. This works even for
892
+ // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
893
+ // and is a power of 2.
894
+ uint32_t ud = (uint32_t)d;
895
+ uint32_t absD = (d < 0) ? -ud : ud;
896
+ uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD);
897
+ // check if exactly one bit is set,
898
+ // don't care if absD is 0 since that's divide by zero
899
+ if ((absD & (absD - 1)) == 0) {
900
+ // Branchfree and normal paths are exactly the same
901
+ result.magic = 0;
902
+ result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
903
+ } else {
904
+ LIBDIVIDE_ASSERT(floor_log_2_d >= 1);
905
+
906
+ uint8_t more;
907
+ // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word
908
+ // is 0 and the high word is floor_log_2_d - 1
909
+ uint32_t rem, proposed_m;
910
+ proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem);
911
+ const uint32_t e = absD - rem;
912
+
913
+ // We are going to start with a power of floor_log_2_d - 1.
914
+ // This works if works if e < 2**floor_log_2_d.
915
+ if (!branchfree && e < (1U << floor_log_2_d)) {
916
+ // This power works
917
+ more = floor_log_2_d - 1;
918
+ } else {
919
+ // We need to go one higher. This should not make proposed_m
920
+ // overflow, but it will make it negative when interpreted as an
921
+ // int32_t.
922
+ proposed_m += proposed_m;
923
+ const uint32_t twice_rem = rem + rem;
924
+ if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
925
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
926
+ }
927
+
928
+ proposed_m += 1;
929
+ int32_t magic = (int32_t)proposed_m;
930
+
931
+ // Mark if we are negative. Note we only negate the magic number in the
932
+ // branchfull case.
933
+ if (d < 0) {
934
+ more |= LIBDIVIDE_NEGATIVE_DIVISOR;
935
+ if (!branchfree) {
936
+ magic = -magic;
937
+ }
938
+ }
939
+
940
+ result.more = more;
941
+ result.magic = magic;
942
+ }
943
+ return result;
944
+ }
945
+
946
+ struct libdivide_s32_t libdivide_s32_gen(int32_t d) {
947
+ return libdivide_internal_s32_gen(d, 0);
948
+ }
949
+
950
+ struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) {
951
+ struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1);
952
+ struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more};
953
+ return result;
954
+ }
955
+
956
+ int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
957
+ uint8_t more = denom->more;
958
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
959
+
960
+ if (!denom->magic) {
961
+ uint32_t sign = (int8_t)more >> 7;
962
+ uint32_t mask = (1U << shift) - 1;
963
+ uint32_t uq = numer + ((numer >> 31) & mask);
964
+ int32_t q = (int32_t)uq;
965
+ q >>= shift;
966
+ q = (q ^ sign) - sign;
967
+ return q;
968
+ } else {
969
+ uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer);
970
+ if (more & LIBDIVIDE_ADD_MARKER) {
971
+ // must be arithmetic shift and then sign extend
972
+ int32_t sign = (int8_t)more >> 7;
973
+ // q += (more < 0 ? -numer : numer)
974
+ // cast required to avoid UB
975
+ uq += ((uint32_t)numer ^ sign) - sign;
976
+ }
977
+ int32_t q = (int32_t)uq;
978
+ q >>= shift;
979
+ q += (q < 0);
980
+ return q;
981
+ }
982
+ }
983
+
984
+ int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) {
985
+ uint8_t more = denom->more;
986
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
987
+ // must be arithmetic shift and then sign extend
988
+ int32_t sign = (int8_t)more >> 7;
989
+ int32_t magic = denom->magic;
990
+ int32_t q = libdivide_mullhi_s32(magic, numer);
991
+ q += numer;
992
+
993
+ // If q is non-negative, we have nothing to do
994
+ // If q is negative, we want to add either (2**shift)-1 if d is a power of
995
+ // 2, or (2**shift) if it is not a power of 2
996
+ uint32_t is_power_of_2 = (magic == 0);
997
+ uint32_t q_sign = (uint32_t)(q >> 31);
998
+ q += q_sign & ((1U << shift) - is_power_of_2);
999
+
1000
+ // Now arithmetic right shift
1001
+ q >>= shift;
1002
+ // Negate if needed
1003
+ q = (q ^ sign) - sign;
1004
+
1005
+ return q;
1006
+ }
1007
+
1008
+ int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) {
1009
+ uint8_t more = denom->more;
1010
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1011
+ if (!denom->magic) {
1012
+ uint32_t absD = 1U << shift;
1013
+ if (more & LIBDIVIDE_NEGATIVE_DIVISOR) {
1014
+ absD = -absD;
1015
+ }
1016
+ return (int32_t)absD;
1017
+ } else {
1018
+ // Unsigned math is much easier
1019
+ // We negate the magic number only in the branchfull case, and we don't
1020
+ // know which case we're in. However we have enough information to
1021
+ // determine the correct sign of the magic number. The divisor was
1022
+ // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set,
1023
+ // the magic number's sign is opposite that of the divisor.
1024
+ // We want to compute the positive magic number.
1025
+ int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR);
1026
+ int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER)
1027
+ ? denom->magic > 0 : denom->magic < 0;
1028
+
1029
+ // Handle the power of 2 case (including branchfree)
1030
+ if (denom->magic == 0) {
1031
+ int32_t result = 1U << shift;
1032
+ return negative_divisor ? -result : result;
1033
+ }
1034
+
1035
+ uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic);
1036
+ uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30
1037
+ uint32_t q = (uint32_t)(n / d);
1038
+ int32_t result = (int32_t)q;
1039
+ result += 1;
1040
+ return negative_divisor ? -result : result;
1041
+ }
1042
+ }
1043
+
1044
+ int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) {
1045
+ return libdivide_s32_recover((const struct libdivide_s32_t *)denom);
1046
+ }
1047
+
1048
+ ///////////// SINT64
1049
+
1050
+ static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) {
1051
+ if (d == 0) {
1052
+ LIBDIVIDE_ERROR("divider must be != 0");
1053
+ }
1054
+
1055
+ struct libdivide_s64_t result;
1056
+
1057
+ // If d is a power of 2, or negative a power of 2, we have to use a shift.
1058
+ // This is especially important because the magic algorithm fails for -1.
1059
+ // To check if d is a power of 2 or its inverse, it suffices to check
1060
+ // whether its absolute value has exactly one bit set. This works even for
1061
+ // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
1062
+ // and is a power of 2.
1063
+ uint64_t ud = (uint64_t)d;
1064
+ uint64_t absD = (d < 0) ? -ud : ud;
1065
+ uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD);
1066
+ // check if exactly one bit is set,
1067
+ // don't care if absD is 0 since that's divide by zero
1068
+ if ((absD & (absD - 1)) == 0) {
1069
+ // Branchfree and non-branchfree cases are the same
1070
+ result.magic = 0;
1071
+ result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
1072
+ } else {
1073
+ // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word
1074
+ // is 0 and the high word is floor_log_2_d - 1
1075
+ uint8_t more;
1076
+ uint64_t rem, proposed_m;
1077
+ proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem);
1078
+ const uint64_t e = absD - rem;
1079
+
1080
+ // We are going to start with a power of floor_log_2_d - 1.
1081
+ // This works if works if e < 2**floor_log_2_d.
1082
+ if (!branchfree && e < (1ULL << floor_log_2_d)) {
1083
+ // This power works
1084
+ more = floor_log_2_d - 1;
1085
+ } else {
1086
+ // We need to go one higher. This should not make proposed_m
1087
+ // overflow, but it will make it negative when interpreted as an
1088
+ // int32_t.
1089
+ proposed_m += proposed_m;
1090
+ const uint64_t twice_rem = rem + rem;
1091
+ if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
1092
+ // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we
1093
+ // also set ADD_MARKER this is an annoying optimization that
1094
+ // enables algorithm #4 to avoid the mask. However we always set it
1095
+ // in the branchfree case
1096
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
1097
+ }
1098
+ proposed_m += 1;
1099
+ int64_t magic = (int64_t)proposed_m;
1100
+
1101
+ // Mark if we are negative
1102
+ if (d < 0) {
1103
+ more |= LIBDIVIDE_NEGATIVE_DIVISOR;
1104
+ if (!branchfree) {
1105
+ magic = -magic;
1106
+ }
1107
+ }
1108
+
1109
+ result.more = more;
1110
+ result.magic = magic;
1111
+ }
1112
+ return result;
1113
+ }
1114
+
1115
+ struct libdivide_s64_t libdivide_s64_gen(int64_t d) {
1116
+ return libdivide_internal_s64_gen(d, 0);
1117
+ }
1118
+
1119
+ struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) {
1120
+ struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1);
1121
+ struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more};
1122
+ return ret;
1123
+ }
1124
+
1125
+ int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
1126
+ uint8_t more = denom->more;
1127
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1128
+
1129
+ if (!denom->magic) { // shift path
1130
+ uint64_t mask = (1ULL << shift) - 1;
1131
+ uint64_t uq = numer + ((numer >> 63) & mask);
1132
+ int64_t q = (int64_t)uq;
1133
+ q >>= shift;
1134
+ // must be arithmetic shift and then sign-extend
1135
+ int64_t sign = (int8_t)more >> 7;
1136
+ q = (q ^ sign) - sign;
1137
+ return q;
1138
+ } else {
1139
+ uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer);
1140
+ if (more & LIBDIVIDE_ADD_MARKER) {
1141
+ // must be arithmetic shift and then sign extend
1142
+ int64_t sign = (int8_t)more >> 7;
1143
+ // q += (more < 0 ? -numer : numer)
1144
+ // cast required to avoid UB
1145
+ uq += ((uint64_t)numer ^ sign) - sign;
1146
+ }
1147
+ int64_t q = (int64_t)uq;
1148
+ q >>= shift;
1149
+ q += (q < 0);
1150
+ return q;
1151
+ }
1152
+ }
1153
+
1154
+ int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) {
1155
+ uint8_t more = denom->more;
1156
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1157
+ // must be arithmetic shift and then sign extend
1158
+ int64_t sign = (int8_t)more >> 7;
1159
+ int64_t magic = denom->magic;
1160
+ int64_t q = libdivide_mullhi_s64(magic, numer);
1161
+ q += numer;
1162
+
1163
+ // If q is non-negative, we have nothing to do.
1164
+ // If q is negative, we want to add either (2**shift)-1 if d is a power of
1165
+ // 2, or (2**shift) if it is not a power of 2.
1166
+ uint64_t is_power_of_2 = (magic == 0);
1167
+ uint64_t q_sign = (uint64_t)(q >> 63);
1168
+ q += q_sign & ((1ULL << shift) - is_power_of_2);
1169
+
1170
+ // Arithmetic right shift
1171
+ q >>= shift;
1172
+ // Negate if needed
1173
+ q = (q ^ sign) - sign;
1174
+
1175
+ return q;
1176
+ }
1177
+
1178
+ int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) {
1179
+ uint8_t more = denom->more;
1180
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1181
+ if (denom->magic == 0) { // shift path
1182
+ uint64_t absD = 1ULL << shift;
1183
+ if (more & LIBDIVIDE_NEGATIVE_DIVISOR) {
1184
+ absD = -absD;
1185
+ }
1186
+ return (int64_t)absD;
1187
+ } else {
1188
+ // Unsigned math is much easier
1189
+ int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR);
1190
+ int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER)
1191
+ ? denom->magic > 0 : denom->magic < 0;
1192
+
1193
+ uint64_t d = (uint64_t)(magic_was_negated ? -denom->magic : denom->magic);
1194
+ uint64_t n_hi = 1ULL << shift, n_lo = 0;
1195
+ uint64_t rem_ignored;
1196
+ uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored);
1197
+ int64_t result = (int64_t)(q + 1);
1198
+ if (negative_divisor) {
1199
+ result = -result;
1200
+ }
1201
+ return result;
1202
+ }
1203
+ }
1204
+
1205
+ int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) {
1206
+ return libdivide_s64_recover((const struct libdivide_s64_t *)denom);
1207
+ }
1208
+
1209
+ #if defined(LIBDIVIDE_AVX512)
1210
+
1211
+ static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom);
1212
+ static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom);
1213
+ static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom);
1214
+ static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom);
1215
+
1216
+ static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom);
1217
+ static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom);
1218
+ static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom);
1219
+ static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom);
1220
+
1221
+ //////// Internal Utility Functions
1222
+
1223
+ static inline __m512i libdivide_s64_signbits(__m512i v) {;
1224
+ return _mm512_srai_epi64(v, 63);
1225
+ }
1226
+
1227
+ static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) {
1228
+ return _mm512_srai_epi64(v, amt);
1229
+ }
1230
+
1231
+ // Here, b is assumed to contain one 32-bit value repeated.
1232
+ static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) {
1233
+ __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32);
1234
+ __m512i a1X3X = _mm512_srli_epi64(a, 32);
1235
+ __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
1236
+ __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask);
1237
+ return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
1238
+ }
1239
+
1240
+ // b is one 32-bit value repeated.
1241
+ static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) {
1242
+ __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32);
1243
+ __m512i a1X3X = _mm512_srli_epi64(a, 32);
1244
+ __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
1245
+ __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask);
1246
+ return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
1247
+ }
1248
+
1249
+ // Here, y is assumed to contain one 64-bit value repeated.
1250
+ // https://stackoverflow.com/a/28827013
1251
+ static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) {
1252
+ __m512i lomask = _mm512_set1_epi64(0xffffffff);
1253
+ __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1);
1254
+ __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1);
1255
+ __m512i w0 = _mm512_mul_epu32(x, y);
1256
+ __m512i w1 = _mm512_mul_epu32(x, yh);
1257
+ __m512i w2 = _mm512_mul_epu32(xh, y);
1258
+ __m512i w3 = _mm512_mul_epu32(xh, yh);
1259
+ __m512i w0h = _mm512_srli_epi64(w0, 32);
1260
+ __m512i s1 = _mm512_add_epi64(w1, w0h);
1261
+ __m512i s1l = _mm512_and_si512(s1, lomask);
1262
+ __m512i s1h = _mm512_srli_epi64(s1, 32);
1263
+ __m512i s2 = _mm512_add_epi64(w2, s1l);
1264
+ __m512i s2h = _mm512_srli_epi64(s2, 32);
1265
+ __m512i hi = _mm512_add_epi64(w3, s1h);
1266
+ hi = _mm512_add_epi64(hi, s2h);
1267
+
1268
+ return hi;
1269
+ }
1270
+
1271
+ // y is one 64-bit value repeated.
1272
+ static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) {
1273
+ __m512i p = libdivide_mullhi_u64_vector(x, y);
1274
+ __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y);
1275
+ __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x);
1276
+ p = _mm512_sub_epi64(p, t1);
1277
+ p = _mm512_sub_epi64(p, t2);
1278
+ return p;
1279
+ }
1280
+
1281
+ ////////// UINT32
1282
+
1283
+ __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) {
1284
+ uint8_t more = denom->more;
1285
+ if (!denom->magic) {
1286
+ return _mm512_srli_epi32(numers, more);
1287
+ }
1288
+ else {
1289
+ __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
1290
+ if (more & LIBDIVIDE_ADD_MARKER) {
1291
+ // uint32_t t = ((numer - q) >> 1) + q;
1292
+ // return t >> denom->shift;
1293
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1294
+ __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
1295
+ return _mm512_srli_epi32(t, shift);
1296
+ }
1297
+ else {
1298
+ return _mm512_srli_epi32(q, more);
1299
+ }
1300
+ }
1301
+ }
1302
+
1303
+ __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) {
1304
+ __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
1305
+ __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
1306
+ return _mm512_srli_epi32(t, denom->more);
1307
+ }
1308
+
1309
+ ////////// UINT64
1310
+
1311
+ __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) {
1312
+ uint8_t more = denom->more;
1313
+ if (!denom->magic) {
1314
+ return _mm512_srli_epi64(numers, more);
1315
+ }
1316
+ else {
1317
+ __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
1318
+ if (more & LIBDIVIDE_ADD_MARKER) {
1319
+ // uint32_t t = ((numer - q) >> 1) + q;
1320
+ // return t >> denom->shift;
1321
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1322
+ __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
1323
+ return _mm512_srli_epi64(t, shift);
1324
+ }
1325
+ else {
1326
+ return _mm512_srli_epi64(q, more);
1327
+ }
1328
+ }
1329
+ }
1330
+
1331
+ __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) {
1332
+ __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
1333
+ __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
1334
+ return _mm512_srli_epi64(t, denom->more);
1335
+ }
1336
+
1337
+ ////////// SINT32
1338
+
1339
+ __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) {
1340
+ uint8_t more = denom->more;
1341
+ if (!denom->magic) {
1342
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1343
+ uint32_t mask = (1U << shift) - 1;
1344
+ __m512i roundToZeroTweak = _mm512_set1_epi32(mask);
1345
+ // q = numer + ((numer >> 31) & roundToZeroTweak);
1346
+ __m512i q = _mm512_add_epi32(numers, _mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak));
1347
+ q = _mm512_srai_epi32(q, shift);
1348
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
1349
+ // q = (q ^ sign) - sign;
1350
+ q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign);
1351
+ return q;
1352
+ }
1353
+ else {
1354
+ __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic));
1355
+ if (more & LIBDIVIDE_ADD_MARKER) {
1356
+ // must be arithmetic shift
1357
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
1358
+ // q += ((numer ^ sign) - sign);
1359
+ q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign));
1360
+ }
1361
+ // q >>= shift
1362
+ q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
1363
+ q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0)
1364
+ return q;
1365
+ }
1366
+ }
1367
+
1368
+ __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) {
1369
+ int32_t magic = denom->magic;
1370
+ uint8_t more = denom->more;
1371
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1372
+ // must be arithmetic shift
1373
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
1374
+ __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic));
1375
+ q = _mm512_add_epi32(q, numers); // q += numers
1376
+
1377
+ // If q is non-negative, we have nothing to do
1378
+ // If q is negative, we want to add either (2**shift)-1 if d is
1379
+ // a power of 2, or (2**shift) if it is not a power of 2
1380
+ uint32_t is_power_of_2 = (magic == 0);
1381
+ __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31
1382
+ __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2);
1383
+ q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask)
1384
+ q = _mm512_srai_epi32(q, shift); // q >>= shift
1385
+ q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign
1386
+ return q;
1387
+ }
1388
+
1389
+ ////////// SINT64
1390
+
1391
+ __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) {
1392
+ uint8_t more = denom->more;
1393
+ int64_t magic = denom->magic;
1394
+ if (magic == 0) { // shift path
1395
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1396
+ uint64_t mask = (1ULL << shift) - 1;
1397
+ __m512i roundToZeroTweak = _mm512_set1_epi64(mask);
1398
+ // q = numer + ((numer >> 63) & roundToZeroTweak);
1399
+ __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak));
1400
+ q = libdivide_s64_shift_right_vector(q, shift);
1401
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
1402
+ // q = (q ^ sign) - sign;
1403
+ q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign);
1404
+ return q;
1405
+ }
1406
+ else {
1407
+ __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic));
1408
+ if (more & LIBDIVIDE_ADD_MARKER) {
1409
+ // must be arithmetic shift
1410
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
1411
+ // q += ((numer ^ sign) - sign);
1412
+ q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign));
1413
+ }
1414
+ // q >>= denom->mult_path.shift
1415
+ q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
1416
+ q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0)
1417
+ return q;
1418
+ }
1419
+ }
1420
+
1421
+ __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) {
1422
+ int64_t magic = denom->magic;
1423
+ uint8_t more = denom->more;
1424
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1425
+ // must be arithmetic shift
1426
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
1427
+
1428
+ // libdivide_mullhi_s64(numers, magic);
1429
+ __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic));
1430
+ q = _mm512_add_epi64(q, numers); // q += numers
1431
+
1432
+ // If q is non-negative, we have nothing to do.
1433
+ // If q is negative, we want to add either (2**shift)-1 if d is
1434
+ // a power of 2, or (2**shift) if it is not a power of 2.
1435
+ uint32_t is_power_of_2 = (magic == 0);
1436
+ __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
1437
+ __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2);
1438
+ q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask)
1439
+ q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
1440
+ q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign
1441
+ return q;
1442
+ }
1443
+
1444
+ #elif defined(LIBDIVIDE_AVX2)
1445
+
1446
+ static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom);
1447
+ static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom);
1448
+ static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom);
1449
+ static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom);
1450
+
1451
+ static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom);
1452
+ static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom);
1453
+ static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom);
1454
+ static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom);
1455
+
1456
+ //////// Internal Utility Functions
1457
+
1458
+ // Implementation of _mm256_srai_epi64(v, 63) (from AVX512).
1459
+ static inline __m256i libdivide_s64_signbits(__m256i v) {
1460
+ __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
1461
+ __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31);
1462
+ return signBits;
1463
+ }
1464
+
1465
+ // Implementation of _mm256_srai_epi64 (from AVX512).
1466
+ static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) {
1467
+ const int b = 64 - amt;
1468
+ __m256i m = _mm256_set1_epi64x(1ULL << (b - 1));
1469
+ __m256i x = _mm256_srli_epi64(v, amt);
1470
+ __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m);
1471
+ return result;
1472
+ }
1473
+
1474
+ // Here, b is assumed to contain one 32-bit value repeated.
1475
+ static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) {
1476
+ __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32);
1477
+ __m256i a1X3X = _mm256_srli_epi64(a, 32);
1478
+ __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
1479
+ __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask);
1480
+ return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3);
1481
+ }
1482
+
1483
+ // b is one 32-bit value repeated.
1484
+ static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) {
1485
+ __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32);
1486
+ __m256i a1X3X = _mm256_srli_epi64(a, 32);
1487
+ __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
1488
+ __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask);
1489
+ return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3);
1490
+ }
1491
+
1492
+ // Here, y is assumed to contain one 64-bit value repeated.
1493
+ // https://stackoverflow.com/a/28827013
1494
+ static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) {
1495
+ __m256i lomask = _mm256_set1_epi64x(0xffffffff);
1496
+ __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h
1497
+ __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h
1498
+ __m256i w0 = _mm256_mul_epu32(x, y); // x0l*y0l, x1l*y1l
1499
+ __m256i w1 = _mm256_mul_epu32(x, yh); // x0l*y0h, x1l*y1h
1500
+ __m256i w2 = _mm256_mul_epu32(xh, y); // x0h*y0l, x1h*y0l
1501
+ __m256i w3 = _mm256_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h
1502
+ __m256i w0h = _mm256_srli_epi64(w0, 32);
1503
+ __m256i s1 = _mm256_add_epi64(w1, w0h);
1504
+ __m256i s1l = _mm256_and_si256(s1, lomask);
1505
+ __m256i s1h = _mm256_srli_epi64(s1, 32);
1506
+ __m256i s2 = _mm256_add_epi64(w2, s1l);
1507
+ __m256i s2h = _mm256_srli_epi64(s2, 32);
1508
+ __m256i hi = _mm256_add_epi64(w3, s1h);
1509
+ hi = _mm256_add_epi64(hi, s2h);
1510
+
1511
+ return hi;
1512
+ }
1513
+
1514
+ // y is one 64-bit value repeated.
1515
+ static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) {
1516
+ __m256i p = libdivide_mullhi_u64_vector(x, y);
1517
+ __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y);
1518
+ __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x);
1519
+ p = _mm256_sub_epi64(p, t1);
1520
+ p = _mm256_sub_epi64(p, t2);
1521
+ return p;
1522
+ }
1523
+
1524
+ ////////// UINT32
1525
+
1526
+ __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) {
1527
+ uint8_t more = denom->more;
1528
+ if (!denom->magic) {
1529
+ return _mm256_srli_epi32(numers, more);
1530
+ }
1531
+ else {
1532
+ __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
1533
+ if (more & LIBDIVIDE_ADD_MARKER) {
1534
+ // uint32_t t = ((numer - q) >> 1) + q;
1535
+ // return t >> denom->shift;
1536
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1537
+ __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
1538
+ return _mm256_srli_epi32(t, shift);
1539
+ }
1540
+ else {
1541
+ return _mm256_srli_epi32(q, more);
1542
+ }
1543
+ }
1544
+ }
1545
+
1546
+ __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) {
1547
+ __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
1548
+ __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
1549
+ return _mm256_srli_epi32(t, denom->more);
1550
+ }
1551
+
1552
+ ////////// UINT64
1553
+
1554
+ __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) {
1555
+ uint8_t more = denom->more;
1556
+ if (!denom->magic) {
1557
+ return _mm256_srli_epi64(numers, more);
1558
+ }
1559
+ else {
1560
+ __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
1561
+ if (more & LIBDIVIDE_ADD_MARKER) {
1562
+ // uint32_t t = ((numer - q) >> 1) + q;
1563
+ // return t >> denom->shift;
1564
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1565
+ __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
1566
+ return _mm256_srli_epi64(t, shift);
1567
+ }
1568
+ else {
1569
+ return _mm256_srli_epi64(q, more);
1570
+ }
1571
+ }
1572
+ }
1573
+
1574
+ __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) {
1575
+ __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
1576
+ __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
1577
+ return _mm256_srli_epi64(t, denom->more);
1578
+ }
1579
+
1580
+ ////////// SINT32
1581
+
1582
+ __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) {
1583
+ uint8_t more = denom->more;
1584
+ if (!denom->magic) {
1585
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1586
+ uint32_t mask = (1U << shift) - 1;
1587
+ __m256i roundToZeroTweak = _mm256_set1_epi32(mask);
1588
+ // q = numer + ((numer >> 31) & roundToZeroTweak);
1589
+ __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak));
1590
+ q = _mm256_srai_epi32(q, shift);
1591
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
1592
+ // q = (q ^ sign) - sign;
1593
+ q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign);
1594
+ return q;
1595
+ }
1596
+ else {
1597
+ __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic));
1598
+ if (more & LIBDIVIDE_ADD_MARKER) {
1599
+ // must be arithmetic shift
1600
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
1601
+ // q += ((numer ^ sign) - sign);
1602
+ q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign));
1603
+ }
1604
+ // q >>= shift
1605
+ q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
1606
+ q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0)
1607
+ return q;
1608
+ }
1609
+ }
1610
+
1611
+ __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) {
1612
+ int32_t magic = denom->magic;
1613
+ uint8_t more = denom->more;
1614
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1615
+ // must be arithmetic shift
1616
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
1617
+ __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic));
1618
+ q = _mm256_add_epi32(q, numers); // q += numers
1619
+
1620
+ // If q is non-negative, we have nothing to do
1621
+ // If q is negative, we want to add either (2**shift)-1 if d is
1622
+ // a power of 2, or (2**shift) if it is not a power of 2
1623
+ uint32_t is_power_of_2 = (magic == 0);
1624
+ __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31
1625
+ __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2);
1626
+ q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
1627
+ q = _mm256_srai_epi32(q, shift); // q >>= shift
1628
+ q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
1629
+ return q;
1630
+ }
1631
+
1632
+ ////////// SINT64
1633
+
1634
+ __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) {
1635
+ uint8_t more = denom->more;
1636
+ int64_t magic = denom->magic;
1637
+ if (magic == 0) { // shift path
1638
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1639
+ uint64_t mask = (1ULL << shift) - 1;
1640
+ __m256i roundToZeroTweak = _mm256_set1_epi64x(mask);
1641
+ // q = numer + ((numer >> 63) & roundToZeroTweak);
1642
+ __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak));
1643
+ q = libdivide_s64_shift_right_vector(q, shift);
1644
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
1645
+ // q = (q ^ sign) - sign;
1646
+ q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign);
1647
+ return q;
1648
+ }
1649
+ else {
1650
+ __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic));
1651
+ if (more & LIBDIVIDE_ADD_MARKER) {
1652
+ // must be arithmetic shift
1653
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
1654
+ // q += ((numer ^ sign) - sign);
1655
+ q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign));
1656
+ }
1657
+ // q >>= denom->mult_path.shift
1658
+ q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
1659
+ q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0)
1660
+ return q;
1661
+ }
1662
+ }
1663
+
1664
+ __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) {
1665
+ int64_t magic = denom->magic;
1666
+ uint8_t more = denom->more;
1667
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1668
+ // must be arithmetic shift
1669
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
1670
+
1671
+ // libdivide_mullhi_s64(numers, magic);
1672
+ __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic));
1673
+ q = _mm256_add_epi64(q, numers); // q += numers
1674
+
1675
+ // If q is non-negative, we have nothing to do.
1676
+ // If q is negative, we want to add either (2**shift)-1 if d is
1677
+ // a power of 2, or (2**shift) if it is not a power of 2.
1678
+ uint32_t is_power_of_2 = (magic == 0);
1679
+ __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
1680
+ __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2);
1681
+ q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
1682
+ q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
1683
+ q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
1684
+ return q;
1685
+ }
1686
+
1687
+ #elif defined(LIBDIVIDE_SSE2)
1688
+
1689
+ static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom);
1690
+ static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom);
1691
+ static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom);
1692
+ static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom);
1693
+
1694
+ static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom);
1695
+ static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom);
1696
+ static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom);
1697
+ static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom);
1698
+
1699
+ //////// Internal Utility Functions
1700
+
1701
+ // Implementation of _mm_srai_epi64(v, 63) (from AVX512).
1702
+ static inline __m128i libdivide_s64_signbits(__m128i v) {
1703
+ __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
1704
+ __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31);
1705
+ return signBits;
1706
+ }
1707
+
1708
+ // Implementation of _mm_srai_epi64 (from AVX512).
1709
+ static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) {
1710
+ const int b = 64 - amt;
1711
+ __m128i m = _mm_set1_epi64x(1ULL << (b - 1));
1712
+ __m128i x = _mm_srli_epi64(v, amt);
1713
+ __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m);
1714
+ return result;
1715
+ }
1716
+
1717
+ // Here, b is assumed to contain one 32-bit value repeated.
1718
+ static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) {
1719
+ __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32);
1720
+ __m128i a1X3X = _mm_srli_epi64(a, 32);
1721
+ __m128i mask = _mm_set_epi32(-1, 0, -1, 0);
1722
+ __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask);
1723
+ return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3);
1724
+ }
1725
+
1726
+ // SSE2 does not have a signed multiplication instruction, but we can convert
1727
+ // unsigned to signed pretty efficiently. Again, b is just a 32 bit value
1728
+ // repeated four times.
1729
+ static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) {
1730
+ __m128i p = libdivide_mullhi_u32_vector(a, b);
1731
+ // t1 = (a >> 31) & y, arithmetic shift
1732
+ __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b);
1733
+ __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a);
1734
+ p = _mm_sub_epi32(p, t1);
1735
+ p = _mm_sub_epi32(p, t2);
1736
+ return p;
1737
+ }
1738
+
1739
+ // Here, y is assumed to contain one 64-bit value repeated.
1740
+ // https://stackoverflow.com/a/28827013
1741
+ static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) {
1742
+ __m128i lomask = _mm_set1_epi64x(0xffffffff);
1743
+ __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h
1744
+ __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h
1745
+ __m128i w0 = _mm_mul_epu32(x, y); // x0l*y0l, x1l*y1l
1746
+ __m128i w1 = _mm_mul_epu32(x, yh); // x0l*y0h, x1l*y1h
1747
+ __m128i w2 = _mm_mul_epu32(xh, y); // x0h*y0l, x1h*y0l
1748
+ __m128i w3 = _mm_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h
1749
+ __m128i w0h = _mm_srli_epi64(w0, 32);
1750
+ __m128i s1 = _mm_add_epi64(w1, w0h);
1751
+ __m128i s1l = _mm_and_si128(s1, lomask);
1752
+ __m128i s1h = _mm_srli_epi64(s1, 32);
1753
+ __m128i s2 = _mm_add_epi64(w2, s1l);
1754
+ __m128i s2h = _mm_srli_epi64(s2, 32);
1755
+ __m128i hi = _mm_add_epi64(w3, s1h);
1756
+ hi = _mm_add_epi64(hi, s2h);
1757
+
1758
+ return hi;
1759
+ }
1760
+
1761
+ // y is one 64-bit value repeated.
1762
+ static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) {
1763
+ __m128i p = libdivide_mullhi_u64_vector(x, y);
1764
+ __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y);
1765
+ __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x);
1766
+ p = _mm_sub_epi64(p, t1);
1767
+ p = _mm_sub_epi64(p, t2);
1768
+ return p;
1769
+ }
1770
+
1771
+ ////////// UINT32
1772
+
1773
+ __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) {
1774
+ uint8_t more = denom->more;
1775
+ if (!denom->magic) {
1776
+ return _mm_srli_epi32(numers, more);
1777
+ }
1778
+ else {
1779
+ __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
1780
+ if (more & LIBDIVIDE_ADD_MARKER) {
1781
+ // uint32_t t = ((numer - q) >> 1) + q;
1782
+ // return t >> denom->shift;
1783
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1784
+ __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
1785
+ return _mm_srli_epi32(t, shift);
1786
+ }
1787
+ else {
1788
+ return _mm_srli_epi32(q, more);
1789
+ }
1790
+ }
1791
+ }
1792
+
1793
+ __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) {
1794
+ __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
1795
+ __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
1796
+ return _mm_srli_epi32(t, denom->more);
1797
+ }
1798
+
1799
+ ////////// UINT64
1800
+
1801
+ __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) {
1802
+ uint8_t more = denom->more;
1803
+ if (!denom->magic) {
1804
+ return _mm_srli_epi64(numers, more);
1805
+ }
1806
+ else {
1807
+ __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
1808
+ if (more & LIBDIVIDE_ADD_MARKER) {
1809
+ // uint32_t t = ((numer - q) >> 1) + q;
1810
+ // return t >> denom->shift;
1811
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1812
+ __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
1813
+ return _mm_srli_epi64(t, shift);
1814
+ }
1815
+ else {
1816
+ return _mm_srli_epi64(q, more);
1817
+ }
1818
+ }
1819
+ }
1820
+
1821
+ __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) {
1822
+ __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
1823
+ __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
1824
+ return _mm_srli_epi64(t, denom->more);
1825
+ }
1826
+
1827
+ ////////// SINT32
1828
+
1829
+ __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) {
1830
+ uint8_t more = denom->more;
1831
+ if (!denom->magic) {
1832
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1833
+ uint32_t mask = (1U << shift) - 1;
1834
+ __m128i roundToZeroTweak = _mm_set1_epi32(mask);
1835
+ // q = numer + ((numer >> 31) & roundToZeroTweak);
1836
+ __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak));
1837
+ q = _mm_srai_epi32(q, shift);
1838
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
1839
+ // q = (q ^ sign) - sign;
1840
+ q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign);
1841
+ return q;
1842
+ }
1843
+ else {
1844
+ __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic));
1845
+ if (more & LIBDIVIDE_ADD_MARKER) {
1846
+ // must be arithmetic shift
1847
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
1848
+ // q += ((numer ^ sign) - sign);
1849
+ q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign));
1850
+ }
1851
+ // q >>= shift
1852
+ q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
1853
+ q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0)
1854
+ return q;
1855
+ }
1856
+ }
1857
+
1858
+ __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) {
1859
+ int32_t magic = denom->magic;
1860
+ uint8_t more = denom->more;
1861
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1862
+ // must be arithmetic shift
1863
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
1864
+ __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic));
1865
+ q = _mm_add_epi32(q, numers); // q += numers
1866
+
1867
+ // If q is non-negative, we have nothing to do
1868
+ // If q is negative, we want to add either (2**shift)-1 if d is
1869
+ // a power of 2, or (2**shift) if it is not a power of 2
1870
+ uint32_t is_power_of_2 = (magic == 0);
1871
+ __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31
1872
+ __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2);
1873
+ q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
1874
+ q = _mm_srai_epi32(q, shift); // q >>= shift
1875
+ q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
1876
+ return q;
1877
+ }
1878
+
1879
+ ////////// SINT64
1880
+
1881
+ __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) {
1882
+ uint8_t more = denom->more;
1883
+ int64_t magic = denom->magic;
1884
+ if (magic == 0) { // shift path
1885
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1886
+ uint64_t mask = (1ULL << shift) - 1;
1887
+ __m128i roundToZeroTweak = _mm_set1_epi64x(mask);
1888
+ // q = numer + ((numer >> 63) & roundToZeroTweak);
1889
+ __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak));
1890
+ q = libdivide_s64_shift_right_vector(q, shift);
1891
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
1892
+ // q = (q ^ sign) - sign;
1893
+ q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign);
1894
+ return q;
1895
+ }
1896
+ else {
1897
+ __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic));
1898
+ if (more & LIBDIVIDE_ADD_MARKER) {
1899
+ // must be arithmetic shift
1900
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
1901
+ // q += ((numer ^ sign) - sign);
1902
+ q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign));
1903
+ }
1904
+ // q >>= denom->mult_path.shift
1905
+ q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
1906
+ q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0)
1907
+ return q;
1908
+ }
1909
+ }
1910
+
1911
+ __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) {
1912
+ int64_t magic = denom->magic;
1913
+ uint8_t more = denom->more;
1914
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1915
+ // must be arithmetic shift
1916
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
1917
+
1918
+ // libdivide_mullhi_s64(numers, magic);
1919
+ __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic));
1920
+ q = _mm_add_epi64(q, numers); // q += numers
1921
+
1922
+ // If q is non-negative, we have nothing to do.
1923
+ // If q is negative, we want to add either (2**shift)-1 if d is
1924
+ // a power of 2, or (2**shift) if it is not a power of 2.
1925
+ uint32_t is_power_of_2 = (magic == 0);
1926
+ __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
1927
+ __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2);
1928
+ q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
1929
+ q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
1930
+ q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
1931
+ return q;
1932
+ }
1933
+
1934
+ #endif
1935
+
1936
+ /////////// C++ stuff
1937
+
1938
+ #ifdef __cplusplus
1939
+
1940
+ // The C++ divider class is templated on both an integer type
1941
+ // (like uint64_t) and an algorithm type.
1942
+ // * BRANCHFULL is the default algorithm type.
1943
+ // * BRANCHFREE is the branchfree algorithm type.
1944
+ enum {
1945
+ BRANCHFULL,
1946
+ BRANCHFREE
1947
+ };
1948
+
1949
+ #if defined(LIBDIVIDE_AVX512)
1950
+ #define LIBDIVIDE_VECTOR_TYPE __m512i
1951
+ #elif defined(LIBDIVIDE_AVX2)
1952
+ #define LIBDIVIDE_VECTOR_TYPE __m256i
1953
+ #elif defined(LIBDIVIDE_SSE2)
1954
+ #define LIBDIVIDE_VECTOR_TYPE __m128i
1955
+ #endif
1956
+
1957
+ #if !defined(LIBDIVIDE_VECTOR_TYPE)
1958
+ #define LIBDIVIDE_DIVIDE_VECTOR(ALGO)
1959
+ #else
1960
+ #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
1961
+ LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \
1962
+ return libdivide_##ALGO##_do_vector(n, &denom); \
1963
+ }
1964
+ #endif
1965
+
1966
+ // The DISPATCHER_GEN() macro generates C++ methods (for the given integer
1967
+ // and algorithm types) that redirect to libdivide's C API.
1968
+ #define DISPATCHER_GEN(T, ALGO) \
1969
+ libdivide_##ALGO##_t denom; \
1970
+ dispatcher() { } \
1971
+ dispatcher(T d) \
1972
+ : denom(libdivide_##ALGO##_gen(d)) \
1973
+ { } \
1974
+ T divide(T n) const { \
1975
+ return libdivide_##ALGO##_do(n, &denom); \
1976
+ } \
1977
+ LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
1978
+ T recover() const { \
1979
+ return libdivide_##ALGO##_recover(&denom); \
1980
+ }
1981
+
1982
+ // The dispatcher selects a specific division algorithm for a given
1983
+ // type and ALGO using partial template specialization.
1984
+ template<bool IS_INTEGRAL, bool IS_SIGNED, int SIZEOF, int ALGO> struct dispatcher { };
1985
+
1986
+ template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) };
1987
+ template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) };
1988
+ template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) };
1989
+ template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) };
1990
+ template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) };
1991
+ template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) };
1992
+ template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) };
1993
+ template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) };
1994
+
1995
+ // This is the main divider class for use by the user (C++ API).
1996
+ // The actual division algorithm is selected using the dispatcher struct
1997
+ // based on the integer and algorithm template parameters.
1998
+ template<typename T, int ALGO = BRANCHFULL>
1999
+ class divider {
2000
+ public:
2001
+ // We leave the default constructor empty so that creating
2002
+ // an array of dividers and then initializing them
2003
+ // later doesn't slow us down.
2004
+ divider() { }
2005
+
2006
+ // Constructor that takes the divisor as a parameter
2007
+ divider(T d) : div(d) { }
2008
+
2009
+ // Divides n by the divisor
2010
+ T divide(T n) const {
2011
+ return div.divide(n);
2012
+ }
2013
+
2014
+ // Recovers the divisor, returns the value that was
2015
+ // used to initialize this divider object.
2016
+ T recover() const {
2017
+ return div.recover();
2018
+ }
2019
+
2020
+ bool operator==(const divider<T, ALGO>& other) const {
2021
+ return div.denom.magic == other.denom.magic &&
2022
+ div.denom.more == other.denom.more;
2023
+ }
2024
+
2025
+ bool operator!=(const divider<T, ALGO>& other) const {
2026
+ return !(*this == other);
2027
+ }
2028
+
2029
+ #if defined(LIBDIVIDE_VECTOR_TYPE)
2030
+ // Treats the vector as packed integer values with the same type as
2031
+ // the divider (e.g. s32, u32, s64, u64) and divides each of
2032
+ // them by the divider, returning the packed quotients.
2033
+ LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const {
2034
+ return div.divide(n);
2035
+ }
2036
+ #endif
2037
+
2038
+ private:
2039
+ // Storage for the actual divisor
2040
+ dispatcher<std::is_integral<T>::value,
2041
+ std::is_signed<T>::value, sizeof(T), ALGO> div;
2042
+ };
2043
+
2044
+ // Overload of operator / for scalar division
2045
+ template<typename T, int ALGO>
2046
+ T operator/(T n, const divider<T, ALGO>& div) {
2047
+ return div.divide(n);
2048
+ }
2049
+
2050
+ // Overload of operator /= for scalar division
2051
+ template<typename T, int ALGO>
2052
+ T& operator/=(T& n, const divider<T, ALGO>& div) {
2053
+ n = div.divide(n);
2054
+ return n;
2055
+ }
2056
+
2057
+ #if defined(LIBDIVIDE_VECTOR_TYPE)
2058
+ // Overload of operator / for vector division
2059
+ template<typename T, int ALGO>
2060
+ LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) {
2061
+ return div.divide(n);
2062
+ }
2063
+ // Overload of operator /= for vector division
2064
+ template<typename T, int ALGO>
2065
+ LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) {
2066
+ n = div.divide(n);
2067
+ return n;
2068
+ }
2069
+ #endif
2070
+
2071
+ // libdivdie::branchfree_divider<T>
2072
+ template <typename T>
2073
+ using branchfree_divider = divider<T, BRANCHFREE>;
2074
+
2075
+ } // namespace libdivide
2076
+
2077
+ #endif // __cplusplus
2078
+
2079
+ #endif // NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
pllava/lib/python3.10/site-packages/numpy/core/include/numpy/ufuncobject.h ADDED
@@ -0,0 +1,359 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
2
+ #define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
3
+
4
+ #include <numpy/npy_math.h>
5
+ #include <numpy/npy_common.h>
6
+
7
+ #ifdef __cplusplus
8
+ extern "C" {
9
+ #endif
10
+
11
+ /*
12
+ * The legacy generic inner loop for a standard element-wise or
13
+ * generalized ufunc.
14
+ */
15
+ typedef void (*PyUFuncGenericFunction)
16
+ (char **args,
17
+ npy_intp const *dimensions,
18
+ npy_intp const *strides,
19
+ void *innerloopdata);
20
+
21
+ /*
22
+ * The most generic one-dimensional inner loop for
23
+ * a masked standard element-wise ufunc. "Masked" here means that it skips
24
+ * doing calculations on any items for which the maskptr array has a true
25
+ * value.
26
+ */
27
+ typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
28
+ char **dataptrs, npy_intp *strides,
29
+ char *maskptr, npy_intp mask_stride,
30
+ npy_intp count,
31
+ NpyAuxData *innerloopdata);
32
+
33
+ /* Forward declaration for the type resolver and loop selector typedefs */
34
+ struct _tagPyUFuncObject;
35
+
36
+ /*
37
+ * Given the operands for calling a ufunc, should determine the
38
+ * calculation input and output data types and return an inner loop function.
39
+ * This function should validate that the casting rule is being followed,
40
+ * and fail if it is not.
41
+ *
42
+ * For backwards compatibility, the regular type resolution function does not
43
+ * support auxiliary data with object semantics. The type resolution call
44
+ * which returns a masked generic function returns a standard NpyAuxData
45
+ * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
46
+ * work.
47
+ *
48
+ * ufunc: The ufunc object.
49
+ * casting: The 'casting' parameter provided to the ufunc.
50
+ * operands: An array of length (ufunc->nin + ufunc->nout),
51
+ * with the output parameters possibly NULL.
52
+ * type_tup: Either NULL, or the type_tup passed to the ufunc.
53
+ * out_dtypes: An array which should be populated with new
54
+ * references to (ufunc->nin + ufunc->nout) new
55
+ * dtypes, one for each input and output. These
56
+ * dtypes should all be in native-endian format.
57
+ *
58
+ * Should return 0 on success, -1 on failure (with exception set),
59
+ * or -2 if Py_NotImplemented should be returned.
60
+ */
61
+ typedef int (PyUFunc_TypeResolutionFunc)(
62
+ struct _tagPyUFuncObject *ufunc,
63
+ NPY_CASTING casting,
64
+ PyArrayObject **operands,
65
+ PyObject *type_tup,
66
+ PyArray_Descr **out_dtypes);
67
+
68
+ /*
69
+ * Legacy loop selector. (This should NOT normally be used and we can expect
70
+ * that only the `PyUFunc_DefaultLegacyInnerLoopSelector` is ever set).
71
+ * However, unlike the masked version, it probably still works.
72
+ *
73
+ * ufunc: The ufunc object.
74
+ * dtypes: An array which has been populated with dtypes,
75
+ * in most cases by the type resolution function
76
+ * for the same ufunc.
77
+ * out_innerloop: Should be populated with the correct ufunc inner
78
+ * loop for the given type.
79
+ * out_innerloopdata: Should be populated with the void* data to
80
+ * be passed into the out_innerloop function.
81
+ * out_needs_api: If the inner loop needs to use the Python API,
82
+ * should set the to 1, otherwise should leave
83
+ * this untouched.
84
+ */
85
+ typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)(
86
+ struct _tagPyUFuncObject *ufunc,
87
+ PyArray_Descr **dtypes,
88
+ PyUFuncGenericFunction *out_innerloop,
89
+ void **out_innerloopdata,
90
+ int *out_needs_api);
91
+
92
+
93
+ typedef struct _tagPyUFuncObject {
94
+ PyObject_HEAD
95
+ /*
96
+ * nin: Number of inputs
97
+ * nout: Number of outputs
98
+ * nargs: Always nin + nout (Why is it stored?)
99
+ */
100
+ int nin, nout, nargs;
101
+
102
+ /*
103
+ * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero
104
+ * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone,
105
+ * PyUFunc_IdentityValue.
106
+ */
107
+ int identity;
108
+
109
+ /* Array of one-dimensional core loops */
110
+ PyUFuncGenericFunction *functions;
111
+ /* Array of funcdata that gets passed into the functions */
112
+ void **data;
113
+ /* The number of elements in 'functions' and 'data' */
114
+ int ntypes;
115
+
116
+ /* Used to be unused field 'check_return' */
117
+ int reserved1;
118
+
119
+ /* The name of the ufunc */
120
+ const char *name;
121
+
122
+ /* Array of type numbers, of size ('nargs' * 'ntypes') */
123
+ char *types;
124
+
125
+ /* Documentation string */
126
+ const char *doc;
127
+
128
+ void *ptr;
129
+ PyObject *obj;
130
+ PyObject *userloops;
131
+
132
+ /* generalized ufunc parameters */
133
+
134
+ /* 0 for scalar ufunc; 1 for generalized ufunc */
135
+ int core_enabled;
136
+ /* number of distinct dimension names in signature */
137
+ int core_num_dim_ix;
138
+
139
+ /*
140
+ * dimension indices of input/output argument k are stored in
141
+ * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
142
+ */
143
+
144
+ /* numbers of core dimensions of each argument */
145
+ int *core_num_dims;
146
+ /*
147
+ * dimension indices in a flatted form; indices
148
+ * are in the range of [0,core_num_dim_ix)
149
+ */
150
+ int *core_dim_ixs;
151
+ /*
152
+ * positions of 1st core dimensions of each
153
+ * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
154
+ */
155
+ int *core_offsets;
156
+ /* signature string for printing purpose */
157
+ char *core_signature;
158
+
159
+ /*
160
+ * A function which resolves the types and fills an array
161
+ * with the dtypes for the inputs and outputs.
162
+ */
163
+ PyUFunc_TypeResolutionFunc *type_resolver;
164
+ /*
165
+ * A function which returns an inner loop written for
166
+ * NumPy 1.6 and earlier ufuncs. This is for backwards
167
+ * compatibility, and may be NULL if inner_loop_selector
168
+ * is specified.
169
+ */
170
+ PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
171
+ /*
172
+ * This was blocked off to be the "new" inner loop selector in 1.7,
173
+ * but this was never implemented. (This is also why the above
174
+ * selector is called the "legacy" selector.)
175
+ */
176
+ #ifndef Py_LIMITED_API
177
+ vectorcallfunc vectorcall;
178
+ #else
179
+ void *vectorcall;
180
+ #endif
181
+
182
+ /* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */
183
+ void *_always_null_previously_masked_innerloop_selector;
184
+
185
+ /*
186
+ * List of flags for each operand when ufunc is called by nditer object.
187
+ * These flags will be used in addition to the default flags for each
188
+ * operand set by nditer object.
189
+ */
190
+ npy_uint32 *op_flags;
191
+
192
+ /*
193
+ * List of global flags used when ufunc is called by nditer object.
194
+ * These flags will be used in addition to the default global flags
195
+ * set by nditer object.
196
+ */
197
+ npy_uint32 iter_flags;
198
+
199
+ /* New in NPY_API_VERSION 0x0000000D and above */
200
+ #if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
201
+ /*
202
+ * for each core_num_dim_ix distinct dimension names,
203
+ * the possible "frozen" size (-1 if not frozen).
204
+ */
205
+ npy_intp *core_dim_sizes;
206
+
207
+ /*
208
+ * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
209
+ */
210
+ npy_uint32 *core_dim_flags;
211
+
212
+ /* Identity for reduction, when identity == PyUFunc_IdentityValue */
213
+ PyObject *identity_value;
214
+ #endif /* NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION */
215
+
216
+ /* New in NPY_API_VERSION 0x0000000F and above */
217
+ #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
218
+ /* New private fields related to dispatching */
219
+ void *_dispatch_cache;
220
+ /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
221
+ PyObject *_loops;
222
+ #endif
223
+ } PyUFuncObject;
224
+
225
+ #include "arrayobject.h"
226
+ /* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
227
+ /* the core dimension's size will be determined by the operands. */
228
+ #define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002
229
+ /* the core dimension may be absent */
230
+ #define UFUNC_CORE_DIM_CAN_IGNORE 0x0004
231
+ /* flags inferred during execution */
232
+ #define UFUNC_CORE_DIM_MISSING 0x00040000
233
+
234
+ #define UFUNC_ERR_IGNORE 0
235
+ #define UFUNC_ERR_WARN 1
236
+ #define UFUNC_ERR_RAISE 2
237
+ #define UFUNC_ERR_CALL 3
238
+ #define UFUNC_ERR_PRINT 4
239
+ #define UFUNC_ERR_LOG 5
240
+
241
+ /* Python side integer mask */
242
+
243
+ #define UFUNC_MASK_DIVIDEBYZERO 0x07
244
+ #define UFUNC_MASK_OVERFLOW 0x3f
245
+ #define UFUNC_MASK_UNDERFLOW 0x1ff
246
+ #define UFUNC_MASK_INVALID 0xfff
247
+
248
+ #define UFUNC_SHIFT_DIVIDEBYZERO 0
249
+ #define UFUNC_SHIFT_OVERFLOW 3
250
+ #define UFUNC_SHIFT_UNDERFLOW 6
251
+ #define UFUNC_SHIFT_INVALID 9
252
+
253
+
254
+ #define UFUNC_OBJ_ISOBJECT 1
255
+ #define UFUNC_OBJ_NEEDS_API 2
256
+
257
+ /* Default user error mode */
258
+ #define UFUNC_ERR_DEFAULT \
259
+ (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \
260
+ (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \
261
+ (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID)
262
+
263
+ #if NPY_ALLOW_THREADS
264
+ #define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0);
265
+ #define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0);
266
+ #else
267
+ #define NPY_LOOP_BEGIN_THREADS
268
+ #define NPY_LOOP_END_THREADS
269
+ #endif
270
+
271
+ /*
272
+ * UFunc has unit of 0, and the order of operations can be reordered
273
+ * This case allows reduction with multiple axes at once.
274
+ */
275
+ #define PyUFunc_Zero 0
276
+ /*
277
+ * UFunc has unit of 1, and the order of operations can be reordered
278
+ * This case allows reduction with multiple axes at once.
279
+ */
280
+ #define PyUFunc_One 1
281
+ /*
282
+ * UFunc has unit of -1, and the order of operations can be reordered
283
+ * This case allows reduction with multiple axes at once. Intended for
284
+ * bitwise_and reduction.
285
+ */
286
+ #define PyUFunc_MinusOne 2
287
+ /*
288
+ * UFunc has no unit, and the order of operations cannot be reordered.
289
+ * This case does not allow reduction with multiple axes at once.
290
+ */
291
+ #define PyUFunc_None -1
292
+ /*
293
+ * UFunc has no unit, and the order of operations can be reordered
294
+ * This case allows reduction with multiple axes at once.
295
+ */
296
+ #define PyUFunc_ReorderableNone -2
297
+ /*
298
+ * UFunc unit is an identity_value, and the order of operations can be reordered
299
+ * This case allows reduction with multiple axes at once.
300
+ */
301
+ #define PyUFunc_IdentityValue -3
302
+
303
+
304
+ #define UFUNC_REDUCE 0
305
+ #define UFUNC_ACCUMULATE 1
306
+ #define UFUNC_REDUCEAT 2
307
+ #define UFUNC_OUTER 3
308
+
309
+
310
+ typedef struct {
311
+ int nin;
312
+ int nout;
313
+ PyObject *callable;
314
+ } PyUFunc_PyFuncData;
315
+
316
+ /* A linked-list of function information for
317
+ user-defined 1-d loops.
318
+ */
319
+ typedef struct _loop1d_info {
320
+ PyUFuncGenericFunction func;
321
+ void *data;
322
+ int *arg_types;
323
+ struct _loop1d_info *next;
324
+ int nargs;
325
+ PyArray_Descr **arg_dtypes;
326
+ } PyUFunc_Loop1d;
327
+
328
+
329
+ #include "__ufunc_api.h"
330
+
331
+ #define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
332
+
333
+ /*
334
+ * THESE MACROS ARE DEPRECATED.
335
+ * Use npy_set_floatstatus_* in the npymath library.
336
+ */
337
+ #define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO
338
+ #define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW
339
+ #define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
340
+ #define UFUNC_FPE_INVALID NPY_FPE_INVALID
341
+
342
+ #define generate_divbyzero_error() npy_set_floatstatus_divbyzero()
343
+ #define generate_overflow_error() npy_set_floatstatus_overflow()
344
+
345
+ /* Make sure it gets defined if it isn't already */
346
+ #ifndef UFUNC_NOFPE
347
+ /* Clear the floating point exception default of Borland C++ */
348
+ #if defined(__BORLANDC__)
349
+ #define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
350
+ #else
351
+ #define UFUNC_NOFPE
352
+ #endif
353
+ #endif
354
+
355
+ #ifdef __cplusplus
356
+ }
357
+ #endif
358
+
359
+ #endif /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */
pllava/lib/python3.10/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:561d36b5dc82ca96bc35be10cdd5619c30225c43b6590adcc1cbce7430c5179c
3
+ size 718
pllava/lib/python3.10/site-packages/numpy/fft/__init__.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Discrete Fourier Transform (:mod:`numpy.fft`)
3
+ =============================================
4
+
5
+ .. currentmodule:: numpy.fft
6
+
7
+ The SciPy module `scipy.fft` is a more comprehensive superset
8
+ of ``numpy.fft``, which includes only a basic set of routines.
9
+
10
+ Standard FFTs
11
+ -------------
12
+
13
+ .. autosummary::
14
+ :toctree: generated/
15
+
16
+ fft Discrete Fourier transform.
17
+ ifft Inverse discrete Fourier transform.
18
+ fft2 Discrete Fourier transform in two dimensions.
19
+ ifft2 Inverse discrete Fourier transform in two dimensions.
20
+ fftn Discrete Fourier transform in N-dimensions.
21
+ ifftn Inverse discrete Fourier transform in N dimensions.
22
+
23
+ Real FFTs
24
+ ---------
25
+
26
+ .. autosummary::
27
+ :toctree: generated/
28
+
29
+ rfft Real discrete Fourier transform.
30
+ irfft Inverse real discrete Fourier transform.
31
+ rfft2 Real discrete Fourier transform in two dimensions.
32
+ irfft2 Inverse real discrete Fourier transform in two dimensions.
33
+ rfftn Real discrete Fourier transform in N dimensions.
34
+ irfftn Inverse real discrete Fourier transform in N dimensions.
35
+
36
+ Hermitian FFTs
37
+ --------------
38
+
39
+ .. autosummary::
40
+ :toctree: generated/
41
+
42
+ hfft Hermitian discrete Fourier transform.
43
+ ihfft Inverse Hermitian discrete Fourier transform.
44
+
45
+ Helper routines
46
+ ---------------
47
+
48
+ .. autosummary::
49
+ :toctree: generated/
50
+
51
+ fftfreq Discrete Fourier Transform sample frequencies.
52
+ rfftfreq DFT sample frequencies (for usage with rfft, irfft).
53
+ fftshift Shift zero-frequency component to center of spectrum.
54
+ ifftshift Inverse of fftshift.
55
+
56
+
57
+ Background information
58
+ ----------------------
59
+
60
+ Fourier analysis is fundamentally a method for expressing a function as a
61
+ sum of periodic components, and for recovering the function from those
62
+ components. When both the function and its Fourier transform are
63
+ replaced with discretized counterparts, it is called the discrete Fourier
64
+ transform (DFT). The DFT has become a mainstay of numerical computing in
65
+ part because of a very fast algorithm for computing it, called the Fast
66
+ Fourier Transform (FFT), which was known to Gauss (1805) and was brought
67
+ to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
68
+ provide an accessible introduction to Fourier analysis and its
69
+ applications.
70
+
71
+ Because the discrete Fourier transform separates its input into
72
+ components that contribute at discrete frequencies, it has a great number
73
+ of applications in digital signal processing, e.g., for filtering, and in
74
+ this context the discretized input to the transform is customarily
75
+ referred to as a *signal*, which exists in the *time domain*. The output
76
+ is called a *spectrum* or *transform* and exists in the *frequency
77
+ domain*.
78
+
79
+ Implementation details
80
+ ----------------------
81
+
82
+ There are many ways to define the DFT, varying in the sign of the
83
+ exponent, normalization, etc. In this implementation, the DFT is defined
84
+ as
85
+
86
+ .. math::
87
+ A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
88
+ \\qquad k = 0,\\ldots,n-1.
89
+
90
+ The DFT is in general defined for complex inputs and outputs, and a
91
+ single-frequency component at linear frequency :math:`f` is
92
+ represented by a complex exponential
93
+ :math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
94
+ is the sampling interval.
95
+
96
+ The values in the result follow so-called "standard" order: If ``A =
97
+ fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of
98
+ the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
99
+ contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
100
+ negative-frequency terms, in order of decreasingly negative frequency.
101
+ For an even number of input points, ``A[n/2]`` represents both positive and
102
+ negative Nyquist frequency, and is also purely real for real input. For
103
+ an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
104
+ frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
105
+ The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
106
+ of corresponding elements in the output. The routine
107
+ ``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
108
+ zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
109
+ that shift.
110
+
111
+ When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
112
+ is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
113
+ The phase spectrum is obtained by ``np.angle(A)``.
114
+
115
+ The inverse DFT is defined as
116
+
117
+ .. math::
118
+ a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
119
+ \\qquad m = 0,\\ldots,n-1.
120
+
121
+ It differs from the forward transform by the sign of the exponential
122
+ argument and the default normalization by :math:`1/n`.
123
+
124
+ Type Promotion
125
+ --------------
126
+
127
+ `numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and
128
+ ``complex128`` arrays respectively. For an FFT implementation that does not
129
+ promote input arrays, see `scipy.fftpack`.
130
+
131
+ Normalization
132
+ -------------
133
+
134
+ The argument ``norm`` indicates which direction of the pair of direct/inverse
135
+ transforms is scaled and with what normalization factor.
136
+ The default normalization (``"backward"``) has the direct (forward) transforms
137
+ unscaled and the inverse (backward) transforms scaled by :math:`1/n`. It is
138
+ possible to obtain unitary transforms by setting the keyword argument ``norm``
139
+ to ``"ortho"`` so that both direct and inverse transforms are scaled by
140
+ :math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to
141
+ ``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse
142
+ transforms unscaled (i.e. exactly opposite to the default ``"backward"``).
143
+ `None` is an alias of the default option ``"backward"`` for backward
144
+ compatibility.
145
+
146
+ Real and Hermitian transforms
147
+ -----------------------------
148
+
149
+ When the input is purely real, its transform is Hermitian, i.e., the
150
+ component at frequency :math:`f_k` is the complex conjugate of the
151
+ component at frequency :math:`-f_k`, which means that for real
152
+ inputs there is no information in the negative frequency components that
153
+ is not already available from the positive frequency components.
154
+ The family of `rfft` functions is
155
+ designed to operate on real inputs, and exploits this symmetry by
156
+ computing only the positive frequency components, up to and including the
157
+ Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
158
+ output points. The inverses of this family assumes the same symmetry of
159
+ its input, and for an output of ``n`` points uses ``n/2+1`` input points.
160
+
161
+ Correspondingly, when the spectrum is purely real, the signal is
162
+ Hermitian. The `hfft` family of functions exploits this symmetry by
163
+ using ``n/2+1`` complex points in the input (time) domain for ``n`` real
164
+ points in the frequency domain.
165
+
166
+ In higher dimensions, FFTs are used, e.g., for image analysis and
167
+ filtering. The computational efficiency of the FFT means that it can
168
+ also be a faster way to compute large convolutions, using the property
169
+ that a convolution in the time domain is equivalent to a point-by-point
170
+ multiplication in the frequency domain.
171
+
172
+ Higher dimensions
173
+ -----------------
174
+
175
+ In two dimensions, the DFT is defined as
176
+
177
+ .. math::
178
+ A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
179
+ a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
180
+ \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
181
+
182
+ which extends in the obvious way to higher dimensions, and the inverses
183
+ in higher dimensions also extend in the same way.
184
+
185
+ References
186
+ ----------
187
+
188
+ .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
189
+ machine calculation of complex Fourier series," *Math. Comput.*
190
+ 19: 297-301.
191
+
192
+ .. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,
193
+ 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
194
+ 12-13. Cambridge Univ. Press, Cambridge, UK.
195
+
196
+ Examples
197
+ --------
198
+
199
+ For examples, see the various functions.
200
+
201
+ """
202
+
203
+ from . import _pocketfft, helper
204
+ from ._pocketfft import *
205
+ from .helper import *
206
+
207
+ __all__ = _pocketfft.__all__.copy()
208
+ __all__ += helper.__all__
209
+
210
+ from numpy._pytesttester import PytestTester
211
+ test = PytestTester(__name__)
212
+ del PytestTester
pllava/lib/python3.10/site-packages/numpy/fft/__init__.pyi ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy._pytesttester import PytestTester
2
+
3
+ from numpy.fft._pocketfft import (
4
+ fft as fft,
5
+ ifft as ifft,
6
+ rfft as rfft,
7
+ irfft as irfft,
8
+ hfft as hfft,
9
+ ihfft as ihfft,
10
+ rfftn as rfftn,
11
+ irfftn as irfftn,
12
+ rfft2 as rfft2,
13
+ irfft2 as irfft2,
14
+ fft2 as fft2,
15
+ ifft2 as ifft2,
16
+ fftn as fftn,
17
+ ifftn as ifftn,
18
+ )
19
+
20
+ from numpy.fft.helper import (
21
+ fftshift as fftshift,
22
+ ifftshift as ifftshift,
23
+ fftfreq as fftfreq,
24
+ rfftfreq as rfftfreq,
25
+ )
26
+
27
+ __all__: list[str]
28
+ __path__: list[str]
29
+ test: PytestTester
pllava/lib/python3.10/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-310.pyc ADDED
Binary file (51.8 kB). View file
 
pllava/lib/python3.10/site-packages/numpy/fft/__pycache__/helper.cpython-310.pyc ADDED
Binary file (6.68 kB). View file
 
pllava/lib/python3.10/site-packages/numpy/fft/_pocketfft.pyi ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Sequence
2
+ from typing import Literal as L
3
+
4
+ from numpy import complex128, float64
5
+ from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co
6
+
7
+ _NormKind = L[None, "backward", "ortho", "forward"]
8
+
9
+ __all__: list[str]
10
+
11
+ def fft(
12
+ a: ArrayLike,
13
+ n: None | int = ...,
14
+ axis: int = ...,
15
+ norm: _NormKind = ...,
16
+ ) -> NDArray[complex128]: ...
17
+
18
+ def ifft(
19
+ a: ArrayLike,
20
+ n: None | int = ...,
21
+ axis: int = ...,
22
+ norm: _NormKind = ...,
23
+ ) -> NDArray[complex128]: ...
24
+
25
+ def rfft(
26
+ a: ArrayLike,
27
+ n: None | int = ...,
28
+ axis: int = ...,
29
+ norm: _NormKind = ...,
30
+ ) -> NDArray[complex128]: ...
31
+
32
+ def irfft(
33
+ a: ArrayLike,
34
+ n: None | int = ...,
35
+ axis: int = ...,
36
+ norm: _NormKind = ...,
37
+ ) -> NDArray[float64]: ...
38
+
39
+ # Input array must be compatible with `np.conjugate`
40
+ def hfft(
41
+ a: _ArrayLikeNumber_co,
42
+ n: None | int = ...,
43
+ axis: int = ...,
44
+ norm: _NormKind = ...,
45
+ ) -> NDArray[float64]: ...
46
+
47
+ def ihfft(
48
+ a: ArrayLike,
49
+ n: None | int = ...,
50
+ axis: int = ...,
51
+ norm: _NormKind = ...,
52
+ ) -> NDArray[complex128]: ...
53
+
54
+ def fftn(
55
+ a: ArrayLike,
56
+ s: None | Sequence[int] = ...,
57
+ axes: None | Sequence[int] = ...,
58
+ norm: _NormKind = ...,
59
+ ) -> NDArray[complex128]: ...
60
+
61
+ def ifftn(
62
+ a: ArrayLike,
63
+ s: None | Sequence[int] = ...,
64
+ axes: None | Sequence[int] = ...,
65
+ norm: _NormKind = ...,
66
+ ) -> NDArray[complex128]: ...
67
+
68
+ def rfftn(
69
+ a: ArrayLike,
70
+ s: None | Sequence[int] = ...,
71
+ axes: None | Sequence[int] = ...,
72
+ norm: _NormKind = ...,
73
+ ) -> NDArray[complex128]: ...
74
+
75
+ def irfftn(
76
+ a: ArrayLike,
77
+ s: None | Sequence[int] = ...,
78
+ axes: None | Sequence[int] = ...,
79
+ norm: _NormKind = ...,
80
+ ) -> NDArray[float64]: ...
81
+
82
+ def fft2(
83
+ a: ArrayLike,
84
+ s: None | Sequence[int] = ...,
85
+ axes: None | Sequence[int] = ...,
86
+ norm: _NormKind = ...,
87
+ ) -> NDArray[complex128]: ...
88
+
89
+ def ifft2(
90
+ a: ArrayLike,
91
+ s: None | Sequence[int] = ...,
92
+ axes: None | Sequence[int] = ...,
93
+ norm: _NormKind = ...,
94
+ ) -> NDArray[complex128]: ...
95
+
96
+ def rfft2(
97
+ a: ArrayLike,
98
+ s: None | Sequence[int] = ...,
99
+ axes: None | Sequence[int] = ...,
100
+ norm: _NormKind = ...,
101
+ ) -> NDArray[complex128]: ...
102
+
103
+ def irfft2(
104
+ a: ArrayLike,
105
+ s: None | Sequence[int] = ...,
106
+ axes: None | Sequence[int] = ...,
107
+ norm: _NormKind = ...,
108
+ ) -> NDArray[float64]: ...
pllava/lib/python3.10/site-packages/numpy/fft/helper.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Discrete Fourier Transforms - helper.py
3
+
4
+ """
5
+ from numpy.core import integer, empty, arange, asarray, roll
6
+ from numpy.core.overrides import array_function_dispatch, set_module
7
+
8
+ # Created by Pearu Peterson, September 2002
9
+
10
+ __all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
11
+
12
+ integer_types = (int, integer)
13
+
14
+
15
+ def _fftshift_dispatcher(x, axes=None):
16
+ return (x,)
17
+
18
+
19
+ @array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
20
+ def fftshift(x, axes=None):
21
+ """
22
+ Shift the zero-frequency component to the center of the spectrum.
23
+
24
+ This function swaps half-spaces for all axes listed (defaults to all).
25
+ Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
26
+
27
+ Parameters
28
+ ----------
29
+ x : array_like
30
+ Input array.
31
+ axes : int or shape tuple, optional
32
+ Axes over which to shift. Default is None, which shifts all axes.
33
+
34
+ Returns
35
+ -------
36
+ y : ndarray
37
+ The shifted array.
38
+
39
+ See Also
40
+ --------
41
+ ifftshift : The inverse of `fftshift`.
42
+
43
+ Examples
44
+ --------
45
+ >>> freqs = np.fft.fftfreq(10, 0.1)
46
+ >>> freqs
47
+ array([ 0., 1., 2., ..., -3., -2., -1.])
48
+ >>> np.fft.fftshift(freqs)
49
+ array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
50
+
51
+ Shift the zero-frequency component only along the second axis:
52
+
53
+ >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
54
+ >>> freqs
55
+ array([[ 0., 1., 2.],
56
+ [ 3., 4., -4.],
57
+ [-3., -2., -1.]])
58
+ >>> np.fft.fftshift(freqs, axes=(1,))
59
+ array([[ 2., 0., 1.],
60
+ [-4., 3., 4.],
61
+ [-1., -3., -2.]])
62
+
63
+ """
64
+ x = asarray(x)
65
+ if axes is None:
66
+ axes = tuple(range(x.ndim))
67
+ shift = [dim // 2 for dim in x.shape]
68
+ elif isinstance(axes, integer_types):
69
+ shift = x.shape[axes] // 2
70
+ else:
71
+ shift = [x.shape[ax] // 2 for ax in axes]
72
+
73
+ return roll(x, shift, axes)
74
+
75
+
76
+ @array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
77
+ def ifftshift(x, axes=None):
78
+ """
79
+ The inverse of `fftshift`. Although identical for even-length `x`, the
80
+ functions differ by one sample for odd-length `x`.
81
+
82
+ Parameters
83
+ ----------
84
+ x : array_like
85
+ Input array.
86
+ axes : int or shape tuple, optional
87
+ Axes over which to calculate. Defaults to None, which shifts all axes.
88
+
89
+ Returns
90
+ -------
91
+ y : ndarray
92
+ The shifted array.
93
+
94
+ See Also
95
+ --------
96
+ fftshift : Shift zero-frequency component to the center of the spectrum.
97
+
98
+ Examples
99
+ --------
100
+ >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
101
+ >>> freqs
102
+ array([[ 0., 1., 2.],
103
+ [ 3., 4., -4.],
104
+ [-3., -2., -1.]])
105
+ >>> np.fft.ifftshift(np.fft.fftshift(freqs))
106
+ array([[ 0., 1., 2.],
107
+ [ 3., 4., -4.],
108
+ [-3., -2., -1.]])
109
+
110
+ """
111
+ x = asarray(x)
112
+ if axes is None:
113
+ axes = tuple(range(x.ndim))
114
+ shift = [-(dim // 2) for dim in x.shape]
115
+ elif isinstance(axes, integer_types):
116
+ shift = -(x.shape[axes] // 2)
117
+ else:
118
+ shift = [-(x.shape[ax] // 2) for ax in axes]
119
+
120
+ return roll(x, shift, axes)
121
+
122
+
123
+ @set_module('numpy.fft')
124
+ def fftfreq(n, d=1.0):
125
+ """
126
+ Return the Discrete Fourier Transform sample frequencies.
127
+
128
+ The returned float array `f` contains the frequency bin centers in cycles
129
+ per unit of the sample spacing (with zero at the start). For instance, if
130
+ the sample spacing is in seconds, then the frequency unit is cycles/second.
131
+
132
+ Given a window length `n` and a sample spacing `d`::
133
+
134
+ f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
135
+ f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd
136
+
137
+ Parameters
138
+ ----------
139
+ n : int
140
+ Window length.
141
+ d : scalar, optional
142
+ Sample spacing (inverse of the sampling rate). Defaults to 1.
143
+
144
+ Returns
145
+ -------
146
+ f : ndarray
147
+ Array of length `n` containing the sample frequencies.
148
+
149
+ Examples
150
+ --------
151
+ >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
152
+ >>> fourier = np.fft.fft(signal)
153
+ >>> n = signal.size
154
+ >>> timestep = 0.1
155
+ >>> freq = np.fft.fftfreq(n, d=timestep)
156
+ >>> freq
157
+ array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25])
158
+
159
+ """
160
+ if not isinstance(n, integer_types):
161
+ raise ValueError("n should be an integer")
162
+ val = 1.0 / (n * d)
163
+ results = empty(n, int)
164
+ N = (n-1)//2 + 1
165
+ p1 = arange(0, N, dtype=int)
166
+ results[:N] = p1
167
+ p2 = arange(-(n//2), 0, dtype=int)
168
+ results[N:] = p2
169
+ return results * val
170
+
171
+
172
+ @set_module('numpy.fft')
173
+ def rfftfreq(n, d=1.0):
174
+ """
175
+ Return the Discrete Fourier Transform sample frequencies
176
+ (for usage with rfft, irfft).
177
+
178
+ The returned float array `f` contains the frequency bin centers in cycles
179
+ per unit of the sample spacing (with zero at the start). For instance, if
180
+ the sample spacing is in seconds, then the frequency unit is cycles/second.
181
+
182
+ Given a window length `n` and a sample spacing `d`::
183
+
184
+ f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even
185
+ f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd
186
+
187
+ Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
188
+ the Nyquist frequency component is considered to be positive.
189
+
190
+ Parameters
191
+ ----------
192
+ n : int
193
+ Window length.
194
+ d : scalar, optional
195
+ Sample spacing (inverse of the sampling rate). Defaults to 1.
196
+
197
+ Returns
198
+ -------
199
+ f : ndarray
200
+ Array of length ``n//2 + 1`` containing the sample frequencies.
201
+
202
+ Examples
203
+ --------
204
+ >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)
205
+ >>> fourier = np.fft.rfft(signal)
206
+ >>> n = signal.size
207
+ >>> sample_rate = 100
208
+ >>> freq = np.fft.fftfreq(n, d=1./sample_rate)
209
+ >>> freq
210
+ array([ 0., 10., 20., ..., -30., -20., -10.])
211
+ >>> freq = np.fft.rfftfreq(n, d=1./sample_rate)
212
+ >>> freq
213
+ array([ 0., 10., 20., 30., 40., 50.])
214
+
215
+ """
216
+ if not isinstance(n, integer_types):
217
+ raise ValueError("n should be an integer")
218
+ val = 1.0/(n*d)
219
+ N = n//2 + 1
220
+ results = arange(0, N, dtype=int)
221
+ return results * val