Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/__pycache__/swap.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/betweenness.py +435 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/closeness.py +281 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py +226 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/current_flow_closeness.py +97 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/degree_alg.py +149 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/dispersion.py +107 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/eigenvector.py +341 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/flow_matrix.py +130 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/group.py +785 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/katz.py +331 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/laplacian.py +146 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/percolation.py +128 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/second_order.py +138 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/subgraph_alg.py +340 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__init__.py +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py +780 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py +340 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py +306 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py +43 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py +73 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py +175 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py +115 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py +221 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py +344 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py +87 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_reaching.py +117 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py +82 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_voterank.py +65 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/trophic.py +162 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/voterank_alg.py +94 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/cycles.py +1230 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/capacityscaling.py +405 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/gomory_hu.py +177 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/maxflow.py +607 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/link_analysis/tests/__init__.py +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/polynomials.py +305 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/__init__.py +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py +212 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py +444 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/similarity.py +1710 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/sparsifiers.py +295 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_chains.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-311.pyc +0 -0
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/__pycache__/swap.cpython-311.pyc
ADDED
|
Binary file (15.8 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/betweenness.py
ADDED
|
@@ -0,0 +1,435 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Betweenness centrality measures."""
|
| 2 |
+
from collections import deque
|
| 3 |
+
from heapq import heappop, heappush
|
| 4 |
+
from itertools import count
|
| 5 |
+
|
| 6 |
+
import networkx as nx
|
| 7 |
+
from networkx.algorithms.shortest_paths.weighted import _weight_function
|
| 8 |
+
from networkx.utils import py_random_state
|
| 9 |
+
from networkx.utils.decorators import not_implemented_for
|
| 10 |
+
|
| 11 |
+
__all__ = ["betweenness_centrality", "edge_betweenness_centrality"]
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@py_random_state(5)
@nx._dispatch(edge_attrs="weight")
def betweenness_centrality(
    G, k=None, normalized=True, weight=None, endpoints=False, seed=None
):
    r"""Compute the shortest-path betweenness centrality for nodes.

    Betweenness centrality of a node $v$ is the sum of the
    fraction of all-pairs shortest paths that pass through $v$

    .. math::

       c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}

    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
    shortest $(s, t)$-paths, and $\sigma(s, t|v)$ is the number of
    those paths passing through some node $v$ other than $s, t$.
    If $s = t$, $\sigma(s, t) = 1$, and if $v \in {s, t}$,
    $\sigma(s, t|v) = 0$.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    k : int, optional (default=None)
        If k is not None use k node samples to estimate betweenness.
        The value of k <= n where n is the number of nodes in the graph.
        Higher values give better approximation.

    normalized : bool, optional
        If True the betweenness values are normalized by `2/((n-1)(n-2))`
        for graphs, and `1/((n-1)(n-2))` for directed graphs where `n`
        is the number of nodes in G.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        Weights are used to calculate weighted shortest paths, so they are
        interpreted as distances.

    endpoints : bool, optional
        If True include the endpoints in the shortest path counts.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        Note that this is only used if k is not None.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with betweenness centrality as the value.

    Notes
    -----
    The algorithm is from Ulrik Brandes, "A Faster Algorithm for
    Betweenness Centrality" (2001).  For approximate betweenness
    calculations set k=#samples to use k nodes ("pivots") to estimate
    the betweenness values.

    For weighted graphs the edge weights must be greater than zero.
    Zero edge weights can produce an infinite number of equal length
    paths between pairs of nodes.  The algorithm is not guaranteed to
    be correct if edge weights are floating point numbers.
    """
    betweenness = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
    if k is None:
        nodes = G
    else:
        # sample k pivot sources for approximate betweenness
        nodes = seed.sample(list(G.nodes()), k)
    for s in nodes:
        # single source shortest paths
        if weight is None:  # use BFS
            S, P, sigma, _ = _single_source_shortest_path_basic(G, s)
        else:  # use Dijkstra's algorithm
            S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight)
        # accumulation of pair dependencies (Brandes back-propagation)
        if endpoints:
            betweenness, _ = _accumulate_endpoints(betweenness, S, P, sigma, s)
        else:
            betweenness, _ = _accumulate_basic(betweenness, S, P, sigma, s)
    # rescaling: normalization and/or undirected double-count correction
    betweenness = _rescale(
        betweenness,
        len(G),
        normalized=normalized,
        directed=G.is_directed(),
        k=k,
        endpoints=endpoints,
    )
    return betweenness
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@py_random_state(4)
@nx._dispatch(edge_attrs="weight")
def edge_betweenness_centrality(G, k=None, normalized=True, weight=None, seed=None):
    r"""Compute betweenness centrality for edges.

    Betweenness centrality of an edge $e$ is the sum of the
    fraction of all-pairs shortest paths that pass through $e$

    .. math::

       c_B(e) =\sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)}

    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
    shortest $(s, t)$-paths, and $\sigma(s, t|e)$ is the number of
    those paths passing through edge $e$.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    k : int, optional (default=None)
        If k is not None use k node samples to estimate betweenness.
        The value of k <= n where n is the number of nodes in the graph.
        Higher values give better approximation.

    normalized : bool, optional
        If True the betweenness values are normalized by $2/(n(n-1))$
        for graphs, and $1/(n(n-1))$ for directed graphs where $n$
        is the number of nodes in G.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        Weights are used to calculate weighted shortest paths, so they are
        interpreted as distances.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        Note that this is only used if k is not None.

    Returns
    -------
    edges : dictionary
        Dictionary of edges with betweenness centrality as the value.

    Notes
    -----
    The algorithm is from Ulrik Brandes (2001).  For weighted graphs the
    edge weights must be greater than zero; zero edge weights can produce
    an infinite number of equal length paths between pairs of nodes.
    """
    betweenness = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
    # b[e]=0 for e in G.edges()
    betweenness.update(dict.fromkeys(G.edges(), 0.0))
    if k is None:
        nodes = G
    else:
        # sample k pivot sources for approximate betweenness
        nodes = seed.sample(list(G.nodes()), k)
    for s in nodes:
        # single source shortest paths
        if weight is None:  # use BFS
            S, P, sigma, _ = _single_source_shortest_path_basic(G, s)
        else:  # use Dijkstra's algorithm
            S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight)
        # accumulation of edge dependencies
        betweenness = _accumulate_edges(betweenness, S, P, sigma, s)
    # rescaling
    for n in G:  # remove nodes to only return edges
        del betweenness[n]
    betweenness = _rescale_e(
        betweenness, len(G), normalized=normalized, directed=G.is_directed()
    )
    if G.is_multigraph():
        # split each edge's value among minimum-weight parallel edges
        betweenness = _add_edge_keys(G, betweenness, weight=weight)
    return betweenness
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
# helpers for betweenness centrality
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def _single_source_shortest_path_basic(G, s):
|
| 256 |
+
S = []
|
| 257 |
+
P = {}
|
| 258 |
+
for v in G:
|
| 259 |
+
P[v] = []
|
| 260 |
+
sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G
|
| 261 |
+
D = {}
|
| 262 |
+
sigma[s] = 1.0
|
| 263 |
+
D[s] = 0
|
| 264 |
+
Q = deque([s])
|
| 265 |
+
while Q: # use BFS to find shortest paths
|
| 266 |
+
v = Q.popleft()
|
| 267 |
+
S.append(v)
|
| 268 |
+
Dv = D[v]
|
| 269 |
+
sigmav = sigma[v]
|
| 270 |
+
for w in G[v]:
|
| 271 |
+
if w not in D:
|
| 272 |
+
Q.append(w)
|
| 273 |
+
D[w] = Dv + 1
|
| 274 |
+
if D[w] == Dv + 1: # this is a shortest path, count paths
|
| 275 |
+
sigma[w] += sigmav
|
| 276 |
+
P[w].append(v) # predecessors
|
| 277 |
+
return S, P, sigma, D
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
def _single_source_dijkstra_path_basic(G, s, weight):
    """Dijkstra from *s* that also counts shortest paths.

    Returns ``(S, P, sigma, D)``: nodes in non-decreasing distance order,
    predecessor lists, shortest-path counts, and distances.
    """
    weight = _weight_function(G, weight)
    # modified from Eppstein
    S = []
    P = {}
    for v in G:
        P[v] = []
    sigma = dict.fromkeys(G, 0.0)  # sigma[v]=0 for v in G
    D = {}
    sigma[s] = 1.0
    push = heappush
    pop = heappop
    seen = {s: 0}
    c = count()  # tie-breaker so the heap never has to compare node objects
    Q = []  # use Q as heap with (distance, count, pred, node) tuples
    push(Q, (0, next(c), s, s))
    while Q:
        (dist, _, pred, v) = pop(Q)
        if v in D:
            continue  # already searched this node.
        sigma[v] += sigma[pred]  # count paths
        S.append(v)
        D[v] = dist
        for w, edgedata in G[v].items():
            vw_dist = dist + weight(v, w, edgedata)
            if w not in D and (w not in seen or vw_dist < seen[w]):
                # found a strictly shorter path to w: reset its counts/preds
                seen[w] = vw_dist
                push(Q, (vw_dist, next(c), v, w))
                sigma[w] = 0.0
                P[w] = [v]
            elif vw_dist == seen[w]:  # handle equal paths
                sigma[w] += sigma[v]
                P[w].append(v)
    return S, P, sigma, D
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
def _accumulate_basic(betweenness, S, P, sigma, s):
|
| 317 |
+
delta = dict.fromkeys(S, 0)
|
| 318 |
+
while S:
|
| 319 |
+
w = S.pop()
|
| 320 |
+
coeff = (1 + delta[w]) / sigma[w]
|
| 321 |
+
for v in P[w]:
|
| 322 |
+
delta[v] += sigma[v] * coeff
|
| 323 |
+
if w != s:
|
| 324 |
+
betweenness[w] += delta[w]
|
| 325 |
+
return betweenness, delta
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def _accumulate_endpoints(betweenness, S, P, sigma, s):
|
| 329 |
+
betweenness[s] += len(S) - 1
|
| 330 |
+
delta = dict.fromkeys(S, 0)
|
| 331 |
+
while S:
|
| 332 |
+
w = S.pop()
|
| 333 |
+
coeff = (1 + delta[w]) / sigma[w]
|
| 334 |
+
for v in P[w]:
|
| 335 |
+
delta[v] += sigma[v] * coeff
|
| 336 |
+
if w != s:
|
| 337 |
+
betweenness[w] += delta[w] + 1
|
| 338 |
+
return betweenness, delta
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def _accumulate_edges(betweenness, S, P, sigma, s):
|
| 342 |
+
delta = dict.fromkeys(S, 0)
|
| 343 |
+
while S:
|
| 344 |
+
w = S.pop()
|
| 345 |
+
coeff = (1 + delta[w]) / sigma[w]
|
| 346 |
+
for v in P[w]:
|
| 347 |
+
c = sigma[v] * coeff
|
| 348 |
+
if (v, w) not in betweenness:
|
| 349 |
+
betweenness[(w, v)] += c
|
| 350 |
+
else:
|
| 351 |
+
betweenness[(v, w)] += c
|
| 352 |
+
delta[v] += c
|
| 353 |
+
if w != s:
|
| 354 |
+
betweenness[w] += delta[w]
|
| 355 |
+
return betweenness
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def _rescale(betweenness, n, normalized, directed=False, k=None, endpoints=False):
|
| 359 |
+
if normalized:
|
| 360 |
+
if endpoints:
|
| 361 |
+
if n < 2:
|
| 362 |
+
scale = None # no normalization
|
| 363 |
+
else:
|
| 364 |
+
# Scale factor should include endpoint nodes
|
| 365 |
+
scale = 1 / (n * (n - 1))
|
| 366 |
+
elif n <= 2:
|
| 367 |
+
scale = None # no normalization b=0 for all nodes
|
| 368 |
+
else:
|
| 369 |
+
scale = 1 / ((n - 1) * (n - 2))
|
| 370 |
+
else: # rescale by 2 for undirected graphs
|
| 371 |
+
if not directed:
|
| 372 |
+
scale = 0.5
|
| 373 |
+
else:
|
| 374 |
+
scale = None
|
| 375 |
+
if scale is not None:
|
| 376 |
+
if k is not None:
|
| 377 |
+
scale = scale * n / k
|
| 378 |
+
for v in betweenness:
|
| 379 |
+
betweenness[v] *= scale
|
| 380 |
+
return betweenness
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def _rescale_e(betweenness, n, normalized, directed=False, k=None):
|
| 384 |
+
if normalized:
|
| 385 |
+
if n <= 1:
|
| 386 |
+
scale = None # no normalization b=0 for all nodes
|
| 387 |
+
else:
|
| 388 |
+
scale = 1 / (n * (n - 1))
|
| 389 |
+
else: # rescale by 2 for undirected graphs
|
| 390 |
+
if not directed:
|
| 391 |
+
scale = 0.5
|
| 392 |
+
else:
|
| 393 |
+
scale = None
|
| 394 |
+
if scale is not None:
|
| 395 |
+
if k is not None:
|
| 396 |
+
scale = scale * n / k
|
| 397 |
+
for v in betweenness:
|
| 398 |
+
betweenness[v] *= scale
|
| 399 |
+
return betweenness
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
@not_implemented_for("graph")
|
| 403 |
+
def _add_edge_keys(G, betweenness, weight=None):
|
| 404 |
+
r"""Adds the corrected betweenness centrality (BC) values for multigraphs.
|
| 405 |
+
|
| 406 |
+
Parameters
|
| 407 |
+
----------
|
| 408 |
+
G : NetworkX graph.
|
| 409 |
+
|
| 410 |
+
betweenness : dictionary
|
| 411 |
+
Dictionary mapping adjacent node tuples to betweenness centrality values.
|
| 412 |
+
|
| 413 |
+
weight : string or function
|
| 414 |
+
See `_weight_function` for details. Defaults to `None`.
|
| 415 |
+
|
| 416 |
+
Returns
|
| 417 |
+
-------
|
| 418 |
+
edges : dictionary
|
| 419 |
+
The parameter `betweenness` including edges with keys and their
|
| 420 |
+
betweenness centrality values.
|
| 421 |
+
|
| 422 |
+
The BC value is divided among edges of equal weight.
|
| 423 |
+
"""
|
| 424 |
+
_weight = _weight_function(G, weight)
|
| 425 |
+
|
| 426 |
+
edge_bc = dict.fromkeys(G.edges, 0.0)
|
| 427 |
+
for u, v in betweenness:
|
| 428 |
+
d = G[u][v]
|
| 429 |
+
wt = _weight(u, v, d)
|
| 430 |
+
keys = [k for k in d if _weight(u, v, {k: d[k]}) == wt]
|
| 431 |
+
bc = betweenness[(u, v)] / len(keys)
|
| 432 |
+
for k in keys:
|
| 433 |
+
edge_bc[(u, v, k)] = bc
|
| 434 |
+
|
| 435 |
+
return edge_bc
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/closeness.py
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Closeness centrality measures.
|
| 3 |
+
"""
|
| 4 |
+
import functools
|
| 5 |
+
|
| 6 |
+
import networkx as nx
|
| 7 |
+
from networkx.exception import NetworkXError
|
| 8 |
+
from networkx.utils.decorators import not_implemented_for
|
| 9 |
+
|
| 10 |
+
__all__ = ["closeness_centrality", "incremental_closeness_centrality"]
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@nx._dispatch(edge_attrs="distance")
|
| 14 |
+
def closeness_centrality(G, u=None, distance=None, wf_improved=True):
|
| 15 |
+
r"""Compute closeness centrality for nodes.
|
| 16 |
+
|
| 17 |
+
Closeness centrality [1]_ of a node `u` is the reciprocal of the
|
| 18 |
+
average shortest path distance to `u` over all `n-1` reachable nodes.
|
| 19 |
+
|
| 20 |
+
.. math::
|
| 21 |
+
|
| 22 |
+
C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
|
| 23 |
+
|
| 24 |
+
where `d(v, u)` is the shortest-path distance between `v` and `u`,
|
| 25 |
+
and `n-1` is the number of nodes reachable from `u`. Notice that the
|
| 26 |
+
closeness distance function computes the incoming distance to `u`
|
| 27 |
+
for directed graphs. To use outward distance, act on `G.reverse()`.
|
| 28 |
+
|
| 29 |
+
Notice that higher values of closeness indicate higher centrality.
|
| 30 |
+
|
| 31 |
+
Wasserman and Faust propose an improved formula for graphs with
|
| 32 |
+
more than one connected component. The result is "a ratio of the
|
| 33 |
+
fraction of actors in the group who are reachable, to the average
|
| 34 |
+
distance" from the reachable actors [2]_. You might think this
|
| 35 |
+
scale factor is inverted but it is not. As is, nodes from small
|
| 36 |
+
components receive a smaller closeness value. Letting `N` denote
|
| 37 |
+
the number of nodes in the graph,
|
| 38 |
+
|
| 39 |
+
.. math::
|
| 40 |
+
|
| 41 |
+
C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
|
| 42 |
+
|
| 43 |
+
Parameters
|
| 44 |
+
----------
|
| 45 |
+
G : graph
|
| 46 |
+
A NetworkX graph
|
| 47 |
+
|
| 48 |
+
u : node, optional
|
| 49 |
+
Return only the value for node u
|
| 50 |
+
|
| 51 |
+
distance : edge attribute key, optional (default=None)
|
| 52 |
+
Use the specified edge attribute as the edge distance in shortest
|
| 53 |
+
path calculations. If `None` (the default) all edges have a distance of 1.
|
| 54 |
+
Absent edge attributes are assigned a distance of 1. Note that no check
|
| 55 |
+
is performed to ensure that edges have the provided attribute.
|
| 56 |
+
|
| 57 |
+
wf_improved : bool, optional (default=True)
|
| 58 |
+
If True, scale by the fraction of nodes reachable. This gives the
|
| 59 |
+
Wasserman and Faust improved formula. For single component graphs
|
| 60 |
+
it is the same as the original formula.
|
| 61 |
+
|
| 62 |
+
Returns
|
| 63 |
+
-------
|
| 64 |
+
nodes : dictionary
|
| 65 |
+
Dictionary of nodes with closeness centrality as the value.
|
| 66 |
+
|
| 67 |
+
Examples
|
| 68 |
+
--------
|
| 69 |
+
>>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
|
| 70 |
+
>>> nx.closeness_centrality(G)
|
| 71 |
+
{0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75}
|
| 72 |
+
|
| 73 |
+
See Also
|
| 74 |
+
--------
|
| 75 |
+
betweenness_centrality, load_centrality, eigenvector_centrality,
|
| 76 |
+
degree_centrality, incremental_closeness_centrality
|
| 77 |
+
|
| 78 |
+
Notes
|
| 79 |
+
-----
|
| 80 |
+
The closeness centrality is normalized to `(n-1)/(|G|-1)` where
|
| 81 |
+
`n` is the number of nodes in the connected part of graph
|
| 82 |
+
containing the node. If the graph is not completely connected,
|
| 83 |
+
this algorithm computes the closeness centrality for each
|
| 84 |
+
connected part separately scaled by that parts size.
|
| 85 |
+
|
| 86 |
+
If the 'distance' keyword is set to an edge attribute key then the
|
| 87 |
+
shortest-path length will be computed using Dijkstra's algorithm with
|
| 88 |
+
that edge attribute as the edge weight.
|
| 89 |
+
|
| 90 |
+
The closeness centrality uses *inward* distance to a node, not outward.
|
| 91 |
+
If you want to use outword distances apply the function to `G.reverse()`
|
| 92 |
+
|
| 93 |
+
In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the
|
| 94 |
+
outward distance rather than the inward distance. If you use a 'distance'
|
| 95 |
+
keyword and a DiGraph, your results will change between v2.2 and v2.3.
|
| 96 |
+
|
| 97 |
+
References
|
| 98 |
+
----------
|
| 99 |
+
.. [1] Linton C. Freeman: Centrality in networks: I.
|
| 100 |
+
Conceptual clarification. Social Networks 1:215-239, 1979.
|
| 101 |
+
https://doi.org/10.1016/0378-8733(78)90021-7
|
| 102 |
+
.. [2] pg. 201 of Wasserman, S. and Faust, K.,
|
| 103 |
+
Social Network Analysis: Methods and Applications, 1994,
|
| 104 |
+
Cambridge University Press.
|
| 105 |
+
"""
|
| 106 |
+
if G.is_directed():
|
| 107 |
+
G = G.reverse() # create a reversed graph view
|
| 108 |
+
|
| 109 |
+
if distance is not None:
|
| 110 |
+
# use Dijkstra's algorithm with specified attribute as edge weight
|
| 111 |
+
path_length = functools.partial(
|
| 112 |
+
nx.single_source_dijkstra_path_length, weight=distance
|
| 113 |
+
)
|
| 114 |
+
else:
|
| 115 |
+
path_length = nx.single_source_shortest_path_length
|
| 116 |
+
|
| 117 |
+
if u is None:
|
| 118 |
+
nodes = G.nodes
|
| 119 |
+
else:
|
| 120 |
+
nodes = [u]
|
| 121 |
+
closeness_dict = {}
|
| 122 |
+
for n in nodes:
|
| 123 |
+
sp = path_length(G, n)
|
| 124 |
+
totsp = sum(sp.values())
|
| 125 |
+
len_G = len(G)
|
| 126 |
+
_closeness_centrality = 0.0
|
| 127 |
+
if totsp > 0.0 and len_G > 1:
|
| 128 |
+
_closeness_centrality = (len(sp) - 1.0) / totsp
|
| 129 |
+
# normalize to number of nodes-1 in connected part
|
| 130 |
+
if wf_improved:
|
| 131 |
+
s = (len(sp) - 1.0) / (len_G - 1)
|
| 132 |
+
_closeness_centrality *= s
|
| 133 |
+
closeness_dict[n] = _closeness_centrality
|
| 134 |
+
if u is not None:
|
| 135 |
+
return closeness_dict[u]
|
| 136 |
+
return closeness_dict
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
@not_implemented_for("directed")
@nx._dispatch
def incremental_closeness_centrality(
    G, edge, prev_cc=None, insertion=True, wf_improved=True
):
    r"""Incremental closeness centrality for nodes.

    Compute closeness centrality for nodes using level-based work filtering
    as described in "Incremental Algorithms for Closeness Centrality" by
    Sariyuce et al. [2]_.  The filtering relies on Theorem 1 of that paper:
    after inserting (or removing) edge ``(u, v)``, the closeness centrality
    of a node ``s`` is unchanged whenever
    :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`, where :math:`dG` is the
    shortest-path distance.  For such nodes the previously computed value is
    reused; all other nodes are recomputed from scratch.

    Only undirected, unweighted graphs are supported; there is no
    ``distance`` argument.

    Parameters
    ----------
    G : graph
        A NetworkX graph (undirected, unweighted).

    edge : tuple
        The modified edge (u, v) in the graph.

    prev_cc : dictionary
        The previous closeness centrality for all nodes in the graph.
        If None, the centrality is computed from scratch for the updated
        graph.

    insertion : bool, optional
        If True (default) the edge was inserted, otherwise it was deleted
        from the graph.

    wf_improved : bool, optional (default=True)
        If True, scale by the fraction of nodes reachable (the Wasserman
        and Faust improved formula). For single-component graphs it is the
        same as the original formula.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with closeness centrality as the value.

    Raises
    ------
    NetworkXError
        If ``prev_cc`` is given but its keys differ from the nodes of ``G``.

    References
    ----------
    .. [1] Freeman, L.C., 1979. Centrality in networks: I.
       Conceptual clarification. Social Networks 1, 215--239.
       https://doi.org/10.1016/0378-8733(78)90021-7
    .. [2] Sariyuce, A.E. ; Kaya, K. ; Saule, E. ; Catalyiirek, U.V.
       Incremental Algorithms for Closeness Centrality.
       2013 IEEE International Conference on Big Data
       http://sariyuce.com/papers/bigdata13.pdf
    """
    if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()):
        raise NetworkXError("prev_cc and G do not have the same nodes")

    u, v = edge
    path_length = nx.single_source_shortest_path_length

    if insertion:
        # For insertion, distances to u and v are measured *before* the
        # edge exists, then the edge is added.
        du = path_length(G, u)
        dv = path_length(G, v)
        G.add_edge(u, v)
    else:
        # For removal, distances are measured *after* the edge is gone.
        G.remove_edge(u, v)
        du = path_length(G, u)
        dv = path_length(G, v)

    if prev_cc is None:
        # NOTE(review): on this path G is returned to the caller still in
        # its modified state (the restore below is skipped) — matches the
        # original behavior; confirm callers expect this.
        return nx.closeness_centrality(G)

    node_count = len(G)
    closeness_dict = {}
    for n in G.nodes():
        # Theorem 1: the old value is still valid when the level difference
        # between n's distances to u and v is at most 1.
        if n in du and n in dv and abs(du[n] - dv[n]) <= 1:
            closeness_dict[n] = prev_cc[n]
            continue
        # Otherwise recompute closeness for n from scratch.
        sp = path_length(G, n)
        total_dist = sum(sp.values())
        cc = 0.0
        if total_dist > 0.0 and node_count > 1:
            reachable = len(sp) - 1.0
            cc = reachable / total_dist
            if wf_improved:
                # Scale by the fraction of nodes in n's connected part.
                cc *= reachable / (node_count - 1)
        closeness_dict[n] = cc

    # Leave the graph as we found it.
    if insertion:
        G.remove_edge(u, v)
    else:
        G.add_edge(u, v)

    return closeness_dict
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Current-flow betweenness centrality measures for subsets of nodes."""
|
| 2 |
+
import networkx as nx
|
| 3 |
+
from networkx.algorithms.centrality.flow_matrix import flow_matrix_row
|
| 4 |
+
from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
|
| 5 |
+
|
| 6 |
+
__all__ = [
|
| 7 |
+
"current_flow_betweenness_centrality_subset",
|
| 8 |
+
"edge_current_flow_betweenness_centrality_subset",
|
| 9 |
+
]
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@not_implemented_for("directed")
@nx._dispatch(edge_attrs="weight")
def current_flow_betweenness_centrality_subset(
    G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
):
    r"""Compute current-flow betweenness centrality for subsets of nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    sources: list of nodes
      Nodes to use as sources for current

    targets: list of nodes
      Nodes to use as sinks for current

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by b=b/(n-1)(n-2) where
      n is the number of nodes in G.

    weight : string or None, optional (default=None)
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.
      The weight reflects the capacity or the strength of the
      edge.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with betweenness centrality as the value.

    Raises
    ------
    NetworkXError
        If the graph is not connected.

    See Also
    --------
    approximate_current_flow_betweenness_centrality
    betweenness_centrality
    edge_betweenness_centrality
    edge_current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
    time [1]_, where $I(n-1)$ is the time needed to compute the
    inverse Laplacian. For a full matrix this is $O(n^3)$ but using
    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
    Laplacian matrix condition number.

    The space required is $O(nw)$ where $w$ is the width of the sparse
    Laplacian matrix. Worse case is $w=n$ for $O(n^2)$.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm. Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    import numpy as np

    # NOTE: reverse_cuthill_mckee_ordering is already imported at module
    # level; the redundant function-local re-import was removed.
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    mapping = dict(zip(ordering, range(n)))
    H = nx.relabel_nodes(G, mapping)
    betweenness = dict.fromkeys(H, 0.0)  # b[v]=0 for v in H
    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
        for ss in sources:
            i = mapping[ss]
            for tt in targets:
                j = mapping[tt]
                # Half the absolute current difference is credited to each
                # endpoint of the edge (s, t); compute it once per pair.
                contrib = 0.5 * np.abs(row[i] - row[j])
                betweenness[s] += contrib
                betweenness[t] += contrib
    if normalized:
        nb = (n - 1.0) * (n - 2.0)  # normalization factor
    else:
        nb = 2.0
    for v in H:
        betweenness[v] = betweenness[v] / nb + 1.0 / (2 - n)
    # Map integer labels back to the original node labels.
    return {ordering[k]: v for k, v in betweenness.items()}
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
@not_implemented_for("directed")
@nx._dispatch(edge_attrs="weight")
def edge_current_flow_betweenness_centrality_subset(
    G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
):
    r"""Compute current-flow betweenness centrality for edges using subsets
    of nodes.

    Current-flow betweenness centrality uses an electrical current model
    for information spreading, in contrast to betweenness centrality which
    uses shortest paths.  It is also known as random-walk betweenness
    centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    sources: list of nodes
      Nodes to use as sources for current

    targets: list of nodes
      Nodes to use as sinks for current

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by b=b/(n-1)(n-2) where
      n is the number of nodes in G.

    weight : string or None, optional (default=None)
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.
      The weight reflects the capacity or the strength of the edge.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    Returns
    -------
    nodes : dict
       Dictionary of edge tuples with betweenness centrality as the value.

    Raises
    ------
    NetworkXError
        If the graph is not connected.

    See Also
    --------
    betweenness_centrality
    edge_betweenness_centrality
    current_flow_betweenness_centrality

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    import numpy as np

    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # Relabel nodes as 0..n-1 following the RCM ordering to keep the
    # Laplacian bandwidth small for the sparse solvers.
    mapping = dict(zip(ordering, range(n)))
    H = nx.relabel_nodes(G, mapping)
    betweenness = {tuple(sorted(pair)): 0.0 for pair in H.edges()}
    nb = (n - 1.0) * (n - 2.0) if normalized else 2.0
    for row, e in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
        for src in sources:
            i = mapping[src]
            for tgt in targets:
                j = mapping[tgt]
                betweenness[e] += 0.5 * np.abs(row[i] - row[j])
        betweenness[e] /= nb
    # Translate the integer edge labels back to the original node labels.
    return {(ordering[a], ordering[b]): val for (a, b), val in betweenness.items()}
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/current_flow_closeness.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Current-flow closeness centrality measures."""
|
| 2 |
+
import networkx as nx
|
| 3 |
+
from networkx.algorithms.centrality.flow_matrix import (
|
| 4 |
+
CGInverseLaplacian,
|
| 5 |
+
FullInverseLaplacian,
|
| 6 |
+
SuperLUInverseLaplacian,
|
| 7 |
+
)
|
| 8 |
+
from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
|
| 9 |
+
|
| 10 |
+
__all__ = ["current_flow_closeness_centrality", "information_centrality"]
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@not_implemented_for("directed")
@nx._dispatch(edge_attrs="weight")
def current_flow_closeness_centrality(G, weight=None, dtype=float, solver="lu"):
    """Compute current-flow closeness centrality for nodes.

    Current-flow closeness centrality is a variant of closeness centrality
    based on effective resistance between nodes in a network.  This metric
    is also known as information centrality.

    Parameters
    ----------
    G : graph
      A NetworkX graph.

    weight : None or string, optional (default=None)
      If None, all edge weights are considered equal.
      Otherwise holds the name of the edge attribute used as weight.
      The weight reflects the capacity or the strength of the edge.

    dtype: data type (default=float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with current flow closeness centrality as the
       value.

    Raises
    ------
    NetworkXError
        If the graph is not connected.

    See Also
    --------
    closeness_centrality

    Notes
    -----
    The algorithm is from Brandes [1]_.  See also [2]_ for the original
    definition of information centrality.

    References
    ----------
    .. [1] Ulrik Brandes and Daniel Fleischer,
       Centrality Measures Based on Current Flow.
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44

    .. [2] Karen Stephenson and Marvin Zelen:
       Rethinking centrality: Methods and examples.
       Social Networks 11(1):1-37, 1989.
       https://doi.org/10.1016/0378-8733(89)90016-6
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    # Map solver keyword to the corresponding inverse-Laplacian class.
    solvers = {
        "full": FullInverseLaplacian,
        "lu": SuperLUInverseLaplacian,
        "cg": CGInverseLaplacian,
    }
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # Relabel to 0..n-1 following the RCM ordering (reduces Laplacian
    # bandwidth); this makes a copy of the graph.
    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
    centrality = dict.fromkeys(H, 0.0)
    n = H.number_of_nodes()
    L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc")
    L = L.astype(dtype)
    inv_lap = solvers[solver](L, width=1, dtype=dtype)  # initialize solver
    for v in H:
        col = inv_lap.get_row(v)
        for w in H:
            centrality[v] += col[v] - 2 * col[w]
            centrality[w] += col[v]
    # Closeness is the reciprocal of the accumulated effective resistance.
    for v in H:
        centrality[v] = 1 / centrality[v]
    return {ordering[k]: val for k, val in centrality.items()}
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
information_centrality = current_flow_closeness_centrality
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/degree_alg.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Degree centrality measures."""
|
| 2 |
+
import networkx as nx
|
| 3 |
+
from networkx.utils.decorators import not_implemented_for
|
| 4 |
+
|
| 5 |
+
__all__ = ["degree_centrality", "in_degree_centrality", "out_degree_centrality"]
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@nx._dispatch
def degree_centrality(G):
    """Compute the degree centrality for nodes.

    The degree centrality for a node v is the fraction of nodes it
    is connected to.

    Parameters
    ----------
    G : graph
      A networkx graph

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with degree centrality as the value.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.degree_centrality(G)
    {0: 1.0, 1: 1.0, 2: 0.6666666666666666, 3: 0.6666666666666666}

    See Also
    --------
    betweenness_centrality, load_centrality, eigenvector_centrality

    Notes
    -----
    The degree centrality values are normalized by dividing by the maximum
    possible degree in a simple graph n-1 where n is the number of nodes in G.

    For multigraphs or graphs with self loops the maximum degree might
    be higher than n-1 and values of degree centrality greater than 1
    are possible.
    """
    node_count = len(G)
    # Trivial graphs: every node gets centrality 1 by convention.
    if node_count <= 1:
        return {node: 1 for node in G}

    scale = 1.0 / (node_count - 1.0)
    return {node: deg * scale for node, deg in G.degree()}
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@not_implemented_for("undirected")
@nx._dispatch
def in_degree_centrality(G):
    """Compute the in-degree centrality for nodes.

    The in-degree centrality for a node v is the fraction of nodes its
    incoming edges are connected to.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with in-degree centrality as values.

    Raises
    ------
    NetworkXNotImplemented
        If G is undirected.

    Examples
    --------
    >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.in_degree_centrality(G)
    {0: 0.0, 1: 0.3333333333333333, 2: 0.6666666666666666, 3: 0.6666666666666666}

    See Also
    --------
    degree_centrality, out_degree_centrality

    Notes
    -----
    The degree centrality values are normalized by dividing by the maximum
    possible degree in a simple graph n-1 where n is the number of nodes in G.

    For multigraphs or graphs with self loops the maximum degree might
    be higher than n-1 and values of degree centrality greater than 1
    are possible.
    """
    node_count = len(G)
    # Trivial graphs: every node gets centrality 1 by convention.
    if node_count <= 1:
        return {node: 1 for node in G}

    scale = 1.0 / (node_count - 1.0)
    return {node: deg * scale for node, deg in G.in_degree()}
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@not_implemented_for("undirected")
@nx._dispatch
def out_degree_centrality(G):
    """Compute the out-degree centrality for nodes.

    The out-degree centrality for a node v is the fraction of nodes its
    outgoing edges are connected to.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with out-degree centrality as values.

    Raises
    ------
    NetworkXNotImplemented
        If G is undirected.

    Examples
    --------
    >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.out_degree_centrality(G)
    {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0}

    See Also
    --------
    degree_centrality, in_degree_centrality

    Notes
    -----
    The degree centrality values are normalized by dividing by the maximum
    possible degree in a simple graph n-1 where n is the number of nodes in G.

    For multigraphs or graphs with self loops the maximum degree might
    be higher than n-1 and values of degree centrality greater than 1
    are possible.
    """
    node_count = len(G)
    # Trivial graphs: every node gets centrality 1 by convention.
    if node_count <= 1:
        return {node: 1 for node in G}

    scale = 1.0 / (node_count - 1.0)
    return {node: deg * scale for node, deg in G.out_degree()}
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/dispersion.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import combinations
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
|
| 5 |
+
__all__ = ["dispersion"]
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@nx._dispatch
|
| 9 |
+
def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0):
|
| 10 |
+
r"""Calculate dispersion between `u` and `v` in `G`.
|
| 11 |
+
|
| 12 |
+
A link between two actors (`u` and `v`) has a high dispersion when their
|
| 13 |
+
mutual ties (`s` and `t`) are not well connected with each other.
|
| 14 |
+
|
| 15 |
+
Parameters
|
| 16 |
+
----------
|
| 17 |
+
G : graph
|
| 18 |
+
A NetworkX graph.
|
| 19 |
+
u : node, optional
|
| 20 |
+
The source for the dispersion score (e.g. ego node of the network).
|
| 21 |
+
v : node, optional
|
| 22 |
+
The target of the dispersion score if specified.
|
| 23 |
+
normalized : bool
|
| 24 |
+
If True (default) normalize by the embeddedness of the nodes (u and v).
|
| 25 |
+
alpha, b, c : float
|
| 26 |
+
Parameters for the normalization procedure. When `normalized` is True,
|
| 27 |
+
the dispersion value is normalized by::
|
| 28 |
+
|
| 29 |
+
result = ((dispersion + b) ** alpha) / (embeddedness + c)
|
| 30 |
+
|
| 31 |
+
as long as the denominator is nonzero.
|
| 32 |
+
|
| 33 |
+
Returns
|
| 34 |
+
-------
|
| 35 |
+
nodes : dictionary
|
| 36 |
+
If u (v) is specified, returns a dictionary of nodes with dispersion
|
| 37 |
+
score for all "target" ("source") nodes. If neither u nor v is
|
| 38 |
+
specified, returns a dictionary of dictionaries for all nodes 'u' in the
|
| 39 |
+
graph with a dispersion score for each node 'v'.
|
| 40 |
+
|
| 41 |
+
Notes
|
| 42 |
+
-----
|
| 43 |
+
This implementation follows Lars Backstrom and Jon Kleinberg [1]_. Typical
|
| 44 |
+
usage would be to run dispersion on the ego network $G_u$ if $u$ were
|
| 45 |
+
specified. Running :func:`dispersion` with neither $u$ nor $v$ specified
|
| 46 |
+
can take some time to complete.
|
| 47 |
+
|
| 48 |
+
References
|
| 49 |
+
----------
|
| 50 |
+
.. [1] Romantic Partnerships and the Dispersion of Social Ties:
|
| 51 |
+
A Network Analysis of Relationship Status on Facebook.
|
| 52 |
+
Lars Backstrom, Jon Kleinberg.
|
| 53 |
+
https://arxiv.org/pdf/1310.6753v1.pdf
|
| 54 |
+
|
| 55 |
+
"""
|
| 56 |
+
|
| 57 |
+
    def _dispersion(G_u, u, v):
        """dispersion for all nodes 'v' in a ego network G_u of node 'u'

        NOTE: ``normalized``, ``alpha``, ``b`` and ``c`` are read from the
        enclosing ``dispersion()`` scope (closure variables).
        """
        # Neighbors of the ego node u.
        u_nbrs = set(G_u[u])
        # ST: the mutual neighbors of u and v (their shared ties).
        ST = {n for n in G_u[v] if n in u_nbrs}
        set_uv = {u, v}
        # all possible ties of connections that u and v share
        possib = combinations(ST, 2)
        total = 0
        for s, t in possib:
            # neighbors of s that are in G_u, not including u and v
            nbrs_s = u_nbrs.intersection(G_u[s]) - set_uv
            # s and t are not directly connected
            if t not in nbrs_s:
                # s and t do not share a connection
                if nbrs_s.isdisjoint(G_u[t]):
                    # tick for disp(u, v)
                    total += 1
        # neighbors that u and v share
        embeddedness = len(ST)

        dispersion_val = total
        if normalized:
            # Normalization from Backstrom & Kleinberg:
            # ((disp + b) ** alpha) / (embeddedness + c), guarding the
            # denominator against zero.
            dispersion_val = (total + b) ** alpha
            if embeddedness + c != 0:
                dispersion_val /= embeddedness + c

        return dispersion_val
|
| 84 |
+
|
| 85 |
+
if u is None:
|
| 86 |
+
# v and u are not specified
|
| 87 |
+
if v is None:
|
| 88 |
+
results = {n: {} for n in G}
|
| 89 |
+
for u in G:
|
| 90 |
+
for v in G[u]:
|
| 91 |
+
results[u][v] = _dispersion(G, u, v)
|
| 92 |
+
# u is not specified, but v is
|
| 93 |
+
else:
|
| 94 |
+
results = dict.fromkeys(G[v], {})
|
| 95 |
+
for u in G[v]:
|
| 96 |
+
results[u] = _dispersion(G, v, u)
|
| 97 |
+
else:
|
| 98 |
+
# u is specified with no target v
|
| 99 |
+
if v is None:
|
| 100 |
+
results = dict.fromkeys(G[u], {})
|
| 101 |
+
for v in G[u]:
|
| 102 |
+
results[v] = _dispersion(G, u, v)
|
| 103 |
+
# both u and v are specified
|
| 104 |
+
else:
|
| 105 |
+
results = _dispersion(G, u, v)
|
| 106 |
+
|
| 107 |
+
return results
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/eigenvector.py
ADDED
|
@@ -0,0 +1,341 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Functions for computing eigenvector centrality."""
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import networkx as nx
|
| 5 |
+
from networkx.utils import not_implemented_for
|
| 6 |
+
|
| 7 |
+
__all__ = ["eigenvector_centrality", "eigenvector_centrality_numpy"]
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@not_implemented_for("multigraph")
@nx._dispatch(edge_attrs="weight")
def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, weight=None):
    r"""Compute the eigenvector centrality for the graph G.

    The centrality of node $i$ is the $i$-th entry of a nonnegative left
    eigenvector $x$ of the adjacency matrix $A$ associated with the
    eigenvalue $\lambda$ of maximum modulus:

    .. math::

        \lambda x^T = x^T A,

    equivalently $\lambda x_i = \sum_{j\to i} x_j$, so each node
    accumulates the centrality of its predecessors.  For undirected
    graphs this is the familiar right-eigenvector equation
    $Ax = \lambda x$.  By the Perron-Frobenius theorem, if G is strongly
    connected this eigenvector is unique and strictly positive; otherwise
    several such eigenvectors may exist and some entries may be zero.

    Parameters
    ----------
    G : graph
        A networkx graph.

    max_iter : integer, optional (default=100)
        Maximum number of power iterations.

    tol : float, optional (default=1.0e-6)
        Error tolerance used to check convergence of the power iteration
        (the iteration stops once the L1 change between successive
        vectors is below ``G.number_of_nodes() * tol``).

    nstart : dictionary, optional (default=None)
        Starting value of the power iteration for each node.  Must have a
        nonzero projection on the desired eigenvector for the method to
        converge.  If None, an all-ones vector is used, which is a safe
        choice.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.  Otherwise holds
        the name of the edge attribute used as weight, interpreted as the
        connection strength.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with eigenvector centrality as the value.
        The associated vector has unit Euclidean norm and nonnegative
        entries.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> centrality = nx.eigenvector_centrality(G)
    >>> sorted((v, f"{c:0.2f}") for v, c in centrality.items())
    [(0, '0.37'), (1, '0.60'), (2, '0.60'), (3, '0.37')]

    Raises
    ------
    NetworkXPointlessConcept
        If the graph G is the null graph.

    NetworkXError
        If each value in `nstart` is zero.

    PowerIterationFailedConvergence
        If the algorithm fails to converge within ``max_iter`` iterations
        of the power method.

    See Also
    --------
    eigenvector_centrality_numpy
    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`

    Notes
    -----
    This function computes the left dominant eigenvector, which
    corresponds to adding the centrality of predecessors: this is the
    usual approach.  To add the centrality of successors first reverse
    the graph with ``G.reverse()``.

    Power iteration is applied to $(A + I)$ rather than $A$: the shift
    preserves eigenvectors while shifting the spectrum, guaranteeing
    convergence even for networks whose eigenvalue of maximum modulus is
    negative.
    """
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept(
            "cannot compute centrality for the null graph"
        )
    # Default start vector: all ones always has a nonzero projection on
    # the dominant eigenvector.
    if nstart is None:
        nstart = dict.fromkeys(G, 1)
    if not any(nstart.values()):
        raise nx.NetworkXError("initial vector cannot have all zero values")
    # Scale the start vector into [0, 1]; the sum is nonzero thanks to
    # the check just above.
    total = sum(nstart.values())
    x = {node: value / total for node, value in nstart.items()}
    nnodes = G.number_of_nodes()
    for _ in range(max_iter):
        previous = x
        # Multiply by (A + I): start with the identity contribution ...
        x = dict(previous)
        # ... then add the adjacency part, y^T = x^T A (left eigenvector).
        for node, score in previous.items():
            for nbr, data in G[node].items():
                x[nbr] += score * (data.get(weight, 1) if weight else 1)
        # Euclidean normalization.  The norm should never be zero by
        # Perron-Frobenius; fall back to 1 in case of numerical error.
        norm = math.hypot(*x.values()) or 1
        x = {node: score / norm for node, score in x.items()}
        # Converged once the L1 change drops below n * tol.
        if sum(abs(x[node] - previous[node]) for node in x) < nnodes * tol:
            return x
    raise nx.PowerIterationFailedConvergence(max_iter)
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
@nx._dispatch(edge_attrs="weight")
def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0):
    r"""Compute the eigenvector centrality for the graph G.

    The centrality of node $i$ is the $i$-th entry of a nonnegative left
    eigenvector $x$ of the adjacency matrix $A$ associated with the
    eigenvalue $\lambda$ of maximum modulus:

    .. math::

        \lambda x^T = x^T A,

    equivalently $\lambda x_i = \sum_{j\to i} x_j$, so each node
    accumulates the centrality of its predecessors.  For undirected
    graphs this is the familiar right-eigenvector equation
    $Ax = \lambda x$.  By the Perron-Frobenius theorem, if G is strongly
    connected this eigenvector is unique and strictly positive; otherwise
    several such eigenvectors may exist and some entries may be zero.

    Parameters
    ----------
    G : graph
        A networkx graph.

    max_iter : integer, optional (default=50)
        Maximum number of Arnoldi update iterations allowed.

    tol : float, optional (default=0)
        Relative accuracy for eigenvalues (stopping criterion).
        The default value of 0 implies machine precision.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.  Otherwise holds
        the name of the edge attribute used as weight, interpreted as the
        connection strength.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with eigenvector centrality as the value.
        The associated vector has unit Euclidean norm and nonnegative
        entries.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> centrality = nx.eigenvector_centrality_numpy(G)
    >>> print([f"{node} {centrality[node]:0.2f}" for node in centrality])
    ['0 0.37', '1 0.60', '2 0.60', '3 0.37']

    Raises
    ------
    NetworkXPointlessConcept
        If the graph G is the null graph.

    ArpackNoConvergence
        When the requested convergence is not obtained.  The currently
        converged eigenvalues and eigenvectors can be found as
        eigenvalues and eigenvectors attributes of the exception object.

    See Also
    --------
    :func:`scipy.sparse.linalg.eigs`
    eigenvector_centrality
    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`

    Notes
    -----
    This function computes the left dominant eigenvector, which
    corresponds to adding the centrality of predecessors: this is the
    usual approach.  To add the centrality of successors first reverse
    the graph with ``G.reverse()``.

    The implementation delegates to the
    :func:`SciPy sparse eigenvalue solver<scipy.sparse.linalg.eigs>`
    (ARPACK), finding the eigenpair of largest real part via Arnoldi
    iterations.
    """
    import numpy as np
    import scipy as sp

    if len(G) == 0:
        raise nx.NetworkXPointlessConcept(
            "cannot compute centrality for the null graph"
        )
    # Adjacency matrix in the graph's node order; transpose so ARPACK's
    # right eigenvector of A^T is the left eigenvector of A.
    A = nx.to_scipy_sparse_array(G, nodelist=list(G), weight=weight, dtype=float)
    _, vec = sp.sparse.linalg.eigs(A.T, k=1, which="LR", maxiter=max_iter, tol=tol)
    dominant = vec.flatten().real
    # Scale to unit Euclidean norm, flipping sign so entries come out
    # nonnegative (the eigenvector is defined only up to sign).
    scale = np.sign(dominant.sum()) * sp.linalg.norm(dominant)
    return {node: value for node, value in zip(G, dominant / scale)}
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/flow_matrix.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Helpers for current-flow betweenness and current-flow closeness
|
| 2 |
+
# Lazy computations for inverse Laplacian and flow-matrix rows.
|
| 3 |
+
import networkx as nx
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@nx._dispatch(edge_attrs="weight")
def flow_matrix_row(G, weight=None, dtype=float, solver="lu"):
    """Lazily yield one row of the current-flow matrix per edge of G.

    Each yielded item is ``(row, (u, v))`` where ``row`` is the flow-matrix
    row for edge ``(u, v)``.  ``solver`` selects how rows of the inverse
    Laplacian are obtained: "full" (dense inverse), "lu" (SuperLU
    factorization) or "cg" (preconditioned conjugate gradient).
    """
    import numpy as np

    solver_classes = {
        "full": FullInverseLaplacian,
        "lu": SuperLUInverseLaplacian,
        "cg": CGInverseLaplacian,
    }
    n = G.number_of_nodes()
    # Laplacian in CSC form with nodes relabeled 0..n-1.
    laplacian = nx.laplacian_matrix(G, nodelist=range(n), weight=weight)
    laplacian = laplacian.asformat("csc").astype(dtype)
    inverse = solver_classes[solver](laplacian, dtype=dtype)  # initialize solver
    width = inverse.w  # rolling-buffer width used by the solver
    # Visit edges in sorted order, each with its endpoints sorted.
    for u, v in sorted(sorted(edge) for edge in G.edges()):
        rhs = np.zeros(width, dtype=dtype)
        conductance = G[u][v].get(weight, 1.0)
        rhs[u % width] = conductance
        rhs[v % width] = -conductance
        # Pull only the needed rows of the inverse Laplacian and project
        # them onto the edge indicator to get this flow-matrix row.
        yield rhs @ inverse.get_rows(u, v), (u, v)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# Class to compute the inverse laplacian only for specified rows
|
| 34 |
+
# Allows computation of the current-flow matrix without storing entire
|
| 35 |
+
# inverse laplacian matrix
|
| 36 |
+
class InverseLaplacian:
    """Compute selected rows of the inverse of a graph Laplacian.

    Keeps only a rolling buffer ``C`` of ``w`` rows, so the full inverse
    is never materialized.  Subclasses provide the actual linear solves
    via ``init_solver``, ``solve`` and ``solve_inverse``.
    """

    def __init__(self, L, width=None, dtype=None):
        global np
        import numpy as np

        (n, n) = L.shape
        self.dtype = dtype
        self.n = n
        # Buffer width: caller-supplied, or the bandwidth of L.
        self.w = self.width(L) if width is None else width
        self.C = np.zeros((self.w, n), dtype=dtype)
        # Reduced ("grounded") Laplacian with row/column 0 removed.
        self.L1 = L[1:, 1:]
        self.init_solver(L)

    def init_solver(self, L):
        # Hook for subclasses; the base class needs no preparation.
        pass

    def solve(self, r):
        raise nx.NetworkXError("Implement solver")

    def solve_inverse(self, r):
        raise nx.NetworkXError("Implement solver")

    def get_rows(self, r1, r2):
        # Fill the rolling buffer with rows r1..r2 (inclusive) of the
        # inverse, leaving column 0 at zero, and return the whole buffer.
        for row_index in range(r1, r2 + 1):
            self.C[row_index % self.w, 1:] = self.solve_inverse(row_index)
        return self.C

    def get_row(self, r):
        # Compute a single row of the inverse into the rolling buffer.
        self.C[r % self.w, 1:] = self.solve_inverse(r)
        return self.C[r % self.w]

    def width(self, L):
        # Bandwidth of L: the widest spread of nonzeros about the diagonal.
        best = 0
        for i, row in enumerate(L):
            _, cols = np.nonzero(row)
            if len(cols) > 0:
                offsets = cols - i
                best = max(best, offsets.max() - offsets.min() + 1)
        return best
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class FullInverseLaplacian(InverseLaplacian):
    """Solver that materializes the dense inverse of the reduced Laplacian."""

    def init_solver(self, L):
        # Invert the grounded Laplacian densely; the stored inverse keeps
        # row/column 0 at zero to match the full-size indexing.
        self.IL = np.zeros(L.shape, dtype=self.dtype)
        self.IL[1:, 1:] = np.linalg.inv(self.L1.todense())

    def solve(self, rhs):
        # A solve is just a matrix-vector product with the cached inverse.
        return self.IL @ rhs

    def solve_inverse(self, r):
        # Row r of the inverse, excluding the grounded column 0.
        return self.IL[r, 1:]
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class SuperLUInverseLaplacian(InverseLaplacian):
    """Solver backed by a SuperLU factorization of the reduced Laplacian."""

    def init_solver(self, L):
        import scipy as sp

        # Factor once; every later solve reuses the LU decomposition.
        self.lusolve = sp.sparse.linalg.factorized(self.L1.tocsc())

    def solve_inverse(self, r):
        # Solve against the r-th unit vector (restricted to the reduced
        # system) to obtain row r of the inverse.
        unit = np.zeros(self.n, dtype=self.dtype)
        unit[r] = 1
        return self.lusolve(unit[1:])

    def solve(self, rhs):
        # Entry 0 stays zero (the grounded node); solve the rest.
        result = np.zeros(rhs.shape, dtype=self.dtype)
        result[1:] = self.lusolve(rhs[1:])
        return result
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class CGInverseLaplacian(InverseLaplacian):
    """Solver using preconditioned conjugate gradients on the reduced Laplacian."""

    def init_solver(self, L):
        global sp
        import scipy as sp

        # Incomplete-LU factors of the reduced Laplacian serve as the CG
        # preconditioner, wrapped in a LinearOperator.
        ilu = sp.sparse.linalg.spilu(self.L1.tocsc())
        m = self.n - 1
        self.M = sp.sparse.linalg.LinearOperator(shape=(m, m), matvec=ilu.solve)

    def solve(self, rhs):
        # Entry 0 stays zero (the grounded node); CG solves the rest.
        result = np.zeros(rhs.shape, dtype=self.dtype)
        result[1:] = sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0]
        return result

    def solve_inverse(self, r):
        # Row r of the inverse via a CG solve against the r-th unit vector.
        unit = np.zeros(self.n, self.dtype)
        unit[r] = 1
        return sp.sparse.linalg.cg(self.L1, unit[1:], M=self.M, atol=0)[0]
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/group.py
ADDED
|
@@ -0,0 +1,785 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Group centrality measures."""
|
| 2 |
+
from copy import deepcopy
|
| 3 |
+
|
| 4 |
+
import networkx as nx
|
| 5 |
+
from networkx.algorithms.centrality.betweenness import (
|
| 6 |
+
_accumulate_endpoints,
|
| 7 |
+
_single_source_dijkstra_path_basic,
|
| 8 |
+
_single_source_shortest_path_basic,
|
| 9 |
+
)
|
| 10 |
+
from networkx.utils.decorators import not_implemented_for
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
"group_betweenness_centrality",
|
| 14 |
+
"group_closeness_centrality",
|
| 15 |
+
"group_degree_centrality",
|
| 16 |
+
"group_in_degree_centrality",
|
| 17 |
+
"group_out_degree_centrality",
|
| 18 |
+
"prominent_group",
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@nx._dispatch(edge_attrs="weight")
def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False):
    r"""Compute the group betweenness centrality for a group of nodes.

    Group betweenness centrality of a group of nodes $C$ is the sum of the
    fraction of all-pairs shortest paths that pass through any vertex in $C$

    .. math::

       c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}

    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
    shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of
    those paths passing through some node in group $C$. Note that
    $(s, t)$ are not members of the group ($V-C$ is the set of nodes
    in $V$ that are not in $C$).

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    C : list or set or list of lists or list of sets
        A group or a list of groups containing nodes which belong to G, for
        which group betweenness centrality is to be calculated.

    normalized : bool, optional (default=True)
        If True, group betweenness is normalized by `1/((|V|-|C|)(|V|-|C|-1))`
        where `|V|` is the number of nodes in G and `|C|` is the number of
        nodes in C.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        The weight of an edge is treated as the length or distance between
        the two sides.

    endpoints : bool, optional (default=False)
        If True include the endpoints in the shortest path counts.

    Raises
    ------
    NodeNotFound
        If node(s) in C are not present in G.

    Returns
    -------
    betweenness : list of floats or float
        If C is a single group then return a float. If C is a list with
        several groups then return a list of group betweenness centralities.

    See Also
    --------
    betweenness_centrality

    Notes
    -----
    Group betweenness centrality is described in [1]_ and its importance
    discussed in [3]_. The initial implementation of the algorithm is
    mentioned in [2]_. This function uses an improved algorithm presented
    in [4]_.

    The number of nodes in the group must be a maximum of n - 2 where `n`
    is the total number of nodes in the graph.

    For weighted graphs the edge weights must be greater than zero.
    Zero edge weights can produce an infinite number of equal length
    paths between pairs of nodes.

    The total number of paths between source and target is counted
    differently for directed and undirected graphs. Directed paths
    between "u" and "v" are counted as two possible paths (one each
    direction) while undirected paths between "u" and "v" are counted
    as one path. Said another way, the sum in the expression above is
    over all ``s != t`` for directed graphs and for ``s < t`` for
    undirected graphs.

    References
    ----------
    .. [1] M G Everett and S P Borgatti:
       The Centrality of Groups and Classes.
       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
       http://www.analytictech.com/borgatti/group_centrality.htm
    .. [2] Ulrik Brandes:
       On Variants of Shortest-Path Betweenness
       Centrality and their Generic Computation.
       Social Networks 30(2):136-145, 2008.
       http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.9610&rep=rep1&type=pdf
    .. [3] Sourav Medya et. al.:
       Group Centrality Maximization via Network Design.
       SIAM International Conference on Data Mining, SDM 2018, 126–134.
       https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf
    .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev.
       "Fast algorithm for successive computation of group betweenness
       centrality."
       https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709
    """
    GBC = []  # one group-betweenness value per group in C
    list_of_groups = True
    # Check whether C is a single group (its elements are nodes of G) or a
    # list of groups; normalize to a list of groups either way.
    if any(el in G for el in C):
        C = [C]
        list_of_groups = False
    set_v = {node for group in C for node in group}
    if set_v - G.nodes:  # element(s) of C not in G
        raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are in C but not in G.")

    # Pre-processing: PB is the path-betweenness matrix, sigma the shortest
    # path counts, and D the shortest path lengths, all computed once and
    # shared by every group (each group works on deep copies below).
    PB, sigma, D = _group_preprocessing(G, set_v, weight)

    # The successive algorithm of [4]_: nodes of the group are "absorbed"
    # one at a time, and sigma/PB are updated to discount paths already
    # covered by previously absorbed nodes.
    for group in C:
        group = set(group)  # set of nodes in group
        # Working copies: *_m holds the matrices after absorbing the nodes
        # processed so far, *_m_v receives the next update.
        GBC_group = 0
        sigma_m = deepcopy(sigma)
        PB_m = deepcopy(PB)
        sigma_m_v = deepcopy(sigma_m)
        PB_m_v = deepcopy(PB_m)
        for v in group:
            # The diagonal entry PB_m[v][v] is v's contribution given the
            # nodes already absorbed.
            GBC_group += PB_m[v][v]
            for x in group:
                for y in group:
                    # dxvy: fraction of (x, y) paths through v
                    # dxyv: fraction of (x, v) paths through y
                    # dvxy: fraction of (v, y) paths through x
                    dxvy = 0
                    dxyv = 0
                    dvxy = 0
                    if not (
                        sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0
                    ):
                        if D[x][v] == D[x][y] + D[y][v]:
                            dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v]
                        if D[x][y] == D[x][v] + D[v][y]:
                            dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y]
                        if D[v][y] == D[v][x] + D[x][y]:
                            # NOTE(review): this branch mixes the updated
                            # counts (sigma_m) with the original counts
                            # (sigma), unlike the two branches above which
                            # use sigma_m throughout — confirm this is
                            # intentional for groups with more than one node.
                            dvxy = sigma_m[v][x] * sigma[x][y] / sigma[v][y]
                    # Discount paths through v from the running matrices.
                    sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy)
                    PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy
                    if y != v:
                        PB_m_v[x][y] -= PB_m[x][v] * dxyv
                    if x != v:
                        PB_m_v[x][y] -= PB_m[v][y] * dvxy
            # Swap the double buffers so the next node sees the updates.
            sigma_m, sigma_m_v = sigma_m_v, sigma_m
            PB_m, PB_m_v = PB_m_v, PB_m

        # endpoints
        v, c = len(G), len(group)
        if not endpoints:
            scale = 0
            # If the graph is connected then subtract the endpoints from
            # the count for all the nodes in the graph. Else count how many
            # nodes are connected to the group's nodes and subtract that.
            if nx.is_directed(G):
                if nx.is_strongly_connected(G):
                    scale = c * (2 * v - c - 1)
            elif nx.is_connected(G):
                scale = c * (2 * v - c - 1)
            if scale == 0:
                for group_node1 in group:
                    for node in D[group_node1]:
                        if node != group_node1:
                            if node in group:
                                scale += 1
                            else:
                                scale += 2
            GBC_group -= scale

        # normalized
        if normalized:
            scale = 1 / ((v - c) * (v - c - 1))
            GBC_group *= scale

        # If undirected then count only the undirected edges
        elif not G.is_directed():
            GBC_group /= 2

        GBC.append(GBC_group)
    if list_of_groups:
        return GBC
    return GBC[0]
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def _group_preprocessing(G, set_v, weight):
    """Compute the shared matrices used by the group-betweenness routines.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    set_v : iterable
        Nodes for which rows of the path-betweenness matrix are built
        (the candidate/group nodes).
    weight : None or string
        Edge attribute to use as distance; None means BFS (unit weights).

    Returns
    -------
    PB : dict of dict
        Path-betweenness matrix; ``PB[u][w]`` accumulates, over all sources,
        the dependency-weighted fraction of shortest paths that traverse the
        pair ``(u, w)``. Rows exist only for nodes in ``set_v``.
    sigma : dict of dict
        ``sigma[s][t]`` is the number of shortest s-t paths (halved for
        weighted graphs after the endpoint rescaling below).
    D : dict of dict
        ``D[s][t]`` is the shortest-path distance from ``s`` to ``t``.
    """
    sigma = {}
    delta = {}
    D = {}
    betweenness = dict.fromkeys(G, 0)
    for s in G:
        if weight is None:  # use BFS
            S, P, sigma[s], D[s] = _single_source_shortest_path_basic(G, s)
        else:  # use Dijkstra's algorithm
            S, P, sigma[s], D[s] = _single_source_dijkstra_path_basic(G, s, weight)
        # Brandes back-accumulation including endpoints; delta[s] holds the
        # dependency of s on every reached node.
        betweenness, delta[s] = _accumulate_endpoints(betweenness, S, P, sigma[s], s)
        for i in delta[s]:  # add the paths from s to i and rescale sigma
            if s != i:
                delta[s][i] += 1
                if weight is not None:
                    sigma[s][i] = sigma[s][i] / 2
    # Build the path betweenness matrix only for nodes that appear in the group.
    PB = dict.fromkeys(G)
    for group_node1 in set_v:
        PB[group_node1] = dict.fromkeys(G, 0.0)
        for group_node2 in set_v:
            if group_node2 not in D[group_node1]:
                # group_node2 unreachable from group_node1: no contribution.
                continue
            for node in G:
                # Only sources that reach both group nodes can contribute.
                if group_node2 in D[node] and group_node1 in D[node]:
                    # Contribute only when group_node1 lies on a shortest
                    # path from node to group_node2.
                    if (
                        D[node][group_node2]
                        == D[node][group_node1] + D[group_node1][group_node2]
                    ):
                        PB[group_node1][group_node2] += (
                            delta[node][group_node2]
                            * sigma[node][group_node1]
                            * sigma[group_node1][group_node2]
                            / sigma[node][group_node2]
                        )
    return PB, sigma, D
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
@nx._dispatch(edge_attrs="weight")
def prominent_group(
    G, k, weight=None, C=None, endpoints=False, normalized=True, greedy=False
):
    r"""Find the prominent group of size $k$ in graph $G$. The prominence of the
    group is evaluated by the group betweenness centrality.

    Group betweenness centrality of a group of nodes $C$ is the sum of the
    fraction of all-pairs shortest paths that pass through any vertex in $C$

    .. math::

       c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}

    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
    shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of
    those paths passing through some node in group $C$. Note that
    $(s, t)$ are not members of the group ($V-C$ is the set of nodes
    in $V$ that are not in $C$).

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    k : int
        The number of nodes in the group.

    normalized : bool, optional (default=True)
        If True, group betweenness is normalized by ``1/((|V|-|C|)(|V|-|C|-1))``
        where ``|V|`` is the number of nodes in G and ``|C|`` is the number of
        nodes in C.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        The weight of an edge is treated as the length or distance between
        the two sides.

    endpoints : bool, optional (default=False)
        If True include the endpoints in the shortest path counts.

    C : list or set, optional (default=None)
        list of nodes which won't be candidates of the prominent group.

    greedy : bool, optional (default=False)
        Using a naive greedy algorithm in order to find non-optimal prominent
        group. For scale free networks the results are negligibly below the
        optimal results.

    Raises
    ------
    NodeNotFound
        If node(s) in C are not present in G.

    Returns
    -------
    max_GBC : float
        The group betweenness centrality of the prominent group.

    max_group : list
        The list of nodes in the prominent group.

    See Also
    --------
    betweenness_centrality, group_betweenness_centrality

    Notes
    -----
    Group betweenness centrality is described in [1]_ and its importance
    discussed in [3]_. The algorithm is described in [2]_ and is based on
    techniques mentioned in [4]_.

    The number of nodes in the group must be a maximum of ``n - 2`` where
    ``n`` is the total number of nodes in the graph.

    For weighted graphs the edge weights must be greater than zero.
    Zero edge weights can produce an infinite number of equal length
    paths between pairs of nodes.

    The total number of paths between source and target is counted
    differently for directed and undirected graphs. Directed paths
    between "u" and "v" are counted as two possible paths (one each
    direction) while undirected paths between "u" and "v" are counted
    as one path. Said another way, the sum in the expression above is
    over all ``s != t`` for directed graphs and for ``s < t`` for
    undirected graphs.

    References
    ----------
    .. [1] M G Everett and S P Borgatti:
       The Centrality of Groups and Classes.
       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
       http://www.analytictech.com/borgatti/group_centrality.htm
    .. [2] Rami Puzis, Yuval Elovici, and Shlomi Dolev:
       "Finding the Most Prominent Group in Complex Networks"
       AI communications 20(4): 287-296, 2007.
       https://www.researchgate.net/profile/Rami_Puzis2/publication/220308855
    .. [3] Sourav Medya et. al.:
       Group Centrality Maximization via Network Design.
       SIAM International Conference on Data Mining, SDM 2018, 126–134.
       https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf
    .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev.
       "Fast algorithm for successive computation of group betweenness
       centrality."
       https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709
    """
    import numpy as np
    import pandas as pd

    # Nodes listed in C are excluded from the candidate pool.
    if C is not None:
        C = set(C)
        if C - G.nodes:  # element(s) of C not in G
            raise nx.NodeNotFound(f"The node(s) {C - G.nodes} are in C but not in G.")
        nodes = list(G.nodes - C)
    else:
        nodes = list(G.nodes)
    # DF_tree is the branch-and-bound search tree; each tree node carries the
    # search state (candidate list, matrices, bound) as node attributes.
    DF_tree = nx.Graph()
    PB, sigma, D = _group_preprocessing(G, nodes, weight)
    betweenness = pd.DataFrame.from_dict(PB)
    if C is not None:
        for node in C:
            # remove from the betweenness all the nodes not part of the group
            betweenness.drop(index=node, inplace=True)
            betweenness.drop(columns=node, inplace=True)
    # CL: candidate list sorted by decreasing individual contribution
    # (the diagonal of the path-betweenness matrix).
    CL = [node for _, node in sorted(zip(np.diag(betweenness), nodes), reverse=True)]
    max_GBC = 0
    max_group = []
    # Root of the search tree: empty group, full candidate list.
    DF_tree.add_node(
        1,
        CL=CL,
        betweenness=betweenness,
        GBC=0,
        GM=[],
        sigma=sigma,
        cont=dict(zip(nodes, np.diag(betweenness))),
    )

    # The algorithm: seed the root's optimistic bound with the k best
    # candidate contributions, then run depth-first branch and bound.
    DF_tree.nodes[1]["heu"] = 0
    for i in range(k):
        DF_tree.nodes[1]["heu"] += DF_tree.nodes[1]["cont"][DF_tree.nodes[1]["CL"][i]]
    max_GBC, DF_tree, max_group = _dfbnb(
        G, k, DF_tree, max_GBC, 1, D, max_group, nodes, greedy
    )

    v = len(G)
    if not endpoints:
        scale = 0
        # If the graph is connected then subtract the endpoints from
        # the count for all the nodes in the graph. Else count how many
        # nodes are connected to the group's nodes and subtract that.
        if nx.is_directed(G):
            if nx.is_strongly_connected(G):
                scale = k * (2 * v - k - 1)
        elif nx.is_connected(G):
            scale = k * (2 * v - k - 1)
        if scale == 0:
            for group_node1 in max_group:
                for node in D[group_node1]:
                    if node != group_node1:
                        if node in max_group:
                            scale += 1
                        else:
                            scale += 2
        max_GBC -= scale

    # normalized
    if normalized:
        scale = 1 / ((v - k) * (v - k - 1))
        max_GBC *= scale

    # If undirected then count only the undirected edges
    elif not G.is_directed():
        max_GBC /= 2
    # Result is rounded to two decimal places.
    max_GBC = float("%.2f" % max_GBC)
    return max_GBC, max_group
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
def _dfbnb(G, k, DF_tree, max_GBC, root, D, max_group, nodes, greedy):
    """Depth-first branch-and-bound step of :func:`prominent_group`.

    Examines the search-tree node ``root``; either records it as the new
    incumbent solution, prunes it, or expands it into two children (take /
    skip the top candidate) via ``_heuristic`` and recurses. Returns the
    (possibly updated) ``(max_GBC, DF_tree, max_group)`` triple.
    """
    state = DF_tree.nodes[root]

    # A complete group of size k that beats the incumbent becomes the new best.
    if len(state["GM"]) == k and state["GBC"] > max_GBC:
        return state["GBC"], DF_tree, state["GM"]

    # Prune when: the group is already full (and did not beat the incumbent),
    # there are too few candidates left to complete it, or the optimistic
    # bound GBC + heu cannot exceed the incumbent.
    slots_left = k - len(state["GM"])
    if (
        len(state["GM"]) == k
        or len(state["CL"]) <= slots_left
        or state["GBC"] + state["heu"] <= max_GBC
    ):
        return max_GBC, DF_tree, max_group

    # Expand: node_p takes the top candidate into the group, node_m skips it.
    node_p, node_m, DF_tree = _heuristic(k, root, DF_tree, D, nodes, greedy)

    if greedy:
        # Greedy mode never backtracks: follow only the "take" branch.
        max_GBC, DF_tree, max_group = _dfbnb(
            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
        )
        return max_GBC, DF_tree, max_group

    # Otherwise visit the child with the larger optimistic bound first, so
    # good incumbents are found early and prune the second branch harder.
    bound_p = DF_tree.nodes[node_p]["GBC"] + DF_tree.nodes[node_p]["heu"]
    bound_m = DF_tree.nodes[node_m]["GBC"] + DF_tree.nodes[node_m]["heu"]
    visit_order = (node_p, node_m) if bound_p > bound_m else (node_m, node_p)
    for child in visit_order:
        max_GBC, DF_tree, max_group = _dfbnb(
            G, k, DF_tree, max_GBC, child, D, max_group, nodes, greedy
        )
    return max_GBC, DF_tree, max_group
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
def _heuristic(k, root, DF_tree, D, nodes, greedy):
    """Expand search-tree node ``root`` into its two children.

    Adds to ``DF_tree`` a "plus" child (``node_p``: the top candidate is
    inserted into the group and the sigma/betweenness matrices are updated
    to discount paths through it) and, unless ``greedy``, a "minus" child
    (``node_m``: the top candidate is removed from the candidate list).
    Both children get their heuristic bound ``heu`` recomputed.

    Returns ``(node_p, node_m, DF_tree)``; ``node_m`` is None when greedy.
    """
    import numpy as np

    # This helper function adds two nodes to DF_tree - one left son and the
    # other right son, finds their heuristic, CL, GBC, and GM.
    node_p = DF_tree.number_of_nodes() + 1
    node_m = DF_tree.number_of_nodes() + 2
    # The candidate to branch on: head of the root's sorted candidate list.
    added_node = DF_tree.nodes[root]["CL"][0]

    # Adding the plus node: copy the root state, then absorb added_node.
    DF_tree.add_nodes_from([(node_p, deepcopy(DF_tree.nodes[root]))])
    DF_tree.nodes[node_p]["GM"].append(added_node)
    DF_tree.nodes[node_p]["GBC"] += DF_tree.nodes[node_p]["cont"][added_node]
    root_node = DF_tree.nodes[root]
    for x in nodes:
        for y in nodes:
            # dxvy: fraction of (x, y) paths through added_node
            # dxyv: fraction of (x, added_node) paths through y
            # dvxy: fraction of (added_node, y) paths through x
            dxvy = 0
            dxyv = 0
            dvxy = 0
            if not (
                root_node["sigma"][x][y] == 0
                or root_node["sigma"][x][added_node] == 0
                or root_node["sigma"][added_node][y] == 0
            ):
                if D[x][added_node] == D[x][y] + D[y][added_node]:
                    dxyv = (
                        root_node["sigma"][x][y]
                        * root_node["sigma"][y][added_node]
                        / root_node["sigma"][x][added_node]
                    )
                if D[x][y] == D[x][added_node] + D[added_node][y]:
                    dxvy = (
                        root_node["sigma"][x][added_node]
                        * root_node["sigma"][added_node][y]
                        / root_node["sigma"][x][y]
                    )
                if D[added_node][y] == D[added_node][x] + D[x][y]:
                    dvxy = (
                        root_node["sigma"][added_node][x]
                        * root_node["sigma"][x][y]
                        / root_node["sigma"][added_node][y]
                    )
            # Discount paths through added_node from the child's matrices.
            # NOTE(review): "betweenness" here is a pandas DataFrame, so
            # [x][y] is chained column-then-row indexing — confirm writes
            # land on the frame (no copy) for the pandas version in use.
            DF_tree.nodes[node_p]["sigma"][x][y] = root_node["sigma"][x][y] * (1 - dxvy)
            DF_tree.nodes[node_p]["betweenness"][x][y] = (
                root_node["betweenness"][x][y] - root_node["betweenness"][x][y] * dxvy
            )
            if y != added_node:
                DF_tree.nodes[node_p]["betweenness"][x][y] -= (
                    root_node["betweenness"][x][added_node] * dxyv
                )
            if x != added_node:
                DF_tree.nodes[node_p]["betweenness"][x][y] -= (
                    root_node["betweenness"][added_node][y] * dvxy
                )

    # Re-sort the plus child's candidate list by the updated contributions
    # (diagonal of the updated betweenness matrix), excluding group members.
    DF_tree.nodes[node_p]["CL"] = [
        node
        for _, node in sorted(
            zip(np.diag(DF_tree.nodes[node_p]["betweenness"]), nodes), reverse=True
        )
        if node not in DF_tree.nodes[node_p]["GM"]
    ]
    DF_tree.nodes[node_p]["cont"] = dict(
        zip(nodes, np.diag(DF_tree.nodes[node_p]["betweenness"]))
    )
    # Optimistic bound: sum of the best remaining contributions needed to
    # fill the group to size k.
    DF_tree.nodes[node_p]["heu"] = 0
    for i in range(k - len(DF_tree.nodes[node_p]["GM"])):
        DF_tree.nodes[node_p]["heu"] += DF_tree.nodes[node_p]["cont"][
            DF_tree.nodes[node_p]["CL"][i]
        ]

    # Adding the minus node - don't insert the first node in the CL to GM.
    # Insert minus node only if isn't greedy type algorithm.
    if not greedy:
        DF_tree.add_nodes_from([(node_m, deepcopy(DF_tree.nodes[root]))])
        DF_tree.nodes[node_m]["CL"].pop(0)
        DF_tree.nodes[node_m]["cont"].pop(added_node)
        DF_tree.nodes[node_m]["heu"] = 0
        for i in range(k - len(DF_tree.nodes[node_m]["GM"])):
            DF_tree.nodes[node_m]["heu"] += DF_tree.nodes[node_m]["cont"][
                DF_tree.nodes[node_m]["CL"][i]
            ]
    else:
        node_m = None

    return node_p, node_m, DF_tree
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
@nx._dispatch(edge_attrs="weight")
def group_closeness_centrality(G, S, weight=None):
    r"""Return the group closeness centrality of the node group `S`.

    The group closeness of $S$ is the number of nodes outside the group
    divided by the total distance from the group to those nodes:

    .. math::

        c_{close}(S) = \frac{|V-S|}{\sum_{v \in V-S} d_{S, v}},
        \qquad d_{S, v} = \min_{u \in S} d_{u, v}

    where $V$ is the node set of `G` and $V-S$ the nodes outside the group.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    S : list or set
        Group of nodes belonging to `G` whose closeness is computed.

    weight : None or string, optional (default=None)
        If None, every edge counts as distance 1; otherwise the name of the
        edge attribute holding the edge length/distance.

    Raises
    ------
    NodeNotFound
        If node(s) in S are not present in G.

    Returns
    -------
    closeness : float
        Group closeness centrality of the group S. Higher values indicate
        greater centrality.

    See Also
    --------
    closeness_centrality

    Notes
    -----
    The measure was introduced in [1]_; the formula implemented here is
    described in [2]_.

    1 / 0 is assumed to be 0, so a group that reaches no outside node
    (and the case of a zero total distance) yields closeness 0.

    The group may contain at most n - 1 nodes, where `n` is the number of
    nodes in the graph.

    For directed graphs the incoming distance is used; to use the outward
    distance, act on ``G.reverse()``.

    For weighted graphs the edge weights must be greater than zero, since
    zero weights can produce infinitely many equal-length paths.

    References
    ----------
    .. [1] M G Everett and S P Borgatti:
       The Centrality of Groups and Classes.
       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
       http://www.analytictech.com/borgatti/group_centrality.htm
    .. [2] J. Zhao et. al.:
       Measuring and Maximizing Group Closeness Centrality over
       Disk Resident Graphs.
       WWWConference Proceedings, 2014. 689-694.
       https://doi.org/10.1145/2567948.2579356
    """
    if G.is_directed():
        # Incoming distances are wanted, so measure on the reverse view.
        G = G.reverse()
    group = set(S)
    outside = set(G) - group
    distances = nx.multi_source_dijkstra_path_length(G, group, weight=weight)
    # Nodes with no path from the group simply contribute nothing.
    total_distance = sum(distances.get(node, 0) for node in outside)
    if total_distance == 0:
        # 1 / 0 is assumed to be 0 (disconnected or empty remainder).
        return 0
    return len(outside) / total_distance
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
@nx._dispatch
def group_degree_centrality(G, S):
    """Return the group degree centrality of the node group `S`.

    Group degree centrality of a group of nodes $S$ is the fraction of
    nodes outside the group that are adjacent to at least one group member.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    S : list or set
        Group of nodes belonging to `G` whose degree centrality is computed.

    Raises
    ------
    NetworkXError
        If node(s) in S are not in G.

    Returns
    -------
    centrality : float
        Group degree centrality of the group S.

    See Also
    --------
    degree_centrality
    group_in_degree_centrality
    group_out_degree_centrality

    Notes
    -----
    The measure was introduced in [1]_.

    The group may contain at most n - 1 nodes, where `n` is the number of
    nodes in the graph.

    References
    ----------
    .. [1] M G Everett and S P Borgatti:
       The Centrality of Groups and Classes.
       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
       http://www.analytictech.com/borgatti/group_centrality.htm
    """
    members = set(S)
    # Neighbors of any group member that are themselves outside the group.
    boundary = {nbr for node in S for nbr in G.neighbors(node)} - members
    return len(boundary) / (len(G.nodes()) - len(members))
|
| 692 |
+
|
| 693 |
+
|
| 694 |
+
@not_implemented_for("undirected")
@nx._dispatch
def group_in_degree_centrality(G, S):
    """Return the group in-degree centrality of the node group `S`.

    Group in-degree centrality of a group of nodes $S$ is the fraction of
    nodes outside the group that have an edge pointing into the group.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    S : list or set
        Group of nodes belonging to `G` whose in-degree centrality is
        computed.

    Returns
    -------
    centrality : float
        Group in-degree centrality of the group S.

    Raises
    ------
    NetworkXNotImplemented
        If G is undirected.

    NodeNotFound
        If node(s) in S are not in G.

    See Also
    --------
    degree_centrality
    group_degree_centrality
    group_out_degree_centrality

    Notes
    -----
    The group may contain at most n - 1 nodes, where `n` is the number of
    nodes in the graph.

    In a DiGraph, `G.neighbors(i)` yields the successors of i, so the
    reverse graph is used here to count incoming edges instead.
    """
    reversed_view = G.reverse()
    return group_degree_centrality(reversed_view, S)
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
@not_implemented_for("undirected")
@nx._dispatch
def group_out_degree_centrality(G, S):
    """Return the group out-degree centrality of the node group `S`.

    Group out-degree centrality of a group of nodes $S$ is the fraction of
    nodes outside the group reached by an edge leaving the group.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    S : list or set
        Group of nodes belonging to `G` whose out-degree centrality is
        computed.

    Returns
    -------
    centrality : float
        Group out-degree centrality of the group S.

    Raises
    ------
    NetworkXNotImplemented
        If G is undirected.

    NodeNotFound
        If node(s) in S are not in G.

    See Also
    --------
    degree_centrality
    group_degree_centrality
    group_in_degree_centrality

    Notes
    -----
    The group may contain at most n - 1 nodes, where `n` is the number of
    nodes in the graph.

    In a DiGraph, `G.neighbors(i)` yields the successors of i, so the
    graph itself already counts outgoing edges.
    """
    return group_degree_centrality(G, S)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/katz.py
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Katz centrality."""
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import networkx as nx
|
| 5 |
+
from networkx.utils import not_implemented_for
|
| 6 |
+
|
| 7 |
+
__all__ = ["katz_centrality", "katz_centrality_numpy"]
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@not_implemented_for("multigraph")
@nx._dispatch(edge_attrs="weight")
def katz_centrality(
    G,
    alpha=0.1,
    beta=1.0,
    max_iter=1000,
    tol=1.0e-6,
    nstart=None,
    normalized=True,
    weight=None,
):
    r"""Compute the Katz centrality for the nodes of the graph G.

    Katz centrality generalizes eigenvector centrality: each node's score
    is an attenuated sum of its neighbors' scores plus a constant bias,

    .. math::

        x_i = \alpha \sum_{j} A_{ij} x_j + \beta,

    where $A$ is the adjacency matrix of G. For the power iteration used
    here to converge, $\alpha$ must be strictly less than
    $1 / \lambda_{\max}$, the inverse of the largest eigenvalue of $A$.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    alpha : float, optional (default=0.1)
        Attenuation factor

    beta : scalar or dictionary, optional (default=1.0)
        Weight attributed to the immediate neighborhood. If not a scalar,
        the dictionary must have a value for every node.

    max_iter : integer, optional (default=1000)
        Maximum number of iterations in power method.

    tol : float, optional (default=1.0e-6)
        Error tolerance used to check convergence in power method iteration.

    nstart : dictionary, optional
        Starting value of Katz iteration for each node.

    normalized : bool, optional (default=True)
        If True normalize the resulting values.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        In this measure the weight is interpreted as the connection strength.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with Katz centrality as the value.

    Raises
    ------
    NetworkXError
        If the parameter `beta` is not a scalar but lacks a value for at
        least one node

    PowerIterationFailedConvergence
        If the algorithm fails to converge to the specified tolerance
        within the specified number of iterations of the power iteration
        method.

    Examples
    --------
    >>> import math
    >>> G = nx.path_graph(4)
    >>> phi = (1 + math.sqrt(5)) / 2.0  # largest eigenvalue of adj matrix
    >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01)
    >>> for n, c in sorted(centrality.items()):
    ...     print(f"{n} {c:.2f}")
    0 0.37
    1 0.60
    2 0.60
    3 0.37

    See Also
    --------
    katz_centrality_numpy
    eigenvector_centrality
    eigenvector_centrality_numpy
    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`

    Notes
    -----
    Katz centrality was introduced by [2]_.

    The iteration stops once the total L1 change between successive
    vectors drops below ``number_of_nodes(G) * tol``, or fails after
    ``max_iter`` rounds. When $\alpha = 1/\lambda_{\max}$ and $\beta=0$,
    Katz centrality is the same as eigenvector centrality.

    For directed graphs this finds "left" eigenvectors which correspond
    to the in-edges in the graph. For out-edges Katz centrality,
    first reverse the graph with ``G.reverse()``.

    References
    ----------
    .. [1] Mark E. J. Newman:
       Networks: An Introduction.
       Oxford University Press, USA, 2010, p. 720.
    .. [2] Leo Katz:
       A New Status Index Derived from Sociometric Index.
       Psychometrika 18(1):39–43, 1953
       https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
    """
    if len(G) == 0:
        return {}

    n_nodes = G.number_of_nodes()

    # Starting vector: all zeros unless the caller supplied one.
    x = {node: 0 for node in G} if nstart is None else nstart

    # Resolve beta into a per-node mapping; a scalar is broadcast to all
    # nodes, a mapping must already cover every node.
    try:
        b = dict.fromkeys(G, float(beta))
    except (TypeError, ValueError, AttributeError) as err:
        b = beta
        if set(beta) != set(G):
            raise nx.NetworkXError(
                "beta dictionary must have a value for every node"
            ) from err

    for _ in range(max_iter):
        xlast = x
        x = dict.fromkeys(xlast, 0)
        # y^T = Alpha * x^T A + Beta: propagate scores along out-edges...
        for node, score in xlast.items():
            for nbr, data in G[node].items():
                x[nbr] += score * data.get(weight, 1)
        # ...then attenuate and add the per-node bias.
        for node in x:
            x[node] = alpha * x[node] + b[node]

        # Converged once the total L1 change is below n * tol.
        if sum(abs(x[node] - xlast[node]) for node in x) < n_nodes * tol:
            if normalized:
                try:
                    scale = 1.0 / math.hypot(*x.values())
                # this should never be zero?
                except ZeroDivisionError:
                    scale = 1.0
            else:
                scale = 1
            for node in x:
                x[node] *= scale
            return x
    raise nx.PowerIterationFailedConvergence(max_iter)
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
@not_implemented_for("multigraph")
@nx._dispatch(edge_attrs="weight")
def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None):
    r"""Compute the Katz centrality for the graph G.

    Katz centrality generalizes eigenvector centrality: each node's score
    is an attenuated sum of its neighbors' scores plus a constant bias,

    .. math::

        x_i = \alpha \sum_{j} A_{ij} x_j + \beta,

    where $A$ is the adjacency matrix of G. A solution of the linear
    system exists when $\alpha$ is strictly less than
    $1 / \lambda_{\max}$, the inverse of the largest eigenvalue of $A$.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    alpha : float
        Attenuation factor

    beta : scalar or dictionary, optional (default=1.0)
        Weight attributed to the immediate neighborhood. If not a scalar
        the dictionary must have a value for every node.

    normalized : bool
        If True normalize the resulting values.

    weight : None or string, optional
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        In this measure the weight is interpreted as the connection strength.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with Katz centrality as the value.

    Raises
    ------
    NetworkXError
        If the parameter `beta` is not a scalar but lacks a value for at
        least one node

    Examples
    --------
    >>> import math
    >>> G = nx.path_graph(4)
    >>> phi = (1 + math.sqrt(5)) / 2.0  # largest eigenvalue of adj matrix
    >>> centrality = nx.katz_centrality_numpy(G, 1 / phi)
    >>> for n, c in sorted(centrality.items()):
    ...     print(f"{n} {c:.2f}")
    0 0.37
    1 0.60
    2 0.60
    3 0.37

    See Also
    --------
    katz_centrality
    eigenvector_centrality_numpy
    eigenvector_centrality
    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`

    Notes
    -----
    Katz centrality was introduced by [2]_.

    This version solves the linear system directly rather than iterating.
    When $\alpha = 1/\lambda_{\max}$ and $\beta=0$, Katz centrality is
    the same as eigenvector centrality.

    For directed graphs this finds "left" eigenvectors which correspond
    to the in-edges in the graph. For out-edges Katz centrality,
    first reverse the graph with ``G.reverse()``.

    References
    ----------
    .. [1] Mark E. J. Newman:
       Networks: An Introduction.
       Oxford University Press, USA, 2010, p. 173.
    .. [2] Leo Katz:
       A New Status Index Derived from Sociometric Index.
       Psychometrika 18(1):39–43, 1953
       https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
    """
    import numpy as np

    if len(G) == 0:
        return {}

    # Build the node ordering and the beta column-vector together: a dict
    # beta fixes the ordering via its keys, a scalar beta is broadcast.
    try:
        nodelist = beta.keys()
    except AttributeError:
        nodelist = list(G)
        try:
            b = np.ones((len(nodelist), 1)) * beta
        except (TypeError, ValueError, AttributeError) as err:
            raise nx.NetworkXError("beta must be a number") from err
    else:
        if set(nodelist) != set(G):
            raise nx.NetworkXError("beta dictionary must have a value for every node")
        b = np.array(list(beta.values()), dtype=float)

    # Transpose so the solve yields "left" eigenvector style scores.
    A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight).todense().T
    n = A.shape[0]
    # Solve (I - alpha * A) x = b directly.
    centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b).squeeze()

    # Normalize: rely on truediv to cast to float
    if normalized:
        norm = np.sign(sum(centrality)) * np.linalg.norm(centrality)
    else:
        norm = 1
    return dict(zip(nodelist, centrality / norm))
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/laplacian.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Laplacian centrality measures.
|
| 3 |
+
"""
|
| 4 |
+
import networkx as nx
|
| 5 |
+
|
| 6 |
+
__all__ = ["laplacian_centrality"]
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@nx._dispatch(edge_attrs="weight")
def laplacian_centrality(
    G, normalized=True, nodelist=None, weight="weight", walk_type=None, alpha=0.95
):
    r"""Compute the Laplacian centrality for nodes in the graph `G`.

    The Laplacian Centrality of a node ``i`` is measured by the drop in the
    Laplacian Energy after deleting node ``i`` from the graph. The Laplacian Energy
    is the sum of the squared eigenvalues of a graph's Laplacian matrix.

    .. math::

        C_L(u_i,G) = \frac{(\Delta E)_i}{E_L (G)} = \frac{E_L (G)-E_L (G_i)}{E_L (G)}

        E_L (G) = \sum_{i=0}^n \lambda_i^2

    Where $E_L (G)$ is the Laplacian energy of graph `G`,
    E_L (G_i) is the Laplacian energy of graph `G` after deleting node ``i``
    and $\lambda_i$ are the eigenvalues of `G`'s Laplacian matrix.
    This formula shows the normalized value. Without normalization,
    the numerator on the right side is returned.

    Parameters
    ----------
    G : graph
        A networkx graph

    normalized : bool (default = True)
        If True the centrality score is scaled so the sum over all nodes is 1.
        If False the centrality score for each node is the drop in Laplacian
        energy when that node is removed.

    nodelist : list, optional (default = None)
        The rows and columns are ordered according to the nodes in nodelist.
        If nodelist is None, then the ordering is produced by G.nodes().

    weight: string or None, optional (default=`weight`)
        Optional parameter `weight` to compute the Laplacian matrix.
        The edge data key used to compute each value in the matrix.
        If None, then each edge has weight 1.

    walk_type : string or None, optional (default=None)
        Optional parameter `walk_type` used when calling
        :func:`directed_laplacian_matrix <networkx.directed_laplacian_matrix>`.
        If None, the transition matrix is selected depending on the properties
        of the graph. Otherwise can be `random`, `lazy`, or `pagerank`.

    alpha : real (default = 0.95)
        Optional parameter `alpha` used when calling
        :func:`directed_laplacian_matrix <networkx.directed_laplacian_matrix>`.
        (1 - alpha) is the teleportation probability used with pagerank.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with Laplacian centrality as the value.

    Examples
    --------
    >>> G = nx.Graph()
    >>> edges = [(0, 1, 4), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 2), (4, 5, 1)]
    >>> G.add_weighted_edges_from(edges)
    >>> sorted((v, f"{c:0.2f}") for v, c in laplacian_centrality(G).items())
    [(0, '0.70'), (1, '0.90'), (2, '0.28'), (3, '0.22'), (4, '0.26'), (5, '0.04')]

    Notes
    -----
    The algorithm is implemented based on [1]_ with an extension to directed graphs
    using the ``directed_laplacian_matrix`` function.

    Raises
    ------
    NetworkXPointlessConcept
        If the graph `G` is the null graph.
    ZeroDivisionError
        If the graph `G` has no edges (is empty) and normalization is requested.

    References
    ----------
    .. [1] Qi, X., Fuller, E., Wu, Q., Wu, Y., and Zhang, C.-Q. (2012).
       Laplacian centrality: A new centrality measure for weighted networks.
       Information Sciences, 194:240-253.
       https://math.wvu.edu/~cqzhang/Publication-files/my-paper/INS-2012-Laplacian-W.pdf

    See Also
    --------
    :func:`~networkx.linalg.laplacianmatrix.directed_laplacian_matrix`
    :func:`~networkx.linalg.laplacianmatrix.laplacian_matrix`
    """
    import numpy as np
    import scipy as sp

    # Degenerate inputs: no nodes is undefined; no edges means the full
    # Laplacian energy is zero, so normalization would divide by zero.
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept("null graph has no centrality defined")
    if G.size(weight=weight) == 0:
        if normalized:
            raise ZeroDivisionError("graph with no edges has zero full energy")
        return {n: 0 for n in G}

    if nodelist is not None:
        # Validate nodelist (no duplicates, all nodes present in G), then
        # extend it with the remaining nodes so the matrix covers all of G;
        # centrality is reported only for the requested nodelist below.
        nodeset = set(G.nbunch_iter(nodelist))
        if len(nodeset) != len(nodelist):
            raise nx.NetworkXError("nodelist has duplicate nodes or nodes not in G")
        nodes = nodelist + [n for n in G if n not in nodeset]
    else:
        nodelist = nodes = list(G)

    if G.is_directed():
        # Directed extension: use the (dense) directed Laplacian variant.
        lap_matrix = nx.directed_laplacian_matrix(G, nodes, weight, walk_type, alpha)
    else:
        lap_matrix = nx.laplacian_matrix(G, nodes, weight).toarray()

    # Laplacian energy of the full graph: sum of squared eigenvalues.
    full_energy = np.power(sp.linalg.eigh(lap_matrix, eigvals_only=True), 2).sum()

    # calculate laplacian centrality
    laplace_centralities_dict = {}
    for i, node in enumerate(nodelist):
        # remove row and col i from lap_matrix
        all_but_i = list(np.arange(lap_matrix.shape[0]))
        all_but_i.remove(i)
        A_2 = lap_matrix[all_but_i, :][:, all_but_i]

        # Adjust diagonal for removed row
        # (removing node i also removes its incident edges, so each
        # remaining node's degree entry shrinks by |L[j, i]|)
        new_diag = lap_matrix.diagonal() - abs(lap_matrix[:, i])
        np.fill_diagonal(A_2, new_diag[all_but_i])

        if len(all_but_i) > 0:  # catches degenerate case of single node
            new_energy = np.power(sp.linalg.eigh(A_2, eigvals_only=True), 2).sum()
        else:
            new_energy = 0.0

        # Centrality is the energy drop caused by deleting this node,
        # optionally expressed as a fraction of the full energy.
        lapl_cent = full_energy - new_energy
        if normalized:
            lapl_cent = lapl_cent / full_energy

        laplace_centralities_dict[node] = lapl_cent

    return laplace_centralities_dict
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/percolation.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Percolation centrality measures."""
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
from networkx.algorithms.centrality.betweenness import (
|
| 5 |
+
_single_source_dijkstra_path_basic as dijkstra,
|
| 6 |
+
)
|
| 7 |
+
from networkx.algorithms.centrality.betweenness import (
|
| 8 |
+
_single_source_shortest_path_basic as shortest_path,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
__all__ = ["percolation_centrality"]
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@nx._dispatch(node_attrs="attribute", edge_attrs="weight")
def percolation_centrality(G, attribute="percolation", states=None, weight=None):
    r"""Compute the percolation centrality for nodes.

    Percolation centrality of a node $v$, at a given time, is the
    proportion of "percolated paths" passing through that node. It
    weighs a node's topological position (as in betweenness centrality)
    by the percolation states of the nodes involved.

    Percolation states model spreading processes over time (infection
    transmission in a social network, computer viruses on a network,
    disease over a network of towns); each state is usually a decimal
    between 0.0 and 1.0. When every node carries the same state, this
    measure reduces to betweenness centrality.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    attribute : None or string, optional (default='percolation')
        Name of the node attribute to use for percolation state, used
        if `states` is None. A node without the attribute defaults to
        state 1; if no node has the attribute the result equals
        betweenness centrality.

    states : None or dict, optional (default=None)
        Explicit percolation states, keyed by node.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        The weight of an edge is treated as the length or distance
        between the two sides.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with percolation centrality as the value.

    See Also
    --------
    betweenness_centrality

    Notes
    -----
    The algorithm is from Mahendra Piraveenan, Mikhail Prokopenko, and
    Liaquat Hossain [1]_; pair dependencies are calculated and
    accumulated using Brandes' method [2]_.

    For weighted graphs the edge weights must be greater than zero.
    Zero edge weights can produce an infinite number of equal length
    paths between pairs of nodes.

    References
    ----------
    .. [1] Mahendra Piraveenan, Mikhail Prokopenko, Liaquat Hossain
       Percolation Centrality: Quantifying Graph-Theoretic Impact of Nodes
       during Percolation in Networks
       http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0053095
    .. [2] Ulrik Brandes:
       A Faster Algorithm for Betweenness Centrality.
       Journal of Mathematical Sociology 25(2):163-177, 2001.
       https://doi.org/10.1080/0022250X.2001.9990249
    """
    percolation = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G

    if states is None:
        # Fall back to the node attribute, defaulting missing nodes to 1.
        states = nx.get_node_attributes(G, attribute, default=1)

    # Sum of all percolation states across the network.
    p_sigma_x_t = sum(states.values(), 0.0)

    for source in G:
        # Single-source shortest paths: BFS when unweighted, Dijkstra
        # otherwise.
        if weight is None:
            S, P, sigma, _ = shortest_path(G, source)
        else:
            S, P, sigma, _ = dijkstra(G, source, weight)
        # Fold this source's pair dependencies into the running totals.
        percolation = _accumulate_percolation(
            percolation, S, P, sigma, source, states, p_sigma_x_t
        )

    n = len(G)

    # Scale by the number of ordered node pairs excluding the endpoint.
    scale = 1 / (n - 2)
    for v in percolation:
        percolation[v] *= scale

    return percolation
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _accumulate_percolation(percolation, S, P, sigma, s, states, p_sigma_x_t):
|
| 118 |
+
delta = dict.fromkeys(S, 0)
|
| 119 |
+
while S:
|
| 120 |
+
w = S.pop()
|
| 121 |
+
coeff = (1 + delta[w]) / sigma[w]
|
| 122 |
+
for v in P[w]:
|
| 123 |
+
delta[v] += sigma[v] * coeff
|
| 124 |
+
if w != s:
|
| 125 |
+
# percolation weight
|
| 126 |
+
pw_s_w = states[s] / (p_sigma_x_t - states[w])
|
| 127 |
+
percolation[w] += delta[w] * pw_s_w
|
| 128 |
+
return percolation
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/second_order.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Copyright (c) 2015 – Thomson Licensing, SAS
|
| 2 |
+
|
| 3 |
+
Redistribution and use in source and binary forms, with or without
|
| 4 |
+
modification, are permitted (subject to the limitations in the
|
| 5 |
+
disclaimer below) provided that the following conditions are met:
|
| 6 |
+
|
| 7 |
+
* Redistributions of source code must retain the above copyright
|
| 8 |
+
notice, this list of conditions and the following disclaimer.
|
| 9 |
+
|
| 10 |
+
* Redistributions in binary form must reproduce the above copyright
|
| 11 |
+
notice, this list of conditions and the following disclaimer in the
|
| 12 |
+
documentation and/or other materials provided with the distribution.
|
| 13 |
+
|
| 14 |
+
* Neither the name of Thomson Licensing, or Technicolor, nor the names
|
| 15 |
+
of its contributors may be used to endorse or promote products derived
|
| 16 |
+
from this software without specific prior written permission.
|
| 17 |
+
|
| 18 |
+
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
|
| 19 |
+
GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
|
| 20 |
+
HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
|
| 21 |
+
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
| 22 |
+
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 23 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
| 24 |
+
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
| 25 |
+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
| 26 |
+
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
| 27 |
+
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 28 |
+
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
| 29 |
+
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
| 30 |
+
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
import networkx as nx
|
| 34 |
+
from networkx.utils import not_implemented_for
|
| 35 |
+
|
| 36 |
+
# Authors: Erwan Le Merrer (erwan.lemerrer@technicolor.com)
|
| 37 |
+
|
| 38 |
+
__all__ = ["second_order_centrality"]
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@not_implemented_for("directed")
@nx._dispatch(edge_attrs="weight")
def second_order_centrality(G, weight="weight"):
    """Compute the second order centrality for nodes of G.

    The second order centrality of a given node is the standard deviation
    of the return times to that node of a perpetual random walk on G.
    Lower values of second order centrality indicate higher centrality.

    Parameters
    ----------
    G : graph
        A NetworkX connected and undirected graph.

    weight : string or None, optional (default="weight")
        The name of an edge attribute that holds the numerical value
        used as a weight. If None then each edge has weight 1.

    Returns
    -------
    nodes : dictionary
        Dictionary keyed by node with second order centrality as the value.

    Examples
    --------
    >>> G = nx.star_graph(10)
    >>> soc = nx.second_order_centrality(G)
    >>> print(sorted(soc.items(), key=lambda x: x[1])[0][0])  # pick first id
    0

    Raises
    ------
    NetworkXException
        If the graph G is empty, non connected or has negative weights.

    See Also
    --------
    betweenness_centrality

    Notes
    -----
    The algorithm is the analytical version of the one by Kermarrec,
    Le Merrer, Sericola and Trédan [1]_ (no random walk is simulated).
    The walk is unbiased (eq 6 of the paper [1]_): the values returned are
    the standard deviations of random-walk return times on the transformed
    input graph (equal in-degree at each node, obtained by adding
    self-loops).

    Complexity of this implementation, made to run locally on a single
    machine, is O(n^3), with n the size of G, which makes it viable only
    for small graphs.

    References
    ----------
    .. [1] Anne-Marie Kermarrec, Erwan Le Merrer, Bruno Sericola, Gilles Trédan
       "Second order centrality: Distributed assessment of nodes criticity in
       complex networks", Elsevier Computer Communications 34(5):619-628, 2011.
    """
    import numpy as np

    node_count = len(G)

    if node_count == 0:
        raise nx.NetworkXException("Empty graph.")
    if not nx.is_connected(G):
        raise nx.NetworkXException("Non connected graph.")
    if any(data.get(weight, 0) < 0 for _, _, data in G.edges(data=True)):
        raise nx.NetworkXException("Graph has negative edge weights.")

    # Balance the graph for Metropolis-Hastings random walks: add a
    # self-loop to every node whose weighted in-degree is below the
    # maximum, so that all nodes end up with equal in-degree.
    G = nx.DiGraph(G)
    in_degrees = dict(G.in_degree(weight=weight))
    heaviest = max(in_degrees.values())
    for node, degree in in_degrees.items():
        if degree < heaviest:
            G.add_edge(node, node, weight=heaviest - degree)

    # Row-normalize the adjacency matrix into a transition matrix.
    transition = nx.to_numpy_array(G)
    transition /= transition.sum(axis=1)[:, np.newaxis]

    identity = np.identity(node_count)
    ones = np.ones(node_count)
    moments = np.empty((node_count, node_count))
    for j in range(node_count):
        # Zeroing column j makes node j absorbing; solving the linear
        # system yields the expected hitting times to j (eq 3 of [1]).
        absorbed = transition.copy()
        absorbed[:, j] = 0
        moments[:, j] = np.linalg.solve(identity - absorbed, ones)

    # Standard deviation of the return times (eq 6 of [1]).
    deviations = [
        np.sqrt(2 * np.sum(moments[:, j]) - node_count * (node_count + 1))
        for j in range(node_count)
    ]
    return dict(zip(G.nodes, deviations))
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/subgraph_alg.py
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Subgraph centrality and communicability betweenness.
|
| 3 |
+
"""
|
| 4 |
+
import networkx as nx
|
| 5 |
+
from networkx.utils import not_implemented_for
|
| 6 |
+
|
| 7 |
+
__all__ = [
|
| 8 |
+
"subgraph_centrality_exp",
|
| 9 |
+
"subgraph_centrality",
|
| 10 |
+
"communicability_betweenness_centrality",
|
| 11 |
+
"estrada_index",
|
| 12 |
+
]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatch
def subgraph_centrality_exp(G):
    r"""Returns the subgraph centrality for each node of G.

    Subgraph centrality of a node `n` is the sum of weighted closed
    walks of all lengths starting and ending at node `n`, with weights
    decreasing with path length.  Each closed walk is associated with a
    connected subgraph ([1]_).  This version computes it directly from
    the matrix exponential of the adjacency matrix,

    .. math::

        SC(u)=(e^A)_{uu} .

    Parameters
    ----------
    G: graph

    Returns
    -------
    nodes:dictionary
        Dictionary of nodes with subgraph centrality as the value.

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    See Also
    --------
    subgraph_centrality:
        Alternative algorithm of the subgraph centrality for each node of G.

    References
    ----------
    .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
       "Subgraph centrality in complex networks",
       Physical Review E 71, 056103 (2005).
       https://arxiv.org/abs/cond-mat/0504730

    Examples
    --------
    (Example from [1]_)
    >>> G = nx.Graph(
    ...     [
    ...         (1, 2),
    ...         (1, 5),
    ...         (1, 8),
    ...         (2, 3),
    ...         (2, 8),
    ...         (3, 4),
    ...         (3, 6),
    ...         (4, 5),
    ...         (4, 7),
    ...         (5, 6),
    ...         (6, 7),
    ...         (7, 8),
    ...     ]
    ... )
    >>> sc = nx.subgraph_centrality_exp(G)
    >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)])
    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
    """
    import scipy as sp

    nodes = list(G)  # fix a node ordering for the matrix
    adjacency = nx.to_numpy_array(G, nodes)
    # Binarize: edge weights are ignored, only adjacency matters.
    adjacency[adjacency != 0.0] = 1
    # The diagonal of exp(A) counts weighted closed walks per node.
    diagonal = sp.linalg.expm(adjacency).diagonal()
    return {node: float(value) for node, value in zip(nodes, diagonal)}
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatch
def subgraph_centrality(G):
    r"""Returns subgraph centrality for each node in G.

    Subgraph centrality of a node `n` is the sum of weighted closed
    walks of all lengths starting and ending at node `n`, with weights
    decreasing with path length.  Each closed walk is associated with a
    connected subgraph ([1]_).  This version uses a spectral
    decomposition of the adjacency matrix,

    .. math::

       SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}},

    where `v_j` is an eigenvector of the adjacency matrix `A` of G
    corresponding to the eigenvalue `\lambda_j`.

    Parameters
    ----------
    G: graph

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with subgraph centrality as the value.

    Raises
    ------
    NetworkXError
       If the graph is not undirected and simple.

    See Also
    --------
    subgraph_centrality_exp:
        Alternative algorithm of the subgraph centrality for each node of G.

    Examples
    --------
    (Example from [1]_)
    >>> G = nx.Graph(
    ...     [
    ...         (1, 2),
    ...         (1, 5),
    ...         (1, 8),
    ...         (2, 3),
    ...         (2, 8),
    ...         (3, 4),
    ...         (3, 6),
    ...         (4, 5),
    ...         (4, 7),
    ...         (5, 6),
    ...         (6, 7),
    ...         (7, 8),
    ...     ]
    ... )
    >>> sc = nx.subgraph_centrality(G)
    >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)])
    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']

    References
    ----------
    .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
       "Subgraph centrality in complex networks",
       Physical Review E 71, 056103 (2005).
       https://arxiv.org/abs/cond-mat/0504730

    """
    import numpy as np

    nodes = list(G)  # fix a node ordering for the matrix
    adjacency = nx.to_numpy_array(G, nodes)
    # Binarize: edge weights are ignored, only adjacency matters.
    adjacency[np.nonzero(adjacency)] = 1
    # A is symmetric, so eigh applies; exp(A)'s diagonal is recovered
    # from the squared eigenvector components weighted by exp(eigenvalue).
    eigenvalues, eigenvectors = np.linalg.eigh(adjacency)
    weights = np.exp(eigenvalues)
    centralities = (np.array(eigenvectors) ** 2) @ weights
    return {node: float(value) for node, value in zip(nodes, centralities)}
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatch
def communicability_betweenness_centrality(G):
    r"""Returns subgraph communicability for all pairs of nodes in G.

    Communicability betweenness measure makes use of the number of walks
    connecting every pair of nodes as the basis of a betweenness centrality
    measure.

    Parameters
    ----------
    G: graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with communicability betweenness as the value.

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    Notes
    -----
    Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges,
    and `A` denote the adjacency matrix of `G`.

    Let `G(r)=(V,E(r))` be the graph resulting from
    removing all edges connected to node `r` but not the node itself.

    The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros
    only in row and column `r`.

    The subgraph betweenness of a node `r` is [1]_

    .. math::

         \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}},
         p\neq q, q\neq r,

    where
    `G_{prq}=(e^{A}_{pq} - (e^{A+E(r)})_{pq}`  is the number of walks
    involving node r,
    `G_{pq}=(e^{A})_{pq}` is the number of closed walks starting
    at node `p` and ending at node `q`,
    and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the
    number of terms in the sum.

    The resulting `\omega_{r}` takes values between zero and one.
    The lower bound cannot be attained for a connected
    graph, and the upper bound is attained in the star graph.

    References
    ----------
    .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano,
       "Communicability Betweenness in Complex Networks"
       Physica A 388 (2009) 764-774.
       https://arxiv.org/abs/0905.4102

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
    >>> cbc = nx.communicability_betweenness_centrality(G)
    >>> print([f"{node} {cbc[node]:0.2f}" for node in sorted(cbc)])
    ['0 0.03', '1 0.45', '2 0.51', '3 0.45', '4 0.40', '5 0.19', '6 0.03']
    """
    import numpy as np
    import scipy as sp

    nodelist = list(G)  # ordering of nodes in matrix
    n = len(nodelist)
    A = nx.to_numpy_array(G, nodelist)
    # convert to 0-1 matrix (edge weights are ignored)
    A[np.nonzero(A)] = 1
    expA = sp.linalg.expm(A)
    mapping = dict(zip(nodelist, range(n)))
    cbc = {}
    # NOTE: A is mutated in place inside this loop and restored at the
    # end of each iteration; the save/zero/restore order is significant.
    for v in G:
        # remove row and col of node v (i.e. form A + E(v) of the paper)
        i = mapping[v]
        row = A[i, :].copy()
        col = A[:, i].copy()
        A[i, :] = 0
        A[:, i] = 0
        # ratio of walks through v to all walks, per node pair (p, q)
        B = (expA - sp.linalg.expm(A)) / expA
        # sum with row/col of node v and diag set to zero, so only
        # terms with p != v, q != v, p != q contribute
        B[i, :] = 0
        B[:, i] = 0
        B -= np.diag(np.diag(B))
        cbc[v] = B.sum()
        # put row and col back
        A[i, :] = row
        A[:, i] = col
    # rescale when more than two nodes: C = (n-1)^2 - (n-1) of the paper
    order = len(cbc)
    if order > 2:
        scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0))
        for v in cbc:
            cbc[v] *= scale
    return cbc
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
@nx._dispatch
def estrada_index(G):
    r"""Returns the Estrada index of the graph G.

    The Estrada Index is a topological index of folding or 3D
    "compactness" ([1]_).

    Parameters
    ----------
    G: graph

    Returns
    -------
    estrada index: float

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    Notes
    -----
    Let `G=(V,E)` be a simple undirected graph with `n` nodes and let
    `\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}`
    be a non-increasing ordering of the eigenvalues of its adjacency
    matrix `A`. The Estrada index is ([1]_, [2]_)

    .. math::
        EE(G)=\sum_{j=1}^n e^{\lambda _j}.

    References
    ----------
    .. [1] E. Estrada, "Characterization of 3D molecular structure",
       Chem. Phys. Lett. 319, 713 (2000).
       https://doi.org/10.1016/S0009-2614(00)00158-5
    .. [2] José Antonio de la Peñaa, Ivan Gutman, Juan Rada,
       "Estimating the Estrada index",
       Linear Algebra and its Applications. 427, 1 (2007).
       https://doi.org/10.1016/j.laa.2007.06.020

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
    >>> ei = nx.estrada_index(G)
    >>> print(f"{ei:0.5}")
    20.55
    """
    # The Estrada index is the trace of exp(A), i.e. the sum of the
    # per-node subgraph centralities.
    per_node = subgraph_centrality(G)
    return sum(per_node.values())
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__init__.py
ADDED
|
File without changes
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-311.pyc
ADDED
|
Binary file (21.9 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py
ADDED
|
@@ -0,0 +1,780 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def weighted_G():
    """Return the small weighted fixture graph shared by these tests."""
    weighted_edges = [
        (0, 1, 3),
        (0, 2, 2),
        (0, 3, 6),
        (0, 4, 4),
        (1, 3, 5),
        (1, 5, 5),
        (2, 4, 1),
        (3, 4, 2),
        (3, 5, 1),
        (4, 5, 4),
    ]
    G = nx.Graph()
    G.add_weighted_edges_from(weighted_edges)
    return G
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class TestBetweennessCentrality:
|
| 22 |
+
def test_K5(self):
|
| 23 |
+
"""Betweenness centrality: K5"""
|
| 24 |
+
G = nx.complete_graph(5)
|
| 25 |
+
b = nx.betweenness_centrality(G, weight=None, normalized=False)
|
| 26 |
+
b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
|
| 27 |
+
for n in sorted(G):
|
| 28 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 29 |
+
|
| 30 |
+
def test_K5_endpoints(self):
|
| 31 |
+
"""Betweenness centrality: K5 endpoints"""
|
| 32 |
+
G = nx.complete_graph(5)
|
| 33 |
+
b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
|
| 34 |
+
b_answer = {0: 4.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0}
|
| 35 |
+
for n in sorted(G):
|
| 36 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 37 |
+
# normalized = True case
|
| 38 |
+
b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
|
| 39 |
+
b_answer = {0: 0.4, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4}
|
| 40 |
+
for n in sorted(G):
|
| 41 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 42 |
+
|
| 43 |
+
def test_P3_normalized(self):
|
| 44 |
+
"""Betweenness centrality: P3 normalized"""
|
| 45 |
+
G = nx.path_graph(3)
|
| 46 |
+
b = nx.betweenness_centrality(G, weight=None, normalized=True)
|
| 47 |
+
b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
|
| 48 |
+
for n in sorted(G):
|
| 49 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 50 |
+
|
| 51 |
+
def test_P3(self):
|
| 52 |
+
"""Betweenness centrality: P3"""
|
| 53 |
+
G = nx.path_graph(3)
|
| 54 |
+
b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
|
| 55 |
+
b = nx.betweenness_centrality(G, weight=None, normalized=False)
|
| 56 |
+
for n in sorted(G):
|
| 57 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 58 |
+
|
| 59 |
+
def test_sample_from_P3(self):
|
| 60 |
+
"""Betweenness centrality: P3 sample"""
|
| 61 |
+
G = nx.path_graph(3)
|
| 62 |
+
b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
|
| 63 |
+
b = nx.betweenness_centrality(G, k=3, weight=None, normalized=False, seed=1)
|
| 64 |
+
for n in sorted(G):
|
| 65 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 66 |
+
b = nx.betweenness_centrality(G, k=2, weight=None, normalized=False, seed=1)
|
| 67 |
+
# python versions give different results with same seed
|
| 68 |
+
b_approx1 = {0: 0.0, 1: 1.5, 2: 0.0}
|
| 69 |
+
b_approx2 = {0: 0.0, 1: 0.75, 2: 0.0}
|
| 70 |
+
for n in sorted(G):
|
| 71 |
+
assert b[n] in (b_approx1[n], b_approx2[n])
|
| 72 |
+
|
| 73 |
+
def test_P3_endpoints(self):
|
| 74 |
+
"""Betweenness centrality: P3 endpoints"""
|
| 75 |
+
G = nx.path_graph(3)
|
| 76 |
+
b_answer = {0: 2.0, 1: 3.0, 2: 2.0}
|
| 77 |
+
b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
|
| 78 |
+
for n in sorted(G):
|
| 79 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 80 |
+
# normalized = True case
|
| 81 |
+
b_answer = {0: 2 / 3, 1: 1.0, 2: 2 / 3}
|
| 82 |
+
b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
|
| 83 |
+
for n in sorted(G):
|
| 84 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 85 |
+
|
| 86 |
+
def test_krackhardt_kite_graph(self):
|
| 87 |
+
"""Betweenness centrality: Krackhardt kite graph"""
|
| 88 |
+
G = nx.krackhardt_kite_graph()
|
| 89 |
+
b_answer = {
|
| 90 |
+
0: 1.667,
|
| 91 |
+
1: 1.667,
|
| 92 |
+
2: 0.000,
|
| 93 |
+
3: 7.333,
|
| 94 |
+
4: 0.000,
|
| 95 |
+
5: 16.667,
|
| 96 |
+
6: 16.667,
|
| 97 |
+
7: 28.000,
|
| 98 |
+
8: 16.000,
|
| 99 |
+
9: 0.000,
|
| 100 |
+
}
|
| 101 |
+
for b in b_answer:
|
| 102 |
+
b_answer[b] /= 2
|
| 103 |
+
b = nx.betweenness_centrality(G, weight=None, normalized=False)
|
| 104 |
+
for n in sorted(G):
|
| 105 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
|
| 106 |
+
|
| 107 |
+
def test_krackhardt_kite_graph_normalized(self):
|
| 108 |
+
"""Betweenness centrality: Krackhardt kite graph normalized"""
|
| 109 |
+
G = nx.krackhardt_kite_graph()
|
| 110 |
+
b_answer = {
|
| 111 |
+
0: 0.023,
|
| 112 |
+
1: 0.023,
|
| 113 |
+
2: 0.000,
|
| 114 |
+
3: 0.102,
|
| 115 |
+
4: 0.000,
|
| 116 |
+
5: 0.231,
|
| 117 |
+
6: 0.231,
|
| 118 |
+
7: 0.389,
|
| 119 |
+
8: 0.222,
|
| 120 |
+
9: 0.000,
|
| 121 |
+
}
|
| 122 |
+
b = nx.betweenness_centrality(G, weight=None, normalized=True)
|
| 123 |
+
for n in sorted(G):
|
| 124 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
|
| 125 |
+
|
| 126 |
+
def test_florentine_families_graph(self):
|
| 127 |
+
"""Betweenness centrality: Florentine families graph"""
|
| 128 |
+
G = nx.florentine_families_graph()
|
| 129 |
+
b_answer = {
|
| 130 |
+
"Acciaiuoli": 0.000,
|
| 131 |
+
"Albizzi": 0.212,
|
| 132 |
+
"Barbadori": 0.093,
|
| 133 |
+
"Bischeri": 0.104,
|
| 134 |
+
"Castellani": 0.055,
|
| 135 |
+
"Ginori": 0.000,
|
| 136 |
+
"Guadagni": 0.255,
|
| 137 |
+
"Lamberteschi": 0.000,
|
| 138 |
+
"Medici": 0.522,
|
| 139 |
+
"Pazzi": 0.000,
|
| 140 |
+
"Peruzzi": 0.022,
|
| 141 |
+
"Ridolfi": 0.114,
|
| 142 |
+
"Salviati": 0.143,
|
| 143 |
+
"Strozzi": 0.103,
|
| 144 |
+
"Tornabuoni": 0.092,
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
b = nx.betweenness_centrality(G, weight=None, normalized=True)
|
| 148 |
+
for n in sorted(G):
|
| 149 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
|
| 150 |
+
|
| 151 |
+
    def test_les_miserables_graph(self):
        """Betweenness centrality: Les Miserables graph"""
        G = nx.les_miserables_graph()
        # Normalized reference values, rounded to three decimal places,
        # for every character in the co-appearance graph.
        b_answer = {
            "Napoleon": 0.000,
            "Myriel": 0.177,
            "MlleBaptistine": 0.000,
            "MmeMagloire": 0.000,
            "CountessDeLo": 0.000,
            "Geborand": 0.000,
            "Champtercier": 0.000,
            "Cravatte": 0.000,
            "Count": 0.000,
            "OldMan": 0.000,
            "Valjean": 0.570,
            "Labarre": 0.000,
            "Marguerite": 0.000,
            "MmeDeR": 0.000,
            "Isabeau": 0.000,
            "Gervais": 0.000,
            "Listolier": 0.000,
            "Tholomyes": 0.041,
            "Fameuil": 0.000,
            "Blacheville": 0.000,
            "Favourite": 0.000,
            "Dahlia": 0.000,
            "Zephine": 0.000,
            "Fantine": 0.130,
            "MmeThenardier": 0.029,
            "Thenardier": 0.075,
            "Cosette": 0.024,
            "Javert": 0.054,
            "Fauchelevent": 0.026,
            "Bamatabois": 0.008,
            "Perpetue": 0.000,
            "Simplice": 0.009,
            "Scaufflaire": 0.000,
            "Woman1": 0.000,
            "Judge": 0.000,
            "Champmathieu": 0.000,
            "Brevet": 0.000,
            "Chenildieu": 0.000,
            "Cochepaille": 0.000,
            "Pontmercy": 0.007,
            "Boulatruelle": 0.000,
            "Eponine": 0.011,
            "Anzelma": 0.000,
            "Woman2": 0.000,
            "MotherInnocent": 0.000,
            "Gribier": 0.000,
            "MmeBurgon": 0.026,
            "Jondrette": 0.000,
            "Gavroche": 0.165,
            "Gillenormand": 0.020,
            "Magnon": 0.000,
            "MlleGillenormand": 0.048,
            "MmePontmercy": 0.000,
            "MlleVaubois": 0.000,
            "LtGillenormand": 0.000,
            "Marius": 0.132,
            "BaronessT": 0.000,
            "Mabeuf": 0.028,
            "Enjolras": 0.043,
            "Combeferre": 0.001,
            "Prouvaire": 0.000,
            "Feuilly": 0.001,
            "Courfeyrac": 0.005,
            "Bahorel": 0.002,
            "Bossuet": 0.031,
            "Joly": 0.002,
            "Grantaire": 0.000,
            "MotherPlutarch": 0.000,
            "Gueulemer": 0.005,
            "Babet": 0.005,
            "Claquesous": 0.005,
            "Montparnasse": 0.004,
            "Toussaint": 0.000,
            "Child1": 0.000,
            "Child2": 0.000,
            "Brujon": 0.000,
            "MmeHucheloup": 0.000,
        }

        b = nx.betweenness_centrality(G, weight=None, normalized=True)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
|
| 237 |
+
|
| 238 |
+
def test_ladder_graph(self):
    """Betweenness centrality: Ladder graph"""
    # Hand-built edge list equivalent to nx.ladder_graph(3).
    G = nx.Graph()
    G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
    # Reference values are doubled in the literature table; halve them once
    # here instead of mutating the dict in a separate loop.
    expected = {
        node: value / 2
        for node, value in {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667}.items()
    }
    result = nx.betweenness_centrality(G, weight=None, normalized=False)
    for node in sorted(G):
        assert result[node] == pytest.approx(expected[node], abs=1e-3)
| 248 |
+
|
| 249 |
+
def test_disconnected_path(self):
    """Betweenness centrality: disconnected path"""
    # Two separate path components: 0-1-2 and 3-4-5-6.
    G = nx.Graph()
    nx.add_path(G, [0, 1, 2])
    nx.add_path(G, [3, 4, 5, 6])
    expected = {0: 0, 1: 1, 2: 0, 3: 0, 4: 2, 5: 2, 6: 0}
    result = nx.betweenness_centrality(G, weight=None, normalized=False)
    for node in sorted(G):
        assert result[node] == pytest.approx(expected[node], abs=1e-7)
| 258 |
+
|
| 259 |
+
def test_disconnected_path_endpoints(self):
    """Betweenness centrality: disconnected path endpoints"""
    # Two components; endpoint counting credits each node for the
    # shortest paths it starts or ends.
    G = nx.Graph()
    nx.add_path(G, [0, 1, 2])
    nx.add_path(G, [3, 4, 5, 6])
    expected = {0: 2, 1: 3, 2: 2, 3: 3, 4: 5, 5: 5, 6: 3}

    unnormalized = nx.betweenness_centrality(
        G, weight=None, normalized=False, endpoints=True
    )
    for node in sorted(G):
        assert unnormalized[node] == pytest.approx(expected[node], abs=1e-7)

    # Normalized case: divide by n*(n-1)/2 = 21 for n = 7 nodes.
    normalized = nx.betweenness_centrality(
        G, weight=None, normalized=True, endpoints=True
    )
    for node in sorted(G):
        assert normalized[node] == pytest.approx(expected[node] / 21, abs=1e-7)
| 272 |
+
|
| 273 |
+
def test_directed_path(self):
    """Betweenness centrality: directed path"""
    # 0 -> 1 -> 2; only node 1 lies on a shortest path between others.
    G = nx.DiGraph()
    nx.add_path(G, [0, 1, 2])
    expected = {0: 0.0, 1: 1.0, 2: 0.0}
    result = nx.betweenness_centrality(G, weight=None, normalized=False)
    for node in sorted(G):
        assert result[node] == pytest.approx(expected[node], abs=1e-7)
| 281 |
+
|
| 282 |
+
def test_directed_path_normalized(self):
    """Betweenness centrality: directed path normalized"""
    # Directed normalization divides by (n-1)(n-2) = 2, so node 1 is 0.5.
    G = nx.DiGraph()
    nx.add_path(G, [0, 1, 2])
    expected = {0: 0.0, 1: 0.5, 2: 0.0}
    result = nx.betweenness_centrality(G, weight=None, normalized=True)
    for node in sorted(G):
        assert result[node] == pytest.approx(expected[node], abs=1e-7)
| 290 |
+
|
| 291 |
+
|
| 292 |
+
class TestWeightedBetweennessCentrality:
    """Tests for betweenness_centrality with ``weight="weight"``.

    These exercise the Dijkstra-based (weighted shortest path)
    accumulation; expected values come from precomputed references and
    are compared with a loose absolute tolerance where rounded.
    """

    def test_K5(self):
        """Weighted betweenness centrality: K5"""
        # Complete graph: every pair is adjacent, so no node is interior
        # to any shortest path.
        G = nx.complete_graph(5)
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_P3_normalized(self):
        """Weighted betweenness centrality: P3 normalized"""
        G = nx.path_graph(3)
        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
        b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_P3(self):
        """Weighted betweenness centrality: P3"""
        G = nx.path_graph(3)
        b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_krackhardt_kite_graph(self):
        """Weighted betweenness centrality: Krackhardt kite graph"""
        G = nx.krackhardt_kite_graph()
        # Reference table values are doubled; halved in the loop below.
        b_answer = {
            0: 1.667,
            1: 1.667,
            2: 0.000,
            3: 7.333,
            4: 0.000,
            5: 16.667,
            6: 16.667,
            7: 28.000,
            8: 16.000,
            9: 0.000,
        }
        for b in b_answer:
            b_answer[b] /= 2

        b = nx.betweenness_centrality(G, weight="weight", normalized=False)

        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)

    def test_krackhardt_kite_graph_normalized(self):
        """Weighted betweenness centrality:
        Krackhardt kite graph normalized
        """
        G = nx.krackhardt_kite_graph()
        b_answer = {
            0: 0.023,
            1: 0.023,
            2: 0.000,
            3: 0.102,
            4: 0.000,
            5: 0.231,
            6: 0.231,
            7: 0.389,
            8: 0.222,
            9: 0.000,
        }
        b = nx.betweenness_centrality(G, weight="weight", normalized=True)

        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)

    def test_florentine_families_graph(self):
        """Weighted betweenness centrality:
        Florentine families graph"""
        G = nx.florentine_families_graph()
        b_answer = {
            "Acciaiuoli": 0.000,
            "Albizzi": 0.212,
            "Barbadori": 0.093,
            "Bischeri": 0.104,
            "Castellani": 0.055,
            "Ginori": 0.000,
            "Guadagni": 0.255,
            "Lamberteschi": 0.000,
            "Medici": 0.522,
            "Pazzi": 0.000,
            "Peruzzi": 0.022,
            "Ridolfi": 0.114,
            "Salviati": 0.143,
            "Strozzi": 0.103,
            "Tornabuoni": 0.092,
        }

        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)

    def test_les_miserables_graph(self):
        """Weighted betweenness centrality: Les Miserables graph"""
        G = nx.les_miserables_graph()
        b_answer = {
            "Napoleon": 0.000,
            "Myriel": 0.177,
            "MlleBaptistine": 0.000,
            "MmeMagloire": 0.000,
            "CountessDeLo": 0.000,
            "Geborand": 0.000,
            "Champtercier": 0.000,
            "Cravatte": 0.000,
            "Count": 0.000,
            "OldMan": 0.000,
            "Valjean": 0.454,
            "Labarre": 0.000,
            "Marguerite": 0.009,
            "MmeDeR": 0.000,
            "Isabeau": 0.000,
            "Gervais": 0.000,
            "Listolier": 0.000,
            "Tholomyes": 0.066,
            "Fameuil": 0.000,
            "Blacheville": 0.000,
            "Favourite": 0.000,
            "Dahlia": 0.000,
            "Zephine": 0.000,
            "Fantine": 0.114,
            "MmeThenardier": 0.046,
            "Thenardier": 0.129,
            "Cosette": 0.075,
            "Javert": 0.193,
            "Fauchelevent": 0.026,
            "Bamatabois": 0.080,
            "Perpetue": 0.000,
            "Simplice": 0.001,
            "Scaufflaire": 0.000,
            "Woman1": 0.000,
            "Judge": 0.000,
            "Champmathieu": 0.000,
            "Brevet": 0.000,
            "Chenildieu": 0.000,
            "Cochepaille": 0.000,
            "Pontmercy": 0.023,
            "Boulatruelle": 0.000,
            "Eponine": 0.023,
            "Anzelma": 0.000,
            "Woman2": 0.000,
            "MotherInnocent": 0.000,
            "Gribier": 0.000,
            "MmeBurgon": 0.026,
            "Jondrette": 0.000,
            "Gavroche": 0.285,
            "Gillenormand": 0.024,
            "Magnon": 0.005,
            "MlleGillenormand": 0.036,
            "MmePontmercy": 0.005,
            "MlleVaubois": 0.000,
            "LtGillenormand": 0.015,
            "Marius": 0.072,
            "BaronessT": 0.004,
            "Mabeuf": 0.089,
            "Enjolras": 0.003,
            "Combeferre": 0.000,
            "Prouvaire": 0.000,
            "Feuilly": 0.004,
            "Courfeyrac": 0.001,
            "Bahorel": 0.007,
            "Bossuet": 0.028,
            "Joly": 0.000,
            "Grantaire": 0.036,
            "MotherPlutarch": 0.000,
            "Gueulemer": 0.025,
            "Babet": 0.015,
            "Claquesous": 0.042,
            "Montparnasse": 0.050,
            "Toussaint": 0.011,
            "Child1": 0.000,
            "Child2": 0.000,
            "Brujon": 0.002,
            "MmeHucheloup": 0.034,
        }

        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)

    def test_ladder_graph(self):
        """Weighted betweenness centrality: Ladder graph"""
        G = nx.Graph()  # ladder_graph(3)
        G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
        # Reference table values are doubled; halved in the loop below.
        b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667}
        for b in b_answer:
            b_answer[b] /= 2
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)

    def test_G(self):
        """Weighted betweenness centrality: G"""
        # weighted_G() is a fixture defined elsewhere in this module.
        G = weighted_G()
        b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0}
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_G2(self):
        """Weighted betweenness centrality: G2"""
        G = nx.DiGraph()
        G.add_weighted_edges_from(
            [
                ("s", "u", 10),
                ("s", "x", 5),
                ("u", "v", 1),
                ("u", "x", 2),
                ("v", "y", 1),
                ("x", "u", 3),
                ("x", "v", 5),
                ("x", "y", 2),
                ("y", "s", 7),
                ("y", "v", 6),
            ]
        )

        b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0}

        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_G3(self):
        """Weighted betweenness centrality: G3"""
        # Duplicating every other edge must not change shortest paths,
        # so the multigraph answer matches test_G.
        G = nx.MultiGraph(weighted_G())
        es = list(G.edges(data=True))[::2]  # duplicate every other edge
        G.add_edges_from(es)
        b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0}
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_G4(self):
        """Weighted betweenness centrality: G4"""
        # Multi-digraph version of G2 with parallel (heavier or equal)
        # edges added; node-level answer is unchanged.
        G = nx.MultiDiGraph()
        G.add_weighted_edges_from(
            [
                ("s", "u", 10),
                ("s", "x", 5),
                ("s", "x", 6),
                ("u", "v", 1),
                ("u", "x", 2),
                ("v", "y", 1),
                ("v", "y", 1),
                ("x", "u", 3),
                ("x", "v", 5),
                ("x", "y", 2),
                ("x", "y", 3),
                ("y", "s", 7),
                ("y", "v", 6),
                ("y", "v", 6),
            ]
        )

        b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0}

        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
| 555 |
+
|
| 556 |
+
|
| 557 |
+
class TestEdgeBetweennessCentrality:
    """Edge betweenness centrality on unweighted graphs (BFS paths)."""

    def test_K5(self):
        """Edge betweenness centrality: K5"""
        G = nx.complete_graph(5)
        result = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
        expected = dict.fromkeys(G.edges(), 1)
        for edge in sorted(G.edges()):
            assert result[edge] == pytest.approx(expected[edge], abs=1e-7)

    def test_normalized_K5(self):
        """Edge betweenness centrality: K5"""
        G = nx.complete_graph(5)
        result = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
        # Normalization divides by n*(n-1)/2 = 10 pairs.
        expected = dict.fromkeys(G.edges(), 1 / 10)
        for edge in sorted(G.edges()):
            assert result[edge] == pytest.approx(expected[edge], abs=1e-7)

    def test_C4(self):
        """Edge betweenness centrality: C4"""
        G = nx.cycle_graph(4)
        result = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
        expected = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2}
        for edge in sorted(G.edges()):
            assert result[edge] == pytest.approx(expected[edge] / 6, abs=1e-7)

    def test_P4(self):
        """Edge betweenness centrality: P4"""
        G = nx.path_graph(4)
        result = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
        expected = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
        for edge in sorted(G.edges()):
            assert result[edge] == pytest.approx(expected[edge], abs=1e-7)

    def test_normalized_P4(self):
        """Edge betweenness centrality: P4"""
        G = nx.path_graph(4)
        result = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
        expected = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
        for edge in sorted(G.edges()):
            assert result[edge] == pytest.approx(expected[edge] / 6, abs=1e-7)

    def test_balanced_tree(self):
        """Edge betweenness centrality: balanced tree"""
        G = nx.balanced_tree(r=2, h=2)
        result = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
        expected = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6}
        for edge in sorted(G.edges()):
            assert result[edge] == pytest.approx(expected[edge], abs=1e-7)
| 605 |
+
|
| 606 |
+
|
| 607 |
+
class TestWeightedEdgeBetweennessCentrality:
    """Edge betweenness centrality with ``weight="weight"`` (Dijkstra paths),
    including MultiGraph cases where parallel edges split path counts.
    """

    def test_K5(self):
        """Edge betweenness centrality: K5"""
        # complete_graph has no 'weight' attribute; each edge still carries
        # exactly the one shortest path between its endpoints.
        G = nx.complete_graph(5)
        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
        b_answer = dict.fromkeys(G.edges(), 1)
        for n in sorted(G.edges()):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_C4(self):
        """Edge betweenness centrality: C4"""
        G = nx.cycle_graph(4)
        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
        b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2}
        for n in sorted(G.edges()):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_P4(self):
        """Edge betweenness centrality: P4"""
        G = nx.path_graph(4)
        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
        b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
        for n in sorted(G.edges()):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_balanced_tree(self):
        """Edge betweenness centrality: balanced tree"""
        G = nx.balanced_tree(r=2, h=2)
        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
        b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6}
        for n in sorted(G.edges()):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_weighted_graph(self):
        """Edge betweenness centrality: weighted"""
        eList = [
            (0, 1, 5),
            (0, 2, 4),
            (0, 3, 3),
            (0, 4, 2),
            (1, 2, 4),
            (1, 3, 1),
            (1, 4, 3),
            (2, 4, 5),
            (3, 4, 4),
        ]
        G = nx.Graph()
        G.add_weighted_edges_from(eList)
        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
        b_answer = {
            (0, 1): 0.0,
            (0, 2): 1.0,
            (0, 3): 2.0,
            (0, 4): 1.0,
            (1, 2): 2.0,
            (1, 3): 3.5,
            (1, 4): 1.5,
            (2, 4): 1.0,
            (3, 4): 0.5,
        }
        for n in sorted(G.edges()):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_normalized_weighted_graph(self):
        """Edge betweenness centrality: normalized weighted"""
        # Same graph as test_weighted_graph; answers divided by n*(n-1)/2.
        eList = [
            (0, 1, 5),
            (0, 2, 4),
            (0, 3, 3),
            (0, 4, 2),
            (1, 2, 4),
            (1, 3, 1),
            (1, 4, 3),
            (2, 4, 5),
            (3, 4, 4),
        ]
        G = nx.Graph()
        G.add_weighted_edges_from(eList)
        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True)
        b_answer = {
            (0, 1): 0.0,
            (0, 2): 1.0,
            (0, 3): 2.0,
            (0, 4): 1.0,
            (1, 2): 2.0,
            (1, 3): 3.5,
            (1, 4): 1.5,
            (2, 4): 1.0,
            (3, 4): 0.5,
        }
        norm = len(G) * (len(G) - 1) / 2
        for n in sorted(G.edges()):
            assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7)

    def test_weighted_multigraph(self):
        """Edge betweenness centrality: weighted multigraph"""
        # Parallel edges of equal weight split the shortest-path count
        # between the two keys; heavier duplicates score 0.
        eList = [
            (0, 1, 5),
            (0, 1, 4),
            (0, 2, 4),
            (0, 3, 3),
            (0, 3, 3),
            (0, 4, 2),
            (1, 2, 4),
            (1, 3, 1),
            (1, 3, 2),
            (1, 4, 3),
            (1, 4, 4),
            (2, 4, 5),
            (3, 4, 4),
            (3, 4, 4),
        ]
        G = nx.MultiGraph()
        G.add_weighted_edges_from(eList)
        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
        b_answer = {
            (0, 1, 0): 0.0,
            (0, 1, 1): 0.5,
            (0, 2, 0): 1.0,
            (0, 3, 0): 0.75,
            (0, 3, 1): 0.75,
            (0, 4, 0): 1.0,
            (1, 2, 0): 2.0,
            (1, 3, 0): 3.0,
            (1, 3, 1): 0.0,
            (1, 4, 0): 1.5,
            (1, 4, 1): 0.0,
            (2, 4, 0): 1.0,
            (3, 4, 0): 0.25,
            (3, 4, 1): 0.25,
        }
        for n in sorted(G.edges(keys=True)):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_normalized_weighted_multigraph(self):
        """Edge betweenness centrality: normalized weighted multigraph"""
        # Same multigraph as above, normalized by n*(n-1)/2.
        eList = [
            (0, 1, 5),
            (0, 1, 4),
            (0, 2, 4),
            (0, 3, 3),
            (0, 3, 3),
            (0, 4, 2),
            (1, 2, 4),
            (1, 3, 1),
            (1, 3, 2),
            (1, 4, 3),
            (1, 4, 4),
            (2, 4, 5),
            (3, 4, 4),
            (3, 4, 4),
        ]
        G = nx.MultiGraph()
        G.add_weighted_edges_from(eList)
        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True)
        b_answer = {
            (0, 1, 0): 0.0,
            (0, 1, 1): 0.5,
            (0, 2, 0): 1.0,
            (0, 3, 0): 0.75,
            (0, 3, 1): 0.75,
            (0, 4, 0): 1.0,
            (1, 2, 0): 2.0,
            (1, 3, 0): 3.0,
            (1, 3, 1): 0.0,
            (1, 4, 0): 1.5,
            (1, 4, 1): 0.0,
            (2, 4, 0): 1.0,
            (3, 4, 0): 0.25,
            (3, 4, 1): 0.25,
        }
        norm = len(G) * (len(G) - 1) / 2
        for n in sorted(G.edges(keys=True)):
            assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class TestSubsetBetweennessCentrality:
    """Tests for betweenness_centrality_subset: betweenness restricted to
    shortest paths between given ``sources`` and ``targets``.
    """

    def test_K5(self):
        """Betweenness Centrality Subset: K5"""
        G = nx.complete_graph(5)
        b = nx.betweenness_centrality_subset(
            G, sources=[0], targets=[1, 3], weight=None
        )
        b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_P5_directed(self):
        """Betweenness Centrality Subset: P5 directed"""
        G = nx.DiGraph()
        nx.add_path(G, range(5))
        # NOTE(review): key 5 is inert — the graph only has nodes 0..4,
        # so the loop below never reads it.
        b_answer = {0: 0, 1: 1, 2: 1, 3: 0, 4: 0, 5: 0}
        b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_P5(self):
        """Betweenness Centrality Subset: P5"""
        G = nx.Graph()
        nx.add_path(G, range(5))
        # Undirected paths are counted half in each direction.
        b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0, 4: 0, 5: 0}
        b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_P5_multiple_target(self):
        """Betweenness Centrality Subset: P5 multiple target"""
        G = nx.Graph()
        nx.add_path(G, range(5))
        b_answer = {0: 0, 1: 1, 2: 1, 3: 0.5, 4: 0, 5: 0}
        b = nx.betweenness_centrality_subset(
            G, sources=[0], targets=[3, 4], weight=None
        )
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_box(self):
        """Betweenness Centrality Subset: box"""
        # Two equal-length 0->3 paths; each interior node gets 1/2 * 1/2.
        G = nx.Graph()
        G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
        b_answer = {0: 0, 1: 0.25, 2: 0.25, 3: 0}
        b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_box_and_path(self):
        """Betweenness Centrality Subset: box and path"""
        G = nx.Graph()
        G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)])
        b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0, 5: 0}
        b = nx.betweenness_centrality_subset(
            G, sources=[0], targets=[3, 4], weight=None
        )
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_box_and_path2(self):
        """Betweenness Centrality Subset: box and path multiple target"""
        G = nx.Graph()
        G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)])
        b_answer = {0: 0, 1: 1.0, 2: 0.5, 20: 0.5, 3: 0.5, 4: 0}
        b = nx.betweenness_centrality_subset(
            G, sources=[0], targets=[3, 4], weight=None
        )
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_diamond_multi_path(self):
        """Betweenness Centrality Subset: Diamond Multi Path"""
        # Five parallel 1->9 routes: three through node 6, one through 7,
        # one through the 10-11-12 chain; fractions reflect path shares.
        G = nx.Graph()
        G.add_edges_from(
            [
                (1, 2),
                (1, 3),
                (1, 4),
                (1, 5),
                (1, 10),
                (10, 11),
                (11, 12),
                (12, 9),
                (2, 6),
                (3, 6),
                (4, 6),
                (5, 7),
                (7, 8),
                (6, 8),
                (8, 9),
            ]
        )
        b = nx.betweenness_centrality_subset(G, sources=[1], targets=[9], weight=None)

        expected_b = {
            1: 0,
            2: 1.0 / 10,
            3: 1.0 / 10,
            4: 1.0 / 10,
            5: 1.0 / 10,
            6: 3.0 / 10,
            7: 1.0 / 10,
            8: 4.0 / 10,
            9: 0,
            10: 1.0 / 10,
            11: 1.0 / 10,
            12: 1.0 / 10,
        }

        for n in sorted(G):
            assert b[n] == pytest.approx(expected_b[n], abs=1e-7)

    def test_normalized_p2(self):
        """
        Betweenness Centrality Subset: Normalized P2
        if n <= 2: no normalization, betweenness centrality should be 0 for all nodes.
        """
        G = nx.Graph()
        nx.add_path(G, range(2))
        b_answer = {0: 0, 1: 0.0}
        b = nx.betweenness_centrality_subset(
            G, sources=[0], targets=[1], normalized=True, weight=None
        )
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_normalized_P5_directed(self):
        """Betweenness Centrality Subset: Normalized Directed P5"""
        G = nx.DiGraph()
        nx.add_path(G, range(5))
        # Directed normalization divides by (n-1)(n-2) = 12.
        b_answer = {0: 0, 1: 1.0 / 12.0, 2: 1.0 / 12.0, 3: 0, 4: 0, 5: 0}
        b = nx.betweenness_centrality_subset(
            G, sources=[0], targets=[3], normalized=True, weight=None
        )
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)

    def test_weighted_graph(self):
        """Betweenness Centrality Subset: Weighted Graph"""
        G = nx.DiGraph()
        G.add_edge(0, 1, weight=3)
        G.add_edge(0, 2, weight=2)
        G.add_edge(0, 3, weight=6)
        G.add_edge(0, 4, weight=4)
        G.add_edge(1, 3, weight=5)
        G.add_edge(1, 5, weight=5)
        G.add_edge(2, 4, weight=1)
        G.add_edge(3, 4, weight=2)
        G.add_edge(3, 5, weight=1)
        G.add_edge(4, 5, weight=4)
        b_answer = {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.0}
        b = nx.betweenness_centrality_subset(
            G, sources=[0], targets=[5], normalized=False, weight="weight"
        )
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
| 163 |
+
|
| 164 |
+
|
| 165 |
+
class TestEdgeSubsetBetweennessCentrality:
|
| 166 |
+
def test_K5(self):
    """Edge betweenness subset centrality: K5"""
    G = nx.complete_graph(5)
    result = nx.edge_betweenness_centrality_subset(
        G, sources=[0], targets=[1, 3], weight=None
    )
    # Only the two direct source->target edges carry weight (half each,
    # undirected convention); every other edge scores zero.
    expected = dict.fromkeys(G.edges(), 0)
    expected[(0, 3)] = expected[(0, 1)] = 0.5
    for edge in sorted(G.edges()):
        assert result[edge] == pytest.approx(expected[edge], abs=1e-7)
| 176 |
+
|
| 177 |
+
def test_P5_directed(self):
    """Edge betweenness subset centrality: P5 directed"""
    G = nx.DiGraph()
    nx.add_path(G, range(5))
    expected = dict.fromkeys(G.edges(), 0)
    # Every edge on the unique 0 -> 3 path is used once.
    for edge in [(0, 1), (1, 2), (2, 3)]:
        expected[edge] = 1
    result = nx.edge_betweenness_centrality_subset(
        G, sources=[0], targets=[3], weight=None
    )
    for edge in sorted(G.edges()):
        assert result[edge] == pytest.approx(expected[edge], abs=1e-7)
| 188 |
+
|
| 189 |
+
def test_P5(self):
    """Edge betweenness subset centrality: P5"""
    G = nx.Graph()
    nx.add_path(G, range(5))
    expected = dict.fromkeys(G.edges(), 0)
    # Undirected: each edge on the 0-3 path counts half.
    for edge in [(0, 1), (1, 2), (2, 3)]:
        expected[edge] = 0.5
    result = nx.edge_betweenness_centrality_subset(
        G, sources=[0], targets=[3], weight=None
    )
    for edge in sorted(G.edges()):
        assert result[edge] == pytest.approx(expected[edge], abs=1e-7)
| 200 |
+
|
| 201 |
+
def test_P5_multiple_target(self):
    """Edge betweenness subset centrality: P5 multiple target"""
    G = nx.Graph()
    nx.add_path(G, range(5))
    expected = dict.fromkeys(G.edges(), 0)
    # Edges shared by the 0-3 and 0-4 paths count for both targets.
    for edge in [(0, 1), (1, 2), (2, 3)]:
        expected[edge] = 1
    expected[(3, 4)] = 0.5
    result = nx.edge_betweenness_centrality_subset(
        G, sources=[0], targets=[3, 4], weight=None
    )
    for edge in sorted(G.edges()):
        assert result[edge] == pytest.approx(expected[edge], abs=1e-7)
| 213 |
+
|
| 214 |
+
def test_box(self):
    """Edge betweenness subset centrality: box"""
    G = nx.Graph()
    G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
    # Two equal-length 0-3 routes; each of the four edges carries half
    # of one of the two paths.
    expected = dict.fromkeys(G.edges(), 0)
    for edge in [(0, 1), (0, 2), (1, 3), (2, 3)]:
        expected[edge] = 0.25
    result = nx.edge_betweenness_centrality_subset(
        G, sources=[0], targets=[3], weight=None
    )
    for edge in sorted(G.edges()):
        assert result[edge] == pytest.approx(expected[edge], abs=1e-7)
| 226 |
+
|
| 227 |
+
def test_box_and_path(self):
    """Edge betweenness subset centrality: box and path"""
    G = nx.Graph()
    G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)])
    expected = dict.fromkeys(G.edges(), 0)
    # Box edges serve both targets; the (3, 4) tail serves target 4 only.
    for edge in [(0, 1), (0, 2), (1, 3), (2, 3), (3, 4)]:
        expected[edge] = 0.5
    result = nx.edge_betweenness_centrality_subset(
        G, sources=[0], targets=[3, 4], weight=None
    )
    for edge in sorted(G.edges()):
        assert result[edge] == pytest.approx(expected[edge], abs=1e-7)
| 240 |
+
|
| 241 |
+
def test_box_and_path2(self):
|
| 242 |
+
"""Edge betweenness subset centrality: box and path multiple target"""
|
| 243 |
+
G = nx.Graph()
|
| 244 |
+
G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)])
|
| 245 |
+
b_answer = dict.fromkeys(G.edges(), 0)
|
| 246 |
+
b_answer[(0, 1)] = 1.0
|
| 247 |
+
b_answer[(1, 20)] = b_answer[(3, 20)] = 0.5
|
| 248 |
+
b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5
|
| 249 |
+
b_answer[(3, 4)] = 0.5
|
| 250 |
+
b = nx.edge_betweenness_centrality_subset(
|
| 251 |
+
G, sources=[0], targets=[3, 4], weight=None
|
| 252 |
+
)
|
| 253 |
+
for n in sorted(G.edges()):
|
| 254 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 255 |
+
|
| 256 |
+
    def test_diamond_multi_path(self):
        """Edge betweenness subset centrality: Diamond Multi Path

        Graph with several parallel 1->9 routes: a triple branch through 6,
        a single branch through 5/7, and a long tail through 10/11/12.
        """
        G = nx.Graph()
        G.add_edges_from(
            [
                (1, 2),
                (1, 3),
                (1, 4),
                (1, 5),
                (1, 10),
                (10, 11),
                (11, 12),
                (12, 9),
                (2, 6),
                (3, 6),
                (4, 6),
                (5, 7),
                (7, 8),
                (6, 8),
                (8, 9),
            ]
        )
        b_answer = dict.fromkeys(G.edges(), 0)
        # Four of the five shortest 1->9 paths pass through edge (8, 9).
        b_answer[(8, 9)] = 0.4
        b_answer[(6, 8)] = b_answer[(7, 8)] = 0.2
        # The 0.2 share through node 6 splits over its three feeder branches.
        b_answer[(2, 6)] = b_answer[(3, 6)] = b_answer[(4, 6)] = 0.2 / 3.0
        b_answer[(1, 2)] = b_answer[(1, 3)] = b_answer[(1, 4)] = 0.2 / 3.0
        b_answer[(5, 7)] = 0.2
        b_answer[(1, 5)] = 0.2
        # The remaining path runs along the 1-10-11-12-9 tail.
        b_answer[(9, 12)] = 0.1
        b_answer[(11, 12)] = b_answer[(10, 11)] = b_answer[(1, 10)] = 0.1
        b = nx.edge_betweenness_centrality_subset(
            G, sources=[1], targets=[9], weight=None
        )
        for n in G.edges():
            # Edge keys in b_answer are sorted tuples; normalize before lookup.
            sort_n = tuple(sorted(n))
            assert b[n] == pytest.approx(b_answer[sort_n], abs=1e-7)
|
| 293 |
+
|
| 294 |
+
def test_normalized_p1(self):
|
| 295 |
+
"""
|
| 296 |
+
Edge betweenness subset centrality: P1
|
| 297 |
+
if n <= 1: no normalization b=0 for all nodes
|
| 298 |
+
"""
|
| 299 |
+
G = nx.Graph()
|
| 300 |
+
nx.add_path(G, range(1))
|
| 301 |
+
b_answer = dict.fromkeys(G.edges(), 0)
|
| 302 |
+
b = nx.edge_betweenness_centrality_subset(
|
| 303 |
+
G, sources=[0], targets=[0], normalized=True, weight=None
|
| 304 |
+
)
|
| 305 |
+
for n in G.edges():
|
| 306 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 307 |
+
|
| 308 |
+
def test_normalized_P5_directed(self):
|
| 309 |
+
"""Edge betweenness subset centrality: Normalized Directed P5"""
|
| 310 |
+
G = nx.DiGraph()
|
| 311 |
+
nx.add_path(G, range(5))
|
| 312 |
+
b_answer = dict.fromkeys(G.edges(), 0)
|
| 313 |
+
b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.05
|
| 314 |
+
b = nx.edge_betweenness_centrality_subset(
|
| 315 |
+
G, sources=[0], targets=[3], normalized=True, weight=None
|
| 316 |
+
)
|
| 317 |
+
for n in G.edges():
|
| 318 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
| 319 |
+
|
| 320 |
+
def test_weighted_graph(self):
|
| 321 |
+
"""Edge betweenness subset centrality: Weighted Graph"""
|
| 322 |
+
G = nx.DiGraph()
|
| 323 |
+
G.add_edge(0, 1, weight=3)
|
| 324 |
+
G.add_edge(0, 2, weight=2)
|
| 325 |
+
G.add_edge(0, 3, weight=6)
|
| 326 |
+
G.add_edge(0, 4, weight=4)
|
| 327 |
+
G.add_edge(1, 3, weight=5)
|
| 328 |
+
G.add_edge(1, 5, weight=5)
|
| 329 |
+
G.add_edge(2, 4, weight=1)
|
| 330 |
+
G.add_edge(3, 4, weight=2)
|
| 331 |
+
G.add_edge(3, 5, weight=1)
|
| 332 |
+
G.add_edge(4, 5, weight=4)
|
| 333 |
+
b_answer = dict.fromkeys(G.edges(), 0)
|
| 334 |
+
b_answer[(0, 2)] = b_answer[(2, 4)] = b_answer[(4, 5)] = 0.5
|
| 335 |
+
b_answer[(0, 3)] = b_answer[(3, 5)] = 0.5
|
| 336 |
+
b = nx.edge_betweenness_centrality_subset(
|
| 337 |
+
G, sources=[0], targets=[5], normalized=False, weight="weight"
|
| 338 |
+
)
|
| 339 |
+
for n in G.edges():
|
| 340 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tests for closeness centrality.
|
| 3 |
+
"""
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import networkx as nx
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TestClosenessCentrality:
    """Tests for ``nx.closeness_centrality`` and its incremental variant."""

    @classmethod
    def setup_class(cls):
        # Fixture graphs shared (read-only) by every test in this class.
        cls.K = nx.krackhardt_kite_graph()
        cls.P3 = nx.path_graph(3)
        cls.P4 = nx.path_graph(4)
        cls.K5 = nx.complete_graph(5)

        cls.C4 = nx.cycle_graph(4)
        cls.T = nx.balanced_tree(r=2, h=2)
        cls.Gb = nx.Graph()
        cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])

        F = nx.florentine_families_graph()
        cls.F = F

        cls.LM = nx.les_miserables_graph()

        # Create random undirected, unweighted graph for testing incremental version
        # (seeded, so the expected centralities are deterministic).
        cls.undirected_G = nx.fast_gnp_random_graph(n=100, p=0.6, seed=123)
        cls.undirected_G_cc = nx.closeness_centrality(cls.undirected_G)

    def test_wf_improved(self):
        # Disconnected graph (two disjoint paths): the default Wasserman-Faust
        # scaling (c) differs from the unscaled variant (cwf).
        G = nx.union(self.P4, nx.path_graph([4, 5, 6]))
        c = nx.closeness_centrality(G)
        cwf = nx.closeness_centrality(G, wf_improved=False)
        res = {0: 0.25, 1: 0.375, 2: 0.375, 3: 0.25, 4: 0.222, 5: 0.333, 6: 0.222}
        wf_res = {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5, 4: 0.667, 5: 1.0, 6: 0.667}
        for n in G:
            assert c[n] == pytest.approx(res[n], abs=1e-3)
            assert cwf[n] == pytest.approx(wf_res[n], abs=1e-3)

    def test_digraph(self):
        # In a digraph, closeness uses incoming distance; the reversed graph
        # gives the mirror-image values.
        G = nx.path_graph(3, create_using=nx.DiGraph())
        c = nx.closeness_centrality(G)
        cr = nx.closeness_centrality(G.reverse())
        d = {0: 0.0, 1: 0.500, 2: 0.667}
        dr = {0: 0.667, 1: 0.500, 2: 0.0}
        for n in sorted(self.P3):
            assert c[n] == pytest.approx(d[n], abs=1e-3)
            assert cr[n] == pytest.approx(dr[n], abs=1e-3)

    def test_k5_closeness(self):
        c = nx.closeness_centrality(self.K5)
        d = {0: 1.000, 1: 1.000, 2: 1.000, 3: 1.000, 4: 1.000}
        for n in sorted(self.K5):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_p3_closeness(self):
        c = nx.closeness_centrality(self.P3)
        d = {0: 0.667, 1: 1.000, 2: 0.667}
        for n in sorted(self.P3):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_krackhardt_closeness(self):
        c = nx.closeness_centrality(self.K)
        d = {
            0: 0.529,
            1: 0.529,
            2: 0.500,
            3: 0.600,
            4: 0.500,
            5: 0.643,
            6: 0.643,
            7: 0.600,
            8: 0.429,
            9: 0.310,
        }
        for n in sorted(self.K):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_florentine_families_closeness(self):
        c = nx.closeness_centrality(self.F)
        d = {
            "Acciaiuoli": 0.368,
            "Albizzi": 0.483,
            "Barbadori": 0.4375,
            "Bischeri": 0.400,
            "Castellani": 0.389,
            "Ginori": 0.333,
            "Guadagni": 0.467,
            "Lamberteschi": 0.326,
            "Medici": 0.560,
            "Pazzi": 0.286,
            "Peruzzi": 0.368,
            "Ridolfi": 0.500,
            "Salviati": 0.389,
            "Strozzi": 0.4375,
            "Tornabuoni": 0.483,
        }
        for n in sorted(self.F):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_les_miserables_closeness(self):
        c = nx.closeness_centrality(self.LM)
        d = {
            "Napoleon": 0.302,
            "Myriel": 0.429,
            "MlleBaptistine": 0.413,
            "MmeMagloire": 0.413,
            "CountessDeLo": 0.302,
            "Geborand": 0.302,
            "Champtercier": 0.302,
            "Cravatte": 0.302,
            "Count": 0.302,
            "OldMan": 0.302,
            "Valjean": 0.644,
            "Labarre": 0.394,
            "Marguerite": 0.413,
            "MmeDeR": 0.394,
            "Isabeau": 0.394,
            "Gervais": 0.394,
            "Listolier": 0.341,
            "Tholomyes": 0.392,
            "Fameuil": 0.341,
            "Blacheville": 0.341,
            "Favourite": 0.341,
            "Dahlia": 0.341,
            "Zephine": 0.341,
            "Fantine": 0.461,
            "MmeThenardier": 0.461,
            "Thenardier": 0.517,
            "Cosette": 0.478,
            "Javert": 0.517,
            "Fauchelevent": 0.402,
            "Bamatabois": 0.427,
            "Perpetue": 0.318,
            "Simplice": 0.418,
            "Scaufflaire": 0.394,
            "Woman1": 0.396,
            "Judge": 0.404,
            "Champmathieu": 0.404,
            "Brevet": 0.404,
            "Chenildieu": 0.404,
            "Cochepaille": 0.404,
            "Pontmercy": 0.373,
            "Boulatruelle": 0.342,
            "Eponine": 0.396,
            "Anzelma": 0.352,
            "Woman2": 0.402,
            "MotherInnocent": 0.398,
            "Gribier": 0.288,
            "MmeBurgon": 0.344,
            "Jondrette": 0.257,
            "Gavroche": 0.514,
            "Gillenormand": 0.442,
            "Magnon": 0.335,
            "MlleGillenormand": 0.442,
            "MmePontmercy": 0.315,
            "MlleVaubois": 0.308,
            "LtGillenormand": 0.365,
            "Marius": 0.531,
            "BaronessT": 0.352,
            "Mabeuf": 0.396,
            "Enjolras": 0.481,
            "Combeferre": 0.392,
            "Prouvaire": 0.357,
            "Feuilly": 0.392,
            "Courfeyrac": 0.400,
            "Bahorel": 0.394,
            "Bossuet": 0.475,
            "Joly": 0.394,
            "Grantaire": 0.358,
            "MotherPlutarch": 0.285,
            "Gueulemer": 0.463,
            "Babet": 0.463,
            "Claquesous": 0.452,
            "Montparnasse": 0.458,
            "Toussaint": 0.402,
            "Child1": 0.342,
            "Child2": 0.342,
            "Brujon": 0.380,
            "MmeHucheloup": 0.353,
        }
        for n in sorted(self.LM):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_weighted_closeness(self):
        edges = [
            ("s", "u", 10),
            ("s", "x", 5),
            ("u", "v", 1),
            ("u", "x", 2),
            ("v", "y", 1),
            ("x", "u", 3),
            ("x", "v", 5),
            ("x", "y", 2),
            ("y", "s", 7),
            ("y", "v", 6),
        ]
        XG = nx.Graph()
        XG.add_weighted_edges_from(edges)
        c = nx.closeness_centrality(XG, distance="weight")
        d = {"y": 0.200, "x": 0.286, "s": 0.138, "u": 0.235, "v": 0.200}
        for n in sorted(XG):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    #
    # Tests for incremental closeness centrality.
    #
    @staticmethod
    def pick_add_edge(g):
        # Pick an arbitrary node and pair it with some non-neighbor, so the
        # returned edge is guaranteed not to exist yet.
        u = nx.utils.arbitrary_element(g)
        possible_nodes = set(g.nodes())
        neighbors = list(g.neighbors(u)) + [u]
        possible_nodes.difference_update(neighbors)
        v = nx.utils.arbitrary_element(possible_nodes)
        return (u, v)

    @staticmethod
    def pick_remove_edge(g):
        # Pick an arbitrary existing edge (node plus one of its neighbors).
        u = nx.utils.arbitrary_element(g)
        possible_nodes = list(g.neighbors(u))
        v = nx.utils.arbitrary_element(possible_nodes)
        return (u, v)

    def test_directed_raises(self):
        # Incremental closeness is only implemented for undirected graphs.
        with pytest.raises(nx.NetworkXNotImplemented):
            dir_G = nx.gn_graph(n=5)
            prev_cc = None
            edge = self.pick_add_edge(dir_G)
            insert = True
            nx.incremental_closeness_centrality(dir_G, edge, prev_cc, insert)

    def test_wrong_size_prev_cc_raises(self):
        # prev_cc must contain one entry per node.
        with pytest.raises(nx.NetworkXError):
            G = self.undirected_G.copy()
            edge = self.pick_add_edge(G)
            insert = True
            prev_cc = self.undirected_G_cc.copy()
            prev_cc.pop(0)
            nx.incremental_closeness_centrality(G, edge, prev_cc, insert)

    def test_wrong_nodes_prev_cc_raises(self):
        # Same size as the graph but with a key that is not a graph node.
        with pytest.raises(nx.NetworkXError):
            G = self.undirected_G.copy()
            edge = self.pick_add_edge(G)
            insert = True
            prev_cc = self.undirected_G_cc.copy()
            num_nodes = len(prev_cc)
            prev_cc.pop(0)
            prev_cc[num_nodes] = 0.5
            nx.incremental_closeness_centrality(G, edge, prev_cc, insert)

    def test_zero_centrality(self):
        # Removing an edge from P3 isolates a node, whose centrality must be 0.
        G = nx.path_graph(3)
        prev_cc = nx.closeness_centrality(G)
        edge = self.pick_remove_edge(G)
        test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=False)
        G.remove_edges_from([edge])
        real_cc = nx.closeness_centrality(G)
        shared_items = set(test_cc.items()) & set(real_cc.items())
        assert len(shared_items) == len(real_cc)
        assert 0 in test_cc.values()

    def test_incremental(self):
        # Check that incremental and regular give same output
        # while alternating edge removals and insertions.
        G = self.undirected_G.copy()
        prev_cc = None
        for i in range(5):
            if i % 2 == 0:
                # Remove an edge
                insert = False
                edge = self.pick_remove_edge(G)
            else:
                # Add an edge
                insert = True
                edge = self.pick_add_edge(G)

            # start = timeit.default_timer()
            test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insert)
            # inc_elapsed = (timeit.default_timer() - start)
            # print(f"incremental time: {inc_elapsed}")

            # The incremental call is made *before* mutating G; apply the
            # mutation now so the full recomputation sees the updated graph.
            if insert:
                G.add_edges_from([edge])
            else:
                G.remove_edges_from([edge])

            # start = timeit.default_timer()
            real_cc = nx.closeness_centrality(G)
            # reg_elapsed = (timeit.default_timer() - start)
            # print(f"regular time: {reg_elapsed}")
            # Example output:
            # incremental time: 0.208
            # regular time: 0.276
            # incremental time: 0.00683
            # regular time: 0.260
            # incremental time: 0.0224
            # regular time: 0.278
            # incremental time: 0.00804
            # regular time: 0.208
            # incremental time: 0.00947
            # regular time: 0.188

            assert set(test_cc.items()) == set(real_cc.items())

            prev_cc = test_cc
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
pytest.importorskip("numpy")
|
| 4 |
+
pytest.importorskip("scipy")
|
| 5 |
+
|
| 6 |
+
import networkx as nx
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TestFlowClosenessCentrality:
    """Tests for ``nx.current_flow_closeness_centrality``."""

    def test_K4(self):
        """Current-flow closeness on the complete graph K4."""
        graph = nx.complete_graph(4)
        expected = dict.fromkeys(range(4), 2.0 / 3)
        result = nx.current_flow_closeness_centrality(graph)
        for node in sorted(graph):
            assert result[node] == pytest.approx(expected[node], abs=1e-7)

    def test_P4(self):
        """Current-flow closeness on the path graph P4."""
        graph = nx.path_graph(4)
        expected = {0: 1.0 / 6, 1: 1.0 / 4, 2: 1.0 / 4, 3: 1.0 / 6}
        result = nx.current_flow_closeness_centrality(graph)
        for node in sorted(graph):
            assert result[node] == pytest.approx(expected[node], abs=1e-7)

    def test_star(self):
        """Current-flow closeness on a star with string-labelled nodes."""
        graph = nx.Graph()
        nx.add_star(graph, ["a", "b", "c", "d"])
        expected = {"a": 1.0 / 3, "b": 0.6 / 3, "c": 0.6 / 3, "d": 0.6 / 3}
        result = nx.current_flow_closeness_centrality(graph)
        for node in sorted(graph):
            assert result[node] == pytest.approx(expected[node], abs=1e-7)

    def test_current_flow_closeness_centrality_not_connected(self):
        """A disconnected graph must raise NetworkXError."""
        graph = nx.Graph()
        graph.add_nodes_from([1, 2, 3])
        with pytest.raises(nx.NetworkXError):
            nx.current_flow_closeness_centrality(graph)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class TestWeightedFlowClosenessCentrality:
    # Placeholder: weighted current-flow closeness tests have not been written.
    pass
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import networkx as nx
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def small_ego_G():
    """The sample network from https://arxiv.org/pdf/1310.6753v1.pdf

    Returns an undirected ego network centered on node "u", which is
    connected to every other node except "g"'s peers' full neighborhood.
    """
    edges = [
        ("a", "b"),
        ("a", "c"),
        ("b", "c"),
        ("b", "d"),
        ("b", "e"),
        ("b", "f"),
        ("c", "d"),
        ("c", "f"),
        ("c", "h"),
        ("d", "f"),
        ("e", "f"),
        ("f", "h"),
        ("h", "j"),
        ("h", "k"),
        ("i", "j"),
        ("i", "k"),
        ("j", "k"),
        # Ego node "u" links to (almost) everyone above.
        ("u", "a"),
        ("u", "b"),
        ("u", "c"),
        ("u", "d"),
        ("u", "e"),
        ("u", "f"),
        ("u", "g"),
        ("u", "h"),
        ("u", "i"),
        ("u", "j"),
        ("u", "k"),
    ]
    G = nx.Graph()
    G.add_edges_from(edges)

    return G
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class TestDispersion:
    """Tests for ``nx.dispersion``."""

    def test_article(self):
        """Dispersion values match the reference article's worked example."""
        graph = small_ego_G()
        uh = nx.dispersion(graph, "u", "h", normalized=False)
        ub = nx.dispersion(graph, "u", "b", normalized=False)
        assert uh == 4
        assert ub == 1

    def test_results_length(self):
        """Each call form returns a result for every requested node."""
        graph = small_ego_G()
        all_pairs = nx.dispersion(graph)
        from_u = nx.dispersion(graph, "u")
        single = nx.dispersion(graph, "u", "h")
        assert len(all_pairs) == len(graph)
        assert len(from_u) == len(graph) - 1
        assert isinstance(single, float)

    def test_dispersion_v_only(self):
        """Passing only ``v`` returns dispersions of every u toward v."""
        graph = small_ego_G()
        raw = nx.dispersion(graph, v="h", normalized=False)
        scaled = nx.dispersion(graph, v="h", normalized=True)
        assert raw == {"c": 0, "f": 0, "j": 0, "k": 0, "u": 4}
        assert scaled == {"c": 0.0, "f": 0.0, "j": 0.0, "k": 0.0, "u": 1.0}

    def test_impossible_things(self):
        """Dispersion is never negative."""
        graph = nx.karate_club_graph()
        scores = nx.dispersion(graph)
        for inner in scores.values():
            for value in inner.values():
                assert value >= 0
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
np = pytest.importorskip("numpy")
|
| 6 |
+
pytest.importorskip("scipy")
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
import networkx as nx
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TestEigenvectorCentrality:
    """Tests for power-iteration and numpy-based eigenvector centrality."""

    def test_K5(self):
        """On K5 every node gets the uniform value 1/sqrt(5)."""
        graph = nx.complete_graph(5)
        expected = dict.fromkeys(graph, math.sqrt(1 / 5.0))
        result = nx.eigenvector_centrality(graph)
        for node in sorted(graph):
            assert result[node] == pytest.approx(expected[node], abs=1e-7)
        # An explicit all-ones starting vector converges to the same answer.
        result = nx.eigenvector_centrality(graph, nstart=dict.fromkeys(graph, 1))
        for node in sorted(graph):
            assert result[node] == pytest.approx(expected[node], abs=1e-7)

        result = nx.eigenvector_centrality_numpy(graph)
        for node in sorted(graph):
            assert result[node] == pytest.approx(expected[node], abs=1e-3)

    def test_P3(self):
        """Eigenvector centrality on the path graph P3."""
        graph = nx.path_graph(3)
        expected = {0: 0.5, 1: 0.7071, 2: 0.5}
        result = nx.eigenvector_centrality_numpy(graph)
        for node in sorted(graph):
            assert result[node] == pytest.approx(expected[node], abs=1e-4)
        result = nx.eigenvector_centrality(graph)
        for node in sorted(graph):
            assert result[node] == pytest.approx(expected[node], abs=1e-4)

    def test_P3_unweighted(self):
        """P3 with weight=None gives the same values as the weighted call."""
        graph = nx.path_graph(3)
        expected = {0: 0.5, 1: 0.7071, 2: 0.5}
        result = nx.eigenvector_centrality_numpy(graph, weight=None)
        for node in sorted(graph):
            assert result[node] == pytest.approx(expected[node], abs=1e-4)

    def test_maxiter(self):
        """Zero iterations cannot converge and must raise."""
        with pytest.raises(nx.PowerIterationFailedConvergence):
            graph = nx.path_graph(3)
            nx.eigenvector_centrality(graph, max_iter=0)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class TestEigenvectorCentralityDirected:
    """Eigenvector centrality on a fixed directed graph (weighted/unweighted)."""

    @classmethod
    def setup_class(cls):
        G = nx.DiGraph()

        edges = [
            (1, 2),
            (1, 3),
            (2, 4),
            (3, 2),
            (3, 5),
            (4, 2),
            (4, 5),
            (4, 6),
            (5, 6),
            (5, 7),
            (5, 8),
            (6, 8),
            (7, 1),
            (7, 5),
            (7, 8),
            (8, 6),
            (8, 7),
        ]

        # Uniform weight 2.0 on every edge; scaling all weights equally
        # leaves the dominant eigenvector unchanged.
        G.add_edges_from(edges, weight=2.0)
        cls.G = G.reverse()
        # Expected centralities, in node-insertion order of cls.G.
        cls.G.evc = [
            0.25368793,
            0.19576478,
            0.32817092,
            0.40430835,
            0.48199885,
            0.15724483,
            0.51346196,
            0.32475403,
        ]

        H = nx.DiGraph()

        edges = [
            (1, 2),
            (1, 3),
            (2, 4),
            (3, 2),
            (3, 5),
            (4, 2),
            (4, 5),
            (4, 6),
            (5, 6),
            (5, 7),
            (5, 8),
            (6, 8),
            (7, 1),
            (7, 5),
            (7, 8),
            (8, 6),
            (8, 7),
        ]

        # NOTE(review): these edges are re-added to ``G`` (not ``H``), and
        # ``cls.H`` is taken as another reverse of ``G`` — the local ``H``
        # is never populated. The edge set is identical, so the expected
        # values below still hold, but confirm this was intentional.
        G.add_edges_from(edges)
        cls.H = G.reverse()
        cls.H.evc = [
            0.25368793,
            0.19576478,
            0.32817092,
            0.40430835,
            0.48199885,
            0.15724483,
            0.51346196,
            0.32475403,
        ]

    def test_eigenvector_centrality_weighted(self):
        G = self.G
        p = nx.eigenvector_centrality(G)
        for a, b in zip(list(p.values()), self.G.evc):
            assert a == pytest.approx(b, abs=1e-4)

    def test_eigenvector_centrality_weighted_numpy(self):
        G = self.G
        p = nx.eigenvector_centrality_numpy(G)
        for a, b in zip(list(p.values()), self.G.evc):
            assert a == pytest.approx(b, abs=1e-7)

    def test_eigenvector_centrality_unweighted(self):
        # Compares against self.G.evc — identical to self.H.evc by construction.
        G = self.H
        p = nx.eigenvector_centrality(G)
        for a, b in zip(list(p.values()), self.G.evc):
            assert a == pytest.approx(b, abs=1e-4)

    def test_eigenvector_centrality_unweighted_numpy(self):
        G = self.H
        p = nx.eigenvector_centrality_numpy(G)
        for a, b in zip(list(p.values()), self.G.evc):
            assert a == pytest.approx(b, abs=1e-7)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class TestEigenvectorCentralityExceptions:
    """Error-condition tests for eigenvector centrality."""

    def test_multigraph(self):
        """Multigraphs are rejected by the power-iteration variant."""
        with pytest.raises(nx.NetworkXException):
            nx.eigenvector_centrality(nx.MultiGraph())

    def test_multigraph_numpy(self):
        """Multigraphs are rejected by the numpy variant."""
        with pytest.raises(nx.NetworkXException):
            nx.eigenvector_centrality_numpy(nx.MultiGraph())

    def test_empty(self):
        """Empty graphs are rejected by the power-iteration variant."""
        with pytest.raises(nx.NetworkXException):
            nx.eigenvector_centrality(nx.Graph())

    def test_empty_numpy(self):
        """Empty graphs are rejected by the numpy variant."""
        with pytest.raises(nx.NetworkXException):
            nx.eigenvector_centrality_numpy(nx.Graph())

    def test_zero_nstart(self):
        """An all-zero starting vector is rejected with a clear message."""
        graph = nx.Graph([(1, 2), (1, 3), (2, 3)])
        message = "initial vector cannot have all zero values"
        with pytest.raises(nx.NetworkXException, match=message):
            nx.eigenvector_centrality(graph, nstart={node: 0 for node in graph})
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tests for harmonic centrality.
|
| 3 |
+
"""
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import networkx as nx
|
| 7 |
+
from networkx.algorithms.centrality import harmonic_centrality
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TestClosenessCentrality:
    # NOTE: despite the class name, every test here exercises
    # ``harmonic_centrality``, not closeness centrality.

    @classmethod
    def setup_class(cls):
        # Small fixture graphs shared (read-only) by all tests.
        cls.P3 = nx.path_graph(3)
        cls.P4 = nx.path_graph(4)
        cls.K5 = nx.complete_graph(5)

        cls.C4 = nx.cycle_graph(4)
        cls.C4_directed = nx.cycle_graph(4, create_using=nx.DiGraph)

        cls.C5 = nx.cycle_graph(5)

        cls.T = nx.balanced_tree(r=2, h=2)

        cls.Gb = nx.DiGraph()
        cls.Gb.add_edges_from([(0, 1), (0, 2), (0, 4), (2, 1), (2, 3), (4, 3)])

    def test_p3_harmonic(self):
        c = harmonic_centrality(self.P3)
        d = {0: 1.5, 1: 2, 2: 1.5}
        for n in sorted(self.P3):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_p4_harmonic(self):
        c = harmonic_centrality(self.P4)
        d = {0: 1.8333333, 1: 2.5, 2: 2.5, 3: 1.8333333}
        for n in sorted(self.P4):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_clique_complete(self):
        c = harmonic_centrality(self.K5)
        d = {0: 4, 1: 4, 2: 4, 3: 4, 4: 4}
        # NOTE(review): iterates self.P3's nodes (0..2), so only three of
        # the five K5 nodes are checked — presumably a copy-paste slip.
        for n in sorted(self.P3):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_cycle_C4(self):
        c = harmonic_centrality(self.C4)
        d = {0: 2.5, 1: 2.5, 2: 2.5, 3: 2.5}
        for n in sorted(self.C4):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_cycle_C5(self):
        c = harmonic_centrality(self.C5)
        # Key 5 is unused: C5 has nodes 0..4 only.
        d = {0: 3, 1: 3, 2: 3, 3: 3, 4: 3, 5: 4}
        for n in sorted(self.C5):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_bal_tree(self):
        c = harmonic_centrality(self.T)
        d = {0: 4.0, 1: 4.1666, 2: 4.1666, 3: 2.8333, 4: 2.8333, 5: 2.8333, 6: 2.8333}
        for n in sorted(self.T):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_exampleGraph(self):
        c = harmonic_centrality(self.Gb)
        d = {0: 0, 1: 2, 2: 1, 3: 2.5, 4: 1}
        for n in sorted(self.Gb):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_weighted_harmonic(self):
        XG = nx.DiGraph()
        XG.add_weighted_edges_from(
            [
                ("a", "b", 10),
                ("d", "c", 5),
                ("a", "c", 1),
                ("e", "f", 2),
                ("f", "c", 1),
                ("a", "f", 3),
            ]
        )
        c = harmonic_centrality(XG, distance="weight")
        d = {"a": 0, "b": 0.1, "c": 2.533, "d": 0, "e": 0, "f": 0.83333}
        for n in sorted(XG):
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_empty(self):
        # An empty graph yields an empty result dict.
        G = nx.DiGraph()
        c = harmonic_centrality(G, distance="weight")
        d = {}
        assert c == d

    def test_singleton(self):
        # A lone node has harmonic centrality 0.
        G = nx.DiGraph()
        G.add_node(0)
        c = harmonic_centrality(G, distance="weight")
        d = {0: 0}
        assert c == d

    def test_cycle_c4_directed(self):
        # nbunch restricts which nodes are reported; sources restricts
        # which nodes contribute distance terms.
        c = harmonic_centrality(self.C4_directed, nbunch=[0, 1], sources=[1, 2])
        d = {0: 0.833, 1: 0.333}
        for n in [0, 1]:
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_p3_harmonic_subset(self):
        c = harmonic_centrality(self.P3, sources=[0, 1])
        d = {0: 1, 1: 1, 2: 1.5}
        for n in self.P3:
            assert c[n] == pytest.approx(d[n], abs=1e-3)

    def test_p4_harmonic_subset(self):
        c = harmonic_centrality(self.P4, nbunch=[2, 3], sources=[0, 1])
        d = {2: 1.5, 3: 0.8333333}
        for n in [2, 3]:
            assert c[n] == pytest.approx(d[n], abs=1e-3)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
|
| 5 |
+
np = pytest.importorskip("numpy")
|
| 6 |
+
sp = pytest.importorskip("scipy")
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def test_laplacian_centrality_null_graph():
|
| 10 |
+
G = nx.Graph()
|
| 11 |
+
with pytest.raises(nx.NetworkXPointlessConcept):
|
| 12 |
+
d = nx.laplacian_centrality(G, normalized=False)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def test_laplacian_centrality_single_node():
|
| 16 |
+
"""See gh-6571"""
|
| 17 |
+
G = nx.empty_graph(1)
|
| 18 |
+
assert nx.laplacian_centrality(G, normalized=False) == {0: 0}
|
| 19 |
+
with pytest.raises(ZeroDivisionError):
|
| 20 |
+
nx.laplacian_centrality(G, normalized=True)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def test_laplacian_centrality_unconnected_nodes():
|
| 24 |
+
"""laplacian_centrality on a unconnected node graph should return 0
|
| 25 |
+
|
| 26 |
+
For graphs without edges, the Laplacian energy is 0 and is unchanged with
|
| 27 |
+
node removal, so::
|
| 28 |
+
|
| 29 |
+
LC(v) = LE(G) - LE(G - v) = 0 - 0 = 0
|
| 30 |
+
"""
|
| 31 |
+
G = nx.empty_graph(3)
|
| 32 |
+
assert nx.laplacian_centrality(G, normalized=False) == {0: 0, 1: 0, 2: 0}
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def test_laplacian_centrality_empty_graph():
|
| 36 |
+
G = nx.empty_graph(3)
|
| 37 |
+
with pytest.raises(ZeroDivisionError):
|
| 38 |
+
d = nx.laplacian_centrality(G, normalized=True)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_laplacian_centrality_E():
|
| 42 |
+
E = nx.Graph()
|
| 43 |
+
E.add_weighted_edges_from(
|
| 44 |
+
[(0, 1, 4), (4, 5, 1), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 2)]
|
| 45 |
+
)
|
| 46 |
+
d = nx.laplacian_centrality(E)
|
| 47 |
+
exact = {
|
| 48 |
+
0: 0.700000,
|
| 49 |
+
1: 0.900000,
|
| 50 |
+
2: 0.280000,
|
| 51 |
+
3: 0.220000,
|
| 52 |
+
4: 0.260000,
|
| 53 |
+
5: 0.040000,
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
for n, dc in d.items():
|
| 57 |
+
assert exact[n] == pytest.approx(dc, abs=1e-7)
|
| 58 |
+
|
| 59 |
+
# Check not normalized
|
| 60 |
+
full_energy = 200
|
| 61 |
+
dnn = nx.laplacian_centrality(E, normalized=False)
|
| 62 |
+
for n, dc in dnn.items():
|
| 63 |
+
assert exact[n] * full_energy == pytest.approx(dc, abs=1e-7)
|
| 64 |
+
|
| 65 |
+
# Check unweighted not-normalized version
|
| 66 |
+
duw_nn = nx.laplacian_centrality(E, normalized=False, weight=None)
|
| 67 |
+
print(duw_nn)
|
| 68 |
+
exact_uw_nn = {
|
| 69 |
+
0: 18,
|
| 70 |
+
1: 34,
|
| 71 |
+
2: 18,
|
| 72 |
+
3: 10,
|
| 73 |
+
4: 16,
|
| 74 |
+
5: 6,
|
| 75 |
+
}
|
| 76 |
+
for n, dc in duw_nn.items():
|
| 77 |
+
assert exact_uw_nn[n] == pytest.approx(dc, abs=1e-7)
|
| 78 |
+
|
| 79 |
+
# Check unweighted version
|
| 80 |
+
duw = nx.laplacian_centrality(E, weight=None)
|
| 81 |
+
full_energy = 42
|
| 82 |
+
for n, dc in duw.items():
|
| 83 |
+
assert exact_uw_nn[n] / full_energy == pytest.approx(dc, abs=1e-7)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def test_laplacian_centrality_KC():
|
| 87 |
+
KC = nx.karate_club_graph()
|
| 88 |
+
d = nx.laplacian_centrality(KC)
|
| 89 |
+
exact = {
|
| 90 |
+
0: 0.2543593,
|
| 91 |
+
1: 0.1724524,
|
| 92 |
+
2: 0.2166053,
|
| 93 |
+
3: 0.0964646,
|
| 94 |
+
4: 0.0350344,
|
| 95 |
+
5: 0.0571109,
|
| 96 |
+
6: 0.0540713,
|
| 97 |
+
7: 0.0788674,
|
| 98 |
+
8: 0.1222204,
|
| 99 |
+
9: 0.0217565,
|
| 100 |
+
10: 0.0308751,
|
| 101 |
+
11: 0.0215965,
|
| 102 |
+
12: 0.0174372,
|
| 103 |
+
13: 0.118861,
|
| 104 |
+
14: 0.0366341,
|
| 105 |
+
15: 0.0548712,
|
| 106 |
+
16: 0.0172772,
|
| 107 |
+
17: 0.0191969,
|
| 108 |
+
18: 0.0225564,
|
| 109 |
+
19: 0.0331147,
|
| 110 |
+
20: 0.0279955,
|
| 111 |
+
21: 0.0246361,
|
| 112 |
+
22: 0.0382339,
|
| 113 |
+
23: 0.1294193,
|
| 114 |
+
24: 0.0227164,
|
| 115 |
+
25: 0.0644697,
|
| 116 |
+
26: 0.0281555,
|
| 117 |
+
27: 0.075188,
|
| 118 |
+
28: 0.0364742,
|
| 119 |
+
29: 0.0707087,
|
| 120 |
+
30: 0.0708687,
|
| 121 |
+
31: 0.131019,
|
| 122 |
+
32: 0.2370821,
|
| 123 |
+
33: 0.3066709,
|
| 124 |
+
}
|
| 125 |
+
for n, dc in d.items():
|
| 126 |
+
assert exact[n] == pytest.approx(dc, abs=1e-7)
|
| 127 |
+
|
| 128 |
+
# Check not normalized
|
| 129 |
+
full_energy = 12502
|
| 130 |
+
dnn = nx.laplacian_centrality(KC, normalized=False)
|
| 131 |
+
for n, dc in dnn.items():
|
| 132 |
+
assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def test_laplacian_centrality_K():
|
| 136 |
+
K = nx.krackhardt_kite_graph()
|
| 137 |
+
d = nx.laplacian_centrality(K)
|
| 138 |
+
exact = {
|
| 139 |
+
0: 0.3010753,
|
| 140 |
+
1: 0.3010753,
|
| 141 |
+
2: 0.2258065,
|
| 142 |
+
3: 0.483871,
|
| 143 |
+
4: 0.2258065,
|
| 144 |
+
5: 0.3870968,
|
| 145 |
+
6: 0.3870968,
|
| 146 |
+
7: 0.1935484,
|
| 147 |
+
8: 0.0752688,
|
| 148 |
+
9: 0.0322581,
|
| 149 |
+
}
|
| 150 |
+
for n, dc in d.items():
|
| 151 |
+
assert exact[n] == pytest.approx(dc, abs=1e-7)
|
| 152 |
+
|
| 153 |
+
# Check not normalized
|
| 154 |
+
full_energy = 186
|
| 155 |
+
dnn = nx.laplacian_centrality(K, normalized=False)
|
| 156 |
+
for n, dc in dnn.items():
|
| 157 |
+
assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def test_laplacian_centrality_P3():
|
| 161 |
+
P3 = nx.path_graph(3)
|
| 162 |
+
d = nx.laplacian_centrality(P3)
|
| 163 |
+
exact = {0: 0.6, 1: 1.0, 2: 0.6}
|
| 164 |
+
for n, dc in d.items():
|
| 165 |
+
assert exact[n] == pytest.approx(dc, abs=1e-7)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def test_laplacian_centrality_K5():
|
| 169 |
+
K5 = nx.complete_graph(5)
|
| 170 |
+
d = nx.laplacian_centrality(K5)
|
| 171 |
+
exact = {0: 0.52, 1: 0.52, 2: 0.52, 3: 0.52, 4: 0.52}
|
| 172 |
+
for n, dc in d.items():
|
| 173 |
+
assert exact[n] == pytest.approx(dc, abs=1e-7)
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def test_laplacian_centrality_FF():
|
| 177 |
+
FF = nx.florentine_families_graph()
|
| 178 |
+
d = nx.laplacian_centrality(FF)
|
| 179 |
+
exact = {
|
| 180 |
+
"Acciaiuoli": 0.0804598,
|
| 181 |
+
"Medici": 0.4022989,
|
| 182 |
+
"Castellani": 0.1724138,
|
| 183 |
+
"Peruzzi": 0.183908,
|
| 184 |
+
"Strozzi": 0.2528736,
|
| 185 |
+
"Barbadori": 0.137931,
|
| 186 |
+
"Ridolfi": 0.2183908,
|
| 187 |
+
"Tornabuoni": 0.2183908,
|
| 188 |
+
"Albizzi": 0.1954023,
|
| 189 |
+
"Salviati": 0.1149425,
|
| 190 |
+
"Pazzi": 0.0344828,
|
| 191 |
+
"Bischeri": 0.1954023,
|
| 192 |
+
"Guadagni": 0.2298851,
|
| 193 |
+
"Ginori": 0.045977,
|
| 194 |
+
"Lamberteschi": 0.0574713,
|
| 195 |
+
}
|
| 196 |
+
for n, dc in d.items():
|
| 197 |
+
assert exact[n] == pytest.approx(dc, abs=1e-7)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def test_laplacian_centrality_DG():
|
| 201 |
+
DG = nx.DiGraph([(0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (5, 6), (5, 7), (5, 8)])
|
| 202 |
+
d = nx.laplacian_centrality(DG)
|
| 203 |
+
exact = {
|
| 204 |
+
0: 0.2123352,
|
| 205 |
+
5: 0.515391,
|
| 206 |
+
1: 0.2123352,
|
| 207 |
+
2: 0.2123352,
|
| 208 |
+
3: 0.2123352,
|
| 209 |
+
4: 0.2123352,
|
| 210 |
+
6: 0.2952031,
|
| 211 |
+
7: 0.2952031,
|
| 212 |
+
8: 0.2952031,
|
| 213 |
+
}
|
| 214 |
+
for n, dc in d.items():
|
| 215 |
+
assert exact[n] == pytest.approx(dc, abs=1e-7)
|
| 216 |
+
|
| 217 |
+
# Check not normalized
|
| 218 |
+
full_energy = 9.50704
|
| 219 |
+
dnn = nx.laplacian_centrality(DG, normalized=False)
|
| 220 |
+
for n, dc in dnn.items():
|
| 221 |
+
assert exact[n] * full_energy == pytest.approx(dc, abs=1e-4)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py
ADDED
|
@@ -0,0 +1,344 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class TestLoadCentrality:
|
| 7 |
+
@classmethod
|
| 8 |
+
def setup_class(cls):
|
| 9 |
+
G = nx.Graph()
|
| 10 |
+
G.add_edge(0, 1, weight=3)
|
| 11 |
+
G.add_edge(0, 2, weight=2)
|
| 12 |
+
G.add_edge(0, 3, weight=6)
|
| 13 |
+
G.add_edge(0, 4, weight=4)
|
| 14 |
+
G.add_edge(1, 3, weight=5)
|
| 15 |
+
G.add_edge(1, 5, weight=5)
|
| 16 |
+
G.add_edge(2, 4, weight=1)
|
| 17 |
+
G.add_edge(3, 4, weight=2)
|
| 18 |
+
G.add_edge(3, 5, weight=1)
|
| 19 |
+
G.add_edge(4, 5, weight=4)
|
| 20 |
+
cls.G = G
|
| 21 |
+
cls.exact_weighted = {0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0}
|
| 22 |
+
cls.K = nx.krackhardt_kite_graph()
|
| 23 |
+
cls.P3 = nx.path_graph(3)
|
| 24 |
+
cls.P4 = nx.path_graph(4)
|
| 25 |
+
cls.K5 = nx.complete_graph(5)
|
| 26 |
+
cls.P2 = nx.path_graph(2)
|
| 27 |
+
|
| 28 |
+
cls.C4 = nx.cycle_graph(4)
|
| 29 |
+
cls.T = nx.balanced_tree(r=2, h=2)
|
| 30 |
+
cls.Gb = nx.Graph()
|
| 31 |
+
cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
|
| 32 |
+
cls.F = nx.florentine_families_graph()
|
| 33 |
+
cls.LM = nx.les_miserables_graph()
|
| 34 |
+
cls.D = nx.cycle_graph(3, create_using=nx.DiGraph())
|
| 35 |
+
cls.D.add_edges_from([(3, 0), (4, 3)])
|
| 36 |
+
|
| 37 |
+
def test_not_strongly_connected(self):
|
| 38 |
+
b = nx.load_centrality(self.D)
|
| 39 |
+
result = {0: 5.0 / 12, 1: 1.0 / 4, 2: 1.0 / 12, 3: 1.0 / 4, 4: 0.000}
|
| 40 |
+
for n in sorted(self.D):
|
| 41 |
+
assert result[n] == pytest.approx(b[n], abs=1e-3)
|
| 42 |
+
assert result[n] == pytest.approx(nx.load_centrality(self.D, n), abs=1e-3)
|
| 43 |
+
|
| 44 |
+
def test_P2_normalized_load(self):
|
| 45 |
+
G = self.P2
|
| 46 |
+
c = nx.load_centrality(G, normalized=True)
|
| 47 |
+
d = {0: 0.000, 1: 0.000}
|
| 48 |
+
for n in sorted(G):
|
| 49 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 50 |
+
|
| 51 |
+
def test_weighted_load(self):
|
| 52 |
+
b = nx.load_centrality(self.G, weight="weight", normalized=False)
|
| 53 |
+
for n in sorted(self.G):
|
| 54 |
+
assert b[n] == self.exact_weighted[n]
|
| 55 |
+
|
| 56 |
+
def test_k5_load(self):
|
| 57 |
+
G = self.K5
|
| 58 |
+
c = nx.load_centrality(G)
|
| 59 |
+
d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000}
|
| 60 |
+
for n in sorted(G):
|
| 61 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 62 |
+
|
| 63 |
+
def test_p3_load(self):
|
| 64 |
+
G = self.P3
|
| 65 |
+
c = nx.load_centrality(G)
|
| 66 |
+
d = {0: 0.000, 1: 1.000, 2: 0.000}
|
| 67 |
+
for n in sorted(G):
|
| 68 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 69 |
+
c = nx.load_centrality(G, v=1)
|
| 70 |
+
assert c == pytest.approx(1.0, abs=1e-7)
|
| 71 |
+
c = nx.load_centrality(G, v=1, normalized=True)
|
| 72 |
+
assert c == pytest.approx(1.0, abs=1e-7)
|
| 73 |
+
|
| 74 |
+
def test_p2_load(self):
|
| 75 |
+
G = nx.path_graph(2)
|
| 76 |
+
c = nx.load_centrality(G)
|
| 77 |
+
d = {0: 0.000, 1: 0.000}
|
| 78 |
+
for n in sorted(G):
|
| 79 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 80 |
+
|
| 81 |
+
def test_krackhardt_load(self):
|
| 82 |
+
G = self.K
|
| 83 |
+
c = nx.load_centrality(G)
|
| 84 |
+
d = {
|
| 85 |
+
0: 0.023,
|
| 86 |
+
1: 0.023,
|
| 87 |
+
2: 0.000,
|
| 88 |
+
3: 0.102,
|
| 89 |
+
4: 0.000,
|
| 90 |
+
5: 0.231,
|
| 91 |
+
6: 0.231,
|
| 92 |
+
7: 0.389,
|
| 93 |
+
8: 0.222,
|
| 94 |
+
9: 0.000,
|
| 95 |
+
}
|
| 96 |
+
for n in sorted(G):
|
| 97 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 98 |
+
|
| 99 |
+
def test_florentine_families_load(self):
|
| 100 |
+
G = self.F
|
| 101 |
+
c = nx.load_centrality(G)
|
| 102 |
+
d = {
|
| 103 |
+
"Acciaiuoli": 0.000,
|
| 104 |
+
"Albizzi": 0.211,
|
| 105 |
+
"Barbadori": 0.093,
|
| 106 |
+
"Bischeri": 0.104,
|
| 107 |
+
"Castellani": 0.055,
|
| 108 |
+
"Ginori": 0.000,
|
| 109 |
+
"Guadagni": 0.251,
|
| 110 |
+
"Lamberteschi": 0.000,
|
| 111 |
+
"Medici": 0.522,
|
| 112 |
+
"Pazzi": 0.000,
|
| 113 |
+
"Peruzzi": 0.022,
|
| 114 |
+
"Ridolfi": 0.117,
|
| 115 |
+
"Salviati": 0.143,
|
| 116 |
+
"Strozzi": 0.106,
|
| 117 |
+
"Tornabuoni": 0.090,
|
| 118 |
+
}
|
| 119 |
+
for n in sorted(G):
|
| 120 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 121 |
+
|
| 122 |
+
def test_les_miserables_load(self):
|
| 123 |
+
G = self.LM
|
| 124 |
+
c = nx.load_centrality(G)
|
| 125 |
+
d = {
|
| 126 |
+
"Napoleon": 0.000,
|
| 127 |
+
"Myriel": 0.177,
|
| 128 |
+
"MlleBaptistine": 0.000,
|
| 129 |
+
"MmeMagloire": 0.000,
|
| 130 |
+
"CountessDeLo": 0.000,
|
| 131 |
+
"Geborand": 0.000,
|
| 132 |
+
"Champtercier": 0.000,
|
| 133 |
+
"Cravatte": 0.000,
|
| 134 |
+
"Count": 0.000,
|
| 135 |
+
"OldMan": 0.000,
|
| 136 |
+
"Valjean": 0.567,
|
| 137 |
+
"Labarre": 0.000,
|
| 138 |
+
"Marguerite": 0.000,
|
| 139 |
+
"MmeDeR": 0.000,
|
| 140 |
+
"Isabeau": 0.000,
|
| 141 |
+
"Gervais": 0.000,
|
| 142 |
+
"Listolier": 0.000,
|
| 143 |
+
"Tholomyes": 0.043,
|
| 144 |
+
"Fameuil": 0.000,
|
| 145 |
+
"Blacheville": 0.000,
|
| 146 |
+
"Favourite": 0.000,
|
| 147 |
+
"Dahlia": 0.000,
|
| 148 |
+
"Zephine": 0.000,
|
| 149 |
+
"Fantine": 0.128,
|
| 150 |
+
"MmeThenardier": 0.029,
|
| 151 |
+
"Thenardier": 0.075,
|
| 152 |
+
"Cosette": 0.024,
|
| 153 |
+
"Javert": 0.054,
|
| 154 |
+
"Fauchelevent": 0.026,
|
| 155 |
+
"Bamatabois": 0.008,
|
| 156 |
+
"Perpetue": 0.000,
|
| 157 |
+
"Simplice": 0.009,
|
| 158 |
+
"Scaufflaire": 0.000,
|
| 159 |
+
"Woman1": 0.000,
|
| 160 |
+
"Judge": 0.000,
|
| 161 |
+
"Champmathieu": 0.000,
|
| 162 |
+
"Brevet": 0.000,
|
| 163 |
+
"Chenildieu": 0.000,
|
| 164 |
+
"Cochepaille": 0.000,
|
| 165 |
+
"Pontmercy": 0.007,
|
| 166 |
+
"Boulatruelle": 0.000,
|
| 167 |
+
"Eponine": 0.012,
|
| 168 |
+
"Anzelma": 0.000,
|
| 169 |
+
"Woman2": 0.000,
|
| 170 |
+
"MotherInnocent": 0.000,
|
| 171 |
+
"Gribier": 0.000,
|
| 172 |
+
"MmeBurgon": 0.026,
|
| 173 |
+
"Jondrette": 0.000,
|
| 174 |
+
"Gavroche": 0.164,
|
| 175 |
+
"Gillenormand": 0.021,
|
| 176 |
+
"Magnon": 0.000,
|
| 177 |
+
"MlleGillenormand": 0.047,
|
| 178 |
+
"MmePontmercy": 0.000,
|
| 179 |
+
"MlleVaubois": 0.000,
|
| 180 |
+
"LtGillenormand": 0.000,
|
| 181 |
+
"Marius": 0.133,
|
| 182 |
+
"BaronessT": 0.000,
|
| 183 |
+
"Mabeuf": 0.028,
|
| 184 |
+
"Enjolras": 0.041,
|
| 185 |
+
"Combeferre": 0.001,
|
| 186 |
+
"Prouvaire": 0.000,
|
| 187 |
+
"Feuilly": 0.001,
|
| 188 |
+
"Courfeyrac": 0.006,
|
| 189 |
+
"Bahorel": 0.002,
|
| 190 |
+
"Bossuet": 0.032,
|
| 191 |
+
"Joly": 0.002,
|
| 192 |
+
"Grantaire": 0.000,
|
| 193 |
+
"MotherPlutarch": 0.000,
|
| 194 |
+
"Gueulemer": 0.005,
|
| 195 |
+
"Babet": 0.005,
|
| 196 |
+
"Claquesous": 0.005,
|
| 197 |
+
"Montparnasse": 0.004,
|
| 198 |
+
"Toussaint": 0.000,
|
| 199 |
+
"Child1": 0.000,
|
| 200 |
+
"Child2": 0.000,
|
| 201 |
+
"Brujon": 0.000,
|
| 202 |
+
"MmeHucheloup": 0.000,
|
| 203 |
+
}
|
| 204 |
+
for n in sorted(G):
|
| 205 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 206 |
+
|
| 207 |
+
def test_unnormalized_k5_load(self):
|
| 208 |
+
G = self.K5
|
| 209 |
+
c = nx.load_centrality(G, normalized=False)
|
| 210 |
+
d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000}
|
| 211 |
+
for n in sorted(G):
|
| 212 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 213 |
+
|
| 214 |
+
def test_unnormalized_p3_load(self):
|
| 215 |
+
G = self.P3
|
| 216 |
+
c = nx.load_centrality(G, normalized=False)
|
| 217 |
+
d = {0: 0.000, 1: 2.000, 2: 0.000}
|
| 218 |
+
for n in sorted(G):
|
| 219 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 220 |
+
|
| 221 |
+
def test_unnormalized_krackhardt_load(self):
|
| 222 |
+
G = self.K
|
| 223 |
+
c = nx.load_centrality(G, normalized=False)
|
| 224 |
+
d = {
|
| 225 |
+
0: 1.667,
|
| 226 |
+
1: 1.667,
|
| 227 |
+
2: 0.000,
|
| 228 |
+
3: 7.333,
|
| 229 |
+
4: 0.000,
|
| 230 |
+
5: 16.667,
|
| 231 |
+
6: 16.667,
|
| 232 |
+
7: 28.000,
|
| 233 |
+
8: 16.000,
|
| 234 |
+
9: 0.000,
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
for n in sorted(G):
|
| 238 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 239 |
+
|
| 240 |
+
def test_unnormalized_florentine_families_load(self):
|
| 241 |
+
G = self.F
|
| 242 |
+
c = nx.load_centrality(G, normalized=False)
|
| 243 |
+
|
| 244 |
+
d = {
|
| 245 |
+
"Acciaiuoli": 0.000,
|
| 246 |
+
"Albizzi": 38.333,
|
| 247 |
+
"Barbadori": 17.000,
|
| 248 |
+
"Bischeri": 19.000,
|
| 249 |
+
"Castellani": 10.000,
|
| 250 |
+
"Ginori": 0.000,
|
| 251 |
+
"Guadagni": 45.667,
|
| 252 |
+
"Lamberteschi": 0.000,
|
| 253 |
+
"Medici": 95.000,
|
| 254 |
+
"Pazzi": 0.000,
|
| 255 |
+
"Peruzzi": 4.000,
|
| 256 |
+
"Ridolfi": 21.333,
|
| 257 |
+
"Salviati": 26.000,
|
| 258 |
+
"Strozzi": 19.333,
|
| 259 |
+
"Tornabuoni": 16.333,
|
| 260 |
+
}
|
| 261 |
+
for n in sorted(G):
|
| 262 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 263 |
+
|
| 264 |
+
def test_load_betweenness_difference(self):
|
| 265 |
+
# Difference Between Load and Betweenness
|
| 266 |
+
# --------------------------------------- The smallest graph
|
| 267 |
+
# that shows the difference between load and betweenness is
|
| 268 |
+
# G=ladder_graph(3) (Graph B below)
|
| 269 |
+
|
| 270 |
+
# Graph A and B are from Tao Zhou, Jian-Guo Liu, Bing-Hong
|
| 271 |
+
# Wang: Comment on "Scientific collaboration
|
| 272 |
+
# networks. II. Shortest paths, weighted networks, and
|
| 273 |
+
# centrality". https://arxiv.org/pdf/physics/0511084
|
| 274 |
+
|
| 275 |
+
# Notice that unlike here, their calculation adds to 1 to the
|
| 276 |
+
# betweenness of every node i for every path from i to every
|
| 277 |
+
# other node. This is exactly what it should be, based on
|
| 278 |
+
# Eqn. (1) in their paper: the eqn is B(v) = \sum_{s\neq t,
|
| 279 |
+
# s\neq v}{\frac{\sigma_{st}(v)}{\sigma_{st}}}, therefore,
|
| 280 |
+
# they allow v to be the target node.
|
| 281 |
+
|
| 282 |
+
# We follow Brandes 2001, who follows Freeman 1977 that make
|
| 283 |
+
# the sum for betweenness of v exclude paths where v is either
|
| 284 |
+
# the source or target node. To agree with their numbers, we
|
| 285 |
+
# must additionally, remove edge (4,8) from the graph, see AC
|
| 286 |
+
# example following (there is a mistake in the figure in their
|
| 287 |
+
# paper - personal communication).
|
| 288 |
+
|
| 289 |
+
# A = nx.Graph()
|
| 290 |
+
# A.add_edges_from([(0,1), (1,2), (1,3), (2,4),
|
| 291 |
+
# (3,5), (4,6), (4,7), (4,8),
|
| 292 |
+
# (5,8), (6,9), (7,9), (8,9)])
|
| 293 |
+
B = nx.Graph() # ladder_graph(3)
|
| 294 |
+
B.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
|
| 295 |
+
c = nx.load_centrality(B, normalized=False)
|
| 296 |
+
d = {0: 1.750, 1: 1.750, 2: 6.500, 3: 6.500, 4: 1.750, 5: 1.750}
|
| 297 |
+
for n in sorted(B):
|
| 298 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 299 |
+
|
| 300 |
+
def test_c4_edge_load(self):
|
| 301 |
+
G = self.C4
|
| 302 |
+
c = nx.edge_load_centrality(G)
|
| 303 |
+
d = {(0, 1): 6.000, (0, 3): 6.000, (1, 2): 6.000, (2, 3): 6.000}
|
| 304 |
+
for n in G.edges():
|
| 305 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 306 |
+
|
| 307 |
+
def test_p4_edge_load(self):
|
| 308 |
+
G = self.P4
|
| 309 |
+
c = nx.edge_load_centrality(G)
|
| 310 |
+
d = {(0, 1): 6.000, (1, 2): 8.000, (2, 3): 6.000}
|
| 311 |
+
for n in G.edges():
|
| 312 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 313 |
+
|
| 314 |
+
def test_k5_edge_load(self):
|
| 315 |
+
G = self.K5
|
| 316 |
+
c = nx.edge_load_centrality(G)
|
| 317 |
+
d = {
|
| 318 |
+
(0, 1): 5.000,
|
| 319 |
+
(0, 2): 5.000,
|
| 320 |
+
(0, 3): 5.000,
|
| 321 |
+
(0, 4): 5.000,
|
| 322 |
+
(1, 2): 5.000,
|
| 323 |
+
(1, 3): 5.000,
|
| 324 |
+
(1, 4): 5.000,
|
| 325 |
+
(2, 3): 5.000,
|
| 326 |
+
(2, 4): 5.000,
|
| 327 |
+
(3, 4): 5.000,
|
| 328 |
+
}
|
| 329 |
+
for n in G.edges():
|
| 330 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
| 331 |
+
|
| 332 |
+
def test_tree_edge_load(self):
|
| 333 |
+
G = self.T
|
| 334 |
+
c = nx.edge_load_centrality(G)
|
| 335 |
+
d = {
|
| 336 |
+
(0, 1): 24.000,
|
| 337 |
+
(0, 2): 24.000,
|
| 338 |
+
(1, 3): 12.000,
|
| 339 |
+
(1, 4): 12.000,
|
| 340 |
+
(2, 5): 12.000,
|
| 341 |
+
(2, 6): 12.000,
|
| 342 |
+
}
|
| 343 |
+
for n in G.edges():
|
| 344 |
+
assert c[n] == pytest.approx(d[n], abs=1e-3)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def example1a_G():
|
| 7 |
+
G = nx.Graph()
|
| 8 |
+
G.add_node(1, percolation=0.1)
|
| 9 |
+
G.add_node(2, percolation=0.2)
|
| 10 |
+
G.add_node(3, percolation=0.2)
|
| 11 |
+
G.add_node(4, percolation=0.2)
|
| 12 |
+
G.add_node(5, percolation=0.3)
|
| 13 |
+
G.add_node(6, percolation=0.2)
|
| 14 |
+
G.add_node(7, percolation=0.5)
|
| 15 |
+
G.add_node(8, percolation=0.5)
|
| 16 |
+
G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)])
|
| 17 |
+
return G
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def example1b_G():
|
| 21 |
+
G = nx.Graph()
|
| 22 |
+
G.add_node(1, percolation=0.3)
|
| 23 |
+
G.add_node(2, percolation=0.5)
|
| 24 |
+
G.add_node(3, percolation=0.5)
|
| 25 |
+
G.add_node(4, percolation=0.2)
|
| 26 |
+
G.add_node(5, percolation=0.3)
|
| 27 |
+
G.add_node(6, percolation=0.2)
|
| 28 |
+
G.add_node(7, percolation=0.1)
|
| 29 |
+
G.add_node(8, percolation=0.1)
|
| 30 |
+
G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)])
|
| 31 |
+
return G
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def test_percolation_example1a():
|
| 35 |
+
"""percolation centrality: example 1a"""
|
| 36 |
+
G = example1a_G()
|
| 37 |
+
p = nx.percolation_centrality(G)
|
| 38 |
+
p_answer = {4: 0.625, 6: 0.667}
|
| 39 |
+
for n, k in p_answer.items():
|
| 40 |
+
assert p[n] == pytest.approx(k, abs=1e-3)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def test_percolation_example1b():
|
| 44 |
+
"""percolation centrality: example 1a"""
|
| 45 |
+
G = example1b_G()
|
| 46 |
+
p = nx.percolation_centrality(G)
|
| 47 |
+
p_answer = {4: 0.825, 6: 0.4}
|
| 48 |
+
for n, k in p_answer.items():
|
| 49 |
+
assert p[n] == pytest.approx(k, abs=1e-3)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def test_converge_to_betweenness():
|
| 53 |
+
"""percolation centrality: should converge to betweenness
|
| 54 |
+
centrality when all nodes are percolated the same"""
|
| 55 |
+
# taken from betweenness test test_florentine_families_graph
|
| 56 |
+
G = nx.florentine_families_graph()
|
| 57 |
+
b_answer = {
|
| 58 |
+
"Acciaiuoli": 0.000,
|
| 59 |
+
"Albizzi": 0.212,
|
| 60 |
+
"Barbadori": 0.093,
|
| 61 |
+
"Bischeri": 0.104,
|
| 62 |
+
"Castellani": 0.055,
|
| 63 |
+
"Ginori": 0.000,
|
| 64 |
+
"Guadagni": 0.255,
|
| 65 |
+
"Lamberteschi": 0.000,
|
| 66 |
+
"Medici": 0.522,
|
| 67 |
+
"Pazzi": 0.000,
|
| 68 |
+
"Peruzzi": 0.022,
|
| 69 |
+
"Ridolfi": 0.114,
|
| 70 |
+
"Salviati": 0.143,
|
| 71 |
+
"Strozzi": 0.103,
|
| 72 |
+
"Tornabuoni": 0.092,
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
# If no initial state is provided, state for
|
| 76 |
+
# every node defaults to 1
|
| 77 |
+
p_answer = nx.percolation_centrality(G)
|
| 78 |
+
assert p_answer == pytest.approx(b_answer, abs=1e-3)
|
| 79 |
+
|
| 80 |
+
p_states = {k: 0.3 for k, v in b_answer.items()}
|
| 81 |
+
p_answer = nx.percolation_centrality(G, states=p_states)
|
| 82 |
+
assert p_answer == pytest.approx(b_answer, abs=1e-3)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def test_default_percolation():
|
| 86 |
+
G = nx.erdos_renyi_graph(42, 0.42, seed=42)
|
| 87 |
+
assert nx.percolation_centrality(G) == pytest.approx(nx.betweenness_centrality(G))
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_reaching.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Unit tests for the :mod:`networkx.algorithms.centrality.reaching` module."""
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import networkx as nx
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestGlobalReachingCentrality:
|
| 8 |
+
"""Unit tests for the global reaching centrality function."""
|
| 9 |
+
|
| 10 |
+
def test_non_positive_weights(self):
|
| 11 |
+
with pytest.raises(nx.NetworkXError):
|
| 12 |
+
G = nx.DiGraph()
|
| 13 |
+
nx.global_reaching_centrality(G, weight="weight")
|
| 14 |
+
|
| 15 |
+
def test_negatively_weighted(self):
|
| 16 |
+
with pytest.raises(nx.NetworkXError):
|
| 17 |
+
G = nx.Graph()
|
| 18 |
+
G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)])
|
| 19 |
+
nx.global_reaching_centrality(G, weight="weight")
|
| 20 |
+
|
| 21 |
+
def test_directed_star(self):
|
| 22 |
+
G = nx.DiGraph()
|
| 23 |
+
G.add_weighted_edges_from([(1, 2, 0.5), (1, 3, 0.5)])
|
| 24 |
+
grc = nx.global_reaching_centrality
|
| 25 |
+
assert grc(G, normalized=False, weight="weight") == 0.5
|
| 26 |
+
assert grc(G) == 1
|
| 27 |
+
|
| 28 |
+
def test_undirected_unweighted_star(self):
|
| 29 |
+
G = nx.star_graph(2)
|
| 30 |
+
grc = nx.global_reaching_centrality
|
| 31 |
+
assert grc(G, normalized=False, weight=None) == 0.25
|
| 32 |
+
|
| 33 |
+
def test_undirected_weighted_star(self):
|
| 34 |
+
G = nx.Graph()
|
| 35 |
+
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)])
|
| 36 |
+
grc = nx.global_reaching_centrality
|
| 37 |
+
assert grc(G, normalized=False, weight="weight") == 0.375
|
| 38 |
+
|
| 39 |
+
def test_cycle_directed_unweighted(self):
|
| 40 |
+
G = nx.DiGraph()
|
| 41 |
+
G.add_edge(1, 2)
|
| 42 |
+
G.add_edge(2, 1)
|
| 43 |
+
assert nx.global_reaching_centrality(G, weight=None) == 0
|
| 44 |
+
|
| 45 |
+
def test_cycle_undirected_unweighted(self):
|
| 46 |
+
G = nx.Graph()
|
| 47 |
+
G.add_edge(1, 2)
|
| 48 |
+
assert nx.global_reaching_centrality(G, weight=None) == 0
|
| 49 |
+
|
| 50 |
+
def test_cycle_directed_weighted(self):
|
| 51 |
+
G = nx.DiGraph()
|
| 52 |
+
G.add_weighted_edges_from([(1, 2, 1), (2, 1, 1)])
|
| 53 |
+
assert nx.global_reaching_centrality(G) == 0
|
| 54 |
+
|
| 55 |
+
def test_cycle_undirected_weighted(self):
|
| 56 |
+
G = nx.Graph()
|
| 57 |
+
G.add_edge(1, 2, weight=1)
|
| 58 |
+
grc = nx.global_reaching_centrality
|
| 59 |
+
assert grc(G, normalized=False) == 0
|
| 60 |
+
|
| 61 |
+
def test_directed_weighted(self):
|
| 62 |
+
G = nx.DiGraph()
|
| 63 |
+
G.add_edge("A", "B", weight=5)
|
| 64 |
+
G.add_edge("B", "C", weight=1)
|
| 65 |
+
G.add_edge("B", "D", weight=0.25)
|
| 66 |
+
G.add_edge("D", "E", weight=1)
|
| 67 |
+
|
| 68 |
+
denom = len(G) - 1
|
| 69 |
+
A_local = sum([5, 3, 2.625, 2.0833333333333]) / denom
|
| 70 |
+
B_local = sum([1, 0.25, 0.625]) / denom
|
| 71 |
+
C_local = 0
|
| 72 |
+
D_local = sum([1]) / denom
|
| 73 |
+
E_local = 0
|
| 74 |
+
|
| 75 |
+
local_reach_ctrs = [A_local, C_local, B_local, D_local, E_local]
|
| 76 |
+
max_local = max(local_reach_ctrs)
|
| 77 |
+
expected = sum(max_local - lrc for lrc in local_reach_ctrs) / denom
|
| 78 |
+
grc = nx.global_reaching_centrality
|
| 79 |
+
actual = grc(G, normalized=False, weight="weight")
|
| 80 |
+
assert expected == pytest.approx(actual, abs=1e-7)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class TestLocalReachingCentrality:
|
| 84 |
+
"""Unit tests for the local reaching centrality function."""
|
| 85 |
+
|
| 86 |
+
def test_non_positive_weights(self):
|
| 87 |
+
with pytest.raises(nx.NetworkXError):
|
| 88 |
+
G = nx.DiGraph()
|
| 89 |
+
G.add_weighted_edges_from([(0, 1, 0)])
|
| 90 |
+
nx.local_reaching_centrality(G, 0, weight="weight")
|
| 91 |
+
|
| 92 |
+
def test_negatively_weighted(self):
|
| 93 |
+
with pytest.raises(nx.NetworkXError):
|
| 94 |
+
G = nx.Graph()
|
| 95 |
+
G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)])
|
| 96 |
+
nx.local_reaching_centrality(G, 0, weight="weight")
|
| 97 |
+
|
| 98 |
+
def test_undirected_unweighted_star(self):
|
| 99 |
+
G = nx.star_graph(2)
|
| 100 |
+
grc = nx.local_reaching_centrality
|
| 101 |
+
assert grc(G, 1, weight=None, normalized=False) == 0.75
|
| 102 |
+
|
| 103 |
+
def test_undirected_weighted_star(self):
|
| 104 |
+
G = nx.Graph()
|
| 105 |
+
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)])
|
| 106 |
+
centrality = nx.local_reaching_centrality(
|
| 107 |
+
G, 1, normalized=False, weight="weight"
|
| 108 |
+
)
|
| 109 |
+
assert centrality == 1.5
|
| 110 |
+
|
| 111 |
+
def test_undirected_weighted_normalized(self):
|
| 112 |
+
G = nx.Graph()
|
| 113 |
+
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)])
|
| 114 |
+
centrality = nx.local_reaching_centrality(
|
| 115 |
+
G, 1, normalized=True, weight="weight"
|
| 116 |
+
)
|
| 117 |
+
assert centrality == 1.0
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tests for second order centrality.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
pytest.importorskip("numpy")
|
| 8 |
+
pytest.importorskip("scipy")
|
| 9 |
+
|
| 10 |
+
import networkx as nx
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def test_empty():
|
| 14 |
+
with pytest.raises(nx.NetworkXException):
|
| 15 |
+
G = nx.empty_graph()
|
| 16 |
+
nx.second_order_centrality(G)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def test_non_connected():
|
| 20 |
+
with pytest.raises(nx.NetworkXException):
|
| 21 |
+
G = nx.Graph()
|
| 22 |
+
G.add_node(0)
|
| 23 |
+
G.add_node(1)
|
| 24 |
+
nx.second_order_centrality(G)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def test_non_negative_edge_weights():
|
| 28 |
+
with pytest.raises(nx.NetworkXException):
|
| 29 |
+
G = nx.path_graph(2)
|
| 30 |
+
G.add_edge(0, 1, weight=-1)
|
| 31 |
+
nx.second_order_centrality(G)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def test_weight_attribute():
|
| 35 |
+
G = nx.Graph()
|
| 36 |
+
G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 3.5)], weight="w")
|
| 37 |
+
expected = {0: 3.431, 1: 3.082, 2: 5.612}
|
| 38 |
+
b = nx.second_order_centrality(G, weight="w")
|
| 39 |
+
|
| 40 |
+
for n in sorted(G):
|
| 41 |
+
assert b[n] == pytest.approx(expected[n], abs=1e-2)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def test_one_node_graph():
|
| 45 |
+
"""Second order centrality: single node"""
|
| 46 |
+
G = nx.Graph()
|
| 47 |
+
G.add_node(0)
|
| 48 |
+
G.add_edge(0, 0)
|
| 49 |
+
assert nx.second_order_centrality(G)[0] == 0
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def test_P3():
|
| 53 |
+
"""Second order centrality: line graph, as defined in paper"""
|
| 54 |
+
G = nx.path_graph(3)
|
| 55 |
+
b_answer = {0: 3.741, 1: 1.414, 2: 3.741}
|
| 56 |
+
|
| 57 |
+
b = nx.second_order_centrality(G)
|
| 58 |
+
|
| 59 |
+
for n in sorted(G):
|
| 60 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-2)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def test_K3():
|
| 64 |
+
"""Second order centrality: complete graph, as defined in paper"""
|
| 65 |
+
G = nx.complete_graph(3)
|
| 66 |
+
b_answer = {0: 1.414, 1: 1.414, 2: 1.414}
|
| 67 |
+
|
| 68 |
+
b = nx.second_order_centrality(G)
|
| 69 |
+
|
| 70 |
+
for n in sorted(G):
|
| 71 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-2)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def test_ring_graph():
|
| 75 |
+
"""Second order centrality: ring graph, as defined in paper"""
|
| 76 |
+
G = nx.cycle_graph(5)
|
| 77 |
+
b_answer = {0: 4.472, 1: 4.472, 2: 4.472, 3: 4.472, 4: 4.472}
|
| 78 |
+
|
| 79 |
+
b = nx.second_order_centrality(G)
|
| 80 |
+
|
| 81 |
+
for n in sorted(G):
|
| 82 |
+
assert b[n] == pytest.approx(b_answer[n], abs=1e-2)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/test_voterank.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit tests for VoteRank.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
import networkx as nx
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TestVoteRankCentrality:
|
| 10 |
+
# Example Graph present in reference paper
|
| 11 |
+
def test_voterank_centrality_1(self):
|
| 12 |
+
G = nx.Graph()
|
| 13 |
+
G.add_edges_from(
|
| 14 |
+
[
|
| 15 |
+
(7, 8),
|
| 16 |
+
(7, 5),
|
| 17 |
+
(7, 9),
|
| 18 |
+
(5, 0),
|
| 19 |
+
(0, 1),
|
| 20 |
+
(0, 2),
|
| 21 |
+
(0, 3),
|
| 22 |
+
(0, 4),
|
| 23 |
+
(1, 6),
|
| 24 |
+
(2, 6),
|
| 25 |
+
(3, 6),
|
| 26 |
+
(4, 6),
|
| 27 |
+
]
|
| 28 |
+
)
|
| 29 |
+
assert [0, 7, 6] == nx.voterank(G)
|
| 30 |
+
|
| 31 |
+
def test_voterank_emptygraph(self):
|
| 32 |
+
G = nx.Graph()
|
| 33 |
+
assert [] == nx.voterank(G)
|
| 34 |
+
|
| 35 |
+
# Graph unit test
|
| 36 |
+
def test_voterank_centrality_2(self):
|
| 37 |
+
G = nx.florentine_families_graph()
|
| 38 |
+
d = nx.voterank(G, 4)
|
| 39 |
+
exact = ["Medici", "Strozzi", "Guadagni", "Castellani"]
|
| 40 |
+
assert exact == d
|
| 41 |
+
|
| 42 |
+
# DiGraph unit test
|
| 43 |
+
def test_voterank_centrality_3(self):
|
| 44 |
+
G = nx.gnc_graph(10, seed=7)
|
| 45 |
+
d = nx.voterank(G, 4)
|
| 46 |
+
exact = [3, 6, 8]
|
| 47 |
+
assert exact == d
|
| 48 |
+
|
| 49 |
+
# MultiGraph unit test
|
| 50 |
+
def test_voterank_centrality_4(self):
|
| 51 |
+
G = nx.MultiGraph()
|
| 52 |
+
G.add_edges_from(
|
| 53 |
+
[(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)]
|
| 54 |
+
)
|
| 55 |
+
exact = [2, 1, 5, 4]
|
| 56 |
+
assert exact == nx.voterank(G)
|
| 57 |
+
|
| 58 |
+
# MultiDiGraph unit test
|
| 59 |
+
def test_voterank_centrality_5(self):
|
| 60 |
+
G = nx.MultiDiGraph()
|
| 61 |
+
G.add_edges_from(
|
| 62 |
+
[(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)]
|
| 63 |
+
)
|
| 64 |
+
exact = [2, 0, 5, 4]
|
| 65 |
+
assert exact == nx.voterank(G)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/trophic.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Trophic levels"""
|
| 2 |
+
import networkx as nx
|
| 3 |
+
from networkx.utils import not_implemented_for
|
| 4 |
+
|
| 5 |
+
__all__ = ["trophic_levels", "trophic_differences", "trophic_incoherence_parameter"]
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@not_implemented_for("undirected")
|
| 9 |
+
@nx._dispatch(edge_attrs="weight")
|
| 10 |
+
def trophic_levels(G, weight="weight"):
|
| 11 |
+
r"""Compute the trophic levels of nodes.
|
| 12 |
+
|
| 13 |
+
The trophic level of a node $i$ is
|
| 14 |
+
|
| 15 |
+
.. math::
|
| 16 |
+
|
| 17 |
+
s_i = 1 + \frac{1}{k^{in}_i} \sum_{j} a_{ij} s_j
|
| 18 |
+
|
| 19 |
+
where $k^{in}_i$ is the in-degree of i
|
| 20 |
+
|
| 21 |
+
.. math::
|
| 22 |
+
|
| 23 |
+
k^{in}_i = \sum_{j} a_{ij}
|
| 24 |
+
|
| 25 |
+
and nodes with $k^{in}_i = 0$ have $s_i = 1$ by convention.
|
| 26 |
+
|
| 27 |
+
These are calculated using the method outlined in Levine [1]_.
|
| 28 |
+
|
| 29 |
+
Parameters
|
| 30 |
+
----------
|
| 31 |
+
G : DiGraph
|
| 32 |
+
A directed networkx graph
|
| 33 |
+
|
| 34 |
+
Returns
|
| 35 |
+
-------
|
| 36 |
+
nodes : dict
|
| 37 |
+
Dictionary of nodes with trophic level as the value.
|
| 38 |
+
|
| 39 |
+
References
|
| 40 |
+
----------
|
| 41 |
+
.. [1] Stephen Levine (1980) J. theor. Biol. 83, 195-207
|
| 42 |
+
"""
|
| 43 |
+
import numpy as np
|
| 44 |
+
|
| 45 |
+
# find adjacency matrix
|
| 46 |
+
a = nx.adjacency_matrix(G, weight=weight).T.toarray()
|
| 47 |
+
|
| 48 |
+
# drop rows/columns where in-degree is zero
|
| 49 |
+
rowsum = np.sum(a, axis=1)
|
| 50 |
+
p = a[rowsum != 0][:, rowsum != 0]
|
| 51 |
+
# normalise so sum of in-degree weights is 1 along each row
|
| 52 |
+
p = p / rowsum[rowsum != 0][:, np.newaxis]
|
| 53 |
+
|
| 54 |
+
# calculate trophic levels
|
| 55 |
+
nn = p.shape[0]
|
| 56 |
+
i = np.eye(nn)
|
| 57 |
+
try:
|
| 58 |
+
n = np.linalg.inv(i - p)
|
| 59 |
+
except np.linalg.LinAlgError as err:
|
| 60 |
+
# LinAlgError is raised when there is a non-basal node
|
| 61 |
+
msg = (
|
| 62 |
+
"Trophic levels are only defined for graphs where every "
|
| 63 |
+
+ "node has a path from a basal node (basal nodes are nodes "
|
| 64 |
+
+ "with no incoming edges)."
|
| 65 |
+
)
|
| 66 |
+
raise nx.NetworkXError(msg) from err
|
| 67 |
+
y = n.sum(axis=1) + 1
|
| 68 |
+
|
| 69 |
+
levels = {}
|
| 70 |
+
|
| 71 |
+
# all nodes with in-degree zero have trophic level == 1
|
| 72 |
+
zero_node_ids = (node_id for node_id, degree in G.in_degree if degree == 0)
|
| 73 |
+
for node_id in zero_node_ids:
|
| 74 |
+
levels[node_id] = 1
|
| 75 |
+
|
| 76 |
+
# all other nodes have levels as calculated
|
| 77 |
+
nonzero_node_ids = (node_id for node_id, degree in G.in_degree if degree != 0)
|
| 78 |
+
for i, node_id in enumerate(nonzero_node_ids):
|
| 79 |
+
levels[node_id] = y[i]
|
| 80 |
+
|
| 81 |
+
return levels
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@not_implemented_for("undirected")
|
| 85 |
+
@nx._dispatch(edge_attrs="weight")
|
| 86 |
+
def trophic_differences(G, weight="weight"):
|
| 87 |
+
r"""Compute the trophic differences of the edges of a directed graph.
|
| 88 |
+
|
| 89 |
+
The trophic difference $x_ij$ for each edge is defined in Johnson et al.
|
| 90 |
+
[1]_ as:
|
| 91 |
+
|
| 92 |
+
.. math::
|
| 93 |
+
x_ij = s_j - s_i
|
| 94 |
+
|
| 95 |
+
Where $s_i$ is the trophic level of node $i$.
|
| 96 |
+
|
| 97 |
+
Parameters
|
| 98 |
+
----------
|
| 99 |
+
G : DiGraph
|
| 100 |
+
A directed networkx graph
|
| 101 |
+
|
| 102 |
+
Returns
|
| 103 |
+
-------
|
| 104 |
+
diffs : dict
|
| 105 |
+
Dictionary of edges with trophic differences as the value.
|
| 106 |
+
|
| 107 |
+
References
|
| 108 |
+
----------
|
| 109 |
+
.. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A.
|
| 110 |
+
Munoz (2014) PNAS "Trophic coherence determines food-web stability"
|
| 111 |
+
"""
|
| 112 |
+
levels = trophic_levels(G, weight=weight)
|
| 113 |
+
diffs = {}
|
| 114 |
+
for u, v in G.edges:
|
| 115 |
+
diffs[(u, v)] = levels[v] - levels[u]
|
| 116 |
+
return diffs
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
@not_implemented_for("undirected")
|
| 120 |
+
@nx._dispatch(edge_attrs="weight")
|
| 121 |
+
def trophic_incoherence_parameter(G, weight="weight", cannibalism=False):
|
| 122 |
+
r"""Compute the trophic incoherence parameter of a graph.
|
| 123 |
+
|
| 124 |
+
Trophic coherence is defined as the homogeneity of the distribution of
|
| 125 |
+
trophic distances: the more similar, the more coherent. This is measured by
|
| 126 |
+
the standard deviation of the trophic differences and referred to as the
|
| 127 |
+
trophic incoherence parameter $q$ by [1].
|
| 128 |
+
|
| 129 |
+
Parameters
|
| 130 |
+
----------
|
| 131 |
+
G : DiGraph
|
| 132 |
+
A directed networkx graph
|
| 133 |
+
|
| 134 |
+
cannibalism: Boolean
|
| 135 |
+
If set to False, self edges are not considered in the calculation
|
| 136 |
+
|
| 137 |
+
Returns
|
| 138 |
+
-------
|
| 139 |
+
trophic_incoherence_parameter : float
|
| 140 |
+
The trophic coherence of a graph
|
| 141 |
+
|
| 142 |
+
References
|
| 143 |
+
----------
|
| 144 |
+
.. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A.
|
| 145 |
+
Munoz (2014) PNAS "Trophic coherence determines food-web stability"
|
| 146 |
+
"""
|
| 147 |
+
import numpy as np
|
| 148 |
+
|
| 149 |
+
if cannibalism:
|
| 150 |
+
diffs = trophic_differences(G, weight=weight)
|
| 151 |
+
else:
|
| 152 |
+
# If no cannibalism, remove self-edges
|
| 153 |
+
self_loops = list(nx.selfloop_edges(G))
|
| 154 |
+
if self_loops:
|
| 155 |
+
# Make a copy so we do not change G's edges in memory
|
| 156 |
+
G_2 = G.copy()
|
| 157 |
+
G_2.remove_edges_from(self_loops)
|
| 158 |
+
else:
|
| 159 |
+
# Avoid copy otherwise
|
| 160 |
+
G_2 = G
|
| 161 |
+
diffs = trophic_differences(G_2, weight=weight)
|
| 162 |
+
return np.std(list(diffs.values()))
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/voterank_alg.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Algorithm to select influential nodes in a graph using VoteRank."""
|
| 2 |
+
import networkx as nx
|
| 3 |
+
|
| 4 |
+
__all__ = ["voterank"]
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@nx._dispatch
|
| 8 |
+
def voterank(G, number_of_nodes=None):
|
| 9 |
+
"""Select a list of influential nodes in a graph using VoteRank algorithm
|
| 10 |
+
|
| 11 |
+
VoteRank [1]_ computes a ranking of the nodes in a graph G based on a
|
| 12 |
+
voting scheme. With VoteRank, all nodes vote for each of its in-neighbours
|
| 13 |
+
and the node with the highest votes is elected iteratively. The voting
|
| 14 |
+
ability of out-neighbors of elected nodes is decreased in subsequent turns.
|
| 15 |
+
|
| 16 |
+
Parameters
|
| 17 |
+
----------
|
| 18 |
+
G : graph
|
| 19 |
+
A NetworkX graph.
|
| 20 |
+
|
| 21 |
+
number_of_nodes : integer, optional
|
| 22 |
+
Number of ranked nodes to extract (default all nodes).
|
| 23 |
+
|
| 24 |
+
Returns
|
| 25 |
+
-------
|
| 26 |
+
voterank : list
|
| 27 |
+
Ordered list of computed seeds.
|
| 28 |
+
Only nodes with positive number of votes are returned.
|
| 29 |
+
|
| 30 |
+
Examples
|
| 31 |
+
--------
|
| 32 |
+
>>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4)])
|
| 33 |
+
>>> nx.voterank(G)
|
| 34 |
+
[0, 1]
|
| 35 |
+
|
| 36 |
+
The algorithm can be used both for undirected and directed graphs.
|
| 37 |
+
However, the directed version is different in two ways:
|
| 38 |
+
(i) nodes only vote for their in-neighbors and
|
| 39 |
+
(ii) only the voting ability of elected node and its out-neighbors are updated:
|
| 40 |
+
|
| 41 |
+
>>> G = nx.DiGraph([(0, 1), (2, 1), (2, 3), (3, 4)])
|
| 42 |
+
>>> nx.voterank(G)
|
| 43 |
+
[2, 3]
|
| 44 |
+
|
| 45 |
+
Notes
|
| 46 |
+
-----
|
| 47 |
+
Each edge is treated independently in case of multigraphs.
|
| 48 |
+
|
| 49 |
+
References
|
| 50 |
+
----------
|
| 51 |
+
.. [1] Zhang, J.-X. et al. (2016).
|
| 52 |
+
Identifying a set of influential spreaders in complex networks.
|
| 53 |
+
Sci. Rep. 6, 27823; doi: 10.1038/srep27823.
|
| 54 |
+
"""
|
| 55 |
+
influential_nodes = []
|
| 56 |
+
vote_rank = {}
|
| 57 |
+
if len(G) == 0:
|
| 58 |
+
return influential_nodes
|
| 59 |
+
if number_of_nodes is None or number_of_nodes > len(G):
|
| 60 |
+
number_of_nodes = len(G)
|
| 61 |
+
if G.is_directed():
|
| 62 |
+
# For directed graphs compute average out-degree
|
| 63 |
+
avgDegree = sum(deg for _, deg in G.out_degree()) / len(G)
|
| 64 |
+
else:
|
| 65 |
+
# For undirected graphs compute average degree
|
| 66 |
+
avgDegree = sum(deg for _, deg in G.degree()) / len(G)
|
| 67 |
+
# step 1 - initiate all nodes to (0,1) (score, voting ability)
|
| 68 |
+
for n in G.nodes():
|
| 69 |
+
vote_rank[n] = [0, 1]
|
| 70 |
+
# Repeat steps 1b to 4 until num_seeds are elected.
|
| 71 |
+
for _ in range(number_of_nodes):
|
| 72 |
+
# step 1b - reset rank
|
| 73 |
+
for n in G.nodes():
|
| 74 |
+
vote_rank[n][0] = 0
|
| 75 |
+
# step 2 - vote
|
| 76 |
+
for n, nbr in G.edges():
|
| 77 |
+
# In directed graphs nodes only vote for their in-neighbors
|
| 78 |
+
vote_rank[n][0] += vote_rank[nbr][1]
|
| 79 |
+
if not G.is_directed():
|
| 80 |
+
vote_rank[nbr][0] += vote_rank[n][1]
|
| 81 |
+
for n in influential_nodes:
|
| 82 |
+
vote_rank[n][0] = 0
|
| 83 |
+
# step 3 - select top node
|
| 84 |
+
n = max(G.nodes, key=lambda x: vote_rank[x][0])
|
| 85 |
+
if vote_rank[n][0] == 0:
|
| 86 |
+
return influential_nodes
|
| 87 |
+
influential_nodes.append(n)
|
| 88 |
+
# weaken the selected node
|
| 89 |
+
vote_rank[n] = [0, 0]
|
| 90 |
+
# step 4 - update voterank properties
|
| 91 |
+
for _, nbr in G.edges(n):
|
| 92 |
+
vote_rank[nbr][1] -= 1 / avgDegree
|
| 93 |
+
vote_rank[nbr][1] = max(vote_rank[nbr][1], 0)
|
| 94 |
+
return influential_nodes
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/cycles.py
ADDED
|
@@ -0,0 +1,1230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
========================
|
| 3 |
+
Cycle finding algorithms
|
| 4 |
+
========================
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from collections import Counter, defaultdict
|
| 8 |
+
from itertools import combinations, product
|
| 9 |
+
from math import inf
|
| 10 |
+
|
| 11 |
+
import networkx as nx
|
| 12 |
+
from networkx.utils import not_implemented_for, pairwise
|
| 13 |
+
|
| 14 |
+
__all__ = [
|
| 15 |
+
"cycle_basis",
|
| 16 |
+
"simple_cycles",
|
| 17 |
+
"recursive_simple_cycles",
|
| 18 |
+
"find_cycle",
|
| 19 |
+
"minimum_cycle_basis",
|
| 20 |
+
"chordless_cycles",
|
| 21 |
+
"girth",
|
| 22 |
+
]
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@not_implemented_for("directed")
|
| 26 |
+
@not_implemented_for("multigraph")
|
| 27 |
+
@nx._dispatch
|
| 28 |
+
def cycle_basis(G, root=None):
|
| 29 |
+
"""Returns a list of cycles which form a basis for cycles of G.
|
| 30 |
+
|
| 31 |
+
A basis for cycles of a network is a minimal collection of
|
| 32 |
+
cycles such that any cycle in the network can be written
|
| 33 |
+
as a sum of cycles in the basis. Here summation of cycles
|
| 34 |
+
is defined as "exclusive or" of the edges. Cycle bases are
|
| 35 |
+
useful, e.g. when deriving equations for electric circuits
|
| 36 |
+
using Kirchhoff's Laws.
|
| 37 |
+
|
| 38 |
+
Parameters
|
| 39 |
+
----------
|
| 40 |
+
G : NetworkX Graph
|
| 41 |
+
root : node, optional
|
| 42 |
+
Specify starting node for basis.
|
| 43 |
+
|
| 44 |
+
Returns
|
| 45 |
+
-------
|
| 46 |
+
A list of cycle lists. Each cycle list is a list of nodes
|
| 47 |
+
which forms a cycle (loop) in G.
|
| 48 |
+
|
| 49 |
+
Examples
|
| 50 |
+
--------
|
| 51 |
+
>>> G = nx.Graph()
|
| 52 |
+
>>> nx.add_cycle(G, [0, 1, 2, 3])
|
| 53 |
+
>>> nx.add_cycle(G, [0, 3, 4, 5])
|
| 54 |
+
>>> nx.cycle_basis(G, 0)
|
| 55 |
+
[[3, 4, 5, 0], [1, 2, 3, 0]]
|
| 56 |
+
|
| 57 |
+
Notes
|
| 58 |
+
-----
|
| 59 |
+
This is adapted from algorithm CACM 491 [1]_.
|
| 60 |
+
|
| 61 |
+
References
|
| 62 |
+
----------
|
| 63 |
+
.. [1] Paton, K. An algorithm for finding a fundamental set of
|
| 64 |
+
cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.
|
| 65 |
+
|
| 66 |
+
See Also
|
| 67 |
+
--------
|
| 68 |
+
simple_cycles
|
| 69 |
+
"""
|
| 70 |
+
gnodes = dict.fromkeys(G) # set-like object that maintains node order
|
| 71 |
+
cycles = []
|
| 72 |
+
while gnodes: # loop over connected components
|
| 73 |
+
if root is None:
|
| 74 |
+
root = gnodes.popitem()[0]
|
| 75 |
+
stack = [root]
|
| 76 |
+
pred = {root: root}
|
| 77 |
+
used = {root: set()}
|
| 78 |
+
while stack: # walk the spanning tree finding cycles
|
| 79 |
+
z = stack.pop() # use last-in so cycles easier to find
|
| 80 |
+
zused = used[z]
|
| 81 |
+
for nbr in G[z]:
|
| 82 |
+
if nbr not in used: # new node
|
| 83 |
+
pred[nbr] = z
|
| 84 |
+
stack.append(nbr)
|
| 85 |
+
used[nbr] = {z}
|
| 86 |
+
elif nbr == z: # self loops
|
| 87 |
+
cycles.append([z])
|
| 88 |
+
elif nbr not in zused: # found a cycle
|
| 89 |
+
pn = used[nbr]
|
| 90 |
+
cycle = [nbr, z]
|
| 91 |
+
p = pred[z]
|
| 92 |
+
while p not in pn:
|
| 93 |
+
cycle.append(p)
|
| 94 |
+
p = pred[p]
|
| 95 |
+
cycle.append(p)
|
| 96 |
+
cycles.append(cycle)
|
| 97 |
+
used[nbr].add(z)
|
| 98 |
+
for node in pred:
|
| 99 |
+
gnodes.pop(node, None)
|
| 100 |
+
root = None
|
| 101 |
+
return cycles
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@nx._dispatch
def simple_cycles(G, length_bound=None):
    """Find simple cycles (elementary circuits) of a graph.

    A `simple cycle`, or `elementary circuit`, is a closed path where
    no node appears twice. In a directed graph, two simple cycles are distinct
    if they are not cyclic permutations of each other. In an undirected graph,
    two simple cycles are distinct if they are not cyclic permutations of each
    other nor of the other's reversal.

    Optionally, the cycles are bounded in length. In the unbounded case, we use
    a nonrecursive, iterator/generator version of Johnson's algorithm [1]_. In
    the bounded case, we use a version of the algorithm of Gupta and
    Suzumura[2]_. There may be better algorithms for some cases [3]_ [4]_ [5]_.

    The algorithms of Johnson, and Gupta and Suzumura, are enhanced by some
    well-known preprocessing techniques. When G is directed, we restrict our
    attention to strongly connected components of G, generate all simple cycles
    containing a certain node, remove that node, and further decompose the
    remainder into strongly connected components. When G is undirected, we
    restrict our attention to biconnected components, generate all simple cycles
    containing a particular edge, remove that edge, and further decompose the
    remainder into biconnected components.

    Note that multigraphs are supported by this function -- and in undirected
    multigraphs, a pair of parallel edges is considered a cycle of length 2.
    Likewise, self-loops are considered to be cycles of length 1. We define
    cycles as sequences of nodes; so the presence of loops and parallel edges
    does not change the number of simple cycles in a graph.

    Parameters
    ----------
    G : NetworkX DiGraph
       A directed graph

    length_bound : int or None, optional (default=None)
       If length_bound is an int, generate all simple cycles of G with length at
       most length_bound. Otherwise, generate all simple cycles of G.

    Yields
    ------
    list of nodes
       Each cycle is represented by a list of nodes along the cycle.

    Examples
    --------
    >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]
    >>> G = nx.DiGraph(edges)
    >>> sorted(nx.simple_cycles(G))
    [[0], [0, 1, 2], [0, 2], [1, 2], [2]]

    To filter the cycles so that they don't include certain nodes or edges,
    copy your graph and eliminate those nodes or edges before calling.
    For example, to exclude self-loops from the above example:

    >>> H = G.copy()
    >>> H.remove_edges_from(nx.selfloop_edges(G))
    >>> sorted(nx.simple_cycles(H))
    [[0, 1, 2], [0, 2], [1, 2]]

    Notes
    -----
    When length_bound is None, the time complexity is $O((n+e)(c+1))$ for $n$
    nodes, $e$ edges and $c$ simple circuits. Otherwise, when length_bound > 1,
    the time complexity is $O((c+n)(k-1)d^k)$ where $d$ is the average degree of
    the nodes of G and $k$ = length_bound.

    Raises
    ------
    ValueError
       when length_bound < 0.

    References
    ----------
    .. [1] Finding all the elementary circuits of a directed graph.
       D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
       https://doi.org/10.1137/0204007
    .. [2] Finding All Bounded-Length Simple Cycles in a Directed Graph
       A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094
    .. [3] Enumerating the cycles of a digraph: a new preprocessing strategy.
       G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982.
    .. [4] A search strategy for the elementary cycles of a directed graph.
       J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS,
       v. 16, no. 2, 192-204, 1976.
    .. [5] Optimal Listing of Cycles and st-Paths in Undirected Graphs
       R. Ferreira and R. Grossi and A. Marino and N. Pisanti and R. Rizzi and
       G. Sacomoto https://arxiv.org/abs/1205.2766

    See Also
    --------
    cycle_basis
    chordless_cycles
    """

    if length_bound is not None:
        if length_bound == 0:
            return  # no cycle can have length <= 0
        elif length_bound < 0:
            raise ValueError("length bound must be non-negative")

    directed = G.is_directed()
    # length-1 cycles: every self-loop is its own simple cycle
    yield from ([v] for v, Gv in G.adj.items() if v in Gv)

    if length_bound is not None and length_bound == 1:
        return

    # length-2 cycles in undirected multigraphs: a pair of parallel edges.
    # `visited` ensures each unordered node pair is reported once.
    if G.is_multigraph() and not directed:
        visited = set()
        for u, Gu in G.adj.items():
            multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited)
            yield from ([u, v] for v, m in multiplicity if m > 1)
            visited.add(u)

    # explicitly filter out loops; implicitly filter out parallel edges
    if directed:
        G = nx.DiGraph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u)
    else:
        G = nx.Graph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u)

    # this case is not strictly necessary but improves performance
    if length_bound is not None and length_bound == 2:
        if directed:
            # a directed 2-cycle is a digon (u, v) with both edges present
            visited = set()
            for u, Gu in G.adj.items():
                yield from (
                    [v, u] for v in visited.intersection(Gu) if G.has_edge(v, u)
                )
                visited.add(u)
        return

    if directed:
        yield from _directed_cycle_search(G, length_bound)
    else:
        yield from _undirected_cycle_search(G, length_bound)
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def _directed_cycle_search(G, length_bound):
    """A dispatch function for `simple_cycles` for directed graphs.

    We generate all cycles of G through binary partition.

    1. Pick a node v in G which belongs to at least one cycle
        a. Generate all cycles of G which contain the node v.
        b. Recursively generate all cycles of G \\ v.

    This is accomplished through the following:

    1. Compute the strongly connected components SCC of G.
    2. Select and remove a strongly connected component C from SCC.  Select
       a node v of C.
    3. For each simple cycle P containing v in G[C], yield P.
    4. Add the strongly connected components of G[C \\ v] to SCC.

    If the parameter length_bound is not None, then step 3 will be limited to
    simple cycles of length at most length_bound.

    Parameters
    ----------
    G : NetworkX DiGraph
       A directed graph

    length_bound : int or None
       If length_bound is an int, generate all simple cycles of G with length at most length_bound.
       Otherwise, generate all simple cycles of G.

    Yields
    ------
    list of nodes
       Each cycle is represented by a list of nodes along the cycle.
    """

    scc = nx.strongly_connected_components
    # only components with >= 2 nodes can contain a cycle (loops were removed)
    components = [c for c in scc(G) if len(c) >= 2]
    while components:
        c = components.pop()
        Gc = G.subgraph(c)
        v = next(iter(c))
        if length_bound is None:
            yield from _johnson_cycle_search(Gc, [v])
        else:
            yield from _bounded_cycle_search(Gc, [v], length_bound)
        # delete v after searching G, to make sure we can find v
        G.remove_node(v)
        components.extend(c for c in scc(Gc) if len(c) >= 2)
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def _undirected_cycle_search(G, length_bound):
    """A dispatch function for `simple_cycles` for undirected graphs.

    We generate all cycles of G through binary partition.

    1. Pick an edge (u, v) in G which belongs to at least one cycle
        a. Generate all cycles of G which contain the edge (u, v)
        b. Recursively generate all cycles of G \\ (u, v)

    This is accomplished through the following:

    1. Compute the biconnected components BCC of G.
    2. Select and remove a biconnected component C from BCC.  Select a
       non-tree edge (u, v) of a depth-first search of G[C].
    3. For each (v -> u) path P remaining in G[C] \\ (u, v), yield P.
    4. Add the biconnected components of G[C] \\ (u, v) to BCC.

    If the parameter length_bound is not None, then step 3 will be limited to simple paths
    of length at most length_bound.

    Parameters
    ----------
    G : NetworkX Graph
       An undirected graph

    length_bound : int or None
       If length_bound is an int, generate all simple cycles of G with length at most length_bound.
       Otherwise, generate all simple cycles of G.

    Yields
    ------
    list of nodes
       Each cycle is represented by a list of nodes along the cycle.
    """

    bcc = nx.biconnected_components
    # a biconnected component needs >= 3 nodes to contain a simple cycle
    # (2-node components are single edges; parallel edges were removed)
    components = [c for c in bcc(G) if len(c) >= 3]
    while components:
        c = components.pop()
        Gc = G.subgraph(c)
        uv = list(next(iter(Gc.edges)))
        G.remove_edge(*uv)
        # delete (u, v) before searching G, to avoid fake 3-cycles [u, v, u]
        if length_bound is None:
            yield from _johnson_cycle_search(Gc, uv)
        else:
            yield from _bounded_cycle_search(Gc, uv, length_bound)
        components.extend(c for c in bcc(Gc) if len(c) >= 3)
| 338 |
+
|
| 339 |
+
|
| 340 |
+
class _NeighborhoodCache(dict):
|
| 341 |
+
"""Very lightweight graph wrapper which caches neighborhoods as list.
|
| 342 |
+
|
| 343 |
+
This dict subclass uses the __missing__ functionality to query graphs for
|
| 344 |
+
their neighborhoods, and store the result as a list. This is used to avoid
|
| 345 |
+
the performance penalty incurred by subgraph views.
|
| 346 |
+
"""
|
| 347 |
+
|
| 348 |
+
def __init__(self, G):
|
| 349 |
+
self.G = G
|
| 350 |
+
|
| 351 |
+
def __missing__(self, v):
|
| 352 |
+
Gv = self[v] = list(self.G[v])
|
| 353 |
+
return Gv
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def _johnson_cycle_search(G, path):
    """The main loop of the cycle-enumeration algorithm of Johnson.

    Parameters
    ----------
    G : NetworkX Graph or DiGraph
       A graph

    path : list
       A cycle prefix.  All cycles generated will begin with this prefix.

    Yields
    ------
    list of nodes
       Each cycle is represented by a list of nodes along the cycle.

    References
    ----------
    .. [1] Finding all the elementary circuits of a directed graph.
       D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
       https://doi.org/10.1137/0204007

    """

    G = _NeighborhoodCache(G)
    blocked = set(path)  # nodes that may not extend the current path
    B = defaultdict(set)  # graph portions that yield no elementary circuit
    start = path[0]
    stack = [iter(G[path[-1]])]  # neighbor iterators, one per path node
    closed = [False]  # closed[i]: a cycle was found at or below depth i
    while stack:
        nbrs = stack[-1]
        for w in nbrs:
            if w == start:
                # returning to the start closes a cycle; report a copy
                yield path[:]
                closed[-1] = True
            elif w not in blocked:
                # extend the path by one node and descend
                path.append(w)
                closed.append(False)
                stack.append(iter(G[w]))
                blocked.add(w)
                break
        else:  # no more nbrs
            stack.pop()
            v = path.pop()
            if closed.pop():
                # a cycle was found through v: propagate the flag upward
                # and recursively unblock v and everything depending on it
                if closed:
                    closed[-1] = True
                unblock_stack = {v}
                while unblock_stack:
                    u = unblock_stack.pop()
                    if u in blocked:
                        blocked.remove(u)
                        unblock_stack.update(B[u])
                        B[u].clear()
            else:
                # no cycle through v: record that unblocking any neighbor
                # of v should unblock v too
                for w in G[v]:
                    B[w].add(v)
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def _bounded_cycle_search(G, path, length_bound):
    """The main loop of the cycle-enumeration algorithm of Gupta and Suzumura.

    Parameters
    ----------
    G : NetworkX Graph or DiGraph
       A graph

    path : list
       A cycle prefix.  All cycles generated will begin with this prefix.

    length_bound: int
        A length bound.  All cycles generated will have length at most length_bound.

    Yields
    ------
    list of nodes
       Each cycle is represented by a list of nodes along the cycle.

    References
    ----------
    .. [1] Finding All Bounded-Length Simple Cycles in a Directed Graph
       A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094

    """
    G = _NeighborhoodCache(G)
    # lock[v]: depth below which v may be re-entered; prefix nodes are
    # locked at 0 (never re-enterable)
    lock = {v: 0 for v in path}
    B = defaultdict(set)  # blocking relation, as in Johnson's algorithm
    start = path[0]
    stack = [iter(G[path[-1]])]  # neighbor iterators, one per path node
    # blen[i]: shortest remaining budget at which a cycle closed below depth i
    blen = [length_bound]
    while stack:
        nbrs = stack[-1]
        for w in nbrs:
            if w == start:
                # closing edge back to the start: report a copy of the path
                yield path[:]
                blen[-1] = 1
            elif len(path) < lock.get(w, length_bound):
                # w is enterable at this depth within the length bound
                path.append(w)
                blen.append(length_bound)
                lock[w] = len(path)
                stack.append(iter(G[w]))
                break
        else:
            stack.pop()
            v = path.pop()
            bl = blen.pop()
            if blen:
                blen[-1] = min(blen[-1], bl)
            if bl < length_bound:
                # a cycle was found below v: relax locks so shorter
                # prefixes may revisit v and its blocked dependents
                relax_stack = [(bl, v)]
                while relax_stack:
                    bl, u = relax_stack.pop()
                    if lock.get(u, length_bound) < length_bound - bl + 1:
                        lock[u] = length_bound - bl + 1
                        relax_stack.extend((bl + 1, w) for w in B[u].difference(path))
            else:
                # no cycle through v within the bound: block v on its neighbors
                for w in G[v]:
                    B[w].add(v)
| 475 |
+
|
| 476 |
+
|
| 477 |
+
@nx._dispatch
def chordless_cycles(G, length_bound=None):
    """Find simple chordless cycles of a graph.

    A `simple cycle` is a closed path where no node appears twice.  In a simple
    cycle, a `chord` is an additional edge between two nodes in the cycle.  A
    `chordless cycle` is a simple cycle without chords.  Said differently, a
    chordless cycle is a cycle C in a graph G where the number of edges in the
    induced graph G[C] is equal to the length of `C`.

    Note that some care must be taken in the case that G is not a simple graph
    nor a simple digraph.  Some authors limit the definition of chordless cycles
    to have a prescribed minimum length; we do not.

        1. We interpret self-loops to be chordless cycles, except in multigraphs
           with multiple loops in parallel.  Likewise, in a chordless cycle of
           length greater than 1, there can be no nodes with self-loops.

        2. We interpret directed two-cycles to be chordless cycles, except in
           multi-digraphs when any edge in a two-cycle has a parallel copy.

        3. We interpret parallel pairs of undirected edges as two-cycles, except
           when a third (or more) parallel edge exists between the two nodes.

        4. Generalizing the above, edges with parallel clones may not occur in
           chordless cycles.

    In a directed graph, two chordless cycles are distinct if they are not
    cyclic permutations of each other.  In an undirected graph, two chordless
    cycles are distinct if they are not cyclic permutations of each other nor of
    the other's reversal.

    Optionally, the cycles are bounded in length.

    We use an algorithm strongly inspired by that of Dias et al [1]_.  It has
    been modified in the following ways:

        1. Recursion is avoided, per Python's limitations

        2. The labeling function is not necessary, because the starting paths
            are chosen (and deleted from the host graph) to prevent multiple
            occurrences of the same path

        3. The search is optionally bounded at a specified length

        4. Support for directed graphs is provided by extending cycles along
            forward edges, and blocking nodes along forward and reverse edges

        5. Support for multigraphs is provided by omitting digons from the set
            of forward edges

    Parameters
    ----------
    G : NetworkX DiGraph
       A directed graph

    length_bound : int or None, optional (default=None)
       If length_bound is an int, generate all simple cycles of G with length at
       most length_bound.  Otherwise, generate all simple cycles of G.

    Yields
    ------
    list of nodes
       Each cycle is represented by a list of nodes along the cycle.

    Examples
    --------
    >>> sorted(list(nx.chordless_cycles(nx.complete_graph(4))))
    [[1, 0, 2], [1, 0, 3], [2, 0, 3], [2, 1, 3]]

    Notes
    -----
    When length_bound is None, and the graph is simple, the time complexity is
    $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ chordless cycles.

    Raises
    ------
    ValueError
       when length_bound < 0.

    References
    ----------
    .. [1] Efficient enumeration of chordless cycles
       E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi
       https://arxiv.org/abs/1309.1051

    See Also
    --------
    simple_cycles
    """

    if length_bound is not None:
        if length_bound == 0:
            return  # no cycle can have length <= 0
        elif length_bound < 0:
            raise ValueError("length bound must be non-negative")

    directed = G.is_directed()
    multigraph = G.is_multigraph()

    # length-1 chordless cycles: self-loops, but in multigraphs only nodes
    # with exactly one loop qualify (parallel loops are chords of each other)
    if multigraph:
        yield from ([v] for v, Gv in G.adj.items() if len(Gv.get(v, ())) == 1)
    else:
        yield from ([v] for v, Gv in G.adj.items() if v in Gv)

    if length_bound is not None and length_bound == 1:
        return

    # Nodes with loops cannot belong to longer cycles.  Let's delete them here.
    # also, we implicitly reduce the multiplicity of edges down to 1 in the case
    # of multiedges.
    if directed:
        F = nx.DiGraph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu)
        B = F.to_undirected(as_view=False)
    else:
        F = nx.Graph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu)
        B = None

    # If we're given a multigraph, we have a few cases to consider with parallel
    # edges.
    #
    # 1. If we have 2 or more edges in parallel between the nodes (u, v), we
    #    must not construct longer cycles along (u, v).
    # 2. If G is not directed, then a pair of parallel edges between (u, v) is a
    #    chordless cycle unless there exists a third (or more) parallel edge.
    # 3. If G is directed, then parallel edges do not form cycles, but do
    #    preclude back-edges from forming cycles (handled in the next section),
    #    Thus, if an edge (u, v) is duplicated and the reverse (v, u) is also
    #    present, then we remove both from F.
    #
    # In directed graphs, we need to consider both directions that edges can
    # take, so iterate over all edges (u, v) and possibly (v, u).  In undirected
    # graphs, we need to be a little careful to only consider every edge once,
    # so we use a "visited" set to emulate node-order comparisons.

    if multigraph:
        if not directed:
            B = F.copy()
            visited = set()
        for u, Gu in G.adj.items():
            if directed:
                multiplicity = ((v, len(Guv)) for v, Guv in Gu.items())
                for v, m in multiplicity:
                    if m > 1:
                        F.remove_edges_from(((u, v), (v, u)))
            else:
                multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited)
                for v, m in multiplicity:
                    if m == 2:
                        yield [u, v]
                    if m > 1:
                        F.remove_edge(u, v)
                visited.add(u)

    # If we're given a directed graphs, we need to think about digons.  If we
    # have two edges (u, v) and (v, u), then that's a two-cycle.  If either edge
    # was duplicated above, then we removed both from F.  So, any digons we find
    # here are chordless.  After finding digons, we remove their edges from F
    # to avoid traversing them in the search for chordless cycles.
    if directed:
        for u, Fu in F.adj.items():
            digons = [[u, v] for v in Fu if F.has_edge(v, u)]
            yield from digons
            F.remove_edges_from(digons)
            F.remove_edges_from(e[::-1] for e in digons)

    if length_bound is not None and length_bound == 2:
        return

    # Now, we prepare to search for cycles.  We have removed all cycles of
    # lengths 1 and 2, so F is a simple graph or simple digraph.  We repeatedly
    # separate digraphs into their strongly connected components, and undirected
    # graphs into their biconnected components.  For each component, we pick a
    # node v, search for chordless cycles based at each "stem" (u, v, w), and
    # then remove v from that component before separating the graph again.
    if directed:
        separate = nx.strongly_connected_components

        # Directed stems look like (u -> v -> w), so we use the product of
        # predecessors of v with successors of v.
        def stems(C, v):
            for u, w in product(C.pred[v], C.succ[v]):
                if not G.has_edge(u, w):  # omit stems with acyclic chords
                    yield [u, v, w], F.has_edge(w, u)

    else:
        separate = nx.biconnected_components

        # Undirected stems look like (u ~ v ~ w), but we must not also search
        # (w ~ v ~ u), so we use combinations of v's neighbors of length 2.
        def stems(C, v):
            yield from (([u, v, w], F.has_edge(w, u)) for u, w in combinations(C[v], 2))

    components = [c for c in separate(F) if len(c) > 2]
    while components:
        c = components.pop()
        v = next(iter(c))
        Fc = F.subgraph(c)
        # caches are built lazily: only if some stem needs a full search
        Fcc = Bcc = None
        for S, is_triangle in stems(Fc, v):
            if is_triangle:
                yield S
            else:
                if Fcc is None:
                    Fcc = _NeighborhoodCache(Fc)
                    Bcc = Fcc if B is None else _NeighborhoodCache(B.subgraph(c))
                yield from _chordless_cycle_search(Fcc, Bcc, S, length_bound)

        components.extend(c for c in separate(F.subgraph(c - {v})) if len(c) > 2)
| 686 |
+
|
| 687 |
+
|
| 688 |
+
def _chordless_cycle_search(F, B, path, length_bound):
    """The main loop for chordless cycle enumeration.

    This algorithm is strongly inspired by that of Dias et al [1]_.  It has been
    modified in the following ways:

        1. Recursion is avoided, per Python's limitations

        2. The labeling function is not necessary, because the starting paths
            are chosen (and deleted from the host graph) to prevent multiple
            occurrences of the same path

        3. The search is optionally bounded at a specified length

        4. Support for directed graphs is provided by extending cycles along
            forward edges, and blocking nodes along forward and reverse edges

        5. Support for multigraphs is provided by omitting digons from the set
            of forward edges

    Parameters
    ----------
    F : _NeighborhoodCache
       A graph of forward edges to follow in constructing cycles

    B : _NeighborhoodCache
       A graph of blocking edges to prevent the production of chordless cycles

    path : list
       A cycle prefix.  All cycles generated will begin with this prefix.

    length_bound : int
       A length bound.  All cycles generated will have length at most length_bound.


    Yields
    ------
    list of nodes
       Each cycle is represented by a list of nodes along the cycle.

    References
    ----------
    .. [1] Efficient enumeration of chordless cycles
       E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi
       https://arxiv.org/abs/1309.1051

    """
    # blocked[v] counts how many of the current path's nodes are adjacent to v
    # in the blocking graph B; extending to a node w is only allowed when
    # blocked[w] == 1 (i.e. w sees only the path node it is reached from)
    blocked = defaultdict(int)
    target = path[0]
    blocked[path[1]] = 1
    for w in path[1:]:
        for v in B[w]:
            blocked[v] += 1

    stack = [iter(F[path[2]])]  # neighbor iterators, one per extension
    while stack:
        nbrs = stack[-1]
        for w in nbrs:
            if blocked[w] == 1 and (length_bound is None or len(path) < length_bound):
                Fw = F[w]
                if target in Fw:
                    # w closes a chordless cycle back to the path's start
                    yield path + [w]
                else:
                    Bw = B[w]
                    if target in Bw:
                        # a blocking edge to the target would be a chord
                        continue
                    # extend the path to w and block its B-neighbors
                    for v in Bw:
                        blocked[v] += 1
                    path.append(w)
                    stack.append(iter(Fw))
                    break
        else:
            # backtrack: undo the blocking contributed by the popped node
            stack.pop()
            for v in B[path.pop()]:
                blocked[v] -= 1
| 763 |
+
|
| 764 |
+
|
| 765 |
+
@not_implemented_for("undirected")
|
| 766 |
+
@nx._dispatch
|
| 767 |
+
def recursive_simple_cycles(G):
|
| 768 |
+
"""Find simple cycles (elementary circuits) of a directed graph.
|
| 769 |
+
|
| 770 |
+
A `simple cycle`, or `elementary circuit`, is a closed path where
|
| 771 |
+
no node appears twice. Two elementary circuits are distinct if they
|
| 772 |
+
are not cyclic permutations of each other.
|
| 773 |
+
|
| 774 |
+
This version uses a recursive algorithm to build a list of cycles.
|
| 775 |
+
You should probably use the iterator version called simple_cycles().
|
| 776 |
+
Warning: This recursive version uses lots of RAM!
|
| 777 |
+
It appears in NetworkX for pedagogical value.
|
| 778 |
+
|
| 779 |
+
Parameters
|
| 780 |
+
----------
|
| 781 |
+
G : NetworkX DiGraph
|
| 782 |
+
A directed graph
|
| 783 |
+
|
| 784 |
+
Returns
|
| 785 |
+
-------
|
| 786 |
+
A list of cycles, where each cycle is represented by a list of nodes
|
| 787 |
+
along the cycle.
|
| 788 |
+
|
| 789 |
+
Example:
|
| 790 |
+
|
| 791 |
+
>>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]
|
| 792 |
+
>>> G = nx.DiGraph(edges)
|
| 793 |
+
>>> nx.recursive_simple_cycles(G)
|
| 794 |
+
[[0], [2], [0, 1, 2], [0, 2], [1, 2]]
|
| 795 |
+
|
| 796 |
+
Notes
|
| 797 |
+
-----
|
| 798 |
+
The implementation follows pp. 79-80 in [1]_.
|
| 799 |
+
|
| 800 |
+
The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$
|
| 801 |
+
elementary circuits.
|
| 802 |
+
|
| 803 |
+
References
|
| 804 |
+
----------
|
| 805 |
+
.. [1] Finding all the elementary circuits of a directed graph.
|
| 806 |
+
D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
|
| 807 |
+
https://doi.org/10.1137/0204007
|
| 808 |
+
|
| 809 |
+
See Also
|
| 810 |
+
--------
|
| 811 |
+
simple_cycles, cycle_basis
|
| 812 |
+
"""
|
| 813 |
+
|
| 814 |
+
# Jon Olav Vik, 2010-08-09
|
| 815 |
+
def _unblock(thisnode):
|
| 816 |
+
"""Recursively unblock and remove nodes from B[thisnode]."""
|
| 817 |
+
if blocked[thisnode]:
|
| 818 |
+
blocked[thisnode] = False
|
| 819 |
+
while B[thisnode]:
|
| 820 |
+
_unblock(B[thisnode].pop())
|
| 821 |
+
|
| 822 |
+
def circuit(thisnode, startnode, component):
|
| 823 |
+
closed = False # set to True if elementary path is closed
|
| 824 |
+
path.append(thisnode)
|
| 825 |
+
blocked[thisnode] = True
|
| 826 |
+
for nextnode in component[thisnode]: # direct successors of thisnode
|
| 827 |
+
if nextnode == startnode:
|
| 828 |
+
result.append(path[:])
|
| 829 |
+
closed = True
|
| 830 |
+
elif not blocked[nextnode]:
|
| 831 |
+
if circuit(nextnode, startnode, component):
|
| 832 |
+
closed = True
|
| 833 |
+
if closed:
|
| 834 |
+
_unblock(thisnode)
|
| 835 |
+
else:
|
| 836 |
+
for nextnode in component[thisnode]:
|
| 837 |
+
if thisnode not in B[nextnode]: # TODO: use set for speedup?
|
| 838 |
+
B[nextnode].append(thisnode)
|
| 839 |
+
path.pop() # remove thisnode from path
|
| 840 |
+
return closed
|
| 841 |
+
|
| 842 |
+
path = [] # stack of nodes in current path
|
| 843 |
+
blocked = defaultdict(bool) # vertex: blocked from search?
|
| 844 |
+
B = defaultdict(list) # graph portions that yield no elementary circuit
|
| 845 |
+
result = [] # list to accumulate the circuits found
|
| 846 |
+
|
| 847 |
+
# Johnson's algorithm exclude self cycle edges like (v, v)
|
| 848 |
+
# To be backward compatible, we record those cycles in advance
|
| 849 |
+
# and then remove from subG
|
| 850 |
+
for v in G:
|
| 851 |
+
if G.has_edge(v, v):
|
| 852 |
+
result.append([v])
|
| 853 |
+
G.remove_edge(v, v)
|
| 854 |
+
|
| 855 |
+
# Johnson's algorithm requires some ordering of the nodes.
|
| 856 |
+
# They might not be sortable so we assign an arbitrary ordering.
|
| 857 |
+
ordering = dict(zip(G, range(len(G))))
|
| 858 |
+
for s in ordering:
|
| 859 |
+
# Build the subgraph induced by s and following nodes in the ordering
|
| 860 |
+
subgraph = G.subgraph(node for node in G if ordering[node] >= ordering[s])
|
| 861 |
+
# Find the strongly connected component in the subgraph
|
| 862 |
+
# that contains the least node according to the ordering
|
| 863 |
+
strongcomp = nx.strongly_connected_components(subgraph)
|
| 864 |
+
mincomp = min(strongcomp, key=lambda ns: min(ordering[n] for n in ns))
|
| 865 |
+
component = G.subgraph(mincomp)
|
| 866 |
+
if len(component) > 1:
|
| 867 |
+
# smallest node in the component according to the ordering
|
| 868 |
+
startnode = min(component, key=ordering.__getitem__)
|
| 869 |
+
for node in component:
|
| 870 |
+
blocked[node] = False
|
| 871 |
+
B[node][:] = []
|
| 872 |
+
dummy = circuit(startnode, startnode, component)
|
| 873 |
+
return result
|
| 874 |
+
|
| 875 |
+
|
| 876 |
+
@nx._dispatch
def find_cycle(G, source=None, orientation=None):
    """Returns a cycle found via depth-first traversal.

    The cycle is a list of edges indicating the cyclic path.
    Orientation of directed edges is controlled by `orientation`.

    Parameters
    ----------
    G : graph
        A directed/undirected graph/multigraph.

    source : node, list of nodes
        The node from which the traversal begins. If None, then a source
        is chosen arbitrarily and repeatedly until all edges from each node in
        the graph are searched.

    orientation : None | 'original' | 'reverse' | 'ignore' (default: None)
        For directed graphs and directed multigraphs, edge traversals need not
        respect the original orientation of the edges.
        When set to 'reverse' every edge is traversed in the reverse direction.
        When set to 'ignore', every edge is treated as undirected.
        When set to 'original', every edge is treated as directed.
        In all three cases, the yielded edge tuples add a last entry to
        indicate the direction in which that edge was traversed.
        If orientation is None, the yielded edge has no direction indicated.
        The direction is respected, but not reported.

    Returns
    -------
    edges : directed edges
        A list of directed edges indicating the path taken for the loop.
        If no cycle is found, then an exception is raised.
        For graphs, an edge is of the form `(u, v)` where `u` and `v`
        are the tail and head of the edge as determined by the traversal.
        For multigraphs, an edge is of the form `(u, v, key)`, where `key` is
        the key of the edge. When the graph is directed, then `u` and `v`
        are always in the order of the actual directed edge.
        If orientation is not None then the edge tuple is extended to include
        the direction of traversal ('forward' or 'reverse') on that edge.

    Raises
    ------
    NetworkXNoCycle
        If no cycle was found.

    Examples
    --------
    In this example, we construct a DAG and find, in the first call, that there
    are no directed cycles, and so an exception is raised. In the second call,
    we ignore edge orientations and find that there is an undirected cycle.
    Note that the second call finds a directed cycle while effectively
    traversing an undirected graph, and so, we found an "undirected cycle".
    This means that this DAG structure does not form a directed tree (which
    is also known as a polytree).

    >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
    >>> nx.find_cycle(G, orientation="original")
    Traceback (most recent call last):
        ...
    networkx.exception.NetworkXNoCycle: No cycle found.
    >>> list(nx.find_cycle(G, orientation="ignore"))
    [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')]

    See Also
    --------
    simple_cycles
    """
    # Select a `tailhead` helper that maps a yielded edge tuple from
    # `nx.edge_dfs` to its effective (tail, head) pair under the requested
    # orientation mode.
    if not G.is_directed() or orientation in (None, "original"):

        def tailhead(edge):
            return edge[:2]

    elif orientation == "reverse":

        def tailhead(edge):
            return edge[1], edge[0]

    elif orientation == "ignore":

        def tailhead(edge):
            # With 'ignore', edge_dfs appends the traversal direction; a
            # 'reverse' marker means the edge was walked head-to-tail.
            if edge[-1] == "reverse":
                return edge[1], edge[0]
            return edge[:2]

    explored = set()
    cycle = []
    final_node = None
    for start_node in G.nbunch_iter(source):
        if start_node in explored:
            # No loop is possible.
            continue

        edges = []
        # All nodes seen in this iteration of edge_dfs
        seen = {start_node}
        # Nodes in active path.
        active_nodes = {start_node}
        previous_head = None

        for edge in nx.edge_dfs(G, start_node, orientation):
            # Determine if this edge is a continuation of the active path.
            tail, head = tailhead(edge)
            if head in explored:
                # Then we've already explored it. No loop is possible.
                continue
            if previous_head is not None and tail != previous_head:
                # This edge results from backtracking.
                # Pop until we get a node whose head equals the current tail.
                # So for example, we might have:
                #  (0, 1), (1, 2), (2, 3), (1, 4)
                # which must become:
                #  (0, 1), (1, 4)
                while True:
                    try:
                        popped_edge = edges.pop()
                    except IndexError:
                        # Backtracked past the start: restart the active path
                        # from the current tail.
                        edges = []
                        active_nodes = {tail}
                        break
                    else:
                        popped_head = tailhead(popped_edge)[1]
                        active_nodes.remove(popped_head)

                    if edges:
                        last_head = tailhead(edges[-1])[1]
                        if tail == last_head:
                            break
            edges.append(edge)

            if head in active_nodes:
                # We have a loop!
                cycle.extend(edges)
                final_node = head
                break
            else:
                seen.add(head)
                active_nodes.add(head)
                previous_head = head

        if cycle:
            break
        else:
            explored.update(seen)

    else:
        # for/else: the loop over start nodes finished without `break`,
        # i.e. no cycle was found anywhere in the graph.
        assert len(cycle) == 0
        raise nx.exception.NetworkXNoCycle("No cycle found.")

    # We now have a list of edges which ends on a cycle.
    # So we need to remove from the beginning edges that are not relevant.

    for i, edge in enumerate(cycle):
        tail, head = tailhead(edge)
        if tail == final_node:
            break

    return cycle[i:]
|
| 1034 |
+
|
| 1035 |
+
|
| 1036 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatch(edge_attrs="weight")
def minimum_cycle_basis(G, weight=None):
    """Returns a minimum weight cycle basis for G

    Minimum weight means a cycle basis for which the total weight
    (length for unweighted graphs) of all the cycles is minimum.

    Parameters
    ----------
    G : NetworkX Graph
    weight: string
        name of the edge attribute to use for edge weights

    Returns
    -------
    A list of cycle lists. Each cycle list is a list of nodes
    which forms a cycle (loop) in G. Note that the nodes are not
    necessarily returned in a order by which they appear in the cycle

    Examples
    --------
    >>> G = nx.Graph()
    >>> nx.add_cycle(G, [0, 1, 2, 3])
    >>> nx.add_cycle(G, [0, 3, 4, 5])
    >>> nx.minimum_cycle_basis(G)
    [[5, 4, 3, 0], [3, 2, 1, 0]]

    References:
        [1] Kavitha, Telikepalli, et al. "An O(m^2n) Algorithm for
        Minimum Cycle Basis of Graphs."
        http://link.springer.com/article/10.1007/s00453-007-9064-z
        [2] de Pina, J. 1995. Applications of shortest path methods.
        Ph.D. thesis, University of Amsterdam, Netherlands

    See Also
    --------
    simple_cycles, cycle_basis
    """
    # A minimum cycle basis of G is the union of minimum cycle bases of its
    # connected components, so handle each component independently.
    basis = []
    for component_nodes in nx.connected_components(G):
        basis.extend(_min_cycle_basis(G.subgraph(component_nodes), weight))
    return basis
|
| 1081 |
+
|
| 1082 |
+
|
| 1083 |
+
def _min_cycle_basis(G, weight):
    """Return a minimum weight cycle basis of the connected graph G.

    Implements the de Pina / Kavitha et al. scheme: keep a set of vectors
    (edge sets) orthogonal to the cycles found so far, and repeatedly extract
    the minimum cycle "parallel" to one of them.
    """
    basis = []
    # Edges outside a spanning tree ("chords") index the cycle space. Any
    # spanning tree works, hence weight=None here; depending on the
    # implementation that may even be faster.
    spanning = list(nx.minimum_spanning_edges(G, weight=None, data=False))
    chords = G.edges - spanning - {(v, u) for u, v in spanning}

    # Vectors orthogonal to the cycles found so far, one per chord initially.
    vectors = [{chord} for chord in chords]
    while vectors:
        base = vectors.pop()
        # The next basis cycle is "parallel" to the popped vector.
        cycle_edges = _min_cycle(G, base, weight)
        basis.append([v for _, v in cycle_edges])

        # Re-orthogonalize the remaining vectors against the new cycle,
        # as per [p. 336, 1]: vectors with odd overlap get XOR-ed with base.
        updated = []
        for orth in vectors:
            overlap = sum(e in orth or e[::-1] in orth for e in cycle_edges)
            if overlap % 2:
                kept = {e for e in orth if e not in base and e[::-1] not in base}
                added = {e for e in base if e not in orth and e[::-1] not in orth}
                updated.append(kept | added)
            else:
                updated.append(orth)
        vectors = updated
    return basis
|
| 1111 |
+
|
| 1112 |
+
|
| 1113 |
+
def _min_cycle(G, orth, weight):
    """
    Computes the minimum weight cycle in G,
    orthogonal to the vector orth as per [p. 338, 1]
    Use (u, 1) to indicate the lifted copy of u (denoted u' in paper).
    """
    lifted = nx.Graph()

    # Two copies of every edge of G go into the lifted graph: edges in
    # `orth` cross between the two layers, all others stay within a layer.
    for u, v, wt in G.edges(data=weight, default=1):
        if (u, v) in orth or (v, u) in orth:
            lifted.add_edges_from([(u, (v, 1)), ((u, 1), v)], Gi_weight=wt)
        else:
            lifted.add_edges_from([(u, v), ((u, 1), (v, 1))], Gi_weight=wt)

    # Shortest lifted distance from each n to its copy (n, 1).
    # Note: weights live under the "Gi_weight" attribute.
    lift = {
        n: nx.shortest_path_length(lifted, source=n, target=(n, 1), weight="Gi_weight")
        for n in G
    }

    # The node with the cheapest lift yields the minimum cycle; recover the
    # actual lifted path, then project lifted nodes back into G.
    start = min(lift, key=lift.get)
    lifted_path = nx.shortest_path(
        lifted, source=start, target=(start, 1), weight="Gi_weight"
    )
    projected = [node if node in G else node[0] for node in lifted_path]

    # The projected walk may traverse some edges twice; such pairs cancel
    # (mod-2 arithmetic). First pass: flag which edges survive.
    walk_edges = list(pairwise(projected))
    surviving = set()
    for e in walk_edges:
        if e in surviving:
            surviving.remove(e)
        elif e[::-1] in surviving:
            surviving.remove(e[::-1])
        else:
            surviving.add(e)

    # Second pass: rebuild the surviving edges in walk order.
    ordered = []
    for e in walk_edges:
        if e in surviving:
            ordered.append(e)
            surviving.remove(e)
        elif e[::-1] in surviving:
            ordered.append(e[::-1])
            surviving.remove(e[::-1])

    return ordered
|
| 1164 |
+
|
| 1165 |
+
|
| 1166 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatch
def girth(G):
    """Returns the girth of the graph.

    The girth of a graph is the length of its shortest cycle, or infinity if
    the graph is acyclic. The algorithm follows the description given on the
    Wikipedia page [1]_, and runs in time O(mn) on a graph with m edges and n
    nodes.

    Parameters
    ----------
    G : NetworkX Graph

    Returns
    -------
    int or math.inf

    Examples
    --------
    All examples below (except P_5) can easily be checked using Wikipedia,
    which has a page for each of these famous graphs.

    >>> nx.girth(nx.chvatal_graph())
    4
    >>> nx.girth(nx.tutte_graph())
    4
    >>> nx.girth(nx.petersen_graph())
    5
    >>> nx.girth(nx.heawood_graph())
    6
    >>> nx.girth(nx.pappus_graph())
    6
    >>> nx.girth(nx.path_graph(5))
    inf

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Girth_(graph_theory)

    """
    best = limit = inf
    bfs_mod = nx.algorithms.traversal.breadth_first_search
    tree_edge = bfs_mod.TREE_EDGE
    level_edge = bfs_mod.LEVEL_EDGE
    for source in G:
        # BFS from `source`, tracking distances; since we only want the
        # shortest cycle, exploration beyond the current best is pruned.
        depth = {source: 0}
        for u, v, label in nx.bfs_labeled_edges(G, source):
            d_u = depth[u]
            if d_u > limit:
                break
            if label is tree_edge:
                depth[v] = d_u + 1
                continue
            # Non-tree edge closing a cycle through `source`:
            # a level edge gives an odd cycle of length d_u + d_u + 1;
            # a forward edge gives an even cycle of length d_u + (d_u + 1) + 1.
            is_level = label is level_edge
            cycle_len = 2 * d_u + 2 - is_level
            if cycle_len < best:
                best = cycle_len
                limit = d_u - is_level

    return best
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-311.pyc
ADDED
|
Binary file (8.81 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-311.pyc
ADDED
|
Binary file (17.6 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/capacityscaling.py
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Capacity scaling minimum cost flow algorithm.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
__all__ = ["capacity_scaling"]
|
| 6 |
+
|
| 7 |
+
from itertools import chain
|
| 8 |
+
from math import log
|
| 9 |
+
|
| 10 |
+
import networkx as nx
|
| 11 |
+
|
| 12 |
+
from ...utils import BinaryHeap, arbitrary_element, not_implemented_for
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _detect_unboundedness(R):
    """Detect infinite-capacity negative cycles."""
    # Auxiliary digraph holding, for each node pair, the cheapest
    # infinite-capacity edge of R.
    aux = nx.DiGraph()
    aux.add_nodes_from(R)

    # Value simulating infinity inside R.
    pseudo_inf = R.graph["inf"]
    # True infinity.
    true_inf = float("inf")
    for u in R:
        for v, keyed_edges in R[u].items():
            # Compute the minimum weight of infinite-capacity (u, v) edges.
            wmin = true_inf
            for attrs in keyed_edges.values():
                if attrs["capacity"] == pseudo_inf:
                    wmin = min(wmin, attrs["weight"])
            if wmin != true_inf:
                aux.add_edge(u, v, weight=wmin)

    if nx.negative_edge_cycle(aux):
        raise nx.NetworkXUnbounded(
            "Negative cost cycle of infinite capacity found. "
            "Min cost flow may be unbounded below."
        )
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@not_implemented_for("undirected")
def _build_residual_network(G, demand, capacity, weight):
    """Build a residual network and initialize a zero flow.

    Returns a MultiDiGraph R with per-node 'excess'/'potential' attributes
    and per-edge 'capacity'/'weight'/'flow' attributes, plus R.graph['inf'],
    the finite value used to simulate infinite capacity.

    Raises NetworkXUnfeasible if node demands do not sum to zero, and
    NetworkXUnbounded (directly or via _detect_unboundedness) if a negative
    cost cycle of infinite capacity exists.
    """
    if sum(G.nodes[u].get(demand, 0) for u in G) != 0:
        raise nx.NetworkXUnfeasible("Sum of the demands should be 0.")

    R = nx.MultiDiGraph()
    # excess = -demand: a node that wants to send flow has positive excess.
    R.add_nodes_from(
        (u, {"excess": -G.nodes[u].get(demand, 0), "potential": 0}) for u in G
    )

    inf = float("inf")
    # Detect selfloops with infinite capacities and negative weights.
    for u, v, e in nx.selfloop_edges(G, data=True):
        if e.get(weight, 0) < 0 and e.get(capacity, inf) == inf:
            raise nx.NetworkXUnbounded(
                "Negative cost cycle of infinite capacity found. "
                "Min cost flow may be unbounded below."
            )

    # Extract edges with positive capacities. Self loops excluded.
    # Non-multigraph edges get a dummy key 0 so both branches yield 4-tuples.
    if G.is_multigraph():
        edge_list = [
            (u, v, k, e)
            for u, v, k, e in G.edges(data=True, keys=True)
            if u != v and e.get(capacity, inf) > 0
        ]
    else:
        edge_list = [
            (u, v, 0, e)
            for u, v, e in G.edges(data=True)
            if u != v and e.get(capacity, inf) > 0
        ]
    # Simulate infinity with the larger of the sum of absolute node imbalances
    # the sum of finite edge capacities or any positive value if both sums are
    # zero. This allows the infinite-capacity edges to be distinguished for
    # unboundedness detection and directly participate in residual capacity
    # calculation.
    # NOTE: `inf` is rebound here from float('inf') to this finite surrogate;
    # code below deliberately uses the surrogate.
    inf = (
        max(
            sum(abs(R.nodes[u]["excess"]) for u in R),
            2
            * sum(
                e[capacity]
                for u, v, k, e in edge_list
                if capacity in e and e[capacity] != inf
            ),
        )
        or 1
    )
    for u, v, k, e in edge_list:
        r = min(e.get(capacity, inf), inf)
        w = e.get(weight, 0)
        # Add both (u, v) and (v, u) into the residual network marked with the
        # original key. (key[1] == True) indicates the (u, v) is in the
        # original network.
        R.add_edge(u, v, key=(k, True), capacity=r, weight=w, flow=0)
        R.add_edge(v, u, key=(k, False), capacity=0, weight=-w, flow=0)

    # Record the value simulating infinity.
    R.graph["inf"] = inf

    _detect_unboundedness(R)

    return R
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def _build_flow_dict(G, R, capacity, weight):
    """Build a flow dictionary from a residual network.

    Maps each edge of G to its flow value: zero by default, the full
    capacity for negative-cost selfloops (which are always saturated),
    and otherwise the positive 'flow' recorded on the corresponding
    residual edge of R (residual keys are (original_key, True/False)).
    """
    inf = float("inf")
    flow_dict = {}
    if G.is_multigraph():
        # Multigraph: flow_dict[u][v][key] -> flow on edge (u, v, key).
        for u in G:
            flow_dict[u] = {}
            for v, es in G[u].items():
                flow_dict[u][v] = {
                    # Always saturate negative selfloops.
                    k: (
                        0
                        if (
                            u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
                        )
                        else e[capacity]
                    )
                    for k, e in es.items()
                }
            # Overlay positive residual flows; k[0] recovers the original key.
            for v, es in R[u].items():
                if v in flow_dict[u]:
                    flow_dict[u][v].update(
                        (k[0], e["flow"]) for k, e in es.items() if e["flow"] > 0
                    )
    else:
        # Plain digraph: flow_dict[u][v] -> flow on edge (u, v).
        for u in G:
            flow_dict[u] = {
                # Always saturate negative selfloops.
                v: (
                    0
                    if (u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0)
                    else e[capacity]
                )
                for v, e in G[u].items()
            }
            flow_dict[u].update(
                (v, e["flow"])
                for v, es in R[u].items()
                for e in es.values()
                if e["flow"] > 0
            )
    return flow_dict
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
@nx._dispatch(node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0})
def capacity_scaling(
    G, demand="demand", capacity="capacity", weight="weight", heap=BinaryHeap
):
    r"""Find a minimum cost flow satisfying all demands in digraph G.

    This is a capacity scaling successive shortest augmenting path algorithm.

    G is a digraph with edge costs and capacities and in which nodes
    have demand, i.e., they want to send or receive some amount of
    flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node want to receive flow. A flow on
    the digraph G satisfies all demand if the net flow into each node
    is equal to the demand of that node.

    Parameters
    ----------
    G : NetworkX graph
        DiGraph or MultiDiGraph on which a minimum cost flow satisfying all
        demands is to be found.

    demand : string
        Nodes of the graph G are expected to have an attribute demand
        that indicates how much flow a node wants to send (negative
        demand) or receive (positive demand). Note that the sum of the
        demands should be 0 otherwise the problem in not feasible. If
        this attribute is not present, a node is considered to have 0
        demand. Default value: 'demand'.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    weight : string
        Edges of the graph G are expected to have an attribute weight
        that indicates the cost incurred by sending one unit of flow on
        that edge. If not present, the weight is considered to be 0.
        Default value: 'weight'.

    heap : class
        Type of heap to be used in the algorithm. It should be a subclass of
        :class:`MinHeap` or implement a compatible interface.

        If a stock heap implementation is to be used, :class:`BinaryHeap` is
        recommended over :class:`PairingHeap` for Python implementations without
        optimized attribute accesses (e.g., CPython) despite a slower
        asymptotic running time. For Python implementations with optimized
        attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better
        performance. Default value: :class:`BinaryHeap`.

    Returns
    -------
    flowCost : integer
        Cost of a minimum cost flow satisfying all demands.

    flowDict : dictionary
        If G is a digraph, a dict-of-dicts keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
        If G is a MultiDiGraph, a dict-of-dicts-of-dicts keyed by nodes
        so that flowDict[u][v][key] is the flow on edge (u, v, key).

    Raises
    ------
    NetworkXError
        This exception is raised if the input graph is not directed,
        not connected.

    NetworkXUnfeasible
        This exception is raised in the following situations:

            * The sum of the demands is not zero. Then, there is no
              flow satisfying all demands.
            * There is no flow satisfying all demand.

    NetworkXUnbounded
        This exception is raised if the digraph G has a cycle of
        negative cost and infinite capacity. Then, the cost of a flow
        satisfying all demands is unbounded below.

    Notes
    -----
    This algorithm does not work if edge weights are floating-point numbers.

    See also
    --------
    :meth:`network_simplex`

    Examples
    --------
    A simple example of a min cost flow problem.

    >>> G = nx.DiGraph()
    >>> G.add_node("a", demand=-5)
    >>> G.add_node("d", demand=5)
    >>> G.add_edge("a", "b", weight=3, capacity=4)
    >>> G.add_edge("a", "c", weight=6, capacity=10)
    >>> G.add_edge("b", "d", weight=1, capacity=9)
    >>> G.add_edge("c", "d", weight=2, capacity=5)
    >>> flowCost, flowDict = nx.capacity_scaling(G)
    >>> flowCost
    24
    >>> flowDict
    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}

    It is possible to change the name of the attributes used for the
    algorithm.

    >>> G = nx.DiGraph()
    >>> G.add_node("p", spam=-4)
    >>> G.add_node("q", spam=2)
    >>> G.add_node("a", spam=-2)
    >>> G.add_node("d", spam=-1)
    >>> G.add_node("t", spam=2)
    >>> G.add_node("w", spam=3)
    >>> G.add_edge("p", "q", cost=7, vacancies=5)
    >>> G.add_edge("p", "a", cost=1, vacancies=4)
    >>> G.add_edge("q", "d", cost=2, vacancies=3)
    >>> G.add_edge("t", "q", cost=1, vacancies=2)
    >>> G.add_edge("a", "t", cost=2, vacancies=4)
    >>> G.add_edge("d", "w", cost=3, vacancies=4)
    >>> G.add_edge("t", "w", cost=4, vacancies=1)
    >>> flowCost, flowDict = nx.capacity_scaling(
    ...     G, demand="spam", capacity="vacancies", weight="cost"
    ... )
    >>> flowCost
    37
    >>> flowDict
    {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
    """
    R = _build_residual_network(G, demand, capacity, weight)

    inf = float("inf")
    # Account cost of negative selfloops.
    flow_cost = sum(
        0
        if e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
        else e[capacity] * e[weight]
        for u, v, e in nx.selfloop_edges(G, data=True)
    )

    # Determine the maximum edge capacity.
    wmax = max(chain([-inf], (e["capacity"] for u, v, e in R.edges(data=True))))
    if wmax == -inf:
        # Residual network has no edges.
        return flow_cost, _build_flow_dict(G, R, capacity, weight)

    R_nodes = R.nodes
    R_succ = R.succ

    # Start with the largest power of two not exceeding the max capacity and
    # halve it each phase (capacity scaling).
    delta = 2 ** int(log(wmax, 2))
    while delta >= 1:
        # Saturate Δ-residual edges with negative reduced costs to achieve
        # Δ-optimality.
        for u in R:
            p_u = R_nodes[u]["potential"]
            for v, es in R_succ[u].items():
                for k, e in es.items():
                    # Reduced cost of (u, v) under current potentials.
                    if e["weight"] - p_u + R_nodes[v]["potential"] < 0:
                        flow = e["capacity"] - e["flow"]
                        if flow >= delta:
                            e["flow"] += flow
                            R_succ[v][u][(k[0], not k[1])]["flow"] -= flow
                            R_nodes[u]["excess"] -= flow
                            R_nodes[v]["excess"] += flow
        # Determine the Δ-active nodes.
        S = set()
        T = set()
        S_add = S.add
        S_remove = S.remove
        T_add = T.add
        T_remove = T.remove
        for u in R:
            excess = R_nodes[u]["excess"]
            if excess >= delta:
                S_add(u)
            elif excess <= -delta:
                T_add(u)
        # Repeatedly augment flow from S to T along shortest paths until
        # Δ-feasibility is achieved.
        while S and T:
            s = arbitrary_element(S)
            t = None
            # Search for a shortest path in terms of reduce costs from s to
            # any t in T in the Δ-residual network.
            d = {}
            pred = {s: None}
            h = heap()
            h_insert = h.insert
            h_insert(s, 0)
            while h:
                u, d_u = h.pop()
                d[u] = d_u
                if u in T:
                    # Path found.
                    t = u
                    break
                p_u = R_nodes[u]["potential"]
                for v, es in R_succ[u].items():
                    if v in d:
                        continue
                    wmin = inf
                    # Find the minimum-weighted (u, v) Δ-residual edge.
                    for k, e in es.items():
                        if e["capacity"] - e["flow"] >= delta:
                            w = e["weight"]
                            if w < wmin:
                                wmin = w
                                kmin = k
                                emin = e
                    if wmin == inf:
                        continue
                    # Update the distance label of v.
                    d_v = d_u + wmin - p_u + R_nodes[v]["potential"]
                    if h_insert(v, d_v):
                        pred[v] = (u, kmin, emin)
            if t is not None:
                # Augment Δ units of flow from s to t, walking predecessors
                # back from t (u still holds t after the search loop).
                while u != s:
                    v = u
                    u, k, e = pred[v]
                    e["flow"] += delta
                    R_succ[v][u][(k[0], not k[1])]["flow"] -= delta
                # Account node excess and deficit.
                R_nodes[s]["excess"] -= delta
                R_nodes[t]["excess"] += delta
                if R_nodes[s]["excess"] < delta:
                    S_remove(s)
                if R_nodes[t]["excess"] > -delta:
                    T_remove(t)
                # Update node potentials.
                d_t = d[t]
                for u, d_u in d.items():
                    R_nodes[u]["potential"] -= d_u - d_t
            else:
                # Path not found.
                S_remove(s)
        delta //= 2

    if any(R.nodes[u]["excess"] != 0 for u in R):
        raise nx.NetworkXUnfeasible("No flow satisfying all demands.")

    # Calculate the flow cost.
    for u in R:
        for v, es in R_succ[u].items():
            for e in es.values():
                flow = e["flow"]
                if flow > 0:
                    flow_cost += flow * e["weight"]

    return flow_cost, _build_flow_dict(G, R, capacity, weight)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/gomory_hu.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Gomory-Hu tree of undirected Graphs.
|
| 3 |
+
"""
|
| 4 |
+
import networkx as nx
|
| 5 |
+
from networkx.utils import not_implemented_for
|
| 6 |
+
|
| 7 |
+
from .edmondskarp import edmonds_karp
|
| 8 |
+
from .utils import build_residual_network
|
| 9 |
+
|
| 10 |
+
default_flow_func = edmonds_karp
|
| 11 |
+
|
| 12 |
+
__all__ = ["gomory_hu_tree"]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@not_implemented_for("directed")
@nx._dispatch(edge_attrs={"capacity": float("inf")})
def gomory_hu_tree(G, capacity="capacity", flow_func=None):
    r"""Return the Gomory-Hu tree of an undirected graph G.

    A Gomory-Hu tree is a weighted tree on the nodes of ``G`` that
    encodes the minimum s-t cuts of *all* node pairs: the minimum cut
    value between any two nodes equals the smallest edge weight on the
    path joining them in the tree, and removing that lightest edge
    splits the tree into two components that form the node partition of
    a minimum s-t cut of ``G``.  Only ``n - 1`` minimum-cut computations
    are needed instead of the obvious ``n (n - 1) / 2``.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph.

    capacity : string
        Name of the edge attribute holding each edge's capacity.  Edges
        without this attribute are treated as having infinite capacity.
        Default value: 'capacity'.

    flow_func : function
        Function used for the underlying maximum-flow computations.
        Default value: :func:`edmonds_karp`, which performs well on
        sparse graphs with right-tailed degree distributions;
        :func:`shortest_augmenting_path` tends to do better on denser
        graphs.

    Returns
    -------
    Tree : NetworkX graph
        A NetworkX graph representing the Gomory-Hu tree of the input
        graph.

    Raises
    ------
    NetworkXNotImplemented
        Raised if the input graph is directed.

    NetworkXError
        Raised if the input graph is an empty Graph.

    Examples
    --------
    >>> G = nx.karate_club_graph()
    >>> nx.set_edge_attributes(G, 1, "capacity")
    >>> T = nx.gomory_hu_tree(G)
    >>> # The minimum cut between any pair of nodes in G is the minimum
    ... # edge weight along the path between them in the Gomory-Hu tree.
    ... def minimum_edge_weight_in_shortest_path(T, u, v):
    ...     path = nx.shortest_path(T, u, v, weight="weight")
    ...     return min((T[u][v]["weight"], (u, v)) for (u, v) in zip(path, path[1:]))
    >>> u, v = 0, 33
    >>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v)
    >>> cut_value
    10
    >>> nx.minimum_cut_value(G, u, v)
    10

    Notes
    -----
    This implementation follows Gusfield's approach [1]_, which avoids
    node contractions while keeping the same computational complexity
    as the original Gomory-Hu construction.

    See also
    --------
    :func:`minimum_cut`
    :func:`maximum_flow`

    References
    ----------
    .. [1] Gusfield D: Very simple methods for all pairs network flow analysis.
       SIAM J Comput 19(1):143-155, 1990.

    """
    if flow_func is None:
        flow_func = default_flow_func

    if len(G) == 0:  # empty graph
        msg = "Empty Graph does not have a Gomory-Hu tree representation"
        raise nx.NetworkXError(msg)

    # Gusfield's algorithm keeps the intermediate tree as a parent map:
    # start with a star centered at an arbitrary root node.
    node_iter = iter(G)
    root = next(node_iter)
    parent = {n: root for n in node_iter}
    cut_weight = {}  # (child, parent) -> minimum-cut value for that tree edge

    # Build the residual network once and reuse it for every flow call.
    R = build_residual_network(G, capacity)

    # One minimum-cut computation per non-root node (n - 1 in total).
    for s in parent:
        # The cut is computed between s and its current tree neighbor.
        t = parent[s]
        cut_value, partition = nx.minimum_cut(
            G, s, t, capacity=capacity, flow_func=flow_func, residual=R
        )
        cut_weight[s, t] = cut_value
        # s is always in partition[0] and t in partition[1]; re-hang every
        # sibling of s that fell on s's side of the cut.
        source_side = partition[0]
        for w in source_side:
            if w != s and w in parent and parent[w] == t:
                parent[w] = s
                cut_weight[w, s] = cut_weight.get((w, t), cut_value)
        # If t's own parent lies on s's side, splice s between t and it.
        if t != root and parent[t] in source_side:
            cut_weight[s, parent[t]] = cut_weight[t, parent[t]]
            cut_weight[t, s] = cut_value
            parent[s] = parent[t]
            parent[t] = s

    # Materialize the parent map as an actual weighted tree.
    T = nx.Graph()
    T.add_nodes_from(G)
    T.add_weighted_edges_from((u, v, cut_weight[u, v]) for u, v in parent.items())
    return T
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/maxflow.py
ADDED
|
@@ -0,0 +1,607 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Maximum flow (and minimum cut) algorithms on capacitated graphs.
|
| 3 |
+
"""
|
| 4 |
+
import networkx as nx
|
| 5 |
+
|
| 6 |
+
from .boykovkolmogorov import boykov_kolmogorov
|
| 7 |
+
from .dinitz_alg import dinitz
|
| 8 |
+
from .edmondskarp import edmonds_karp
|
| 9 |
+
from .preflowpush import preflow_push
|
| 10 |
+
from .shortestaugmentingpath import shortest_augmenting_path
|
| 11 |
+
from .utils import build_flow_dict
|
| 12 |
+
|
| 13 |
+
# Define the default flow function for computing maximum flow.
|
| 14 |
+
default_flow_func = preflow_push
|
| 15 |
+
|
| 16 |
+
__all__ = ["maximum_flow", "maximum_flow_value", "minimum_cut", "minimum_cut_value"]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@nx._dispatch(graphs="flowG", edge_attrs={"capacity": float("inf")})
def maximum_flow(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
    """Find a maximum single-commodity flow.

    Parameters
    ----------
    flowG : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    _s : node
        Source node for the flow.

    _t : node
        Sink node for the flow.

    capacity : string
        Name of the edge attribute that holds each edge's capacity.
        Edges lacking this attribute are treated as having infinite
        capacity. Default value: 'capacity'.

    flow_func : function
        A function computing the maximum flow between a pair of nodes in
        a capacitated graph. It has to accept at least three parameters:
        a Graph or Digraph, a source node, and a target node, and return
        a residual network that follows NetworkX conventions (see Notes).
        If None, the default maximum flow function
        (:meth:`preflow_push`) is used; the choice of default may change
        between versions and should not be relied on.
        Default value: None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    flow_value : integer, float
        Value of the maximum flow, i.e., net outflow from the source.

    flow_dict : dict
        A dictionary containing the value of the flow that went through
        each edge.

    Raises
    ------
    NetworkXError
        If the input graph is a MultiGraph or MultiDiGraph (these are
        not supported), if ``flow_func`` is not callable, or if keyword
        parameters are passed without an explicit ``flow_func``.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, the value of a
        feasible flow on the graph is unbounded above.

    See also
    --------
    :meth:`maximum_flow_value`
    :meth:`minimum_cut`
    :meth:`minimum_cut_value`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The residual network ``R`` returned by ``flow_func`` must follow
    NetworkX conventions: ``R`` is a DiGraph on the same nodes as the
    input graph ``G`` that contains the edge pair ``(u, v)`` and
    ``(v, u)`` iff ``(u, v)`` is not a self-loop and at least one of the
    two edges exists in ``G``.

    ``R[u][v]['capacity']`` equals the capacity of ``(u, v)`` in ``G``
    if that edge exists and zero otherwise; infinite capacities are
    replaced by a high arbitrary finite value that does not affect the
    solution, stored in ``R.graph['inf']``. ``R[u][v]['flow']`` is the
    flow function of ``(u, v)`` and satisfies
    ``R[u][v]['flow'] == -R[v][u]['flow']``.

    The flow value, defined as the total flow into the sink, is stored
    in ``R.graph['flow_value']``. Reachability to the sink using only
    edges with ``R[u][v]['flow'] < R[u][v]['capacity']`` induces a
    minimum s-t cut. Specific algorithms may store extra data in ``R``.

    ``flow_func`` should support an optional boolean parameter
    ``value_only``; when True it may terminate as soon as the maximum
    flow value and the minimum cut can be determined.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> flow_value, flow_dict = nx.maximum_flow(G, "x", "y")
    >>> flow_value
    3.0
    >>> print(flow_dict["x"]["b"])
    1.0

    An alternative algorithm can be selected via ``flow_func``:

    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> flow_value == nx.maximum_flow(G, "x", "y", flow_func=shortest_augmenting_path)[
    ...     0
    ... ]
    True

    """
    # Extra kwargs only make sense for an explicitly chosen algorithm.
    if flow_func is None:
        if kwargs:
            raise nx.NetworkXError(
                "You have to explicitly set a flow_func if"
                " you need to pass parameters via kwargs."
            )
        flow_func = default_flow_func

    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")

    # Run the full flow computation, then unpack the residual network
    # into a per-edge flow dictionary.
    residual = flow_func(flowG, _s, _t, capacity=capacity, value_only=False, **kwargs)
    flows = build_flow_dict(flowG, residual)
    return residual.graph["flow_value"], flows
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
@nx._dispatch(graphs="flowG", edge_attrs={"capacity": float("inf")})
def maximum_flow_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
    """Find the value of maximum single-commodity flow.

    Parameters
    ----------
    flowG : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    _s : node
        Source node for the flow.

    _t : node
        Sink node for the flow.

    capacity : string
        Name of the edge attribute that holds each edge's capacity.
        Edges lacking this attribute are treated as having infinite
        capacity. Default value: 'capacity'.

    flow_func : function
        A function computing the maximum flow between a pair of nodes in
        a capacitated graph. It has to accept at least three parameters:
        a Graph or Digraph, a source node, and a target node, and return
        a residual network that follows NetworkX conventions (see Notes).
        If None, the default maximum flow function
        (:meth:`preflow_push`) is used; the choice of default may change
        between versions and should not be relied on.
        Default value: None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    flow_value : integer, float
        Value of the maximum flow, i.e., net outflow from the source.

    Raises
    ------
    NetworkXError
        If the input graph is a MultiGraph or MultiDiGraph (these are
        not supported), if ``flow_func`` is not callable, or if keyword
        parameters are passed without an explicit ``flow_func``.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, the value of a
        feasible flow on the graph is unbounded above.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`minimum_cut`
    :meth:`minimum_cut_value`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The residual network ``R`` returned by ``flow_func`` must follow
    NetworkX conventions: ``R`` is a DiGraph on the same nodes as the
    input graph ``G`` that contains the edge pair ``(u, v)`` and
    ``(v, u)`` iff ``(u, v)`` is not a self-loop and at least one of the
    two edges exists in ``G``.

    ``R[u][v]['capacity']`` equals the capacity of ``(u, v)`` in ``G``
    if that edge exists and zero otherwise; infinite capacities are
    replaced by a high arbitrary finite value that does not affect the
    solution, stored in ``R.graph['inf']``. ``R[u][v]['flow']`` is the
    flow function of ``(u, v)`` and satisfies
    ``R[u][v]['flow'] == -R[v][u]['flow']``.

    The flow value, defined as the total flow into the sink, is stored
    in ``R.graph['flow_value']``. Reachability to the sink using only
    edges with ``R[u][v]['flow'] < R[u][v]['capacity']`` induces a
    minimum s-t cut. Specific algorithms may store extra data in ``R``.

    ``flow_func`` should support an optional boolean parameter
    ``value_only``; when True it may terminate as soon as the maximum
    flow value and the minimum cut can be determined.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> flow_value = nx.maximum_flow_value(G, "x", "y")
    >>> flow_value
    3.0

    An alternative algorithm can be selected via ``flow_func``:

    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> flow_value == nx.maximum_flow_value(
    ...     G, "x", "y", flow_func=shortest_augmenting_path
    ... )
    True

    """
    # Extra kwargs only make sense for an explicitly chosen algorithm.
    if flow_func is None:
        if kwargs:
            raise nx.NetworkXError(
                "You have to explicitly set a flow_func if"
                " you need to pass parameters via kwargs."
            )
        flow_func = default_flow_func

    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")

    # Only the value is needed, so let the algorithm terminate early.
    residual = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
    return residual.graph["flow_value"]
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
@nx._dispatch(graphs="flowG", edge_attrs={"capacity": float("inf")})
def minimum_cut(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
    """Compute the value and the node partition of a minimum (s, t)-cut.

    Use the max-flow min-cut theorem, i.e., the capacity of a minimum
    capacity cut is equal to the flow value of a maximum flow.

    Parameters
    ----------
    flowG : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    _s : node
        Source node for the flow.

    _t : node
        Sink node for the flow.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    flow_func : function
        A function for computing the maximum flow among a pair of nodes
        in a capacitated graph. The function has to accept at least three
        parameters: a Graph or Digraph, a source node, and a target node.
        And return a residual network that follows NetworkX conventions
        (see Notes). If flow_func is None, the default maximum
        flow function (:meth:`preflow_push`) is used. The choice of the
        default function may change from version to version and should
        not be relied on. Default value: None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    cut_value : integer, float
        Value of the minimum cut.

    partition : pair of node sets
        A partitioning of the nodes that defines a minimum cut.

    Raises
    ------
    NetworkXError
        If ``flow_func`` is not callable, if keyword parameters are
        passed without an explicit ``flow_func``, or if a ``cutoff`` is
        given together with :meth:`preflow_push` (a cutoff would stop
        the computation before a true minimum cut is found).

    NetworkXUnbounded
        If the graph has a path of infinite capacity, all cuts have
        infinite capacity and the function raises a NetworkXError.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`maximum_flow_value`
    :meth:`minimum_cut_value`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The residual network ``R`` returned by ``flow_func`` must follow
    NetworkX conventions: ``R`` is a DiGraph on the same nodes as the
    input graph that contains the edge pair ``(u, v)`` and ``(v, u)``
    iff ``(u, v)`` is not a self-loop and at least one of the two edges
    exists in the input graph. ``R[u][v]['capacity']`` equals the
    capacity of ``(u, v)`` (zero if the edge is absent; infinite
    capacities are replaced by the high finite value ``R.graph['inf']``),
    and ``R[u][v]['flow']`` satisfies
    ``R[u][v]['flow'] == -R[v][u]['flow']``. The flow value is stored in
    ``R.graph['flow_value']``; reachability to the sink using only
    non-saturated edges induces a minimum s-t cut. ``flow_func`` should
    support an optional boolean parameter ``value_only`` allowing early
    termination once the flow value and minimum cut are known.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> cut_value, partition = nx.minimum_cut(G, "x", "y")
    >>> reachable, non_reachable = partition
    >>> cutset = set()
    >>> for u, nbrs in ((n, G[n]) for n in reachable):
    ...     cutset.update((u, v) for v in nbrs if v in non_reachable)
    >>> print(sorted(cutset))
    [('c', 'y'), ('x', 'b')]
    >>> cut_value == sum(G.edges[u, v]["capacity"] for (u, v) in cutset)
    True

    """
    if flow_func is None:
        if kwargs:
            raise nx.NetworkXError(
                "You have to explicitly set a flow_func if"
                " you need to pass parameters via kwargs."
            )
        flow_func = default_flow_func

    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")

    # A preflow_push cutoff would stop before a true minimum cut is found.
    if kwargs.get("cutoff") is not None and flow_func is preflow_push:
        raise nx.NetworkXError("cutoff should not be specified.")

    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)

    # Remove saturated edges from the residual network: the nodes that
    # can still reach the sink afterwards form one side of the minimum
    # cut, everything else (including the source) the other side.
    cutset = [(u, v, d) for u, v, d in R.edges(data=True) if d["flow"] == d["capacity"]]
    R.remove_edges_from(cutset)
    non_reachable = set(dict(nx.shortest_path_length(R, target=_t)))
    partition = (set(flowG) - non_reachable, non_reachable)

    # Restore the saturated edges so that the residual network is
    # reusable.  (``cutset`` is always a list here, so the former
    # ``if cutset is not None`` guard was dead code and has been removed.)
    R.add_edges_from(cutset)
    return (R.graph["flow_value"], partition)
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
@nx._dispatch(graphs="flowG", edge_attrs={"capacity": float("inf")})
def minimum_cut_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
    """Compute the value of a minimum (s, t)-cut.

    By the max-flow min-cut theorem, the capacity of a minimum capacity
    cut equals the flow value of a maximum flow, so this function runs the
    chosen maximum-flow algorithm in value-only mode and reports the flow
    value stored on the resulting residual network.

    Parameters
    ----------
    flowG : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    _s : node
        Source node for the flow.

    _t : node
        Sink node for the flow.

    capacity : string
        Name of the edge attribute that indicates how much flow each edge
        can support. Edges missing this attribute are treated as having
        infinite capacity. Default value: 'capacity'.

    flow_func : function
        A function for computing the maximum flow among a pair of nodes
        in a capacitated graph. It must accept at least a graph, a source
        node and a target node, and return a residual network that follows
        NetworkX conventions (see :meth:`minimum_cut` Notes). When None,
        the default maximum flow function (:meth:`preflow_push`) is used
        and no extra keyword parameters may be supplied. The choice of the
        default function may change from version to version and should not
        be relied on. Default value: None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    cut_value : integer, float
        Value of the minimum cut.

    Raises
    ------
    NetworkXUnbounded
        If the graph has a path of infinite capacity, all cuts have
        infinite capacity.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`maximum_flow_value`
    :meth:`minimum_cut`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> cut_value = nx.minimum_cut_value(G, "x", "y")
    >>> cut_value
    3.0

    Alternative algorithms can be selected with the flow_func parameter:

    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> cut_value == nx.minimum_cut_value(
    ...     G, "x", "y", flow_func=shortest_augmenting_path
    ... )
    True
    """
    if flow_func is None:
        # kwargs only make sense for an explicitly chosen algorithm.
        if kwargs:
            msg = (
                "You have to explicitly set a flow_func if"
                " you need to pass parameters via kwargs."
            )
            raise nx.NetworkXError(msg)
        flow_func = default_flow_func

    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")

    # preflow_push gives `cutoff` a different meaning, so reject it here.
    if flow_func is preflow_push and kwargs.get("cutoff") is not None:
        raise nx.NetworkXError("cutoff should not be specified.")

    residual = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
    return residual.graph["flow_value"]
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/link_analysis/tests/__init__.py
ADDED
|
File without changes
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/polynomials.py
ADDED
|
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Provides algorithms supporting the computation of graph polynomials.
|
| 2 |
+
|
| 3 |
+
Graph polynomials are polynomial-valued graph invariants that encode a wide
|
| 4 |
+
variety of structural information. Examples include the Tutte polynomial,
|
| 5 |
+
chromatic polynomial, characteristic polynomial, and matching polynomial. An
|
| 6 |
+
extensive treatment is provided in [1]_.
|
| 7 |
+
|
| 8 |
+
For a simple example, the `~sympy.matrices.matrices.MatrixDeterminant.charpoly`
|
| 9 |
+
method can be used to compute the characteristic polynomial from the adjacency
|
| 10 |
+
matrix of a graph. Consider the complete graph ``K_4``:
|
| 11 |
+
|
| 12 |
+
>>> import sympy
|
| 13 |
+
>>> x = sympy.Symbol("x")
|
| 14 |
+
>>> G = nx.complete_graph(4)
|
| 15 |
+
>>> A = nx.adjacency_matrix(G)
|
| 16 |
+
>>> M = sympy.SparseMatrix(A.todense())
|
| 17 |
+
>>> M.charpoly(x).as_expr()
|
| 18 |
+
x**4 - 6*x**2 - 8*x - 3
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
.. [1] Y. Shi, M. Dehmer, X. Li, I. Gutman,
|
| 22 |
+
"Graph Polynomials"
|
| 23 |
+
"""
|
| 24 |
+
from collections import deque
|
| 25 |
+
|
| 26 |
+
import networkx as nx
|
| 27 |
+
from networkx.utils import not_implemented_for
|
| 28 |
+
|
| 29 |
+
__all__ = ["tutte_polynomial", "chromatic_polynomial"]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@not_implemented_for("directed")
@nx._dispatch
def tutte_polynomial(G):
    r"""Returns the Tutte polynomial of `G`

    This runs the classic deletion-contraction recurrence with an explicit
    stack instead of recursion. While the current multigraph still has an
    edge that is neither a bridge nor a self-loop, its Tutte polynomial is
    the sum of the polynomials of the graph with that edge deleted and the
    graph with that edge contracted. A graph whose every edge is a bridge
    or a self-loop contributes the monomial
    ``x**(#bridges) * y**(#self-loops)``.

    The Tutte polynomial `T_G(x, y)` is a two-variable graph invariant that
    specializes to many edge-connectivity quantities, e.g. `T_G(1, 1)` is
    the number of spanning trees and `T_G(2, 0)` the number of acyclic
    orientations of `G`.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    instance of `sympy.core.add.Add`
        A Sympy expression representing the Tutte polynomial for `G`.

    Examples
    --------
    >>> C = nx.cycle_graph(5)
    >>> nx.tutte_polynomial(C)
    x**4 + x**3 + x**2 + x + y

    >>> D = nx.diamond_graph()
    >>> nx.tutte_polynomial(D)
    x**3 + 2*x**2 + 2*x*y + x + y**2 + y
    """
    import sympy

    x = sympy.Symbol("x")
    y = sympy.Symbol("y")
    pending = deque([nx.MultiGraph(G)])

    result = 0
    while pending:
        H = pending.pop()
        bridge_set = set(nx.bridges(H))

        # First edge (in iteration order) that is neither a bridge nor a loop.
        edge = next(
            (
                uvk
                for uvk in H.edges
                if (uvk[0], uvk[1]) not in bridge_set and uvk[0] != uvk[1]
            ),
            None,
        )
        if edge is None:
            # Base case: only bridges and self-loops remain.
            self_loops = list(nx.selfloop_edges(H, keys=True))
            result += x ** len(bridge_set) * y ** len(self_loops)
        else:
            # Deletion-contraction: contract `edge` in a copy, delete it here.
            contracted = nx.contracted_edge(H, edge, self_loops=True)
            contracted.remove_edge(edge[0], edge[0])
            H.remove_edge(*edge)
            pending.append(H)
            pending.append(contracted)
    return sympy.simplify(result)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
@not_implemented_for("directed")
@nx._dispatch
def chromatic_polynomial(G):
    r"""Returns the chromatic polynomial of `G`

    This runs the chromatic (Fundamental Reduction Theorem) recurrence
    with an explicit stack instead of recursion:

    .. math::
        X_G(x) = X_{G-e}(x) - X_{G/e}(x)

    for an arbitrary edge `e`, with base case `X_G(x) = x^{n(G)}` when `G`
    has no edges. Each stacked graph carries a ``contraction_idx`` graph
    attribute counting how many contractions produced it; an edgeless graph
    contributes `(-1)^{contraction\_idx} x^{n}` so that every contraction
    flips the sign, as the recurrence requires.

    Evaluating `X_G(k)` for a natural number `k` counts the proper
    k-colorings of `G`.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    instance of `sympy.core.add.Add`
        A Sympy expression representing the chromatic polynomial for `G`.

    Examples
    --------
    >>> C = nx.cycle_graph(5)
    >>> nx.chromatic_polynomial(C)
    x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x

    >>> G = nx.complete_graph(4)
    >>> nx.chromatic_polynomial(G)
    x**4 - 6*x**3 + 11*x**2 - 6*x
    """
    import sympy

    x = sympy.Symbol("x")
    pending = deque([nx.MultiGraph(G, contraction_idx=0)])

    result = 0
    while pending:
        H = pending.pop()
        remaining = list(H.edges)
        if not remaining:
            # Edgeless base case: x**n, signed by the parity of the number
            # of contractions performed on the way to this subproblem.
            result += (-1) ** H.graph["contraction_idx"] * x ** len(H)
        else:
            edge = remaining[0]
            # Contracted branch counts one more contraction (sign flip);
            # the self-loop created by the contraction is discarded.
            contracted = nx.contracted_edge(H, edge, self_loops=True)
            contracted.graph["contraction_idx"] = H.graph["contraction_idx"] + 1
            contracted.remove_edge(edge[0], edge[0])
            H.remove_edge(*edge)
            pending.append(H)
            pending.append(contracted)
    return result
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/__init__.py
ADDED
|
File without changes
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (241 Bytes). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-311.pyc
ADDED
|
Binary file (13.6 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-311.pyc
ADDED
|
Binary file (5.18 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class TestFloyd:
    """Tests for the dense (Floyd-Warshall) all-pairs shortest-path routines."""

    @classmethod
    def setup_class(cls):
        pass

    def test_floyd_warshall_predecessor_and_distance(self):
        """Distances/predecessors on weighted digraph, undirected and unweighted graphs."""
        XG = nx.DiGraph()
        XG.add_weighted_edges_from(
            [
                ("s", "u", 10),
                ("s", "x", 5),
                ("u", "v", 1),
                ("u", "x", 2),
                ("v", "y", 1),
                ("x", "u", 3),
                ("x", "v", 5),
                ("x", "y", 2),
                ("y", "s", 7),
                ("y", "v", 6),
            ]
        )
        path, dist = nx.floyd_warshall_predecessor_and_distance(XG)
        assert dist["s"]["v"] == 9
        assert path["s"]["v"] == "u"
        assert dist == {
            "y": {"y": 0, "x": 12, "s": 7, "u": 15, "v": 6},
            "x": {"y": 2, "x": 0, "s": 9, "u": 3, "v": 4},
            "s": {"y": 7, "x": 5, "s": 0, "u": 8, "v": 9},
            "u": {"y": 2, "x": 2, "s": 9, "u": 0, "v": 1},
            "v": {"y": 1, "x": 13, "s": 8, "u": 16, "v": 0},
        }

        GG = XG.to_undirected()
        # make sure we get lower weight
        # to_undirected might choose either edge with weight 2 or weight 3
        GG["u"]["x"]["weight"] = 2
        path, dist = nx.floyd_warshall_predecessor_and_distance(GG)
        assert dist["s"]["v"] == 8
        # skip this test, could be alternate path s-u-v
        # assert_equal(path['s']['v'],'y')

        G = nx.DiGraph()  # no weights
        G.add_edges_from(
            [
                ("s", "u"),
                ("s", "x"),
                ("u", "v"),
                ("u", "x"),
                ("v", "y"),
                ("x", "u"),
                ("x", "v"),
                ("x", "y"),
                ("y", "s"),
                ("y", "v"),
            ]
        )
        path, dist = nx.floyd_warshall_predecessor_and_distance(G)
        assert dist["s"]["v"] == 2
        # skip this test, could be alternate path s-u-v
        # assert_equal(path['s']['v'],'x')

        # alternate interface
        dist = nx.floyd_warshall(G)
        assert dist["s"]["v"] == 2

        # floyd_warshall_predecessor_and_distance returns
        # dicts-of-defautdicts
        # make sure we don't get empty dictionary
        XG = nx.DiGraph()
        XG.add_weighted_edges_from(
            [("v", "x", 5.0), ("y", "x", 5.0), ("v", "y", 6.0), ("x", "u", 2.0)]
        )
        path, dist = nx.floyd_warshall_predecessor_and_distance(XG)
        inf = float("inf")
        assert dist == {
            "v": {"v": 0, "x": 5.0, "y": 6.0, "u": 7.0},
            "x": {"x": 0, "u": 2.0, "v": inf, "y": inf},
            "y": {"y": 0, "x": 5.0, "v": inf, "u": 7.0},
            "u": {"u": 0, "v": inf, "x": inf, "y": inf},
        }
        assert path == {
            "v": {"x": "v", "y": "v", "u": "x"},
            "x": {"u": "x"},
            "y": {"x": "y", "u": "x"},
        }

    def test_reconstruct_path(self):
        """reconstruct_path rebuilds paths and raises KeyError for unknown nodes."""
        with pytest.raises(KeyError):
            XG = nx.DiGraph()
            XG.add_weighted_edges_from(
                [
                    ("s", "u", 10),
                    ("s", "x", 5),
                    ("u", "v", 1),
                    ("u", "x", 2),
                    ("v", "y", 1),
                    ("x", "u", 3),
                    ("x", "v", 5),
                    ("x", "y", 2),
                    ("y", "s", 7),
                    ("y", "v", 6),
                ]
            )
            predecessors, _ = nx.floyd_warshall_predecessor_and_distance(XG)

            path = nx.reconstruct_path("s", "v", predecessors)
            assert path == ["s", "x", "u", "v"]

            path = nx.reconstruct_path("s", "s", predecessors)
            assert path == []

            # this part raises the keyError
            nx.reconstruct_path("1", "2", predecessors)

    def test_cycle(self):
        """Shortest paths on an unweighted 7-cycle."""
        path, dist = nx.floyd_warshall_predecessor_and_distance(nx.cycle_graph(7))
        assert dist[0][3] == 3
        assert path[0][3] == 2
        assert dist[0][4] == 3

    def test_weighted(self):
        """Weighted undirected cycle: the long way around can be shorter."""
        XG3 = nx.Graph()
        XG3.add_weighted_edges_from(
            [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]]
        )
        path, dist = nx.floyd_warshall_predecessor_and_distance(XG3)
        assert dist[0][3] == 15
        assert path[0][3] == 2

    def test_weighted2(self):
        """Weighted cycle where direct edges are heavier than a detour."""
        XG4 = nx.Graph()
        XG4.add_weighted_edges_from(
            [
                [0, 1, 2],
                [1, 2, 2],
                [2, 3, 1],
                [3, 4, 1],
                [4, 5, 1],
                [5, 6, 1],
                [6, 7, 1],
                [7, 0, 1],
            ]
        )
        path, dist = nx.floyd_warshall_predecessor_and_distance(XG4)
        assert dist[0][2] == 4
        assert path[0][2] == 1

    def test_weight_parameter(self):
        """A non-default edge attribute name is honored via ``weight=``."""
        XG4 = nx.Graph()
        XG4.add_edges_from(
            [
                (0, 1, {"heavy": 2}),
                (1, 2, {"heavy": 2}),
                (2, 3, {"heavy": 1}),
                (3, 4, {"heavy": 1}),
                (4, 5, {"heavy": 1}),
                (5, 6, {"heavy": 1}),
                (6, 7, {"heavy": 1}),
                (7, 0, {"heavy": 1}),
            ]
        )
        path, dist = nx.floyd_warshall_predecessor_and_distance(XG4, weight="heavy")
        assert dist[0][2] == 4
        assert path[0][2] == 1

    def test_zero_distance(self):
        """Every node is at distance zero from itself."""
        XG = nx.DiGraph()
        XG.add_weighted_edges_from(
            [
                ("s", "u", 10),
                ("s", "x", 5),
                ("u", "v", 1),
                ("u", "x", 2),
                ("v", "y", 1),
                ("x", "u", 3),
                ("x", "v", 5),
                ("x", "y", 2),
                ("y", "s", 7),
                ("y", "v", 6),
            ]
        )
        path, dist = nx.floyd_warshall_predecessor_and_distance(XG)

        for u in XG:
            assert dist[u][u] == 0

        GG = XG.to_undirected()
        # make sure we get lower weight
        # to_undirected might choose either edge with weight 2 or weight 3
        GG["u"]["x"]["weight"] = 2
        path, dist = nx.floyd_warshall_predecessor_and_distance(GG)

        for u in GG:
            # BUG FIX: this was `dist[u][u] = 0` (an assignment), which
            # verified nothing. Assert the self-distance instead.
            assert dist[u][u] == 0

    def test_zero_weight(self):
        """Negative and zero edge weights, on DiGraph and MultiDiGraph."""
        G = nx.DiGraph()
        edges = [(1, 2, -2), (2, 3, -4), (1, 5, 1), (5, 4, 0), (4, 3, -5), (2, 5, -7)]
        G.add_weighted_edges_from(edges)
        dist = nx.floyd_warshall(G)
        assert dist[1][3] == -14

        G = nx.MultiDiGraph()
        edges.append((2, 5, -7))
        G.add_weighted_edges_from(edges)
        dist = nx.floyd_warshall(G)
        assert dist[1][3] == -14
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py
ADDED
|
@@ -0,0 +1,444 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import networkx as nx
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def validate_grid_path(r, c, s, t, p):
    """Check that *p* is a monotone shortest grid path from *s* to *t*.

    Nodes are labelled 1..r*c in row-major order on an r-by-c grid.
    Raises ``AssertionError`` when *p* is not a list, does not start at
    *s* and end at *t*, has the wrong length for a shortest path, leaves
    the grid, or takes a non-unit step.
    """
    assert isinstance(p, list)
    assert p[0] == s
    assert p[-1] == t

    def to_cell(label):
        # Convert a 1-based row-major node label to (row, col).
        return (label - 1) // c, (label - 1) % c

    start, goal = to_cell(s), to_cell(t)
    # A shortest grid path visits Manhattan-distance + 1 nodes.
    assert len(p) == abs(goal[0] - start[0]) + abs(goal[1] - start[1]) + 1

    cells = [to_cell(label) for label in p]
    for row, col in cells:
        assert 0 <= row < r
        assert 0 <= col < c
    for (r0, c0), (r1, c1) in zip(cells, cells[1:]):
        # Consecutive cells must be orthogonal neighbours.
        assert (abs(r1 - r0), abs(c1 - c0)) in [(0, 1), (1, 0)]
|
| 21 |
+
class TestGenericPath:
    @classmethod
    def setup_class(cls):
        # Shared fixtures for the generic shortest-path tests.
        from networkx import convert_node_labels_to_integers as cnlti

        # 4x4 grid with nodes relabelled 1..16 in sorted order.
        cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted")
        # Undirected and directed 7-cycles (unweighted).
        cls.cycle = nx.cycle_graph(7)
        cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
        # Small digraph where the negative edge makes 0->2->3 cheaper than
        # 0->1->3; used to distinguish Bellman-Ford from Dijkstra.
        cls.neg_weights = nx.DiGraph()
        cls.neg_weights.add_edge(0, 1, weight=1)
        cls.neg_weights.add_edge(0, 2, weight=3)
        cls.neg_weights.add_edge(1, 3, weight=1)
        cls.neg_weights.add_edge(2, 3, weight=-2)
| 35 |
+
def test_shortest_path(self):
    """nx.shortest_path on single (source, target) pairs."""
    # Unweighted: BFS takes the short way around each cycle.
    assert nx.shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3]
    assert nx.shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4]
    validate_grid_path(4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12))
    assert nx.shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3]

    # Weighted variants give the same answers on unit-weight graphs.
    assert nx.shortest_path(self.cycle, 0, 3, weight="weight") == [0, 1, 2, 3]
    assert nx.shortest_path(self.cycle, 0, 4, weight="weight") == [0, 6, 5, 4]
    weighted_grid_path = nx.shortest_path(self.grid, 1, 12, weight="weight")
    validate_grid_path(4, 4, 1, 12, weighted_grid_path)
    expected = [0, 1, 2, 3]
    assert nx.shortest_path(self.directed_cycle, 0, 3, weight="weight") == expected

    # Both explicit methods agree on non-negative weights.
    for method in ("dijkstra", "bellman-ford"):
        found = nx.shortest_path(
            self.directed_cycle, 0, 3, weight="weight", method=method
        )
        assert found == expected

    # when Dijkstra's will probably (depending on precise implementation)
    # incorrectly return [0, 1, 3] instead
    assert nx.shortest_path(
        self.neg_weights, 0, 3, weight="weight", method="bellman-ford"
    ) == [0, 2, 3]

    # Bad method names and absent sources are rejected.
    pytest.raises(ValueError, nx.shortest_path, self.cycle, method="SPAM")
    pytest.raises(nx.NodeNotFound, nx.shortest_path, self.cycle, 8)
+
|
| 69 |
+
def test_shortest_path_target(self):
    """Target-only nx.shortest_path maps every node to its path into target."""
    expected = {0: [0, 1], 1: [1], 2: [2, 1]}
    assert nx.shortest_path(nx.path_graph(3), target=1) == expected
    # Weighted, and weighted with each explicit method, all agree.
    assert nx.shortest_path(nx.path_graph(3), target=1, weight="weight") == expected
    for method in ("dijkstra", "bellman-ford"):
        paths = nx.shortest_path(
            nx.path_graph(3), target=1, weight="weight", method=method
        )
        assert paths == expected
|
| 86 |
+
def test_shortest_path_length(self):
    """nx.shortest_path_length on single (source, target) pairs."""
    assert nx.shortest_path_length(self.cycle, 0, 3) == 3
    assert nx.shortest_path_length(self.grid, 1, 12) == 5
    assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4

    # Unit weights reproduce the hop counts.
    assert nx.shortest_path_length(self.cycle, 0, 3, weight="weight") == 3
    assert nx.shortest_path_length(self.grid, 1, 12, weight="weight") == 5
    assert nx.shortest_path_length(self.directed_cycle, 0, 4, weight="weight") == 4

    # Both explicit methods agree.
    for method in ("dijkstra", "bellman-ford"):
        hops = nx.shortest_path_length(
            self.cycle, 0, 3, weight="weight", method=method
        )
        assert hops == 3

    # Bad method names and absent sources are rejected.
    pytest.raises(ValueError, nx.shortest_path_length, self.cycle, method="SPAM")
    pytest.raises(nx.NodeNotFound, nx.shortest_path_length, self.cycle, 8)
+
|
| 112 |
+
def test_shortest_path_length_target(self):
    """Target-only nx.shortest_path_length maps each node to its distance."""
    expected = {0: 1, 1: 0, 2: 1}
    # Unweighted form yields (node, length) pairs.
    assert dict(nx.shortest_path_length(nx.path_graph(3), target=1)) == expected
    # Weighted form compares equal to the dict directly.
    lengths = nx.shortest_path_length(nx.path_graph(3), target=1, weight="weight")
    assert lengths == expected
    for method in ("dijkstra", "bellman-ford"):
        lengths = nx.shortest_path_length(
            nx.path_graph(3), target=1, weight="weight", method=method
        )
        assert lengths == expected
|
| 129 |
+
def test_single_source_shortest_path(self):
    """Source-only nx.shortest_path matches the single-source helpers."""
    paths = nx.shortest_path(self.cycle, 0)
    assert paths[3] == [0, 1, 2, 3]
    assert paths == nx.single_source_shortest_path(self.cycle, 0)
    validate_grid_path(4, 4, 1, 12, nx.shortest_path(self.grid, 1)[12])

    # Weighted: equivalent to single-source Dijkstra on unit weights.
    paths = nx.shortest_path(self.cycle, 0, weight="weight")
    assert paths[3] == [0, 1, 2, 3]
    assert paths == nx.single_source_dijkstra_path(self.cycle, 0)
    validate_grid_path(
        4, 4, 1, 12, nx.shortest_path(self.grid, 1, weight="weight")[12]
    )

    # Explicit methods agree with plain BFS on unit weights.
    for method in ("dijkstra", "bellman-ford"):
        paths = nx.shortest_path(self.cycle, 0, method=method, weight="weight")
        assert paths[3] == [0, 1, 2, 3]
        assert paths == nx.single_source_shortest_path(self.cycle, 0)
|
| 149 |
+
def test_single_source_shortest_path_length(self):
    """Source-only nx.shortest_path_length matches the single-source helpers."""
    cycle_lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
    ans = dict(nx.shortest_path_length(self.cycle, 0))
    assert ans == cycle_lengths
    assert ans == dict(nx.single_source_shortest_path_length(self.cycle, 0))
    assert dict(nx.shortest_path_length(self.grid, 1))[16] == 6

    # Unit weights reproduce the same distances.
    ans = dict(nx.shortest_path_length(self.cycle, 0, weight="weight"))
    assert ans == cycle_lengths
    assert ans == dict(nx.single_source_dijkstra_path_length(self.cycle, 0))
    assert dict(nx.shortest_path_length(self.grid, 1, weight="weight"))[16] == 6

    # Explicit methods agree with their dedicated helpers.
    ans = dict(
        nx.shortest_path_length(self.cycle, 0, weight="weight", method="dijkstra")
    )
    assert ans == cycle_lengths
    assert ans == dict(nx.single_source_dijkstra_path_length(self.cycle, 0))
    ans = dict(
        nx.shortest_path_length(
            self.cycle, 0, weight="weight", method="bellman-ford"
        )
    )
    assert ans == cycle_lengths
    assert ans == dict(nx.single_source_bellman_ford_path_length(self.cycle, 0))
|
| 175 |
+
def test_single_source_all_shortest_paths(self):
    # All shortest paths from one source; node 2 of a 4-cycle is reached
    # both ways around the ring.
    cycle_ans = {0: [[0]], 1: [[0, 1]], 2: [[0, 1, 2], [0, 3, 2]], 3: [[0, 3]]}
    ans = dict(nx.single_source_all_shortest_paths(nx.cycle_graph(4), 0))
    assert sorted(ans[2]) == cycle_ans[2]
    ans = dict(nx.single_source_all_shortest_paths(self.grid, 1))
    # The six monotone lattice paths from node 1 to node 11 on the 4x4 grid.
    grid_ans = [
        [1, 2, 3, 7, 11],
        [1, 2, 6, 7, 11],
        [1, 2, 6, 10, 11],
        [1, 5, 6, 7, 11],
        [1, 5, 6, 10, 11],
        [1, 5, 9, 10, 11],
    ]
    assert sorted(ans[11]) == grid_ans
    # Weighted and bellman-ford variants yield the same path sets.
    ans = dict(
        nx.single_source_all_shortest_paths(nx.cycle_graph(4), 0, weight="weight")
    )
    assert sorted(ans[2]) == cycle_ans[2]
    ans = dict(
        nx.single_source_all_shortest_paths(
            nx.cycle_graph(4), 0, method="bellman-ford", weight="weight"
        )
    )
    assert sorted(ans[2]) == cycle_ans[2]
    ans = dict(nx.single_source_all_shortest_paths(self.grid, 1, weight="weight"))
    assert sorted(ans[11]) == grid_ans
    ans = dict(
        nx.single_source_all_shortest_paths(
            self.grid, 1, method="bellman-ford", weight="weight"
        )
    )
    assert sorted(ans[11]) == grid_ans
    # An isolated node is only reachable from itself (trivial path).
    G = nx.cycle_graph(4)
    G.add_node(4)
    ans = dict(nx.single_source_all_shortest_paths(G, 0))
    assert sorted(ans[2]) == [[0, 1, 2], [0, 3, 2]]
    ans = dict(nx.single_source_all_shortest_paths(G, 4))
    assert sorted(ans[4]) == [[4]]
+
|
| 214 |
+
def test_all_pairs_shortest_path(self):
    """Argument-free nx.shortest_path matches the all-pairs helpers."""
    paths = nx.shortest_path(self.cycle)
    assert paths[0][3] == [0, 1, 2, 3]
    assert paths == dict(nx.all_pairs_shortest_path(self.cycle))
    validate_grid_path(4, 4, 1, 12, nx.shortest_path(self.grid)[1][12])

    # Weighted: equivalent to all-pairs Dijkstra on unit weights.
    paths = nx.shortest_path(self.cycle, weight="weight")
    assert paths[0][3] == [0, 1, 2, 3]
    assert paths == dict(nx.all_pairs_dijkstra_path(self.cycle))
    validate_grid_path(
        4, 4, 1, 12, nx.shortest_path(self.grid, weight="weight")[1][12]
    )

    # Explicit methods match their dedicated all-pairs helpers.
    paths = nx.shortest_path(self.cycle, weight="weight", method="dijkstra")
    assert paths[0][3] == [0, 1, 2, 3]
    assert paths == dict(nx.all_pairs_dijkstra_path(self.cycle))
    paths = nx.shortest_path(self.cycle, weight="weight", method="bellman-ford")
    assert paths[0][3] == [0, 1, 2, 3]
    assert paths == dict(nx.all_pairs_bellman_ford_path(self.cycle))
|
| 234 |
+
def test_all_pairs_shortest_path_length(self):
    # Distances from node 0 around the unweighted 7-cycle.
    ans = dict(nx.shortest_path_length(self.cycle))
    assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
    assert ans == dict(nx.all_pairs_shortest_path_length(self.cycle))
    ans = dict(nx.shortest_path_length(self.grid))
    # Opposite corners of the 4x4 grid are six hops apart.
    assert ans[1][16] == 6
    # now with weights
    ans = dict(nx.shortest_path_length(self.cycle, weight="weight"))
    assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
    assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle))
    ans = dict(nx.shortest_path_length(self.grid, weight="weight"))
    assert ans[1][16] == 6
    # weights and method specified
    ans = dict(
        nx.shortest_path_length(self.cycle, weight="weight", method="dijkstra")
    )
    assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
    assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle))
    ans = dict(
        nx.shortest_path_length(self.cycle, weight="weight", method="bellman-ford")
    )
    assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
    assert ans == dict(nx.all_pairs_bellman_ford_path_length(self.cycle))
|
| 258 |
+
def test_all_pairs_all_shortest_paths(self):
    """All shortest paths between every pair; node 3 of a 4-cycle is
    reached from node 1 both ways around the ring."""
    ans = dict(nx.all_pairs_all_shortest_paths(nx.cycle_graph(4)))
    assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]]
    # BUG FIX: ``weight``/``method`` were previously passed as keyword
    # arguments to ``dict()`` rather than to the algorithm, so the keyword
    # variants were never exercised and a stray ``"weight": "weight"``
    # entry polluted the result dict.
    ans = dict(nx.all_pairs_all_shortest_paths(nx.cycle_graph(4), weight="weight"))
    assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]]
    ans = dict(
        nx.all_pairs_all_shortest_paths(
            nx.cycle_graph(4), method="bellman-ford", weight="weight"
        )
    )
    assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]]
    # An isolated node still has the trivial path to itself.
    G = nx.cycle_graph(4)
    G.add_node(4)
    ans = dict(nx.all_pairs_all_shortest_paths(G))
    assert sorted(ans[4][4]) == [[4]]
|
| 274 |
+
def test_has_path(self):
    """has_path: True inside a component, False across components."""
    graph = nx.Graph()
    nx.add_path(graph, range(3))     # component {0, 1, 2}
    nx.add_path(graph, range(3, 5))  # component {3, 4}
    assert nx.has_path(graph, 0, 2)
    assert not nx.has_path(graph, 0, 4)
|
| 281 |
+
def test_all_shortest_paths(self):
    """all_shortest_paths finds both equal-length routes, however invoked."""
    expected = [[0, 1, 2, 3], [0, 10, 20, 3]]

    def two_route_graph():
        # Two disjoint length-3 routes from node 0 to node 3.
        G = nx.Graph()
        nx.add_path(G, [0, 1, 2, 3])
        nx.add_path(G, [0, 10, 20, 3])
        return G

    assert sorted(nx.all_shortest_paths(two_route_graph(), 0, 3)) == expected
    # with weights
    assert (
        sorted(nx.all_shortest_paths(two_route_graph(), 0, 3, weight="weight"))
        == expected
    )
    # weights and method specified
    for method in ("dijkstra", "bellman-ford"):
        found = nx.all_shortest_paths(
            two_route_graph(), 0, 3, weight="weight", method=method
        )
        assert sorted(found) == expected
|
| 307 |
+
def test_all_shortest_paths_raise(self):
    """An unreachable target raises NetworkXNoPath."""
    graph = nx.path_graph(4)
    graph.add_node(4)  # isolated, unreachable from node 0
    with pytest.raises(nx.NetworkXNoPath):
        list(nx.all_shortest_paths(graph, 0, 4))
|
| 313 |
+
def test_bad_method(self):
    """An unknown ``method`` name raises ValueError."""
    graph = nx.path_graph(2)
    with pytest.raises(ValueError):
        list(nx.all_shortest_paths(graph, 0, 1, weight="weight", method="SPAM"))
|
| 318 |
+
def test_single_source_all_shortest_paths_bad_method(self):
    """The single-source form also rejects unknown ``method`` names."""
    graph = nx.path_graph(2)
    with pytest.raises(ValueError):
        dict(
            nx.single_source_all_shortest_paths(
                graph, 0, weight="weight", method="SPAM"
            )
        )
|
| 327 |
+
def test_all_shortest_paths_zero_weight_edge(self):
    # Zero-weight edge 1-2 makes 0-1-3 and 0-1-2-3 equally cheap (cost 2
    # each with unit default weights); the shortest-path *sets* must be
    # direction-symmetric for both methods.
    g = nx.Graph()
    nx.add_path(g, [0, 1, 3])
    nx.add_path(g, [0, 1, 2, 3])
    g.edges[1, 2]["weight"] = 0
    paths30d = list(
        nx.all_shortest_paths(g, 3, 0, weight="weight", method="dijkstra")
    )
    paths03d = list(
        nx.all_shortest_paths(g, 0, 3, weight="weight", method="dijkstra")
    )
    paths30b = list(
        nx.all_shortest_paths(g, 3, 0, weight="weight", method="bellman-ford")
    )
    paths03b = list(
        nx.all_shortest_paths(g, 0, 3, weight="weight", method="bellman-ford")
    )
    # Reversing every 3->0 path must reproduce the 0->3 paths exactly.
    assert sorted(paths03d) == sorted(p[::-1] for p in paths30d)
    assert sorted(paths03d) == sorted(p[::-1] for p in paths30b)
    assert sorted(paths03b) == sorted(p[::-1] for p in paths30b)
|
| 348 |
+
|
| 349 |
+
class TestAverageShortestPathLength:
    def test_cycle_graph(self):
        # Mean pairwise distance on an unweighted 7-cycle is 2.
        ans = nx.average_shortest_path_length(nx.cycle_graph(7))
        assert ans == pytest.approx(2, abs=1e-7)

    def test_path_graph(self):
        # Mean pairwise distance on a 5-node path is 2.
        ans = nx.average_shortest_path_length(nx.path_graph(5))
        assert ans == pytest.approx(2, abs=1e-7)

    def test_weighted(self):
        # Doubling every edge weight doubles the average (2 -> 4).
        G = nx.Graph()
        nx.add_cycle(G, range(7), weight=2)
        ans = nx.average_shortest_path_length(G, weight="weight")
        assert ans == pytest.approx(4, abs=1e-7)
        G = nx.Graph()
        nx.add_path(G, range(5), weight=2)
        ans = nx.average_shortest_path_length(G, weight="weight")
        assert ans == pytest.approx(4, abs=1e-7)

    def test_specified_methods(self):
        # All three pure-Python methods agree on the weighted cycle...
        G = nx.Graph()
        nx.add_cycle(G, range(7), weight=2)
        ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra")
        assert ans == pytest.approx(4, abs=1e-7)
        ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford")
        assert ans == pytest.approx(4, abs=1e-7)
        ans = nx.average_shortest_path_length(
            G, weight="weight", method="floyd-warshall"
        )
        assert ans == pytest.approx(4, abs=1e-7)

        # ...and on the weighted path.
        G = nx.Graph()
        nx.add_path(G, range(5), weight=2)
        ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra")
        assert ans == pytest.approx(4, abs=1e-7)
        ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford")
        assert ans == pytest.approx(4, abs=1e-7)
        ans = nx.average_shortest_path_length(
            G, weight="weight", method="floyd-warshall"
        )
        assert ans == pytest.approx(4, abs=1e-7)

    def test_directed_not_strongly_connected(self):
        # A one-way edge is not strongly connected, so the average is
        # undefined for a digraph.
        G = nx.DiGraph([(0, 1)])
        with pytest.raises(nx.NetworkXError, match="Graph is not strongly connected"):
            nx.average_shortest_path_length(G)

    def test_undirected_not_connected(self):
        # Node 2 is isolated, so the undirected graph is disconnected.
        g = nx.Graph()
        g.add_nodes_from(range(3))
        g.add_edge(0, 1)
        pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g)

    def test_trivial_graph(self):
        """Tests that the trivial graph has average path length zero,
        since there is exactly one path of length zero in the trivial
        graph.

        For more information, see issue #1960.

        """
        G = nx.trivial_graph()
        assert nx.average_shortest_path_length(G) == 0

    def test_null_graph(self):
        # The empty graph has no nodes at all; the average is a
        # pointless concept rather than merely an error.
        with pytest.raises(nx.NetworkXPointlessConcept):
            nx.average_shortest_path_length(nx.null_graph())

    def test_bad_method(self):
        # Unknown ``method`` names are rejected with ValueError.
        with pytest.raises(ValueError):
            G = nx.path_graph(2)
            nx.average_shortest_path_length(G, weight="weight", method="SPAM")
+
|
| 422 |
+
|
| 423 |
+
class TestAverageShortestPathLengthNumpy:
    @classmethod
    def setup_class(cls):
        # Skip the whole class when numpy is unavailable, and bind the
        # module as a global for the test methods.
        global np
        import pytest

        np = pytest.importorskip("numpy")

    def test_specified_methods_numpy(self):
        # floyd-warshall-numpy matches the pure-Python methods on the
        # doubled-weight cycle...
        G = nx.Graph()
        nx.add_cycle(G, range(7), weight=2)
        ans = nx.average_shortest_path_length(
            G, weight="weight", method="floyd-warshall-numpy"
        )
        np.testing.assert_almost_equal(ans, 4)

        # ...and on the doubled-weight path.
        G = nx.Graph()
        nx.add_path(G, range(5), weight=2)
        ans = nx.average_shortest_path_length(
            G, weight="weight", method="floyd-warshall-numpy"
        )
        np.testing.assert_almost_equal(ans, 4)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/similarity.py
ADDED
|
@@ -0,0 +1,1710 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Functions measuring similarity using graph edit distance.
|
| 2 |
+
|
| 3 |
+
The graph edit distance is the number of edge/node changes needed
|
| 4 |
+
to make two graphs isomorphic.
|
| 5 |
+
|
| 6 |
+
The default algorithm/implementation is sub-optimal for some graphs.
|
| 7 |
+
The problem of finding the exact Graph Edit Distance (GED) is NP-hard
|
| 8 |
+
so it is often slow. If the simple interface `graph_edit_distance`
|
| 9 |
+
takes too long for your graph, try `optimize_graph_edit_distance`
|
| 10 |
+
and/or `optimize_edit_paths`.
|
| 11 |
+
|
| 12 |
+
At the same time, I encourage capable people to investigate
|
| 13 |
+
alternative GED algorithms, in order to improve the choices available.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import math
|
| 17 |
+
import time
|
| 18 |
+
import warnings
|
| 19 |
+
from dataclasses import dataclass
|
| 20 |
+
from itertools import product
|
| 21 |
+
|
| 22 |
+
import networkx as nx
|
| 23 |
+
|
| 24 |
+
__all__ = [
|
| 25 |
+
"graph_edit_distance",
|
| 26 |
+
"optimal_edit_paths",
|
| 27 |
+
"optimize_graph_edit_distance",
|
| 28 |
+
"optimize_edit_paths",
|
| 29 |
+
"simrank_similarity",
|
| 30 |
+
"panther_similarity",
|
| 31 |
+
"generate_random_paths",
|
| 32 |
+
]
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def debug_print(*args, **kwargs):
    """Forward all positional and keyword arguments to :func:`print`.

    Internal debugging aid; kept as a seam so debug output can be
    redirected in one place.
    """
    print(*args, **kwargs)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@nx._dispatch(
    graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True
)
def graph_edit_distance(
    G1,
    G2,
    node_match=None,
    edge_match=None,
    node_subst_cost=None,
    node_del_cost=None,
    node_ins_cost=None,
    edge_subst_cost=None,
    edge_del_cost=None,
    edge_ins_cost=None,
    roots=None,
    upper_bound=None,
    timeout=None,
):
    """Returns GED (graph edit distance) between graphs G1 and G2.

    Graph edit distance is a graph similarity measure analogous to
    Levenshtein distance for strings.  It is defined as the minimum
    cost of an edit path (a sequence of node and edge edit operations)
    transforming graph G1 into a graph isomorphic to G2.

    Parameters
    ----------
    G1, G2: graphs
        The two graphs G1 and G2 must be of the same type.

    node_match : callable
        A function that returns True if node n1 in G1 and n2 in G2
        should be considered equal during matching.  Called like
        ``node_match(G1.nodes[n1], G2.nodes[n2])``, i.e. it receives
        the node attribute dictionaries as inputs.

        Ignored if node_subst_cost is specified.  If neither
        node_match nor node_subst_cost are specified then node
        attributes are not considered.

    edge_match : callable
        A function that returns True if the edge attribute dictionaries
        for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should
        be considered equal during matching.  Called like
        ``edge_match(G1[u1][v1], G2[u2][v2])``, i.e. it receives the
        edge attribute dictionaries of the edges under consideration.

        Ignored if edge_subst_cost is specified.  If neither
        edge_match nor edge_subst_cost are specified then edge
        attributes are not considered.

    node_subst_cost, node_del_cost, node_ins_cost : callable
        Functions that return the costs of node substitution, node
        deletion, and node insertion, respectively.  Called like
        ``node_subst_cost(G1.nodes[n1], G2.nodes[n2])``,
        ``node_del_cost(G1.nodes[n1])``,
        ``node_ins_cost(G2.nodes[n2])``, i.e. they receive node
        attribute dictionaries and are expected to return positive
        numeric values.

        Function node_subst_cost overrides node_match if specified.
        If neither node_match nor node_subst_cost are specified then a
        default node substitution cost of 0 is used (node attributes
        are not considered during matching).

        If node_del_cost is not specified then a default node deletion
        cost of 1 is used.  If node_ins_cost is not specified then a
        default node insertion cost of 1 is used.

    edge_subst_cost, edge_del_cost, edge_ins_cost : callable
        Functions that return the costs of edge substitution, edge
        deletion, and edge insertion, respectively.  Called like
        ``edge_subst_cost(G1[u1][v1], G2[u2][v2])``,
        ``edge_del_cost(G1[u1][v1])``,
        ``edge_ins_cost(G2[u2][v2])``, i.e. they receive edge
        attribute dictionaries and are expected to return positive
        numeric values.

        Function edge_subst_cost overrides edge_match if specified.
        If neither edge_match nor edge_subst_cost are specified then a
        default edge substitution cost of 0 is used (edge attributes
        are not considered during matching).

        If edge_del_cost is not specified then a default edge deletion
        cost of 1 is used.  If edge_ins_cost is not specified then a
        default edge insertion cost of 1 is used.

    roots : 2-tuple
        Tuple where the first element is a node in G1 and the second
        is a node in G2.  These nodes are forced to be matched in the
        comparison to allow comparison between rooted graphs.

    upper_bound : numeric
        Maximum edit distance to consider.  Return None if no edit
        distance under or equal to upper_bound exists.

    timeout : numeric
        Maximum number of seconds to execute.
        After timeout is met, the current best GED is returned.

    Examples
    --------
    >>> G1 = nx.cycle_graph(6)
    >>> G2 = nx.wheel_graph(7)
    >>> nx.graph_edit_distance(G1, G2)
    7.0

    >>> G1 = nx.star_graph(5)
    >>> G2 = nx.star_graph(5)
    >>> nx.graph_edit_distance(G1, G2, roots=(0, 0))
    0.0
    >>> nx.graph_edit_distance(G1, G2, roots=(1, 0))
    8.0

    See Also
    --------
    optimal_edit_paths, optimize_graph_edit_distance,

    is_isomorphic: test for graph edit distance of 0

    References
    ----------
    .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
       Martineau. An Exact Graph Edit Distance Algorithm for Solving
       Pattern Recognition Problems. 4th International Conference on
       Pattern Recognition Applications and Methods 2015, Jan 2015,
       Lisbon, Portugal. 2015,
       <10.5220/0005209202710278>. <hal-01168816>
       https://hal.archives-ouvertes.fr/hal-01168816

    """
    # With strictly_decreasing=True the generator yields successively
    # cheaper edit paths, so the last cost seen is the true minimum
    # (or None when nothing fits under upper_bound).
    minimum_cost = None
    approximations = optimize_edit_paths(
        G1,
        G2,
        node_match,
        edge_match,
        node_subst_cost,
        node_del_cost,
        node_ins_cost,
        edge_subst_cost,
        edge_del_cost,
        edge_ins_cost,
        upper_bound,
        True,
        roots,
        timeout,
    )
    for _node_path, _edge_path, cost in approximations:
        minimum_cost = cost
    return minimum_cost
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
@nx._dispatch(graphs={"G1": 0, "G2": 1})
def optimal_edit_paths(
    G1,
    G2,
    node_match=None,
    edge_match=None,
    node_subst_cost=None,
    node_del_cost=None,
    node_ins_cost=None,
    edge_subst_cost=None,
    edge_del_cost=None,
    edge_ins_cost=None,
    upper_bound=None,
):
    """Returns all minimum-cost edit paths transforming G1 to G2.

    A graph edit path is a sequence of node and edge edit operations
    transforming graph G1 into a graph isomorphic to G2.  Edit
    operations include substitutions, deletions, and insertions.

    Parameters
    ----------
    G1, G2: graphs
        The two graphs G1 and G2 must be of the same type.

    node_match : callable
        A function that returns True if node n1 in G1 and n2 in G2
        should be considered equal during matching.  Called like
        ``node_match(G1.nodes[n1], G2.nodes[n2])``, i.e. it receives
        the node attribute dictionaries as inputs.

        Ignored if node_subst_cost is specified.  If neither
        node_match nor node_subst_cost are specified then node
        attributes are not considered.

    edge_match : callable
        A function that returns True if the edge attribute dictionaries
        for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should
        be considered equal during matching.  Called like
        ``edge_match(G1[u1][v1], G2[u2][v2])``, i.e. it receives the
        edge attribute dictionaries of the edges under consideration.

        Ignored if edge_subst_cost is specified.  If neither
        edge_match nor edge_subst_cost are specified then edge
        attributes are not considered.

    node_subst_cost, node_del_cost, node_ins_cost : callable
        Functions that return the costs of node substitution, node
        deletion, and node insertion, respectively.  Called like
        ``node_subst_cost(G1.nodes[n1], G2.nodes[n2])``,
        ``node_del_cost(G1.nodes[n1])``,
        ``node_ins_cost(G2.nodes[n2])``, i.e. they receive node
        attribute dictionaries and are expected to return positive
        numeric values.

        Function node_subst_cost overrides node_match if specified.
        If neither node_match nor node_subst_cost are specified then a
        default node substitution cost of 0 is used (node attributes
        are not considered during matching).

        If node_del_cost is not specified then a default node deletion
        cost of 1 is used.  If node_ins_cost is not specified then a
        default node insertion cost of 1 is used.

    edge_subst_cost, edge_del_cost, edge_ins_cost : callable
        Functions that return the costs of edge substitution, edge
        deletion, and edge insertion, respectively.  Called like
        ``edge_subst_cost(G1[u1][v1], G2[u2][v2])``,
        ``edge_del_cost(G1[u1][v1])``,
        ``edge_ins_cost(G2[u2][v2])``, i.e. they receive edge
        attribute dictionaries and are expected to return positive
        numeric values.

        Function edge_subst_cost overrides edge_match if specified.
        If neither edge_match nor edge_subst_cost are specified then a
        default edge substitution cost of 0 is used (edge attributes
        are not considered during matching).

        If edge_del_cost is not specified then a default edge deletion
        cost of 1 is used.  If edge_ins_cost is not specified then a
        default edge insertion cost of 1 is used.

    upper_bound : numeric
        Maximum edit distance to consider.

    Returns
    -------
    edit_paths : list of tuples (node_edit_path, edge_edit_path)
        node_edit_path : list of tuples (u, v)
        edge_edit_path : list of tuples ((u1, v1), (u2, v2))

    cost : numeric
        Optimal edit path cost (graph edit distance).

    Examples
    --------
    >>> G1 = nx.cycle_graph(4)
    >>> G2 = nx.wheel_graph(5)
    >>> paths, cost = nx.optimal_edit_paths(G1, G2)
    >>> len(paths)
    40
    >>> cost
    5.0

    See Also
    --------
    graph_edit_distance, optimize_edit_paths

    References
    ----------
    .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
       Martineau. An Exact Graph Edit Distance Algorithm for Solving
       Pattern Recognition Problems. 4th International Conference on
       Pattern Recognition Applications and Methods 2015, Jan 2015,
       Lisbon, Portugal. 2015,
       <10.5220/0005209202710278>. <hal-01168816>
       https://hal.archives-ouvertes.fr/hal-01168816

    """
    # strictly_decreasing=False makes the generator yield every path
    # whose cost is <= the best seen so far; whenever a strictly
    # cheaper path appears, everything collected earlier is obsolete.
    collected = []
    best_cost = None
    for node_path, edge_path, cost in optimize_edit_paths(
        G1,
        G2,
        node_match,
        edge_match,
        node_subst_cost,
        node_del_cost,
        node_ins_cost,
        edge_subst_cost,
        edge_del_cost,
        edge_ins_cost,
        upper_bound,
        False,
    ):
        if best_cost is not None and cost < best_cost:
            collected.clear()
        collected.append((node_path, edge_path))
        best_cost = cost
    return collected, best_cost
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
@nx._dispatch(graphs={"G1": 0, "G2": 1})
def optimize_graph_edit_distance(
    G1,
    G2,
    node_match=None,
    edge_match=None,
    node_subst_cost=None,
    node_del_cost=None,
    node_ins_cost=None,
    edge_subst_cost=None,
    edge_del_cost=None,
    edge_ins_cost=None,
    upper_bound=None,
):
    """Returns consecutive approximations of GED (graph edit distance)
    between graphs G1 and G2.

    Graph edit distance is a graph similarity measure analogous to
    Levenshtein distance for strings.  It is defined as the minimum
    cost of an edit path (a sequence of node and edge edit operations)
    transforming graph G1 into a graph isomorphic to G2.

    Parameters
    ----------
    G1, G2: graphs
        The two graphs G1 and G2 must be of the same type.

    node_match : callable
        A function that returns True if node n1 in G1 and n2 in G2
        should be considered equal during matching.  Called like
        ``node_match(G1.nodes[n1], G2.nodes[n2])``, i.e. it receives
        the node attribute dictionaries as inputs.

        Ignored if node_subst_cost is specified.  If neither
        node_match nor node_subst_cost are specified then node
        attributes are not considered.

    edge_match : callable
        A function that returns True if the edge attribute dictionaries
        for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should
        be considered equal during matching.  Called like
        ``edge_match(G1[u1][v1], G2[u2][v2])``, i.e. it receives the
        edge attribute dictionaries of the edges under consideration.

        Ignored if edge_subst_cost is specified.  If neither
        edge_match nor edge_subst_cost are specified then edge
        attributes are not considered.

    node_subst_cost, node_del_cost, node_ins_cost : callable
        Functions that return the costs of node substitution, node
        deletion, and node insertion, respectively.  Called like
        ``node_subst_cost(G1.nodes[n1], G2.nodes[n2])``,
        ``node_del_cost(G1.nodes[n1])``,
        ``node_ins_cost(G2.nodes[n2])``, i.e. they receive node
        attribute dictionaries and are expected to return positive
        numeric values.

        Function node_subst_cost overrides node_match if specified.
        If neither node_match nor node_subst_cost are specified then a
        default node substitution cost of 0 is used (node attributes
        are not considered during matching).

        If node_del_cost is not specified then a default node deletion
        cost of 1 is used.  If node_ins_cost is not specified then a
        default node insertion cost of 1 is used.

    edge_subst_cost, edge_del_cost, edge_ins_cost : callable
        Functions that return the costs of edge substitution, edge
        deletion, and edge insertion, respectively.  Called like
        ``edge_subst_cost(G1[u1][v1], G2[u2][v2])``,
        ``edge_del_cost(G1[u1][v1])``,
        ``edge_ins_cost(G2[u2][v2])``, i.e. they receive edge
        attribute dictionaries and are expected to return positive
        numeric values.

        Function edge_subst_cost overrides edge_match if specified.
        If neither edge_match nor edge_subst_cost are specified then a
        default edge substitution cost of 0 is used (edge attributes
        are not considered during matching).

        If edge_del_cost is not specified then a default edge deletion
        cost of 1 is used.  If edge_ins_cost is not specified then a
        default edge insertion cost of 1 is used.

    upper_bound : numeric
        Maximum edit distance to consider.

    Returns
    -------
    Generator of consecutive approximations of graph edit distance.

    Examples
    --------
    >>> G1 = nx.cycle_graph(6)
    >>> G2 = nx.wheel_graph(7)
    >>> for v in nx.optimize_graph_edit_distance(G1, G2):
    ...     minv = v
    >>> minv
    7.0

    See Also
    --------
    graph_edit_distance, optimize_edit_paths

    References
    ----------
    .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
       Martineau. An Exact Graph Edit Distance Algorithm for Solving
       Pattern Recognition Problems. 4th International Conference on
       Pattern Recognition Applications and Methods 2015, Jan 2015,
       Lisbon, Portugal. 2015,
       <10.5220/0005209202710278>. <hal-01168816>
       https://hal.archives-ouvertes.fr/hal-01168816
    """
    # Strip the edit paths off the underlying anytime generator and
    # relay only the (strictly decreasing) cost approximations.
    path_stream = optimize_edit_paths(
        G1,
        G2,
        node_match,
        edge_match,
        node_subst_cost,
        node_del_cost,
        node_ins_cost,
        edge_subst_cost,
        edge_del_cost,
        edge_ins_cost,
        upper_bound,
        True,
    )
    yield from (cost for _node_path, _edge_path, cost in path_stream)
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
@nx._dispatch(
|
| 528 |
+
graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True
|
| 529 |
+
)
|
| 530 |
+
def optimize_edit_paths(
|
| 531 |
+
G1,
|
| 532 |
+
G2,
|
| 533 |
+
node_match=None,
|
| 534 |
+
edge_match=None,
|
| 535 |
+
node_subst_cost=None,
|
| 536 |
+
node_del_cost=None,
|
| 537 |
+
node_ins_cost=None,
|
| 538 |
+
edge_subst_cost=None,
|
| 539 |
+
edge_del_cost=None,
|
| 540 |
+
edge_ins_cost=None,
|
| 541 |
+
upper_bound=None,
|
| 542 |
+
strictly_decreasing=True,
|
| 543 |
+
roots=None,
|
| 544 |
+
timeout=None,
|
| 545 |
+
):
|
| 546 |
+
"""GED (graph edit distance) calculation: advanced interface.
|
| 547 |
+
|
| 548 |
+
Graph edit path is a sequence of node and edge edit operations
|
| 549 |
+
transforming graph G1 to graph isomorphic to G2. Edit operations
|
| 550 |
+
include substitutions, deletions, and insertions.
|
| 551 |
+
|
| 552 |
+
Graph edit distance is defined as minimum cost of edit path.
|
| 553 |
+
|
| 554 |
+
Parameters
|
| 555 |
+
----------
|
| 556 |
+
G1, G2: graphs
|
| 557 |
+
The two graphs G1 and G2 must be of the same type.
|
| 558 |
+
|
| 559 |
+
node_match : callable
|
| 560 |
+
A function that returns True if node n1 in G1 and n2 in G2
|
| 561 |
+
should be considered equal during matching.
|
| 562 |
+
|
| 563 |
+
The function will be called like
|
| 564 |
+
|
| 565 |
+
node_match(G1.nodes[n1], G2.nodes[n2]).
|
| 566 |
+
|
| 567 |
+
That is, the function will receive the node attribute
|
| 568 |
+
dictionaries for n1 and n2 as inputs.
|
| 569 |
+
|
| 570 |
+
Ignored if node_subst_cost is specified. If neither
|
| 571 |
+
node_match nor node_subst_cost are specified then node
|
| 572 |
+
attributes are not considered.
|
| 573 |
+
|
| 574 |
+
edge_match : callable
|
| 575 |
+
A function that returns True if the edge attribute dictionaries
|
| 576 |
+
for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should
|
| 577 |
+
be considered equal during matching.
|
| 578 |
+
|
| 579 |
+
The function will be called like
|
| 580 |
+
|
| 581 |
+
edge_match(G1[u1][v1], G2[u2][v2]).
|
| 582 |
+
|
| 583 |
+
That is, the function will receive the edge attribute
|
| 584 |
+
dictionaries of the edges under consideration.
|
| 585 |
+
|
| 586 |
+
Ignored if edge_subst_cost is specified. If neither
|
| 587 |
+
edge_match nor edge_subst_cost are specified then edge
|
| 588 |
+
attributes are not considered.
|
| 589 |
+
|
| 590 |
+
node_subst_cost, node_del_cost, node_ins_cost : callable
|
| 591 |
+
Functions that return the costs of node substitution, node
|
| 592 |
+
deletion, and node insertion, respectively.
|
| 593 |
+
|
| 594 |
+
The functions will be called like
|
| 595 |
+
|
| 596 |
+
node_subst_cost(G1.nodes[n1], G2.nodes[n2]),
|
| 597 |
+
node_del_cost(G1.nodes[n1]),
|
| 598 |
+
node_ins_cost(G2.nodes[n2]).
|
| 599 |
+
|
| 600 |
+
That is, the functions will receive the node attribute
|
| 601 |
+
dictionaries as inputs. The functions are expected to return
|
| 602 |
+
positive numeric values.
|
| 603 |
+
|
| 604 |
+
Function node_subst_cost overrides node_match if specified.
|
| 605 |
+
If neither node_match nor node_subst_cost are specified then
|
| 606 |
+
default node substitution cost of 0 is used (node attributes
|
| 607 |
+
are not considered during matching).
|
| 608 |
+
|
| 609 |
+
If node_del_cost is not specified then default node deletion
|
| 610 |
+
cost of 1 is used. If node_ins_cost is not specified then
|
| 611 |
+
default node insertion cost of 1 is used.
|
| 612 |
+
|
| 613 |
+
edge_subst_cost, edge_del_cost, edge_ins_cost : callable
|
| 614 |
+
Functions that return the costs of edge substitution, edge
|
| 615 |
+
deletion, and edge insertion, respectively.
|
| 616 |
+
|
| 617 |
+
The functions will be called like
|
| 618 |
+
|
| 619 |
+
edge_subst_cost(G1[u1][v1], G2[u2][v2]),
|
| 620 |
+
edge_del_cost(G1[u1][v1]),
|
| 621 |
+
edge_ins_cost(G2[u2][v2]).
|
| 622 |
+
|
| 623 |
+
That is, the functions will receive the edge attribute
|
| 624 |
+
dictionaries as inputs. The functions are expected to return
|
| 625 |
+
positive numeric values.
|
| 626 |
+
|
| 627 |
+
Function edge_subst_cost overrides edge_match if specified.
|
| 628 |
+
If neither edge_match nor edge_subst_cost are specified then
|
| 629 |
+
default edge substitution cost of 0 is used (edge attributes
|
| 630 |
+
are not considered during matching).
|
| 631 |
+
|
| 632 |
+
If edge_del_cost is not specified then default edge deletion
|
| 633 |
+
cost of 1 is used. If edge_ins_cost is not specified then
|
| 634 |
+
default edge insertion cost of 1 is used.
|
| 635 |
+
|
| 636 |
+
upper_bound : numeric
|
| 637 |
+
Maximum edit distance to consider.
|
| 638 |
+
|
| 639 |
+
strictly_decreasing : bool
|
| 640 |
+
If True, return consecutive approximations of strictly
|
| 641 |
+
decreasing cost. Otherwise, return all edit paths of cost
|
| 642 |
+
less than or equal to the previous minimum cost.
|
| 643 |
+
|
| 644 |
+
roots : 2-tuple
|
| 645 |
+
Tuple where first element is a node in G1 and the second
|
| 646 |
+
is a node in G2.
|
| 647 |
+
These nodes are forced to be matched in the comparison to
|
| 648 |
+
allow comparison between rooted graphs.
|
| 649 |
+
|
| 650 |
+
timeout : numeric
|
| 651 |
+
Maximum number of seconds to execute.
|
| 652 |
+
After timeout is met, the current best GED is returned.
|
| 653 |
+
|
| 654 |
+
Returns
|
| 655 |
+
-------
|
| 656 |
+
Generator of tuples (node_edit_path, edge_edit_path, cost)
|
| 657 |
+
node_edit_path : list of tuples (u, v)
|
| 658 |
+
edge_edit_path : list of tuples ((u1, v1), (u2, v2))
|
| 659 |
+
cost : numeric
|
| 660 |
+
|
| 661 |
+
See Also
|
| 662 |
+
--------
|
| 663 |
+
graph_edit_distance, optimize_graph_edit_distance, optimal_edit_paths
|
| 664 |
+
|
| 665 |
+
References
|
| 666 |
+
----------
|
| 667 |
+
.. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
|
| 668 |
+
Martineau. An Exact Graph Edit Distance Algorithm for Solving
|
| 669 |
+
Pattern Recognition Problems. 4th International Conference on
|
| 670 |
+
Pattern Recognition Applications and Methods 2015, Jan 2015,
|
| 671 |
+
Lisbon, Portugal. 2015,
|
| 672 |
+
<10.5220/0005209202710278>. <hal-01168816>
|
| 673 |
+
https://hal.archives-ouvertes.fr/hal-01168816
|
| 674 |
+
|
| 675 |
+
"""
|
| 676 |
+
# TODO: support DiGraph
|
| 677 |
+
|
| 678 |
+
import numpy as np
|
| 679 |
+
import scipy as sp
|
| 680 |
+
|
| 681 |
+
@dataclass
|
| 682 |
+
class CostMatrix:
|
| 683 |
+
C: ...
|
| 684 |
+
lsa_row_ind: ...
|
| 685 |
+
lsa_col_ind: ...
|
| 686 |
+
ls: ...
|
| 687 |
+
|
| 688 |
+
def make_CostMatrix(C, m, n):
|
| 689 |
+
# assert(C.shape == (m + n, m + n))
|
| 690 |
+
lsa_row_ind, lsa_col_ind = sp.optimize.linear_sum_assignment(C)
|
| 691 |
+
|
| 692 |
+
# Fixup dummy assignments:
|
| 693 |
+
# each substitution i<->j should have dummy assignment m+j<->n+i
|
| 694 |
+
# NOTE: fast reduce of Cv relies on it
|
| 695 |
+
# assert len(lsa_row_ind) == len(lsa_col_ind)
|
| 696 |
+
indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind)
|
| 697 |
+
subst_ind = [k for k, i, j in indexes if i < m and j < n]
|
| 698 |
+
indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind)
|
| 699 |
+
dummy_ind = [k for k, i, j in indexes if i >= m and j >= n]
|
| 700 |
+
# assert len(subst_ind) == len(dummy_ind)
|
| 701 |
+
lsa_row_ind[dummy_ind] = lsa_col_ind[subst_ind] + m
|
| 702 |
+
lsa_col_ind[dummy_ind] = lsa_row_ind[subst_ind] + n
|
| 703 |
+
|
| 704 |
+
return CostMatrix(
|
| 705 |
+
C, lsa_row_ind, lsa_col_ind, C[lsa_row_ind, lsa_col_ind].sum()
|
| 706 |
+
)
|
| 707 |
+
|
| 708 |
+
def extract_C(C, i, j, m, n):
|
| 709 |
+
# assert(C.shape == (m + n, m + n))
|
| 710 |
+
row_ind = [k in i or k - m in j for k in range(m + n)]
|
| 711 |
+
col_ind = [k in j or k - n in i for k in range(m + n)]
|
| 712 |
+
return C[row_ind, :][:, col_ind]
|
| 713 |
+
|
| 714 |
+
def reduce_C(C, i, j, m, n):
|
| 715 |
+
# assert(C.shape == (m + n, m + n))
|
| 716 |
+
row_ind = [k not in i and k - m not in j for k in range(m + n)]
|
| 717 |
+
col_ind = [k not in j and k - n not in i for k in range(m + n)]
|
| 718 |
+
return C[row_ind, :][:, col_ind]
|
| 719 |
+
|
| 720 |
+
def reduce_ind(ind, i):
|
| 721 |
+
# assert set(ind) == set(range(len(ind)))
|
| 722 |
+
rind = ind[[k not in i for k in ind]]
|
| 723 |
+
for k in set(i):
|
| 724 |
+
rind[rind >= k] -= 1
|
| 725 |
+
return rind
|
| 726 |
+
|
| 727 |
+
def match_edges(u, v, pending_g, pending_h, Ce, matched_uv=None):
|
| 728 |
+
"""
|
| 729 |
+
Parameters:
|
| 730 |
+
u, v: matched vertices, u=None or v=None for
|
| 731 |
+
deletion/insertion
|
| 732 |
+
pending_g, pending_h: lists of edges not yet mapped
|
| 733 |
+
Ce: CostMatrix of pending edge mappings
|
| 734 |
+
matched_uv: partial vertex edit path
|
| 735 |
+
list of tuples (u, v) of previously matched vertex
|
| 736 |
+
mappings u<->v, u=None or v=None for
|
| 737 |
+
deletion/insertion
|
| 738 |
+
|
| 739 |
+
Returns:
|
| 740 |
+
list of (i, j): indices of edge mappings g<->h
|
| 741 |
+
localCe: local CostMatrix of edge mappings
|
| 742 |
+
(basically submatrix of Ce at cross of rows i, cols j)
|
| 743 |
+
"""
|
| 744 |
+
M = len(pending_g)
|
| 745 |
+
N = len(pending_h)
|
| 746 |
+
# assert Ce.C.shape == (M + N, M + N)
|
| 747 |
+
|
| 748 |
+
# only attempt to match edges after one node match has been made
|
| 749 |
+
# this will stop self-edges on the first node being automatically deleted
|
| 750 |
+
# even when a substitution is the better option
|
| 751 |
+
if matched_uv is None or len(matched_uv) == 0:
|
| 752 |
+
g_ind = []
|
| 753 |
+
h_ind = []
|
| 754 |
+
else:
|
| 755 |
+
g_ind = [
|
| 756 |
+
i
|
| 757 |
+
for i in range(M)
|
| 758 |
+
if pending_g[i][:2] == (u, u)
|
| 759 |
+
or any(
|
| 760 |
+
pending_g[i][:2] in ((p, u), (u, p), (p, p)) for p, q in matched_uv
|
| 761 |
+
)
|
| 762 |
+
]
|
| 763 |
+
h_ind = [
|
| 764 |
+
j
|
| 765 |
+
for j in range(N)
|
| 766 |
+
if pending_h[j][:2] == (v, v)
|
| 767 |
+
or any(
|
| 768 |
+
pending_h[j][:2] in ((q, v), (v, q), (q, q)) for p, q in matched_uv
|
| 769 |
+
)
|
| 770 |
+
]
|
| 771 |
+
|
| 772 |
+
m = len(g_ind)
|
| 773 |
+
n = len(h_ind)
|
| 774 |
+
|
| 775 |
+
if m or n:
|
| 776 |
+
C = extract_C(Ce.C, g_ind, h_ind, M, N)
|
| 777 |
+
# assert C.shape == (m + n, m + n)
|
| 778 |
+
|
| 779 |
+
# Forbid structurally invalid matches
|
| 780 |
+
# NOTE: inf remembered from Ce construction
|
| 781 |
+
for k, i in enumerate(g_ind):
|
| 782 |
+
g = pending_g[i][:2]
|
| 783 |
+
for l, j in enumerate(h_ind):
|
| 784 |
+
h = pending_h[j][:2]
|
| 785 |
+
if nx.is_directed(G1) or nx.is_directed(G2):
|
| 786 |
+
if any(
|
| 787 |
+
g == (p, u) and h == (q, v) or g == (u, p) and h == (v, q)
|
| 788 |
+
for p, q in matched_uv
|
| 789 |
+
):
|
| 790 |
+
continue
|
| 791 |
+
else:
|
| 792 |
+
if any(
|
| 793 |
+
g in ((p, u), (u, p)) and h in ((q, v), (v, q))
|
| 794 |
+
for p, q in matched_uv
|
| 795 |
+
):
|
| 796 |
+
continue
|
| 797 |
+
if g == (u, u) or any(g == (p, p) for p, q in matched_uv):
|
| 798 |
+
continue
|
| 799 |
+
if h == (v, v) or any(h == (q, q) for p, q in matched_uv):
|
| 800 |
+
continue
|
| 801 |
+
C[k, l] = inf
|
| 802 |
+
|
| 803 |
+
localCe = make_CostMatrix(C, m, n)
|
| 804 |
+
ij = [
|
| 805 |
+
(
|
| 806 |
+
g_ind[k] if k < m else M + h_ind[l],
|
| 807 |
+
h_ind[l] if l < n else N + g_ind[k],
|
| 808 |
+
)
|
| 809 |
+
for k, l in zip(localCe.lsa_row_ind, localCe.lsa_col_ind)
|
| 810 |
+
if k < m or l < n
|
| 811 |
+
]
|
| 812 |
+
|
| 813 |
+
else:
|
| 814 |
+
ij = []
|
| 815 |
+
localCe = CostMatrix(np.empty((0, 0)), [], [], 0)
|
| 816 |
+
|
| 817 |
+
return ij, localCe
|
| 818 |
+
|
| 819 |
+
def reduce_Ce(Ce, ij, m, n):
|
| 820 |
+
if len(ij):
|
| 821 |
+
i, j = zip(*ij)
|
| 822 |
+
m_i = m - sum(1 for t in i if t < m)
|
| 823 |
+
n_j = n - sum(1 for t in j if t < n)
|
| 824 |
+
return make_CostMatrix(reduce_C(Ce.C, i, j, m, n), m_i, n_j)
|
| 825 |
+
return Ce
|
| 826 |
+
|
| 827 |
+
def get_edit_ops(
    matched_uv, pending_u, pending_v, Cv, pending_g, pending_h, Ce, matched_cost
):
    """
    Parameters:
        matched_uv: partial vertex edit path
            list of tuples (u, v) of vertex mappings u<->v,
            u=None or v=None for deletion/insertion
        pending_u, pending_v: lists of vertices not yet mapped
        Cv: CostMatrix of pending vertex mappings
        pending_g, pending_h: lists of edges not yet mapped
        Ce: CostMatrix of pending edge mappings
        matched_cost: cost of partial edit path

    Returns:
        sequence of
            (i, j): indices of vertex mapping u<->v
            Cv_ij: reduced CostMatrix of pending vertex mappings
                (basically Cv with row i, col j removed)
            list of (x, y): indices of edge mappings g<->h
            Ce_xy: reduced CostMatrix of pending edge mappings
                (basically Ce with rows x, cols y removed)
            cost: total cost of edit operation
                NOTE: most promising ops first
    """
    m = len(pending_u)
    n = len(pending_v)
    # assert Cv.C.shape == (m + n, m + n)

    # 1) a vertex mapping from optimal linear sum assignment
    # Pick the lexicographically smallest LSA pair that maps at least one
    # real vertex (k < m: real vertex of G1; l < n: real vertex of G2).
    i, j = min(
        (k, l) for k, l in zip(Cv.lsa_row_ind, Cv.lsa_col_ind) if k < m or l < n
    )
    # Edge mappings forced by committing to the vertex mapping i<->j.
    xy, localCe = match_edges(
        pending_u[i] if i < m else None,
        pending_v[j] if j < n else None,
        pending_g,
        pending_h,
        Ce,
        matched_uv,
    )
    Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h))
    # assert Ce.ls <= localCe.ls + Ce_xy.ls
    # Lower bound for this op: committed cost + vertex LSA bound + forced
    # edge cost + remaining-edge LSA bound.  Skip (don't yield) if pruned.
    if prune(matched_cost + Cv.ls + localCe.ls + Ce_xy.ls):
        pass
    else:
        # get reduced Cv efficiently
        # Reuse the existing LSA solution instead of re-solving: removing
        # row i / col j (and the paired dummy entries) keeps it optimal.
        Cv_ij = CostMatrix(
            reduce_C(Cv.C, (i,), (j,), m, n),
            reduce_ind(Cv.lsa_row_ind, (i, m + j)),
            reduce_ind(Cv.lsa_col_ind, (j, n + i)),
            Cv.ls - Cv.C[i, j],
        )
        yield (i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls

    # 2) other candidates, sorted by lower-bound cost estimate
    other = []
    fixed_i, fixed_j = i, j
    # Branch over the smaller side: keep one endpoint of the LSA pair fixed
    # and enumerate the alternative partners (including its deletion/insertion
    # dummy entry at m + fixed_j / n + fixed_i).
    if m <= n:
        candidates = (
            (t, fixed_j)
            for t in range(m + n)
            if t != fixed_i and (t < m or t == m + fixed_j)
        )
    else:
        candidates = (
            (fixed_i, t)
            for t in range(m + n)
            if t != fixed_j and (t < n or t == n + fixed_i)
        )
    for i, j in candidates:
        # Progressive pruning: each check below uses a tighter (more
        # expensive to compute) lower bound than the previous one.
        if prune(matched_cost + Cv.C[i, j] + Ce.ls):
            continue
        Cv_ij = make_CostMatrix(
            reduce_C(Cv.C, (i,), (j,), m, n),
            m - 1 if i < m else m,
            n - 1 if j < n else n,
        )
        # assert Cv.ls <= Cv.C[i, j] + Cv_ij.ls
        if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + Ce.ls):
            continue
        xy, localCe = match_edges(
            pending_u[i] if i < m else None,
            pending_v[j] if j < n else None,
            pending_g,
            pending_h,
            Ce,
            matched_uv,
        )
        if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls):
            continue
        Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h))
        # assert Ce.ls <= localCe.ls + Ce_xy.ls
        if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls + Ce_xy.ls):
            continue
        other.append(((i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls))

    # Yield surviving alternatives cheapest-lower-bound first.
    yield from sorted(other, key=lambda t: t[4] + t[1].ls + t[3].ls)
|
| 925 |
+
|
| 926 |
+
def get_edit_paths(
    matched_uv,
    pending_u,
    pending_v,
    Cv,
    matched_gh,
    pending_g,
    pending_h,
    Ce,
    matched_cost,
):
    """
    Parameters:
        matched_uv: partial vertex edit path
            list of tuples (u, v) of vertex mappings u<->v,
            u=None or v=None for deletion/insertion
        pending_u, pending_v: lists of vertices not yet mapped
        Cv: CostMatrix of pending vertex mappings
        matched_gh: partial edge edit path
            list of tuples (g, h) of edge mappings g<->h,
            g=None or h=None for deletion/insertion
        pending_g, pending_h: lists of edges not yet mapped
        Ce: CostMatrix of pending edge mappings
        matched_cost: cost of partial edit path

    Returns:
        sequence of (vertex_path, edge_path, cost)
            vertex_path: complete vertex edit path
                list of tuples (u, v) of vertex mappings u<->v,
                u=None or v=None for deletion/insertion
            edge_path: complete edge edit path
                list of tuples (g, h) of edge mappings g<->h,
                g=None or h=None for deletion/insertion
            cost: total cost of edit path
                NOTE: path costs are non-increasing
    """
    # debug_print('matched-uv:', matched_uv)
    # debug_print('matched-gh:', matched_gh)
    # debug_print('matched-cost:', matched_cost)
    # debug_print('pending-u:', pending_u)
    # debug_print('pending-v:', pending_v)
    # debug_print(Cv.C)
    # assert list(sorted(G1.nodes)) == list(sorted(list(u for u, v in matched_uv if u is not None) + pending_u))
    # assert list(sorted(G2.nodes)) == list(sorted(list(v for u, v in matched_uv if v is not None) + pending_v))
    # debug_print('pending-g:', pending_g)
    # debug_print('pending-h:', pending_h)
    # debug_print(Ce.C)
    # assert list(sorted(G1.edges)) == list(sorted(list(g for g, h in matched_gh if g is not None) + pending_g))
    # assert list(sorted(G2.edges)) == list(sorted(list(h for g, h in matched_gh if h is not None) + pending_h))
    # debug_print()

    # Abandon this subtree if even its optimistic lower bound is pruned.
    if prune(matched_cost + Cv.ls + Ce.ls):
        return

    if not max(len(pending_u), len(pending_v)):
        # assert not len(pending_g)
        # assert not len(pending_h)
        # path completed!
        # assert matched_cost <= maxcost_value
        # Tighten the global best-so-far so later branches prune harder.
        nonlocal maxcost_value
        maxcost_value = min(maxcost_value, matched_cost)
        yield matched_uv, matched_gh, matched_cost

    else:
        edit_ops = get_edit_ops(
            matched_uv,
            pending_u,
            pending_v,
            Cv,
            pending_g,
            pending_h,
            Ce,
            matched_cost,
        )
        for ij, Cv_ij, xy, Ce_xy, edit_cost in edit_ops:
            i, j = ij
            # assert Cv.C[i, j] + sum(Ce.C[t] for t in xy) == edit_cost
            # Re-check the bound: maxcost_value may have shrunk since the
            # op was generated.
            if prune(matched_cost + edit_cost + Cv_ij.ls + Ce_xy.ls):
                continue

            # dive deeper
            # Apply the op in place (index >= list length means the dummy
            # deletion/insertion entry, i.e. map to None).
            u = pending_u.pop(i) if i < len(pending_u) else None
            v = pending_v.pop(j) if j < len(pending_v) else None
            matched_uv.append((u, v))
            for x, y in xy:
                len_g = len(pending_g)
                len_h = len(pending_h)
                matched_gh.append(
                    (
                        pending_g[x] if x < len_g else None,
                        pending_h[y] if y < len_h else None,
                    )
                )
            # Pop in descending index order so earlier pops don't shift the
            # positions of later ones; G/H remember what was removed.
            sortedx = sorted(x for x, y in xy)
            sortedy = sorted(y for x, y in xy)
            G = [
                (pending_g.pop(x) if x < len(pending_g) else None)
                for x in reversed(sortedx)
            ]
            H = [
                (pending_h.pop(y) if y < len(pending_h) else None)
                for y in reversed(sortedy)
            ]

            yield from get_edit_paths(
                matched_uv,
                pending_u,
                pending_v,
                Cv_ij,
                matched_gh,
                pending_g,
                pending_h,
                Ce_xy,
                matched_cost + edit_cost,
            )

            # backtrack
            # Undo the mutations above (reinsert in ascending order, the
            # reverse of the descending-order pops) so the caller's state
            # is exactly restored for the next candidate op.
            if u is not None:
                pending_u.insert(i, u)
            if v is not None:
                pending_v.insert(j, v)
            matched_uv.pop()
            for x, g in zip(sortedx, reversed(G)):
                if g is not None:
                    pending_g.insert(x, g)
            for y, h in zip(sortedy, reversed(H)):
                if h is not None:
                    pending_h.insert(y, h)
            for _ in xy:
                matched_gh.pop()
|
| 1056 |
+
|
| 1057 |
+
# Initialization

pending_u = list(G1.nodes)
pending_v = list(G2.nodes)

initial_cost = 0
if roots:
    root_u, root_v = roots
    if root_u not in pending_u or root_v not in pending_v:
        raise nx.NodeNotFound("Root node not in graph.")

    # remove roots from pending
    pending_u.remove(root_u)
    pending_v.remove(root_v)

# cost matrix of vertex mappings
# Layout: [substitution (m x n) | deletion (m x m)]
#         [insertion   (n x n)  | zeros    (n x m)]
m = len(pending_u)
n = len(pending_v)
C = np.zeros((m + n, m + n))
if node_subst_cost:
    C[0:m, 0:n] = np.array(
        [
            node_subst_cost(G1.nodes[u], G2.nodes[v])
            for u in pending_u
            for v in pending_v
        ]
    ).reshape(m, n)
    if roots:
        initial_cost = node_subst_cost(G1.nodes[root_u], G2.nodes[root_v])
elif node_match:
    C[0:m, 0:n] = np.array(
        [
            1 - int(node_match(G1.nodes[u], G2.nodes[v]))
            for u in pending_u
            for v in pending_v
        ]
    ).reshape(m, n)
    if roots:
        initial_cost = 1 - node_match(G1.nodes[root_u], G2.nodes[root_v])
else:
    # all zeroes
    pass
# assert not min(m, n) or C[0:m, 0:n].min() >= 0
if node_del_cost:
    del_costs = [node_del_cost(G1.nodes[u]) for u in pending_u]
else:
    del_costs = [1] * len(pending_u)
# assert not m or min(del_costs) >= 0
if node_ins_cost:
    ins_costs = [node_ins_cost(G2.nodes[v]) for v in pending_v]
else:
    ins_costs = [1] * len(pending_v)
# assert not n or min(ins_costs) >= 0
# Finite "infinity": strictly larger than any achievable total cost, so
# off-diagonal entries of the delete/insert blocks are never selected.
inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1
C[0:m, n : n + m] = np.array(
    [del_costs[i] if i == j else inf for i in range(m) for j in range(m)]
).reshape(m, m)
C[m : m + n, 0:n] = np.array(
    [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)]
).reshape(n, n)
Cv = make_CostMatrix(C, m, n)
# debug_print(f"Cv: {m} x {n}")
# debug_print(Cv.C)

pending_g = list(G1.edges)
pending_h = list(G2.edges)

# cost matrix of edge mappings
# Same block layout as the vertex matrix above, over edges instead.
m = len(pending_g)
n = len(pending_h)
C = np.zeros((m + n, m + n))
if edge_subst_cost:
    C[0:m, 0:n] = np.array(
        [
            edge_subst_cost(G1.edges[g], G2.edges[h])
            for g in pending_g
            for h in pending_h
        ]
    ).reshape(m, n)
elif edge_match:
    C[0:m, 0:n] = np.array(
        [
            1 - int(edge_match(G1.edges[g], G2.edges[h]))
            for g in pending_g
            for h in pending_h
        ]
    ).reshape(m, n)
else:
    # all zeroes
    pass
# assert not min(m, n) or C[0:m, 0:n].min() >= 0
if edge_del_cost:
    del_costs = [edge_del_cost(G1.edges[g]) for g in pending_g]
else:
    del_costs = [1] * len(pending_g)
# assert not m or min(del_costs) >= 0
if edge_ins_cost:
    ins_costs = [edge_ins_cost(G2.edges[h]) for h in pending_h]
else:
    ins_costs = [1] * len(pending_h)
# assert not n or min(ins_costs) >= 0
inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1
C[0:m, n : n + m] = np.array(
    [del_costs[i] if i == j else inf for i in range(m) for j in range(m)]
).reshape(m, m)
C[m : m + n, 0:n] = np.array(
    [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)]
).reshape(n, n)
Ce = make_CostMatrix(C, m, n)
# debug_print(f'Ce: {m} x {n}')
# debug_print(Ce.C)
# debug_print()

# Best complete-path cost found so far; initialized above every possible
# total so the first complete path always improves it.
maxcost_value = Cv.C.sum() + Ce.C.sum() + 1

if timeout is not None:
    if timeout <= 0:
        raise nx.NetworkXError("Timeout value must be greater than 0")
    start = time.perf_counter()

def prune(cost):
    # Return True if the branch with lower-bound `cost` should be cut:
    # on timeout, when exceeding the caller-supplied upper_bound, or when
    # it cannot beat (or, with strictly_decreasing, tie) the best so far.
    if timeout is not None:
        if time.perf_counter() - start > timeout:
            return True
    if upper_bound is not None:
        if cost > upper_bound:
            return True
    if cost > maxcost_value:
        return True
    if strictly_decreasing and cost >= maxcost_value:
        return True
    return False

# Now go!

done_uv = [] if roots is None else [roots]

for vertex_path, edge_path, cost in get_edit_paths(
    done_uv, pending_u, pending_v, Cv, [], pending_g, pending_h, Ce, initial_cost
):
    # assert sorted(G1.nodes) == sorted(u for u, v in vertex_path if u is not None)
    # assert sorted(G2.nodes) == sorted(v for u, v in vertex_path if v is not None)
    # assert sorted(G1.edges) == sorted(g for g, h in edge_path if g is not None)
    # assert sorted(G2.edges) == sorted(h for g, h in edge_path if h is not None)
    # print(vertex_path, edge_path, cost, file = sys.stderr)
    # assert cost == maxcost_value
    # Copy the paths: the recursion mutates its lists in place.
    yield list(vertex_path), list(edge_path), cost
|
| 1204 |
+
|
| 1205 |
+
|
| 1206 |
+
@nx._dispatch
def simrank_similarity(
    G,
    source=None,
    target=None,
    importance_factor=0.9,
    max_iterations=1000,
    tolerance=1e-4,
):
    """Returns the SimRank similarity of nodes in the graph ``G``.

    SimRank is a similarity metric that says "two objects are considered
    to be similar if they are referenced by similar objects." [1]_.

    The pseudo-code definition from the paper is::

        def simrank(G, u, v):
            in_neighbors_u = G.predecessors(u)
            in_neighbors_v = G.predecessors(v)
            scale = C / (len(in_neighbors_u) * len(in_neighbors_v))
            return scale * sum(simrank(G, w, x)
                               for w, x in product(in_neighbors_u,
                                                   in_neighbors_v))

    where ``G`` is the graph, ``u`` is the source, ``v`` is the target,
    and ``C`` is a float decay or importance factor between 0 and 1.

    The SimRank algorithm for determining node similarity is defined in
    [2]_.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph

    source : node
        If this is specified, the returned dictionary maps each node
        ``v`` in the graph to the similarity between ``source`` and
        ``v``.

    target : node
        If both ``source`` and ``target`` are specified, the similarity
        value between ``source`` and ``target`` is returned. If
        ``target`` is specified but ``source`` is not, this argument is
        ignored.

    importance_factor : float
        The relative importance of indirect neighbors with respect to
        direct neighbors.

    max_iterations : integer
        Maximum number of iterations.

    tolerance : float
        Error tolerance used to check convergence. When an iteration of
        the algorithm finds that no similarity value changes more than
        this amount, the algorithm halts.

    Returns
    -------
    similarity : dictionary or float
        If ``source`` and ``target`` are both ``None``, this returns a
        dictionary of dictionaries, where keys are node pairs and value
        are similarity of the pair of nodes.

        If ``source`` is not ``None`` but ``target`` is, this returns a
        dictionary mapping node to the similarity of ``source`` and that
        node.

        If neither ``source`` nor ``target`` is ``None``, this returns
        the similarity value for the given pair of nodes.

    Examples
    --------
    >>> G = nx.cycle_graph(2)
    >>> nx.simrank_similarity(G)
    {0: {0: 1.0, 1: 0.0}, 1: {0: 0.0, 1: 1.0}}
    >>> nx.simrank_similarity(G, source=0)
    {0: 1.0, 1: 0.0}
    >>> nx.simrank_similarity(G, source=0, target=0)
    1.0

    The result of this function can be converted to a numpy array
    representing the SimRank matrix by using the node order of the
    graph to determine which row and column represent each node.
    Other ordering of nodes is also possible.

    >>> import numpy as np
    >>> sim = nx.simrank_similarity(G)
    >>> np.array([[sim[u][v] for v in G] for u in G])
    array([[1., 0.],
           [0., 1.]])
    >>> sim_1d = nx.simrank_similarity(G, source=0)
    >>> np.array([sim_1d[v] for v in G])
    array([1., 0.])

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/SimRank
    .. [2] G. Jeh and J. Widom.
           "SimRank: a measure of structural-context similarity",
           In KDD'02: Proceedings of the Eighth ACM SIGKDD
           International Conference on Knowledge Discovery and Data Mining,
           pp. 538--543. ACM Press, 2002.
    """
    import numpy as np

    # The numpy backend works on integer indices; translate the node
    # arguments to their positions in G's node order.
    nodelist = list(G)
    s_indx = None if source is None else nodelist.index(source)
    t_indx = None if target is None else nodelist.index(target)

    x = _simrank_similarity_numpy(
        G, s_indx, t_indx, importance_factor, max_iterations, tolerance
    )

    # Translate the array result back to (nested) dicts keyed by node;
    # a scalar (source and target both given) is returned as-is.
    if isinstance(x, np.ndarray):
        if x.ndim == 1:
            return dict(zip(G, x))
        # else x.ndim == 2
        return {u: dict(zip(G, row)) for u, row in zip(G, x)}
    return x
|
| 1327 |
+
|
| 1328 |
+
|
| 1329 |
+
def _simrank_similarity_python(
    G,
    source=None,
    target=None,
    importance_factor=0.9,
    max_iterations=1000,
    tolerance=1e-4,
):
    """Returns the SimRank similarity of nodes in the graph ``G``.

    This pure Python version is provided for pedagogical purposes.

    Examples
    --------
    >>> G = nx.cycle_graph(2)
    >>> nx.similarity._simrank_similarity_python(G)
    {0: {0: 1, 1: 0.0}, 1: {0: 0.0, 1: 1}}
    >>> nx.similarity._simrank_similarity_python(G, source=0)
    {0: 1, 1: 0.0}
    >>> nx.similarity._simrank_similarity_python(G, source=0, target=0)
    1
    """
    # Start from the identity relation: each node is fully similar to
    # itself and not at all to any other node.
    newsim = {u: {v: 1 if u == v else 0 for v in G} for u in G}

    # Mean similarity over a list of node pairs, read from the values of
    # the previous iteration (``newsim`` is rebound only after the whole
    # update dict below has been built).
    def avg_sim(pairs):
        if not pairs:
            return 0.0
        return sum(newsim[w][x] for (w, x) in pairs) / len(pairs)

    # In-neighbors for directed graphs, plain neighbors otherwise.
    Gadj = G.pred if G.is_directed() else G.adj

    def sim(u, v):
        return importance_factor * avg_sim(list(product(Gadj[u], Gadj[v])))

    for its in range(max_iterations):
        oldsim = newsim
        newsim = {u: {v: 1 if u == v else sim(u, v) for v in G} for u in G}
        # Converged when every entry moved by at most a relative tolerance.
        converged = True
        for u, nbrs in oldsim.items():
            for v, old in nbrs.items():
                if abs(newsim[u][v] - old) > tolerance * (1 + abs(old)):
                    converged = False
                    break
            if not converged:
                break
        if converged:
            break

    if its + 1 == max_iterations:
        raise nx.ExceededMaxIterations(
            f"simrank did not converge after {max_iterations} iterations."
        )

    if source is None:
        return newsim
    if target is None:
        return newsim[source]
    return newsim[source][target]
|
| 1387 |
+
|
| 1388 |
+
|
| 1389 |
+
def _simrank_similarity_numpy(
    G,
    source=None,
    target=None,
    importance_factor=0.9,
    max_iterations=1000,
    tolerance=1e-4,
):
    """Calculate SimRank of nodes in ``G`` using matrices with ``numpy``.

    The SimRank algorithm for determining node similarity is defined in
    [1]_.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph

    source : node
        If this is specified, the returned dictionary maps each node
        ``v`` in the graph to the similarity between ``source`` and
        ``v``.

    target : node
        If both ``source`` and ``target`` are specified, the similarity
        value between ``source`` and ``target`` is returned. If
        ``target`` is specified but ``source`` is not, this argument is
        ignored.

    importance_factor : float
        The relative importance of indirect neighbors with respect to
        direct neighbors.

    max_iterations : integer
        Maximum number of iterations.

    tolerance : float
        Error tolerance used to check convergence. When an iteration of
        the algorithm finds that no similarity value changes more than
        this amount, the algorithm halts.

    Returns
    -------
    similarity : numpy array or float
        If ``source`` and ``target`` are both ``None``, this returns a
        2D array containing SimRank scores of the nodes.

        If ``source`` is not ``None`` but ``target`` is, this returns an
        1D array containing SimRank scores of ``source`` and that
        node.

        If neither ``source`` nor ``target`` is ``None``, this returns
        the similarity value for the given pair of nodes.

    Examples
    --------
    >>> G = nx.cycle_graph(2)
    >>> nx.similarity._simrank_similarity_numpy(G)
    array([[1., 0.],
           [0., 1.]])
    >>> nx.similarity._simrank_similarity_numpy(G, source=0)
    array([1., 0.])
    >>> nx.similarity._simrank_similarity_numpy(G, source=0, target=0)
    1.0

    References
    ----------
    .. [1] G. Jeh and J. Widom.
           "SimRank: a measure of structural-context similarity",
           In KDD'02: Proceedings of the Eighth ACM SIGKDD
           International Conference on Knowledge Discovery and Data Mining,
           pp. 538--543. ACM Press, 2002.
    """
    # Fixed-point iteration of
    #
    #     S = max{C * (A.T * S * A), I}
    #
    # where C is the importance factor, A is the column normalized
    # adjacency matrix, and I is the identity matrix.
    import numpy as np

    A = nx.to_numpy_array(G)

    # Column-normalize A.  All-zero columns are divided by 1 so they stay
    # zero instead of producing NaNs.
    col_sums = np.array(A.sum(axis=0))
    col_sums[col_sums == 0] = 1
    A /= col_sums

    sim = np.eye(len(G), dtype=np.float64)
    for its in range(max_iterations):
        previous = sim.copy()
        sim = importance_factor * ((A.T @ previous) @ A)
        # Self-similarity is pinned to 1 each step.
        np.fill_diagonal(sim, 1.0)
        if np.allclose(previous, sim, atol=tolerance):
            break

    if its + 1 == max_iterations:
        raise nx.ExceededMaxIterations(
            f"simrank did not converge after {max_iterations} iterations."
        )

    if source is None:
        return sim
    if target is None:
        return sim[source]
    return sim[source, target]
|
| 1496 |
+
|
| 1497 |
+
|
| 1498 |
+
@nx._dispatch(edge_attrs="weight")
def panther_similarity(
    G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None, weight="weight"
):
    r"""Returns the Panther similarity of nodes in the graph `G` to node ``v``.

    Panther is a similarity metric that says "two objects are considered
    to be similar if they frequently appear on the same paths." [1]_.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph
    source : node
        Source node for which to find the top `k` similar other nodes
    k : int (default = 5)
        The number of most similar nodes to return
    path_length : int (default = 5)
        How long the randomly generated paths should be (``T`` in [1]_)
    c : float (default = 0.5)
        A universal positive constant used to scale the number
        of sample random paths to generate.
    delta : float (default = 0.1)
        The probability that the similarity $S$ is not an epsilon-approximation to (R, phi),
        where $R$ is the number of random paths and $\phi$ is the probability
        that an element sampled from a set $A \subseteq D$, where $D$ is the domain.
    eps : float or None (default = None)
        The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore,
        if no value is provided, the recommended computed value will be used.
    weight : string or None, optional (default="weight")
        The name of an edge attribute that holds the numerical value
        used as a weight. If None then each edge has weight 1.

    Returns
    -------
    similarity : dictionary
        Dictionary of nodes to similarity scores (as floats). Note:
        the self-similarity (i.e., ``v``) will not be included in
        the returned dictionary.

    Examples
    --------
    >>> G = nx.star_graph(10)
    >>> sim = nx.panther_similarity(G, 0)

    References
    ----------
    .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J.
           Panther: Fast top-k similarity search on large networks.
           In Proceedings of the ACM SIGKDD International Conference
           on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).
           Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.
    """
    import numpy as np

    num_nodes = G.number_of_nodes()
    if num_nodes < k:
        warnings.warn(
            f"Number of nodes is {num_nodes}, but requested k is {k}. "
            "Setting k to number of nodes."
        )
        k = num_nodes
    # According to [1], they empirically determined
    # a good value for ``eps`` to be sqrt( 1 / |E| )
    if eps is None:
        eps = np.sqrt(1.0 / G.number_of_edges())

    # Maps node -> position in G's node order, and back (node_map[i]).
    inv_node_map = {name: index for index, name in enumerate(G.nodes)}
    node_map = np.array(G)

    # Calculate the sample size ``R`` for how many paths
    # to randomly generate
    t_choose_2 = math.comb(path_length, 2)
    sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta)))
    index_map = {}
    # The generator is consumed only for its side effect: populating
    # index_map with node -> indices of the sampled paths containing it.
    _ = list(
        generate_random_paths(
            G, sample_size, path_length=path_length, index_map=index_map, weight=weight
        )
    )
    S = np.zeros(num_nodes)

    inv_sample_size = 1 / sample_size

    source_paths = set(index_map[source])

    # Calculate the path similarities
    # between ``source`` (v) and ``node`` (v_j)
    # using our inverted index mapping of
    # vertices to paths
    for node, paths in index_map.items():
        # Only consider paths where both
        # ``node`` and ``source`` are present
        common_paths = source_paths.intersection(paths)
        S[inv_node_map[node]] = len(common_paths) * inv_sample_size

    # Retrieve top ``k`` similar
    # Note: the below performed anywhere from 4-10x faster
    # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]``
    top_k_unsorted = np.argpartition(S, -k)[-k:]
    top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1]

    # Add back the similarity scores
    top_k_sorted_names = (node_map[n] for n in top_k_sorted)
    top_k_with_val = dict(zip(top_k_sorted_names, S[top_k_sorted]))

    # Remove the self-similarity
    # NOTE(review): when ``source`` happens not to be among the top-k
    # entries, nothing is removed and k (not k - 1) items are returned —
    # confirm this is the intended contract.
    top_k_with_val.pop(source, None)
    return top_k_with_val
|
| 1607 |
+
|
| 1608 |
+
|
| 1609 |
+
@nx._dispatch(edge_attrs="weight")
def generate_random_paths(
    G, sample_size, path_length=5, index_map=None, weight="weight"
):
    """Randomly generate `sample_size` paths of length `path_length`.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph
    sample_size : integer
        The number of paths to generate. This is ``R`` in [1]_.
    path_length : integer (default = 5)
        The maximum size of the path to randomly generate.
        This is ``T`` in [1]_. According to the paper, ``T >= 5`` is
        recommended.
    index_map : dictionary, optional
        If provided, this will be populated with the inverted
        index of nodes mapped to the set of generated random path
        indices within ``paths``.
    weight : string or None, optional (default="weight")
        The name of an edge attribute that holds the numerical value
        used as a weight. If None then each edge has weight 1.

    Returns
    -------
    paths : generator of lists
        Generator of `sample_size` paths each with length `path_length`.

    Examples
    --------
    Note that the return value is a generator, so it must be consumed
    (for example with ``list``) before the paths can be indexed:

    >>> G = nx.star_graph(3)
    >>> random_path = list(nx.generate_random_paths(G, 2))

    By passing a dictionary into `index_map`, it will build an
    inverted index mapping of nodes to the paths in which that node is present:

    >>> G = nx.star_graph(3)
    >>> index_map = {}
    >>> random_path = list(nx.generate_random_paths(G, 3, index_map=index_map))
    >>> paths_containing_node_0 = [random_path[path_idx] for path_idx in index_map.get(0, [])]

    References
    ----------
    .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J.
       Panther: Fast top-k similarity search on large networks.
       In Proceedings of the ACM SIGKDD International Conference
       on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).
       Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.
    """
    import numpy as np

    # Calculate transition probabilities between
    # every pair of vertices according to Eq. (3)
    # NOTE(review): a node with zero (weighted) degree makes its row sum 0,
    # so np.reciprocal yields inf and np.random.choice rejects the
    # probability vector -- this assumes every node has at least one edge;
    # confirm against callers.
    adj_mat = nx.to_numpy_array(G, weight=weight)
    inv_row_sums = np.reciprocal(adj_mat.sum(axis=1)).reshape(-1, 1)
    transition_probabilities = adj_mat * inv_row_sums

    node_map = np.array(G)  # positional index -> node label
    num_nodes = G.number_of_nodes()

    for path_index in range(sample_size):
        # Sample current vertex v = v_i uniformly at random
        node_index = np.random.randint(0, high=num_nodes)
        node = node_map[node_index]

        # Add v into p_r and add p_r into the path set
        # of v, i.e., P_v
        path = [node]

        # Build the inverted index (P_v) of vertices to paths
        if index_map is not None:
            index_map.setdefault(node, set()).add(path_index)

        starting_index = node_index
        for _ in range(path_length):
            # Randomly sample a neighbor (v_j) according
            # to transition probabilities from ``node`` (v) to its neighbors
            neighbor_index = np.random.choice(
                num_nodes, p=transition_probabilities[starting_index]
            )

            # Set current vertex (v = v_j)
            starting_index = neighbor_index

            # Add v into p_r
            neighbor_node = node_map[neighbor_index]
            path.append(neighbor_node)

            # Add p_r into P_v
            if index_map is not None:
                index_map.setdefault(neighbor_node, set()).add(path_index)

        yield path
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/sparsifiers.py
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Functions for computing sparsifiers of graphs."""
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import networkx as nx
|
| 5 |
+
from networkx.utils import not_implemented_for, py_random_state
|
| 6 |
+
|
| 7 |
+
__all__ = ["spanner"]
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@py_random_state(3)
@nx._dispatch(edge_attrs="weight")
def spanner(G, stretch, weight=None, seed=None):
    """Returns a spanner of the given graph with the given stretch.

    A spanner of a graph G = (V, E) with stretch t is a subgraph
    H = (V, E_S) such that E_S is a subset of E and the distance between
    any pair of nodes in H is at most t times the distance between the
    nodes in G.

    Parameters
    ----------
    G : NetworkX graph
        An undirected simple graph.

    stretch : float
        The stretch of the spanner.

    weight : object
        The edge attribute to use as distance.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    NetworkX graph
        A spanner of the given graph with the given stretch.

    Raises
    ------
    ValueError
        If a stretch less than 1 is given.

    Notes
    -----
    This function implements the spanner algorithm by Baswana and Sen,
    see [1].

    This algorithm is a randomized las vegas algorithm: The expected
    running time is O(km) where k = (stretch + 1) // 2 and m is the
    number of edges in G. The returned graph is always a spanner of the
    given graph with the specified stretch. For weighted graphs the
    number of edges in the spanner is O(k * n^(1 + 1 / k)) where k is
    defined as above and n is the number of nodes in G. For unweighted
    graphs the number of edges is O(n^(1 + 1 / k) + kn).

    References
    ----------
    [1] S. Baswana, S. Sen. A Simple and Linear Time Randomized
    Algorithm for Computing Sparse Spanners in Weighted Graphs.
    Random Struct. Algorithms 30(4): 532-563 (2007).
    """
    if stretch < 1:
        raise ValueError("stretch must be at least 1")

    # k clustering iterations produce a (2k-1)-spanner; a stretch of t
    # therefore needs k = (t + 1) // 2 iterations.
    k = (stretch + 1) // 2

    # initialize spanner H with empty edge set
    H = nx.empty_graph()
    H.add_nodes_from(G.nodes)

    # phase 1: forming the clusters
    # the residual graph has V' from the paper as its node set
    # and E' from the paper as its edge set
    residual_graph = _setup_residual_graph(G, weight)
    # clustering is a dictionary that maps nodes in a cluster to the
    # cluster center
    clustering = {v: v for v in G.nodes}
    # each surviving center is re-sampled with probability n^(-1/k);
    # size_limit bounds the edges accepted per iteration so the expected
    # spanner size stays O(k * n^(1 + 1/k))
    sample_prob = math.pow(G.number_of_nodes(), -1 / k)
    size_limit = 2 * math.pow(G.number_of_nodes(), 1 + 1 / k)

    i = 0
    while i < k - 1:
        # step 1: sample centers
        sampled_centers = set()
        for center in set(clustering.values()):
            if seed.random() < sample_prob:
                sampled_centers.add(center)

        # combined loop for steps 2 and 3
        edges_to_add = set()
        edges_to_remove = set()
        new_clustering = {}
        for v in residual_graph.nodes:
            # nodes whose cluster survived sampling keep their cluster
            # and add no edges this iteration
            if clustering[v] in sampled_centers:
                continue

            # step 2: find neighboring (sampled) clusters and
            # lightest edges to them
            lightest_edge_neighbor, lightest_edge_weight = _lightest_edge_dicts(
                residual_graph, clustering, v
            )
            neighboring_sampled_centers = (
                set(lightest_edge_weight.keys()) & sampled_centers
            )

            # step 3: add edges to spanner
            if not neighboring_sampled_centers:
                # connect to each neighboring center via lightest edge
                for neighbor in lightest_edge_neighbor.values():
                    edges_to_add.add((v, neighbor))
                # remove all incident edges
                for neighbor in residual_graph.adj[v]:
                    edges_to_remove.add((v, neighbor))

            else:  # there is a neighboring sampled center
                closest_center = min(
                    neighboring_sampled_centers, key=lightest_edge_weight.get
                )
                closest_center_weight = lightest_edge_weight[closest_center]
                closest_center_neighbor = lightest_edge_neighbor[closest_center]

                edges_to_add.add((v, closest_center_neighbor))
                # v joins the cluster of the closest sampled center
                new_clustering[v] = closest_center

                # connect to centers with edge weight less than
                # closest_center_weight
                for center, edge_weight in lightest_edge_weight.items():
                    if edge_weight < closest_center_weight:
                        neighbor = lightest_edge_neighbor[center]
                        edges_to_add.add((v, neighbor))

                # remove edges to centers with edge weight less than
                # closest_center_weight
                for neighbor in residual_graph.adj[v]:
                    neighbor_cluster = clustering[neighbor]
                    neighbor_weight = lightest_edge_weight[neighbor_cluster]
                    if (
                        neighbor_cluster == closest_center
                        or neighbor_weight < closest_center_weight
                    ):
                        edges_to_remove.add((v, neighbor))

        # check whether iteration added too many edges to spanner,
        # if so repeat
        if len(edges_to_add) > size_limit:
            # an iteration is repeated O(1) times on expectation
            # (i is NOT incremented, so the same step re-runs with
            # fresh random samples)
            continue

        # iteration succeeded
        i = i + 1

        # actually add edges to spanner
        for u, v in edges_to_add:
            _add_edge_to_spanner(H, residual_graph, u, v, weight)

        # actually delete edges from residual graph
        residual_graph.remove_edges_from(edges_to_remove)

        # copy old clustering data to new_clustering
        for node, center in clustering.items():
            if center in sampled_centers:
                new_clustering[node] = center
        clustering = new_clustering

        # step 4: remove intra-cluster edges
        for u in residual_graph.nodes:
            for v in list(residual_graph.adj[u]):
                if clustering[u] == clustering[v]:
                    residual_graph.remove_edge(u, v)

        # update residual graph node set
        # (nodes that left every cluster are finished)
        for v in list(residual_graph.nodes):
            if v not in clustering:
                residual_graph.remove_node(v)

    # phase 2: vertex-cluster joining
    # every remaining node connects to each adjacent cluster via its
    # lightest edge
    for v in residual_graph.nodes:
        lightest_edge_neighbor, _ = _lightest_edge_dicts(residual_graph, clustering, v)
        for neighbor in lightest_edge_neighbor.values():
            _add_edge_to_spanner(H, residual_graph, v, neighbor, weight)

    return H
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def _setup_residual_graph(G, weight):
|
| 190 |
+
"""Setup residual graph as a copy of G with unique edges weights.
|
| 191 |
+
|
| 192 |
+
The node set of the residual graph corresponds to the set V' from
|
| 193 |
+
the Baswana-Sen paper and the edge set corresponds to the set E'
|
| 194 |
+
from the paper.
|
| 195 |
+
|
| 196 |
+
This function associates distinct weights to the edges of the
|
| 197 |
+
residual graph (even for unweighted input graphs), as required by
|
| 198 |
+
the algorithm.
|
| 199 |
+
|
| 200 |
+
Parameters
|
| 201 |
+
----------
|
| 202 |
+
G : NetworkX graph
|
| 203 |
+
An undirected simple graph.
|
| 204 |
+
|
| 205 |
+
weight : object
|
| 206 |
+
The edge attribute to use as distance.
|
| 207 |
+
|
| 208 |
+
Returns
|
| 209 |
+
-------
|
| 210 |
+
NetworkX graph
|
| 211 |
+
The residual graph used for the Baswana-Sen algorithm.
|
| 212 |
+
"""
|
| 213 |
+
residual_graph = G.copy()
|
| 214 |
+
|
| 215 |
+
# establish unique edge weights, even for unweighted graphs
|
| 216 |
+
for u, v in G.edges():
|
| 217 |
+
if not weight:
|
| 218 |
+
residual_graph[u][v]["weight"] = (id(u), id(v))
|
| 219 |
+
else:
|
| 220 |
+
residual_graph[u][v]["weight"] = (G[u][v][weight], id(u), id(v))
|
| 221 |
+
|
| 222 |
+
return residual_graph
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def _lightest_edge_dicts(residual_graph, clustering, node):
|
| 226 |
+
"""Find the lightest edge to each cluster.
|
| 227 |
+
|
| 228 |
+
Searches for the minimum-weight edge to each cluster adjacent to
|
| 229 |
+
the given node.
|
| 230 |
+
|
| 231 |
+
Parameters
|
| 232 |
+
----------
|
| 233 |
+
residual_graph : NetworkX graph
|
| 234 |
+
The residual graph used by the Baswana-Sen algorithm.
|
| 235 |
+
|
| 236 |
+
clustering : dictionary
|
| 237 |
+
The current clustering of the nodes.
|
| 238 |
+
|
| 239 |
+
node : node
|
| 240 |
+
The node from which the search originates.
|
| 241 |
+
|
| 242 |
+
Returns
|
| 243 |
+
-------
|
| 244 |
+
lightest_edge_neighbor, lightest_edge_weight : dictionary, dictionary
|
| 245 |
+
lightest_edge_neighbor is a dictionary that maps a center C to
|
| 246 |
+
a node v in the corresponding cluster such that the edge from
|
| 247 |
+
the given node to v is the lightest edge from the given node to
|
| 248 |
+
any node in cluster. lightest_edge_weight maps a center C to the
|
| 249 |
+
weight of the aforementioned edge.
|
| 250 |
+
|
| 251 |
+
Notes
|
| 252 |
+
-----
|
| 253 |
+
If a cluster has no node that is adjacent to the given node in the
|
| 254 |
+
residual graph then the center of the cluster is not a key in the
|
| 255 |
+
returned dictionaries.
|
| 256 |
+
"""
|
| 257 |
+
lightest_edge_neighbor = {}
|
| 258 |
+
lightest_edge_weight = {}
|
| 259 |
+
for neighbor in residual_graph.adj[node]:
|
| 260 |
+
neighbor_center = clustering[neighbor]
|
| 261 |
+
weight = residual_graph[node][neighbor]["weight"]
|
| 262 |
+
if (
|
| 263 |
+
neighbor_center not in lightest_edge_weight
|
| 264 |
+
or weight < lightest_edge_weight[neighbor_center]
|
| 265 |
+
):
|
| 266 |
+
lightest_edge_neighbor[neighbor_center] = neighbor
|
| 267 |
+
lightest_edge_weight[neighbor_center] = weight
|
| 268 |
+
return lightest_edge_neighbor, lightest_edge_weight
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def _add_edge_to_spanner(H, residual_graph, u, v, weight):
|
| 272 |
+
"""Add the edge {u, v} to the spanner H and take weight from
|
| 273 |
+
the residual graph.
|
| 274 |
+
|
| 275 |
+
Parameters
|
| 276 |
+
----------
|
| 277 |
+
H : NetworkX graph
|
| 278 |
+
The spanner under construction.
|
| 279 |
+
|
| 280 |
+
residual_graph : NetworkX graph
|
| 281 |
+
The residual graph used by the Baswana-Sen algorithm. The weight
|
| 282 |
+
for the edge is taken from this graph.
|
| 283 |
+
|
| 284 |
+
u : node
|
| 285 |
+
One endpoint of the edge.
|
| 286 |
+
|
| 287 |
+
v : node
|
| 288 |
+
The other endpoint of the edge.
|
| 289 |
+
|
| 290 |
+
weight : object
|
| 291 |
+
The edge attribute to use as distance.
|
| 292 |
+
"""
|
| 293 |
+
H.add_edge(u, v)
|
| 294 |
+
if weight:
|
| 295 |
+
H[u][v][weight] = residual_graph[u][v]["weight"][0]
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_chains.cpython-311.pyc
ADDED
|
Binary file (7.49 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-311.pyc
ADDED
|
Binary file (9.26 kB). View file
|
|
|