ZTWHHH committed on
Commit
ce3f3b5
·
verified ·
1 Parent(s): c50940a

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-310.pyc +0 -0
  2. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-310.pyc +0 -0
  3. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-310.pyc +0 -0
  4. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-310.pyc +0 -0
  5. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-310.pyc +0 -0
  6. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-310.pyc +0 -0
  7. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-310.pyc +0 -0
  10. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-310.pyc +0 -0
  11. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py +37 -0
  22. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/__init__.py +20 -0
  23. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/betweenness.py +435 -0
  24. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/betweenness_subset.py +274 -0
  25. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/closeness.py +281 -0
  26. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py +341 -0
  27. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py +226 -0
  28. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_closeness.py +95 -0
  29. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/degree_alg.py +149 -0
  30. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/dispersion.py +107 -0
  31. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/eigenvector.py +341 -0
  32. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/flow_matrix.py +130 -0
  33. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/group.py +786 -0
  34. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/harmonic.py +80 -0
  35. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/katz.py +330 -0
  36. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/laplacian.py +149 -0
  37. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/load.py +199 -0
  38. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/percolation.py +128 -0
  39. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/reaching.py +206 -0
  40. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/second_order.py +141 -0
  41. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/subgraph_alg.py +339 -0
  42. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/__init__.py +0 -0
  43. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py +780 -0
  45. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py +340 -0
  46. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py +306 -0
  47. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py +197 -0
  48. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py +147 -0
  49. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py +43 -0
  50. parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py +144 -0
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.99 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-310.pyc ADDED
Binary file (8.47 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-310.pyc ADDED
Binary file (9.13 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-310.pyc ADDED
Binary file (7.49 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-310.pyc ADDED
Binary file (2.26 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-310.pyc ADDED
Binary file (10.8 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-310.pyc ADDED
Binary file (4.05 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-310.pyc ADDED
Binary file (16.2 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-310.pyc ADDED
Binary file (6.03 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-310.pyc ADDED
Binary file (17.9 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-310.pyc ADDED
Binary file (4.03 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-310.pyc ADDED
Binary file (1.92 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-310.pyc ADDED
Binary file (5.32 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-310.pyc ADDED
Binary file (3.34 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-310.pyc ADDED
Binary file (5.23 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-310.pyc ADDED
Binary file (9.38 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-310.pyc ADDED
Binary file (12.4 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-310.pyc ADDED
Binary file (1.41 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-310.pyc ADDED
Binary file (2.19 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Unit tests for the :mod:`networkx.algorithms.bipartite.redundancy` module.
2
+
3
+ """
4
+
5
+ import pytest
6
+
7
+ from networkx import NetworkXError, cycle_graph
8
+ from networkx.algorithms.bipartite import complete_bipartite_graph, node_redundancy
9
+
10
+
11
def test_no_redundant_nodes():
    """Every node of K_{2,2} has redundancy coefficient 1, with or without a subset."""
    G = complete_bipartite_graph(2, 2)

    # Default call: redundancy for all nodes, all equal to 1.
    rc = node_redundancy(G)
    assert all(val == 1 for val in rc.values())

    # Restricting to a subset of nodes returns entries for just those nodes.
    rc = node_redundancy(G, (2, 3))
    assert rc == {2: 1.0, 3: 1.0}
21
+
22
+
23
def test_redundant_nodes():
    """Adding the chord {0, 3} to C_6 gives its endpoints redundancy 2/3, all others 1."""
    G = cycle_graph(6)
    chord = {0, 3}
    G.add_edge(*chord)
    rc = node_redundancy(G)
    for node in chord:
        assert rc[node] == 2 / 3
    for node in set(G) - chord:
        assert rc[node] == 1
32
+
33
+
34
def test_not_enough_neighbors():
    """node_redundancy raises NetworkXError when a node has fewer than two neighbors."""
    with pytest.raises(NetworkXError):
        # K_{1,2}: the two degree-1 nodes have too few neighbors for redundancy.
        G = complete_bipartite_graph(1, 2)
        node_redundancy(G)
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/__init__.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .betweenness import *
2
+ from .betweenness_subset import *
3
+ from .closeness import *
4
+ from .current_flow_betweenness import *
5
+ from .current_flow_betweenness_subset import *
6
+ from .current_flow_closeness import *
7
+ from .degree_alg import *
8
+ from .dispersion import *
9
+ from .eigenvector import *
10
+ from .group import *
11
+ from .harmonic import *
12
+ from .katz import *
13
+ from .load import *
14
+ from .percolation import *
15
+ from .reaching import *
16
+ from .second_order import *
17
+ from .subgraph_alg import *
18
+ from .trophic import *
19
+ from .voterank_alg import *
20
+ from .laplacian import *
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/betweenness.py ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Betweenness centrality measures."""
2
+ from collections import deque
3
+ from heapq import heappop, heappush
4
+ from itertools import count
5
+
6
+ import networkx as nx
7
+ from networkx.algorithms.shortest_paths.weighted import _weight_function
8
+ from networkx.utils import py_random_state
9
+ from networkx.utils.decorators import not_implemented_for
10
+
11
+ __all__ = ["betweenness_centrality", "edge_betweenness_centrality"]
12
+
13
+
14
@py_random_state(5)
@nx._dispatchable(edge_attrs="weight")
def betweenness_centrality(
    G, k=None, normalized=True, weight=None, endpoints=False, seed=None
):
    r"""Compute the shortest-path betweenness centrality for nodes.

    The betweenness centrality of a node $v$ is the sum, over all node
    pairs $(s, t)$, of the fraction of shortest $(s, t)$-paths that pass
    through $v$ (Brandes' algorithm).

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    k : int, optional (default=None)
        If not None, estimate betweenness from ``k`` sampled source nodes
        (``k <= len(G)``); higher values give a better approximation.

    normalized : bool, optional
        If True, values are rescaled by ``2/((n-1)(n-2))`` for undirected
        graphs and ``1/((n-1)(n-2))`` for directed graphs, where ``n`` is
        the number of nodes in G.

    weight : None or string, optional (default=None)
        If None, all edges count as distance 1; otherwise the name of the
        edge attribute used as distance.

    endpoints : bool, optional
        If True, include the path endpoints in the shortest-path counts.

    seed : integer, random_state, or None (default)
        Random state for source sampling; only used when ``k`` is not None.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    edge_betweenness_centrality
    load_centrality

    Notes
    -----
    For weighted graphs the edge weights must be greater than zero;
    zero-weight edges can create infinitely many equal-length paths.

    References
    ----------
    .. [1] Ulrik Brandes: A Faster Algorithm for Betweenness Centrality.
       Journal of Mathematical Sociology 25(2):163-177, 2001.
       https://doi.org/10.1080/0022250X.2001.9990249
    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
       Centrality and their Generic Computation.
       Social Networks 30(2):136-145, 2008.
       https://doi.org/10.1016/j.socnet.2007.11.001
    """
    node_bc = dict.fromkeys(G, 0.0)  # start every node at 0
    # Exact computation uses every node as a source; approximation samples k.
    sources = G if k is None else seed.sample(list(G.nodes()), k)
    # Pick the accumulation rule once, outside the loop.
    accumulate = _accumulate_endpoints if endpoints else _accumulate_basic
    for source in sources:
        # Single-source shortest paths: BFS when unweighted, Dijkstra otherwise.
        if weight is None:
            order, preds, n_paths, _ = _single_source_shortest_path_basic(G, source)
        else:
            order, preds, n_paths, _ = _single_source_dijkstra_path_basic(
                G, source, weight
            )
        node_bc, _ = accumulate(node_bc, order, preds, n_paths, source)
    # Rescale for normalization / undirected double counting / sampling.
    return _rescale(
        node_bc,
        len(G),
        normalized=normalized,
        directed=G.is_directed(),
        k=k,
        endpoints=endpoints,
    )
154
+
155
+
156
@py_random_state(4)
@nx._dispatchable(edge_attrs="weight")
def edge_betweenness_centrality(G, k=None, normalized=True, weight=None, seed=None):
    r"""Compute betweenness centrality for edges.

    The betweenness centrality of an edge $e$ is the sum, over all node
    pairs $(s, t)$, of the fraction of shortest $(s, t)$-paths that pass
    through $e$ (Brandes' algorithm).

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    k : int, optional (default=None)
        If not None, estimate betweenness from ``k`` sampled source nodes
        (``k <= len(G)``); higher values give a better approximation.

    normalized : bool, optional
        If True, values are rescaled by ``2/(n(n-1))`` for undirected
        graphs and ``1/(n(n-1))`` for directed graphs, where ``n`` is the
        number of nodes in G.

    weight : None or string, optional (default=None)
        If None, all edges count as distance 1; otherwise the name of the
        edge attribute used as distance.

    seed : integer, random_state, or None (default)
        Random state for source sampling; only used when ``k`` is not None.

    Returns
    -------
    edges : dictionary
        Dictionary of edges with betweenness centrality as the value.

    See Also
    --------
    betweenness_centrality
    edge_load

    Notes
    -----
    For weighted graphs the edge weights must be greater than zero;
    zero-weight edges can create infinitely many equal-length paths.

    References
    ----------
    .. [1] Ulrik Brandes: A Faster Algorithm for Betweenness Centrality.
       Journal of Mathematical Sociology 25(2):163-177, 2001.
       https://doi.org/10.1080/0022250X.2001.9990249
    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
       Centrality and their Generic Computation.
       Social Networks 30(2):136-145, 2008.
       https://doi.org/10.1016/j.socnet.2007.11.001
    """
    # Accumulate into one dict keyed by both nodes and edges; node entries
    # are working storage and are deleted before returning.
    bc = dict.fromkeys(G, 0.0)
    bc.update(dict.fromkeys(G.edges(), 0.0))
    sources = G if k is None else seed.sample(list(G.nodes()), k)
    for source in sources:
        # Single-source shortest paths: BFS when unweighted, Dijkstra otherwise.
        if weight is None:
            order, preds, n_paths, _ = _single_source_shortest_path_basic(G, source)
        else:
            order, preds, n_paths, _ = _single_source_dijkstra_path_basic(
                G, source, weight
            )
        bc = _accumulate_edges(bc, order, preds, n_paths, source)
    # Drop the per-node working entries so only edges remain.
    for node in G:
        del bc[node]
    bc = _rescale_e(bc, len(G), normalized=normalized, directed=G.is_directed())
    if G.is_multigraph():
        # Multigraphs: split each (u, v) value across the parallel edge keys.
        bc = _add_edge_keys(G, bc, weight=weight)
    return bc
250
+
251
+
252
+ # helpers for betweenness centrality
253
+
254
+
255
+ def _single_source_shortest_path_basic(G, s):
256
+ S = []
257
+ P = {}
258
+ for v in G:
259
+ P[v] = []
260
+ sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G
261
+ D = {}
262
+ sigma[s] = 1.0
263
+ D[s] = 0
264
+ Q = deque([s])
265
+ while Q: # use BFS to find shortest paths
266
+ v = Q.popleft()
267
+ S.append(v)
268
+ Dv = D[v]
269
+ sigmav = sigma[v]
270
+ for w in G[v]:
271
+ if w not in D:
272
+ Q.append(w)
273
+ D[w] = Dv + 1
274
+ if D[w] == Dv + 1: # this is a shortest path, count paths
275
+ sigma[w] += sigmav
276
+ P[w].append(v) # predecessors
277
+ return S, P, sigma, D
278
+
279
+
280
def _single_source_dijkstra_path_basic(G, s, weight):
    """Dijkstra phase of Brandes' algorithm for weighted graphs.

    Returns ``(S, P, sigma, D)``: nodes in non-decreasing distance from
    ``s``, shortest-path predecessor lists, shortest-path counts, and
    distances (only for nodes reachable from ``s``).  Weighted analogue
    of ``_single_source_shortest_path_basic``.
    """
    weight = _weight_function(G, weight)
    # modified from Eppstein
    S = []
    P = {}
    for v in G:
        P[v] = []
    sigma = dict.fromkeys(G, 0.0)  # sigma[v]=0 for v in G
    D = {}
    sigma[s] = 1.0
    push = heappush
    pop = heappop
    seen = {s: 0}
    # Tie-break heap entries with a counter so nodes never get compared.
    c = count()
    Q = []  # use Q as heap with (distance,node id) tuples
    push(Q, (0, next(c), s, s))
    while Q:
        (dist, _, pred, v) = pop(Q)
        if v in D:
            continue  # already searched this node.
        sigma[v] += sigma[pred]  # count paths
        S.append(v)
        D[v] = dist
        for w, edgedata in G[v].items():
            vw_dist = dist + weight(v, w, edgedata)
            if w not in D and (w not in seen or vw_dist < seen[w]):
                # Found a strictly shorter route to w: reset its path
                # count and predecessor list to go via v only.
                seen[w] = vw_dist
                push(Q, (vw_dist, next(c), v, w))
                sigma[w] = 0.0
                P[w] = [v]
            elif vw_dist == seen[w]:  # handle equal paths
                sigma[w] += sigma[v]
                P[w].append(v)
    return S, P, sigma, D
314
+
315
+
316
+ def _accumulate_basic(betweenness, S, P, sigma, s):
317
+ delta = dict.fromkeys(S, 0)
318
+ while S:
319
+ w = S.pop()
320
+ coeff = (1 + delta[w]) / sigma[w]
321
+ for v in P[w]:
322
+ delta[v] += sigma[v] * coeff
323
+ if w != s:
324
+ betweenness[w] += delta[w]
325
+ return betweenness, delta
326
+
327
+
328
+ def _accumulate_endpoints(betweenness, S, P, sigma, s):
329
+ betweenness[s] += len(S) - 1
330
+ delta = dict.fromkeys(S, 0)
331
+ while S:
332
+ w = S.pop()
333
+ coeff = (1 + delta[w]) / sigma[w]
334
+ for v in P[w]:
335
+ delta[v] += sigma[v] * coeff
336
+ if w != s:
337
+ betweenness[w] += delta[w] + 1
338
+ return betweenness, delta
339
+
340
+
341
+ def _accumulate_edges(betweenness, S, P, sigma, s):
342
+ delta = dict.fromkeys(S, 0)
343
+ while S:
344
+ w = S.pop()
345
+ coeff = (1 + delta[w]) / sigma[w]
346
+ for v in P[w]:
347
+ c = sigma[v] * coeff
348
+ if (v, w) not in betweenness:
349
+ betweenness[(w, v)] += c
350
+ else:
351
+ betweenness[(v, w)] += c
352
+ delta[v] += c
353
+ if w != s:
354
+ betweenness[w] += delta[w]
355
+ return betweenness
356
+
357
+
358
+ def _rescale(betweenness, n, normalized, directed=False, k=None, endpoints=False):
359
+ if normalized:
360
+ if endpoints:
361
+ if n < 2:
362
+ scale = None # no normalization
363
+ else:
364
+ # Scale factor should include endpoint nodes
365
+ scale = 1 / (n * (n - 1))
366
+ elif n <= 2:
367
+ scale = None # no normalization b=0 for all nodes
368
+ else:
369
+ scale = 1 / ((n - 1) * (n - 2))
370
+ else: # rescale by 2 for undirected graphs
371
+ if not directed:
372
+ scale = 0.5
373
+ else:
374
+ scale = None
375
+ if scale is not None:
376
+ if k is not None:
377
+ scale = scale * n / k
378
+ for v in betweenness:
379
+ betweenness[v] *= scale
380
+ return betweenness
381
+
382
+
383
+ def _rescale_e(betweenness, n, normalized, directed=False, k=None):
384
+ if normalized:
385
+ if n <= 1:
386
+ scale = None # no normalization b=0 for all nodes
387
+ else:
388
+ scale = 1 / (n * (n - 1))
389
+ else: # rescale by 2 for undirected graphs
390
+ if not directed:
391
+ scale = 0.5
392
+ else:
393
+ scale = None
394
+ if scale is not None:
395
+ if k is not None:
396
+ scale = scale * n / k
397
+ for v in betweenness:
398
+ betweenness[v] *= scale
399
+ return betweenness
400
+
401
+
402
@not_implemented_for("graph")
def _add_edge_keys(G, betweenness, weight=None):
    r"""Adds the corrected betweenness centrality (BC) values for multigraphs.

    Parameters
    ----------
    G : NetworkX graph.

    betweenness : dictionary
        Dictionary mapping adjacent node tuples to betweenness centrality values.

    weight : string or function
        See `_weight_function` for details. Defaults to `None`.

    Returns
    -------
    edges : dictionary
        The parameter `betweenness` including edges with keys and their
        betweenness centrality values.

    The BC value is divided among edges of equal weight.
    """
    _weight = _weight_function(G, weight)

    edge_bc = dict.fromkeys(G.edges, 0.0)
    for u, v in betweenness:
        # d maps each parallel-edge key between u and v to its data dict.
        d = G[u][v]
        # wt is the minimum weight over the parallel edges — the weight
        # the shortest-path computation actually used.
        wt = _weight(u, v, d)
        # Keys of the parallel edges whose weight matches that minimum.
        keys = [k for k in d if _weight(u, v, {k: d[k]}) == wt]
        # Split the (u, v) centrality evenly across those minimal edges.
        bc = betweenness[(u, v)] / len(keys)
        for k in keys:
            edge_bc[(u, v, k)] = bc

    return edge_bc
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/betweenness_subset.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Betweenness centrality measures for subsets of nodes."""
2
+ import networkx as nx
3
+ from networkx.algorithms.centrality.betweenness import (
4
+ _add_edge_keys,
5
+ )
6
+ from networkx.algorithms.centrality.betweenness import (
7
+ _single_source_dijkstra_path_basic as dijkstra,
8
+ )
9
+ from networkx.algorithms.centrality.betweenness import (
10
+ _single_source_shortest_path_basic as shortest_path,
11
+ )
12
+
13
+ __all__ = [
14
+ "betweenness_centrality_subset",
15
+ "edge_betweenness_centrality_subset",
16
+ ]
17
+
18
+
19
@nx._dispatchable(edge_attrs="weight")
def betweenness_centrality_subset(G, sources, targets, normalized=False, weight=None):
    r"""Compute betweenness centrality for a subset of nodes.

    The betweenness centrality of a node $v$ is the sum, over pairs
    $(s, t)$ with $s$ in ``sources`` and $t$ in ``targets``, of the
    fraction of shortest $(s, t)$-paths that pass through $v$.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    sources : list of nodes
        Nodes to use as sources for shortest paths in betweenness.

    targets : list of nodes
        Nodes to use as targets for shortest paths in betweenness.

    normalized : bool, optional
        If True, values are rescaled by ``2/((n-1)(n-2))`` for undirected
        graphs and ``1/((n-1)(n-2))`` for directed graphs, where ``n`` is
        the number of nodes in G.

    weight : None or string, optional (default=None)
        If None, all edges count as distance 1; otherwise the name of the
        edge attribute used as distance.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    edge_betweenness_centrality
    load_centrality

    Notes
    -----
    The normalization matches ``betweenness_centrality`` so that using
    ``sources=G.nodes()`` and ``targets=G.nodes()`` reproduces it.  When
    the source and target subsets differ, paths are counted in one
    direction only, even on undirected graphs.

    For weighted graphs the edge weights must be greater than zero.

    References
    ----------
    .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
       Journal of Mathematical Sociology 25(2):163-177, 2001.
       https://doi.org/10.1080/0022250X.2001.9990249
    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
       Centrality and their Generic Computation.
       Social Networks 30(2):136-145, 2008.
       https://doi.org/10.1016/j.socnet.2007.11.001
    """
    bc = dict.fromkeys(G, 0.0)  # start every node at 0
    for source in sources:
        # Single-source shortest paths: BFS when unweighted, Dijkstra otherwise.
        if weight is None:
            order, preds, n_paths, _ = shortest_path(G, source)
        else:
            order, preds, n_paths, _ = dijkstra(G, source, weight)
        # Accumulate dependencies restricted to the target set.
        bc = _accumulate_subset(bc, order, preds, n_paths, source, targets)
    return _rescale(bc, len(G), normalized=normalized, directed=G.is_directed())
115
+
116
+
117
@nx._dispatchable(edge_attrs="weight")
def edge_betweenness_centrality_subset(
    G, sources, targets, normalized=False, weight=None
):
    r"""Compute betweenness centrality for edges for a subset of nodes.

    .. math::

       c_B(e) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)}

    where $S$ is the set of sources, $T$ is the set of targets,
    $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, and
    $\sigma(s, t|e)$ is the number of those paths passing through
    edge $e$ [2]_.

    Parameters
    ----------
    G : graph
        A networkx graph.

    sources: list of nodes
        Nodes to use as sources for shortest paths in betweenness

    targets: list of nodes
        Nodes to use as targets for shortest paths in betweenness

    normalized : bool, optional
        If True the betweenness values are normalized by `2/(n(n-1))`
        for graphs, and `1/(n(n-1))` for directed graphs where `n`
        is the number of nodes in G.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        Weights are used to calculate weighted shortest paths, so they are
        interpreted as distances.

    Returns
    -------
    edges : dictionary
        Dictionary of edges with Betweenness centrality as the value.

    See Also
    --------
    betweenness_centrality
    edge_load

    Notes
    -----
    The basic algorithm is from [1]_.

    For weighted graphs the edge weights must be greater than zero.
    Zero edge weights can produce an infinite number of equal length
    paths between pairs of nodes.

    The normalization is chosen so that
    edge_betweenness_centrality(G) equals
    edge_betweenness_centrality_subset(G, sources=G.nodes(), targets=G.nodes()).

    References
    ----------
    .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
       Journal of Mathematical Sociology 25(2):163-177, 2001.
       https://doi.org/10.1080/0022250X.2001.9990249
    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
       Centrality and their Generic Computation.
       Social Networks 30(2):136-145, 2008.
       https://doi.org/10.1016/j.socnet.2007.11.001
    """
    # Start every node and every edge at zero betweenness.
    betweenness = dict.fromkeys(G, 0.0)
    betweenness.update(dict.fromkeys(G.edges(), 0.0))
    use_bfs = weight is None
    for source in sources:
        # Single-source shortest paths: BFS when unweighted, Dijkstra otherwise.
        if use_bfs:
            S, P, sigma, _ = shortest_path(G, source)
        else:
            S, P, sigma, _ = dijkstra(G, source, weight)
        betweenness = _accumulate_edges_subset(
            betweenness, S, P, sigma, source, targets
        )
    # Drop the node entries; only edge scores are returned.
    for node in G:
        del betweenness[node]
    betweenness = _rescale_e(
        betweenness, len(G), normalized=normalized, directed=G.is_directed()
    )
    if G.is_multigraph():
        betweenness = _add_edge_keys(G, betweenness, weight=weight)
    return betweenness
202
+
203
+
204
+ def _accumulate_subset(betweenness, S, P, sigma, s, targets):
205
+ delta = dict.fromkeys(S, 0.0)
206
+ target_set = set(targets) - {s}
207
+ while S:
208
+ w = S.pop()
209
+ if w in target_set:
210
+ coeff = (delta[w] + 1.0) / sigma[w]
211
+ else:
212
+ coeff = delta[w] / sigma[w]
213
+ for v in P[w]:
214
+ delta[v] += sigma[v] * coeff
215
+ if w != s:
216
+ betweenness[w] += delta[w]
217
+ return betweenness
218
+
219
+
220
+ def _accumulate_edges_subset(betweenness, S, P, sigma, s, targets):
221
+ """edge_betweenness_centrality_subset helper."""
222
+ delta = dict.fromkeys(S, 0)
223
+ target_set = set(targets)
224
+ while S:
225
+ w = S.pop()
226
+ for v in P[w]:
227
+ if w in target_set:
228
+ c = (sigma[v] / sigma[w]) * (1.0 + delta[w])
229
+ else:
230
+ c = delta[w] / len(P[w])
231
+ if (v, w) not in betweenness:
232
+ betweenness[(w, v)] += c
233
+ else:
234
+ betweenness[(v, w)] += c
235
+ delta[v] += c
236
+ if w != s:
237
+ betweenness[w] += delta[w]
238
+ return betweenness
239
+
240
+
241
+ def _rescale(betweenness, n, normalized, directed=False):
242
+ """betweenness_centrality_subset helper."""
243
+ if normalized:
244
+ if n <= 2:
245
+ scale = None # no normalization b=0 for all nodes
246
+ else:
247
+ scale = 1.0 / ((n - 1) * (n - 2))
248
+ else: # rescale by 2 for undirected graphs
249
+ if not directed:
250
+ scale = 0.5
251
+ else:
252
+ scale = None
253
+ if scale is not None:
254
+ for v in betweenness:
255
+ betweenness[v] *= scale
256
+ return betweenness
257
+
258
+
259
+ def _rescale_e(betweenness, n, normalized, directed=False):
260
+ """edge_betweenness_centrality_subset helper."""
261
+ if normalized:
262
+ if n <= 1:
263
+ scale = None # no normalization b=0 for all nodes
264
+ else:
265
+ scale = 1.0 / (n * (n - 1))
266
+ else: # rescale by 2 for undirected graphs
267
+ if not directed:
268
+ scale = 0.5
269
+ else:
270
+ scale = None
271
+ if scale is not None:
272
+ for v in betweenness:
273
+ betweenness[v] *= scale
274
+ return betweenness
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/closeness.py ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Closeness centrality measures.
3
+ """
4
+ import functools
5
+
6
+ import networkx as nx
7
+ from networkx.exception import NetworkXError
8
+ from networkx.utils.decorators import not_implemented_for
9
+
10
+ __all__ = ["closeness_centrality", "incremental_closeness_centrality"]
11
+
12
+
13
@nx._dispatchable(edge_attrs="distance")
def closeness_centrality(G, u=None, distance=None, wf_improved=True):
    r"""Compute closeness centrality for nodes.

    Closeness centrality [1]_ of a node `u` is the reciprocal of the
    average shortest path distance to `u` over all `n-1` reachable nodes:

    .. math::

        C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},

    where `d(v, u)` is the shortest-path distance between `v` and `u`,
    and `n-1` is the number of nodes reachable from `u`. For directed
    graphs the *incoming* distance to `u` is used; apply the function
    to ``G.reverse()`` for outward distance. Higher values indicate
    higher centrality.

    For graphs with more than one connected component, the Wasserman and
    Faust variant additionally scales by the fraction of reachable
    actors [2]_, so nodes in small components receive smaller values:

    .. math::

        C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},

    with `N` the number of nodes in the graph.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    u : node, optional
        Return only the value for node u

    distance : edge attribute key, optional (default=None)
        Use the specified edge attribute as the edge distance in shortest
        path calculations (via Dijkstra's algorithm). If `None` (the
        default) all edges have a distance of 1. Absent edge attributes
        are assigned a distance of 1; no check is performed to ensure
        that edges have the provided attribute.

    wf_improved : bool, optional (default=True)
        If True, scale by the fraction of nodes reachable (the Wasserman
        and Faust improved formula). For single component graphs it is
        the same as the original formula.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with closeness centrality as the value.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.closeness_centrality(G)
    {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75}

    See Also
    --------
    betweenness_centrality, load_centrality, eigenvector_centrality,
    degree_centrality, incremental_closeness_centrality

    Notes
    -----
    The closeness centrality is normalized to `(n-1)/(|G|-1)` where
    `n` is the number of nodes in the connected part of graph
    containing the node. If the graph is not completely connected,
    this algorithm computes the closeness centrality for each
    connected part separately scaled by that parts size.

    In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use
    the outward distance rather than the inward distance. If you use a
    'distance' keyword and a DiGraph, your results will change between
    v2.2 and v2.3.

    References
    ----------
    .. [1] Linton C. Freeman: Centrality in networks: I.
       Conceptual clarification. Social Networks 1:215-239, 1979.
       https://doi.org/10.1016/0378-8733(78)90021-7
    .. [2] pg. 201 of Wasserman, S. and Faust, K.,
       Social Network Analysis: Methods and Applications, 1994,
       Cambridge University Press.
    """
    if G.is_directed():
        # Closeness uses inward distance; a reversed view provides it.
        G = G.reverse()

    if distance is None:
        path_length = nx.single_source_shortest_path_length
    else:
        # Weighted case: Dijkstra with `distance` as the edge weight.
        path_length = functools.partial(
            nx.single_source_dijkstra_path_length, weight=distance
        )

    nodes = G.nodes if u is None else [u]
    len_G = len(G)
    closeness_dict = {}
    for node in nodes:
        sp = path_length(G, node)
        totsp = sum(sp.values())
        cc = 0.0
        if totsp > 0.0 and len_G > 1:
            cc = (len(sp) - 1.0) / totsp
            if wf_improved:
                # Scale by the fraction of reachable nodes (Wasserman-Faust).
                cc *= (len(sp) - 1.0) / (len_G - 1)
        closeness_dict[node] = cc
    if u is not None:
        return closeness_dict[u]
    return closeness_dict
137
+
138
+
139
@not_implemented_for("directed")
@nx._dispatchable(mutates_input=True)
def incremental_closeness_centrality(
    G, edge, prev_cc=None, insertion=True, wf_improved=True
):
    r"""Incremental closeness centrality for nodes.

    Compute closeness centrality for nodes using level-based work
    filtering as described in Incremental Algorithms for Closeness
    Centrality by Sariyuce et al. [2]_. Level-based work filtering
    detects unnecessary updates to the closeness centrality and filters
    them out.

    ---
    From "Incremental Algorithms for Closeness Centrality":

    Theorem 1: Let :math:`G = (V, E)` be a graph and u and v be two
    vertices in V such that there is no edge (u, v) in E. Let
    :math:`G' = (V, E \cup uv)`. Then :math:`cc[s] = cc'[s]` if and
    only if :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`.

    Where :math:`dG(u, v)` denotes the length of the shortest path between
    two vertices u, v in a graph G, cc[s] is the closeness centrality for a
    vertex s in V, and cc'[s] is the closeness centrality for a
    vertex s in V, with the (u, v) edge added.
    ---

    We use Theorem 1 to filter out updates when adding or removing an
    edge. When adding an edge (u, v), we compute the shortest path
    lengths from all other nodes to u and to v before the edge is added.
    When removing an edge, we compute them after the edge is removed.
    Then we reuse the previously computed closeness centrality for nodes
    where :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. This works
    only for undirected, unweighted graphs; a distance argument is not
    supported.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    edge : tuple
        The modified edge (u, v) in the graph.

    prev_cc : dictionary
        The previous closeness centrality for all nodes in the graph.

    insertion : bool, optional
        If True (default) the edge was inserted, otherwise it was deleted
        from the graph.

    wf_improved : bool, optional (default=True)
        If True, scale by the fraction of nodes reachable. This gives the
        Wasserman and Faust improved formula. For single component graphs
        it is the same as the original formula.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with closeness centrality as the value.

    Raises
    ------
    NetworkXError
        If `prev_cc` is given but its keys differ from the nodes of `G`.

    See Also
    --------
    betweenness_centrality, load_centrality, eigenvector_centrality,
    degree_centrality, closeness_centrality

    Notes
    -----
    The closeness centrality is normalized to `(n-1)/(|G|-1)` where
    `n` is the number of nodes in the connected part of graph
    containing the node. If the graph is not completely connected,
    this algorithm computes the closeness centrality for each
    connected part separately.

    The graph `G` is modified temporarily (the edge is inserted or
    removed) and restored before returning.

    References
    ----------
    .. [1] Freeman, L.C., 1979. Centrality in networks: I.
       Conceptual clarification. Social Networks 1, 215--239.
       https://doi.org/10.1016/0378-8733(78)90021-7
    .. [2] Sariyuce, A.E. ; Kaya, K. ; Saule, E. ; Catalyiirek, U.V. Incremental
       Algorithms for Closeness Centrality. 2013 IEEE International Conference on Big Data
       http://sariyuce.com/papers/bigdata13.pdf
    """
    if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()):
        raise NetworkXError("prev_cc and G do not have the same nodes")

    # Unpack edge
    (u, v) = edge
    path_length = nx.single_source_shortest_path_length

    if insertion:
        # For edge insertion, we want shortest paths before the edge exists.
        du = path_length(G, u)
        dv = path_length(G, v)

        G.add_edge(u, v)
    else:
        G.remove_edge(u, v)

        # For edge removal, we want shortest paths after the edge is removed.
        du = path_length(G, u)
        dv = path_length(G, v)

    if prev_cc is None:
        # No previous values to filter against: recompute from scratch.
        # Bug fix: the previous early return left `G` modified and ignored
        # `wf_improved`; restore the graph and honor the flag here too.
        result = nx.closeness_centrality(G, wf_improved=wf_improved)
        if insertion:
            G.remove_edge(u, v)
        else:
            G.add_edge(u, v)
        return result

    len_G = len(G)
    closeness_dict = {}
    for n in G.nodes():
        # Theorem 1: centrality is unchanged when |d(n,u) - d(n,v)| <= 1.
        if n in du and n in dv and abs(du[n] - dv[n]) <= 1:
            closeness_dict[n] = prev_cc[n]
        else:
            sp = path_length(G, n)
            totsp = sum(sp.values())
            _closeness_centrality = 0.0
            if totsp > 0.0 and len_G > 1:
                _closeness_centrality = (len(sp) - 1.0) / totsp
                # Normalize to number of nodes-1 in connected part.
                if wf_improved:
                    s = (len(sp) - 1.0) / (len_G - 1)
                    _closeness_centrality *= s
            closeness_dict[n] = _closeness_centrality

    # Leave the graph as we found it.
    if insertion:
        G.remove_edge(u, v)
    else:
        G.add_edge(u, v)

    return closeness_dict
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Current-flow betweenness centrality measures."""
2
+ import networkx as nx
3
+ from networkx.algorithms.centrality.flow_matrix import (
4
+ CGInverseLaplacian,
5
+ FullInverseLaplacian,
6
+ SuperLUInverseLaplacian,
7
+ flow_matrix_row,
8
+ )
9
+ from networkx.utils import (
10
+ not_implemented_for,
11
+ py_random_state,
12
+ reverse_cuthill_mckee_ordering,
13
+ )
14
+
15
+ __all__ = [
16
+ "current_flow_betweenness_centrality",
17
+ "approximate_current_flow_betweenness_centrality",
18
+ "edge_current_flow_betweenness_centrality",
19
+ ]
20
+
21
+
22
@not_implemented_for("directed")
@py_random_state(7)
@nx._dispatchable(edge_attrs="weight")
def approximate_current_flow_betweenness_centrality(
    G,
    normalized=True,
    weight=None,
    dtype=float,
    solver="full",
    epsilon=0.5,
    kmax=10000,
    seed=None,
):
    r"""Compute the approximate current-flow betweenness centrality for nodes.

    Approximates the current-flow betweenness centrality within absolute
    error of epsilon with high probability [1]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    normalized : bool, optional (default=True)
        If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
        n is the number of nodes in G.

    weight : string or None, optional (default=None)
        Key for edge data used as the edge weight.
        If None, then use 1 as each edge weight.
        The weight reflects the capacity or the strength of the edge.

    dtype : data type (float)
        Default data type for internal matrices.
        Set to np.float32 for lower memory consumption.

    solver : string (default='full')
        Type of linear solver to use for computing the flow matrix.
        Options are "full" (uses most memory), "lu" (recommended), and
        "cg" (uses least memory).

    epsilon: float
        Absolute error tolerance.

    kmax: int
        Maximum number of sample node pairs to use for approximation.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    current_flow_betweenness_centrality

    Notes
    -----
    The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$
    and the space required is $O(m)$ for $n$ nodes and $m$ edges.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm. Unspecified weights are set to 1.

    References
    ----------
    .. [1] Ulrik Brandes and Daniel Fleischer:
       Centrality Measures Based on Current Flow.
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44
    """
    import numpy as np

    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    solvers = {
        "full": FullInverseLaplacian,
        "lu": SuperLUInverseLaplacian,
        "cg": CGInverseLaplacian,
    }
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # Relabel to consecutive integers in RCM order (reduces matrix
    # bandwidth); this could be done without a copy if we really wanted to.
    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
    L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc")
    L = L.astype(dtype)
    C = solvers[solver](L, dtype=dtype)  # initialize solver
    betweenness = dict.fromkeys(H, 0.0)
    nb = (n - 1.0) * (n - 2.0)  # normalization factor
    cstar = n * (n - 1) / nb
    oversample = 1  # adjustable multiplier on the theoretical sample count
    k = oversample * int(np.ceil((cstar / epsilon) ** 2 * np.log(n)))
    if k > kmax:
        msg = f"Number random pairs k>kmax ({k}>{kmax}) "
        raise nx.NetworkXError(msg, "Increase kmax or epsilon")
    cstar2k = cstar / (2 * k)
    for _ in range(k):
        # Sample a random source/sink pair and solve for the potentials.
        s, t = pair = seed.sample(range(n), 2)
        b = np.zeros(n, dtype=dtype)
        b[s] = 1
        b[t] = -1
        p = C.solve(b)
        for v in H:
            if v in pair:
                continue
            for nbr in H[v]:
                w = H[v][nbr].get(weight, 1.0)
                betweenness[v] += float(w * np.abs(p[v] - p[nbr]) * cstar2k)
    factor = 1.0 if normalized else nb / 2.0
    # Remap to original node names and "unnormalize" if required.
    return {ordering[node]: bc * factor for node, bc in betweenness.items()}
144
+
145
+
146
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def current_flow_betweenness_centrality(
    G, normalized=True, weight=None, dtype=float, solver="full"
):
    r"""Compute current-flow betweenness centrality for nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths. It is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    normalized : bool, optional (default=True)
        If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
        n is the number of nodes in G.

    weight : string or None, optional (default=None)
        Key for edge data used as the edge weight.
        If None, then use 1 as each edge weight.
        The weight reflects the capacity or the strength of the edge.

    dtype : data type (float)
        Default data type for internal matrices.
        Set to np.float32 for lower memory consumption.

    solver : string (default='full')
        Type of linear solver to use for computing the flow matrix.
        Options are "full" (uses most memory), "lu" (recommended), and
        "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    approximate_current_flow_betweenness_centrality
    betweenness_centrality
    edge_betweenness_centrality
    edge_current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
    time [1]_, where $I(n-1)$ is the time needed to compute the
    inverse Laplacian. For a full matrix this is $O(n^3)$ but using
    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
    Laplacian matrix condition number.

    The space required is $O(nw)$ where $w$ is the width of the sparse
    Laplacian matrix. Worse case is $w=n$ for $O(n^2)$.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm. Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    N = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # Relabel to consecutive integers in RCM order (reduces matrix
    # bandwidth); this could be done without a copy if we really wanted to.
    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
    betweenness = dict.fromkeys(H, 0.0)  # b[n]=0 for n in H
    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
        # Rank positions of the nodes by decreasing current in this row.
        pos = dict(zip(row.argsort()[::-1], range(N)))
        for i in range(N):
            betweenness[s] += (i - pos[i]) * row.item(i)
            betweenness[t] += (N - i - 1 - pos[i]) * row.item(i)
    nb = (N - 1.0) * (N - 2.0) if normalized else 2.0
    return {ordering[node]: (bc - node) * 2.0 / nb for node, bc in betweenness.items()}
239
+
240
+
241
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def edge_current_flow_betweenness_centrality(
    G, normalized=True, weight=None, dtype=float, solver="full"
):
    r"""Compute current-flow betweenness centrality for edges.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths. It is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    normalized : bool, optional (default=True)
        If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
        n is the number of nodes in G.

    weight : string or None, optional (default=None)
        Key for edge data used as the edge weight.
        If None, then use 1 as each edge weight.
        The weight reflects the capacity or the strength of the edge.

    dtype : data type (default=float)
        Default data type for internal matrices.
        Set to np.float32 for lower memory consumption.

    solver : string (default='full')
        Type of linear solver to use for computing the flow matrix.
        Options are "full" (uses most memory), "lu" (recommended), and
        "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
        Dictionary of edge tuples with betweenness centrality as the value.

    Raises
    ------
    NetworkXError
        The algorithm does not support DiGraphs.
        If the input graph is an instance of DiGraph class, NetworkXError
        is raised.

    See Also
    --------
    betweenness_centrality
    edge_betweenness_centrality
    current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
    time [1]_, where $I(n-1)$ is the time needed to compute the
    inverse Laplacian. For a full matrix this is $O(n^3)$ but using
    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
    Laplacian matrix condition number.

    The space required is $O(nw)$ where $w$ is the width of the sparse
    Laplacian matrix. Worse case is $w=n$ for $O(n^2)$.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm. Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    N = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # Relabel to consecutive integers in RCM order (reduces matrix
    # bandwidth); this could be done without a copy if we really wanted to.
    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
    # Canonicalize edge keys so each undirected edge has one entry.
    betweenness = dict.fromkeys((tuple(sorted(edge)) for edge in H.edges()), 0.0)
    nb = (N - 1.0) * (N - 2.0) if normalized else 2.0
    for row, e in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
        # Rank positions of the nodes by decreasing current in this row.
        pos = dict(zip(row.argsort()[::-1], range(1, N + 1)))
        for i in range(N):
            betweenness[e] += (i + 1 - pos[i]) * row.item(i)
            betweenness[e] += (N - i - pos[i]) * row.item(i)
        betweenness[e] /= nb
    return {(ordering[u], ordering[v]): bc for (u, v), bc in betweenness.items()}
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Current-flow betweenness centrality measures for subsets of nodes."""
2
+ import networkx as nx
3
+ from networkx.algorithms.centrality.flow_matrix import flow_matrix_row
4
+ from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
5
+
6
+ __all__ = [
7
+ "current_flow_betweenness_centrality_subset",
8
+ "edge_current_flow_betweenness_centrality_subset",
9
+ ]
10
+
11
+
12
+ @not_implemented_for("directed")
13
+ @nx._dispatchable(edge_attrs="weight")
14
+ def current_flow_betweenness_centrality_subset(
15
+ G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
16
+ ):
17
+ r"""Compute current-flow betweenness centrality for subsets of nodes.
18
+
19
+ Current-flow betweenness centrality uses an electrical current
20
+ model for information spreading in contrast to betweenness
21
+ centrality which uses shortest paths.
22
+
23
+ Current-flow betweenness centrality is also known as
24
+ random-walk betweenness centrality [2]_.
25
+
26
+ Parameters
27
+ ----------
28
+ G : graph
29
+ A NetworkX graph
30
+
31
+ sources: list of nodes
32
+ Nodes to use as sources for current
33
+
34
+ targets: list of nodes
35
+ Nodes to use as sinks for current
36
+
37
+ normalized : bool, optional (default=True)
38
+ If True the betweenness values are normalized by b=b/(n-1)(n-2) where
39
+ n is the number of nodes in G.
40
+
41
+ weight : string or None, optional (default=None)
42
+ Key for edge data used as the edge weight.
43
+ If None, then use 1 as each edge weight.
44
+ The weight reflects the capacity or the strength of the
45
+ edge.
46
+
47
+ dtype: data type (float)
48
+ Default data type for internal matrices.
49
+ Set to np.float32 for lower memory consumption.
50
+
51
+ solver: string (default='lu')
52
+ Type of linear solver to use for computing the flow matrix.
53
+ Options are "full" (uses most memory), "lu" (recommended), and
54
+ "cg" (uses least memory).
55
+
56
+ Returns
57
+ -------
58
+ nodes : dictionary
59
+ Dictionary of nodes with betweenness centrality as the value.
60
+
61
+ See Also
62
+ --------
63
+ approximate_current_flow_betweenness_centrality
64
+ betweenness_centrality
65
+ edge_betweenness_centrality
66
+ edge_current_flow_betweenness_centrality
67
+
68
+ Notes
69
+ -----
70
+ Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
71
+ time [1]_, where $I(n-1)$ is the time needed to compute the
72
+ inverse Laplacian. For a full matrix this is $O(n^3)$ but using
73
+ sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
74
+ Laplacian matrix condition number.
75
+
76
+ The space required is $O(nw)$ where $w$ is the width of the sparse
77
+ Laplacian matrix. Worst case is $w=n$ for $O(n^2)$.
78
+
79
+ If the edges have a 'weight' attribute they will be used as
80
+ weights in this algorithm. Unspecified weights are set to 1.
81
+
82
+ References
83
+ ----------
84
+ .. [1] Centrality Measures Based on Current Flow.
85
+ Ulrik Brandes and Daniel Fleischer,
86
+ Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
87
+ LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
88
+ https://doi.org/10.1007/978-3-540-31856-9_44
89
+
90
+ .. [2] A measure of betweenness centrality based on random walks,
91
+ M. E. J. Newman, Social Networks 27, 39-54 (2005).
92
+ """
93
+ import numpy as np
94
+
95
+ from networkx.utils import reverse_cuthill_mckee_ordering
96
+
97
+ if not nx.is_connected(G):
98
+ raise nx.NetworkXError("Graph not connected.")
99
+ N = G.number_of_nodes()
100
+ ordering = list(reverse_cuthill_mckee_ordering(G))
101
+ # make a copy with integer labels according to rcm ordering
102
+ # this could be done without a copy if we really wanted to
103
+ mapping = dict(zip(ordering, range(N)))
104
+ H = nx.relabel_nodes(G, mapping)
105
+ betweenness = dict.fromkeys(H, 0.0) # b[n]=0 for n in H
106
+ for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
107
+ for ss in sources:
108
+ i = mapping[ss]
109
+ for tt in targets:
110
+ j = mapping[tt]
111
+ betweenness[s] += 0.5 * abs(row.item(i) - row.item(j))
112
+ betweenness[t] += 0.5 * abs(row.item(i) - row.item(j))
113
+ if normalized:
114
+ nb = (N - 1.0) * (N - 2.0) # normalization factor
115
+ else:
116
+ nb = 2.0
117
+ for node in H:
118
+ betweenness[node] = betweenness[node] / nb + 1.0 / (2 - N)
119
+ return {ordering[node]: value for node, value in betweenness.items()}
120
+
121
+
122
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def edge_current_flow_betweenness_centrality_subset(
    G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
):
    r"""Compute current-flow betweenness centrality for edges using subsets
    of nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    sources: list of nodes
      Nodes to use as sources for current

    targets: list of nodes
      Nodes to use as sinks for current

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by b=b/(n-1)(n-2) where
      n is the number of nodes in G.

    weight : string or None, optional (default=None)
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.
      The weight reflects the capacity or the strength of the
      edge.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    Returns
    -------
    nodes : dict
       Dictionary of edge tuples with betweenness centrality as the value.

    Raises
    ------
    NetworkXError
        If the graph is not connected.

    See Also
    --------
    betweenness_centrality
    edge_betweenness_centrality
    current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
    time [1]_, where $I(n-1)$ is the time needed to compute the
    inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
    Laplacian matrix condition number.

    The space required is $O(nw)$ where $w$ is the width of the sparse
    Laplacian matrix.  Worst case is $w=n$ for $O(n^2)$.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm.  Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    # NOTE: the previous version imported numpy here but never used it;
    # the unused import has been removed.
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    N = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    mapping = dict(zip(ordering, range(N)))
    H = nx.relabel_nodes(G, mapping)
    # canonicalize edge orientation so the keys match flow_matrix_row's edges
    edges = (tuple(sorted((u, v))) for u, v in H.edges())
    betweenness = dict.fromkeys(edges, 0.0)
    if normalized:
        nb = (N - 1.0) * (N - 2.0)  # normalization factor
    else:
        nb = 2.0
    for row, e in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
        for ss in sources:
            i = mapping[ss]
            for tt in targets:
                j = mapping[tt]
                # |current through e| for a unit current injected at ss,
                # extracted at tt; the 0.5 halves the double counting over
                # ordered (source, target) pairs
                betweenness[e] += 0.5 * abs(row.item(i) - row.item(j))
        betweenness[e] /= nb
    # translate the integer RCM labels back to the original node labels
    return {(ordering[s], ordering[t]): value for (s, t), value in betweenness.items()}
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_closeness.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Current-flow closeness centrality measures."""
2
+ import networkx as nx
3
+ from networkx.algorithms.centrality.flow_matrix import (
4
+ CGInverseLaplacian,
5
+ FullInverseLaplacian,
6
+ SuperLUInverseLaplacian,
7
+ )
8
+ from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
9
+
10
+ __all__ = ["current_flow_closeness_centrality", "information_centrality"]
11
+
12
+
13
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def current_flow_closeness_centrality(G, weight=None, dtype=float, solver="lu"):
    """Compute current-flow closeness centrality for nodes.

    Current-flow closeness centrality is variant of closeness
    centrality based on effective resistance between nodes in
    a network. This metric is also known as information centrality.

    Parameters
    ----------
    G : graph
      A NetworkX graph.

    weight : None or string, optional (default=None)
      If None, all edge weights are considered equal.
      Otherwise holds the name of the edge attribute used as weight.
      The weight reflects the capacity or the strength of the
      edge.

    dtype: data type (default=float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with current flow closeness centrality as the value.

    Raises
    ------
    NetworkXError
        If the graph is not connected.

    See Also
    --------
    closeness_centrality

    Notes
    -----
    The algorithm is from Brandes [1]_.

    See also [2]_ for the original definition of information centrality.

    References
    ----------
    .. [1] Ulrik Brandes and Daniel Fleischer,
       Centrality Measures Based on Current Flow.
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44

    .. [2] Karen Stephenson and Marvin Zelen:
       Rethinking centrality: Methods and examples.
       Social Networks 11(1):1-37, 1989.
       https://doi.org/10.1016/0378-8733(89)90016-6
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    solvername = {
        "full": FullInverseLaplacian,
        "lu": SuperLUInverseLaplacian,
        "cg": CGInverseLaplacian,
    }
    N = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
    # accumulated effective-resistance totals per node (previously this was
    # confusingly named `betweenness`; N was also redundantly recomputed here)
    totals = dict.fromkeys(H, 0.0)
    L = nx.laplacian_matrix(H, nodelist=range(N), weight=weight).asformat("csc")
    L = L.astype(dtype)
    C2 = solvername[solver](L, width=1, dtype=dtype)  # initialize solver
    for v in H:
        col = C2.get_row(v)
        for w in H:
            # effective resistance R(v, w) = C[v, v] + C[w, w] - 2*C[v, w];
            # the two terms below split R(v, w) between the v and w totals
            totals[v] += col.item(v) - 2 * col.item(w)
            totals[w] += col.item(v)
    # closeness is the reciprocal of the summed effective resistance,
    # reported under the original node labels
    return {ordering[node]: 1 / value for node, value in totals.items()}


information_centrality = current_flow_closeness_centrality
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/degree_alg.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Degree centrality measures."""
2
+ import networkx as nx
3
+ from networkx.utils.decorators import not_implemented_for
4
+
5
+ __all__ = ["degree_centrality", "in_degree_centrality", "out_degree_centrality"]
6
+
7
+
8
@nx._dispatchable
def degree_centrality(G):
    """Compute the degree centrality for nodes.

    The degree centrality of a node v is the fraction of the other
    nodes that v is connected to.

    Parameters
    ----------
    G : graph
      A networkx graph

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with degree centrality as the value.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.degree_centrality(G)
    {0: 1.0, 1: 1.0, 2: 0.6666666666666666, 3: 0.6666666666666666}

    See Also
    --------
    betweenness_centrality, load_centrality, eigenvector_centrality

    Notes
    -----
    Values are normalized by the maximum possible degree in a simple
    graph, ``n - 1``, where ``n`` is the number of nodes in G.  For
    multigraphs or graphs with self loops the degree can exceed
    ``n - 1``, so centrality values greater than 1 are possible.
    """
    n_nodes = len(G)
    # With at most one node there is nothing to normalize by;
    # by convention every node gets centrality 1.
    if n_nodes <= 1:
        return dict.fromkeys(G, 1)

    scale = 1.0 / (n_nodes - 1.0)
    return {node: deg * scale for node, deg in G.degree()}
50
+
51
+
52
@not_implemented_for("undirected")
@nx._dispatchable
def in_degree_centrality(G):
    """Compute the in-degree centrality for nodes.

    The in-degree centrality of a node v is the fraction of the other
    nodes that v's incoming edges are connected to.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with in-degree centrality as values.

    Raises
    ------
    NetworkXNotImplemented
        If G is undirected.

    Examples
    --------
    >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.in_degree_centrality(G)
    {0: 0.0, 1: 0.3333333333333333, 2: 0.6666666666666666, 3: 0.6666666666666666}

    See Also
    --------
    degree_centrality, out_degree_centrality

    Notes
    -----
    Values are normalized by the maximum possible degree in a simple
    graph, ``n - 1``, where ``n`` is the number of nodes in G.  For
    multigraphs or graphs with self loops the degree can exceed
    ``n - 1``, so centrality values greater than 1 are possible.
    """
    n_nodes = len(G)
    # With at most one node there is nothing to normalize by;
    # by convention every node gets centrality 1.
    if n_nodes <= 1:
        return dict.fromkeys(G, 1)

    scale = 1.0 / (n_nodes - 1.0)
    return {node: deg * scale for node, deg in G.in_degree()}
100
+
101
+
102
@not_implemented_for("undirected")
@nx._dispatchable
def out_degree_centrality(G):
    """Compute the out-degree centrality for nodes.

    The out-degree centrality of a node v is the fraction of the other
    nodes that v's outgoing edges are connected to.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with out-degree centrality as values.

    Raises
    ------
    NetworkXNotImplemented
        If G is undirected.

    Examples
    --------
    >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.out_degree_centrality(G)
    {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0}

    See Also
    --------
    degree_centrality, in_degree_centrality

    Notes
    -----
    Values are normalized by the maximum possible degree in a simple
    graph, ``n - 1``, where ``n`` is the number of nodes in G.  For
    multigraphs or graphs with self loops the degree can exceed
    ``n - 1``, so centrality values greater than 1 are possible.
    """
    n_nodes = len(G)
    # With at most one node there is nothing to normalize by;
    # by convention every node gets centrality 1.
    if n_nodes <= 1:
        return dict.fromkeys(G, 1)

    scale = 1.0 / (n_nodes - 1.0)
    return {node: deg * scale for node, deg in G.out_degree()}
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/dispersion.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import combinations
2
+
3
+ import networkx as nx
4
+
5
+ __all__ = ["dispersion"]
6
+
7
+
8
@nx._dispatchable
def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0):
    r"""Calculate dispersion between `u` and `v` in `G`.

    A link between two actors (`u` and `v`) has a high dispersion when their
    mutual ties (`s` and `t`) are not well connected with each other.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    u : node, optional
        The source for the dispersion score (e.g. ego node of the network).
    v : node, optional
        The target of the dispersion score if specified.
    normalized : bool
        If True (default) normalize by the embeddedness of the nodes (u and v).
    alpha, b, c : float
        Parameters for the normalization procedure. When `normalized` is True,
        the dispersion value is normalized by::

            result = ((dispersion + b) ** alpha) / (embeddedness + c)

        as long as the denominator is nonzero.

    Returns
    -------
    nodes : dictionary
        If u (v) is specified, returns a dictionary of nodes with dispersion
        score for all "target" ("source") nodes. If neither u nor v is
        specified, returns a dictionary of dictionaries for all nodes 'u' in the
        graph with a dispersion score for each node 'v'.

    Notes
    -----
    This implementation follows Lars Backstrom and Jon Kleinberg [1]_. Typical
    usage would be to run dispersion on the ego network $G_u$ if $u$ were
    specified.  Running :func:`dispersion` with neither $u$ nor $v$ specified
    can take some time to complete.

    References
    ----------
    .. [1] Romantic Partnerships and the Dispersion of Social Ties:
        A Network Analysis of Relationship Status on Facebook.
        Lars Backstrom, Jon Kleinberg.
        https://arxiv.org/pdf/1310.6753v1.pdf

    """

    def _dispersion(G_u, u, v):
        """dispersion for all nodes 'v' in a ego network G_u of node 'u'"""
        u_nbrs = set(G_u[u])
        # ST: the mutual neighbors of u and v
        ST = {n for n in G_u[v] if n in u_nbrs}
        set_uv = {u, v}
        # all possible ties of connections that u and v share
        possib = combinations(ST, 2)
        total = 0
        for s, t in possib:
            # neighbors of s that are in G_u, not including u and v
            nbrs_s = u_nbrs.intersection(G_u[s]) - set_uv
            # s and t are not directly connected
            if t not in nbrs_s:
                # s and t do not share a connection
                if nbrs_s.isdisjoint(G_u[t]):
                    # tick for disp(u, v)
                    total += 1
        # neighbors that u and v share
        embeddedness = len(ST)

        dispersion_val = total
        if normalized:
            dispersion_val = (total + b) ** alpha
            if embeddedness + c != 0:
                dispersion_val /= embeddedness + c

        return dispersion_val

    # NOTE: the branches below previously seeded result dicts with
    # ``dict.fromkeys(..., {})``, which makes every key share one mutable
    # dict; the comprehensions avoid that hazard while producing the same
    # values.
    if u is None:
        # v and u are not specified
        if v is None:
            results = {n: {} for n in G}
            for u in G:
                for v in G[u]:
                    results[u][v] = _dispersion(G, u, v)
        # u is not specified, but v is
        else:
            results = {n: _dispersion(G, v, n) for n in G[v]}
    else:
        # u is specified with no target v
        if v is None:
            results = {n: _dispersion(G, u, n) for n in G[u]}
        # both u and v are specified
        else:
            results = _dispersion(G, u, v)

    return results
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/eigenvector.py ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Functions for computing eigenvector centrality."""
2
+ import math
3
+
4
+ import networkx as nx
5
+ from networkx.utils import not_implemented_for
6
+
7
+ __all__ = ["eigenvector_centrality", "eigenvector_centrality_numpy"]
8
+
9
+
10
@not_implemented_for("multigraph")
@nx._dispatchable(edge_attrs="weight")
def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, weight=None):
    r"""Compute the eigenvector centrality for the graph G.

    The centrality of node $i$ is the $i$-th entry of a left eigenvector
    $x$ associated with the positive eigenvalue $\lambda$ of maximum
    modulus of the adjacency matrix $A$, i.e. $\lambda x^T = x^T A$, or
    equivalently $\lambda x_i = \sum_{j\to i} x_j$: each node inherits
    the summed centrality of its predecessors.  For undirected graphs
    this is the familiar right-eigenvector equation $Ax = \lambda x$.
    By the Perron-Frobenius theorem [1]_, if G is strongly connected
    this eigenvector is unique and strictly positive; otherwise several
    such eigenvectors may exist and some entries may be zero.

    Parameters
    ----------
    G : graph
        A networkx graph.
    max_iter : integer, optional (default=100)
        Maximum number of power iterations.
    tol : float, optional (default=1.0e-6)
        Error tolerance used to check convergence in power iteration.
    nstart : dictionary, optional (default=None)
        Starting value of power iteration for each node.  Must have a
        nonzero projection on the desired eigenvector.  If None, an
        all-ones vector is used, which is a safe choice.
    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.  Otherwise holds
        the name of the edge attribute used as weight (interpreted as
        connection strength).

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with eigenvector centrality as the value.
        The associated vector has unit Euclidean norm and nonnegative
        values.

    Raises
    ------
    NetworkXPointlessConcept
        If the graph G is the null graph.
    NetworkXError
        If each value in `nstart` is zero.
    PowerIterationFailedConvergence
        If the power iteration fails to converge within `max_iter`
        iterations at the given tolerance.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> centrality = nx.eigenvector_centrality(G)
    >>> sorted((v, f"{c:0.2f}") for v, c in centrality.items())
    [(0, '0.37'), (1, '0.60'), (2, '0.60'), (3, '0.37')]

    See Also
    --------
    eigenvector_centrality_numpy
    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`

    Notes
    -----
    This function computes the left dominant eigenvector via power
    iteration [2]_; to add the centrality of successors instead,
    reverse the graph first with ``G.reverse()``.  Iteration uses
    $(A + I)$ rather than $A$: the shift preserves eigenvectors while
    guaranteeing convergence even when the eigenvalue of maximum
    modulus is negative.  The method stops when the change between two
    iterates is below ``G.number_of_nodes() * tol``, and raises after
    ``max_iter`` iterations otherwise.

    References
    ----------
    .. [1] Abraham Berman and Robert J. Plemmons.
       "Nonnegative Matrices in the Mathematical Sciences."
       Classics in Applied Mathematics. SIAM, 1994.
    .. [2] Power iteration:: https://en.wikipedia.org/wiki/Power_iteration
    """
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept(
            "cannot compute centrality for the null graph"
        )
    # Default start vector: all ones (always has a nonzero projection
    # on the dominant eigenvector).
    if nstart is None:
        nstart = dict.fromkeys(G, 1)
    if all(value == 0 for value in nstart.values()):
        raise nx.NetworkXError("initial vector cannot have all zero values")
    # Scale the start vector into [0, 1]; the check above guarantees a
    # nonzero denominator.
    total = sum(nstart.values())
    x = {node: value / total for node, value in nstart.items()}
    nnodes = G.number_of_nodes()

    for _ in range(max_iter):
        xlast = x
        # Multiply by (A + I): seed with the identity contribution,
        # then accumulate y^T = x^T A (left eigenvector update).
        x = dict(xlast)
        for node, last_value in xlast.items():
            for nbr, edata in G[node].items():
                edge_w = edata.get(weight, 1) if weight else 1
                x[nbr] += last_value * edge_w
        # Renormalize to unit Euclidean norm.  By Perron-Frobenius the
        # norm should never be zero; guard against numerical error by
        # substituting 1.
        norm = math.hypot(*x.values()) or 1
        x = {node: value / norm for node, value in x.items()}
        # Converged when the L_1 change is small enough.
        if sum(abs(x[node] - xlast[node]) for node in x) < nnodes * tol:
            return x
    raise nx.PowerIterationFailedConvergence(max_iter)
194
+
195
+
196
@nx._dispatchable(edge_attrs="weight")
def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0):
    r"""Compute the eigenvector centrality for the graph G.

    The centrality of node $i$ is the $i$-th entry of a left eigenvector
    $x$ associated with the positive eigenvalue $\lambda$ of maximum
    modulus of the adjacency matrix $A$, i.e. $\lambda x^T = x^T A$, or
    equivalently $\lambda x_i = \sum_{j\to i} x_j$: each node inherits
    the summed centrality of its predecessors.  For undirected graphs
    this is the familiar right-eigenvector equation $Ax = \lambda x$.
    By the Perron-Frobenius theorem [1]_, if G is strongly connected
    this eigenvector is unique and strictly positive; otherwise several
    such eigenvectors may exist and some entries may be zero.

    Parameters
    ----------
    G : graph
        A networkx graph.
    max_iter : integer, optional (default=50)
        Maximum number of Arnoldi update iterations allowed.
    tol : float, optional (default=0)
        Relative accuracy for eigenvalues (stopping criterion).
        The default value of 0 implies machine precision.
    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.  Otherwise holds
        the name of the edge attribute used as weight (interpreted as
        connection strength).

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with eigenvector centrality as the value.
        The associated vector has unit Euclidean norm and nonnegative
        values.

    Raises
    ------
    NetworkXPointlessConcept
        If the graph G is the null graph.
    ArpackNoConvergence
        When the requested convergence is not obtained.  The currently
        converged eigenvalues and eigenvectors can be found as
        eigenvalues and eigenvectors attributes of the exception object.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> centrality = nx.eigenvector_centrality_numpy(G)
    >>> print([f"{node} {centrality[node]:0.2f}" for node in centrality])
    ['0 0.37', '1 0.60', '2 0.60', '3 0.37']

    See Also
    --------
    :func:`scipy.sparse.linalg.eigs`
    eigenvector_centrality
    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`

    Notes
    -----
    This function computes the left dominant eigenvector; to add the
    centrality of successors instead, reverse the graph first with
    ``G.reverse()``.  The implementation delegates to the
    :func:`SciPy sparse eigenvalue solver<scipy.sparse.linalg.eigs>`
    (ARPACK), which finds the dominant eigenpair with Arnoldi
    iterations [2]_.

    References
    ----------
    .. [1] Abraham Berman and Robert J. Plemmons.
       "Nonnegative Matrices in the Mathematical Sciences."
       Classics in Applied Mathematics. SIAM, 1994.
    .. [2] Arnoldi iteration:: https://en.wikipedia.org/wiki/Arnoldi_iteration
    """
    import numpy as np
    import scipy as sp

    if len(G) == 0:
        raise nx.NetworkXPointlessConcept(
            "cannot compute centrality for the null graph"
        )
    adjacency = nx.to_scipy_sparse_array(G, nodelist=list(G), weight=weight, dtype=float)
    # The transpose turns the left-eigenvector problem into a standard
    # right-eigenvector one; "LR" selects the largest real part.
    _, vectors = sp.sparse.linalg.eigs(
        adjacency.T, k=1, which="LR", maxiter=max_iter, tol=tol
    )
    principal = vectors.flatten().real
    # Normalize to unit Euclidean norm with a sign chosen so the entries
    # come out nonnegative.
    scale = np.sign(principal.sum()) * sp.linalg.norm(principal)
    return dict(zip(G, (principal / scale).tolist()))
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/flow_matrix.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Helpers for current-flow betweenness and current-flow closeness
2
+ # Lazy computations for inverse Laplacian and flow-matrix rows.
3
+ import networkx as nx
4
+
5
+
6
@nx._dispatchable(edge_attrs="weight")
def flow_matrix_row(G, weight=None, dtype=float, solver="lu"):
    """Yield rows of the current-flow (betweenness) matrix of `G`, one per edge.

    Parameters
    ----------
    G : graph
        A NetworkX graph whose nodes are assumed to be labeled 0..n-1
        (the Laplacian is built with ``nodelist=range(n)``).
    weight : string or None, optional (default=None)
        Edge attribute used as conductance; ``None`` means every edge
        has weight 1.
    dtype : numpy dtype, optional (default=float)
        Dtype used for the Laplacian and the generated rows.
    solver : {"full", "lu", "cg"}, optional (default="lu")
        Which InverseLaplacian subclass performs the linear solves.

    Yields
    ------
    (row, (u, v)) : (ndarray, tuple)
        The flow-matrix row for edge ``(u, v)``, edges visited in sorted order.
    """
    import numpy as np

    # Map solver name to the class implementing the lazy inverse Laplacian.
    solvername = {
        "full": FullInverseLaplacian,
        "lu": SuperLUInverseLaplacian,
        "cg": CGInverseLaplacian,
    }
    n = G.number_of_nodes()
    L = nx.laplacian_matrix(G, nodelist=range(n), weight=weight).asformat("csc")
    L = L.astype(dtype)
    C = solvername[solver](L, dtype=dtype)  # initialize solver
    w = C.w  # w is the Laplacian matrix width (bandwidth) — size of the row buffer
    # row-by-row flow matrix
    for u, v in sorted(sorted((u, v)) for u, v in G.edges()):
        B = np.zeros(w, dtype=dtype)
        # Edge conductance; falls back to 1.0 when `weight` is None or missing.
        c = G[u][v].get(weight, 1.0)
        B[u % w] = c
        B[v % w] = -c
        # get only the rows needed in the inverse laplacian
        # and multiply to get the flow matrix row
        row = B @ C.get_rows(u, v)
        yield row, (u, v)
31
+
32
+
33
# Class to compute the inverse laplacian only for specified rows
# Allows computation of the current-flow matrix without storing entire
# inverse laplacian matrix
class InverseLaplacian:
    """Base class producing rows of the inverse of a grounded Laplacian on demand.

    Subclasses implement ``init_solver``/``solve``/``solve_inverse`` with a
    concrete linear-algebra backend. Rows are cached in a circular buffer
    ``self.C`` of ``self.w`` rows (``w`` is the matrix bandwidth).
    """

    def __init__(self, L, width=None, dtype=None):
        # numpy is imported lazily and published as a *module* global so that
        # the other methods (and subclasses) can reference ``np`` directly.
        global np
        import numpy as np

        (n, n) = L.shape  # L is square; n = number of nodes
        self.dtype = dtype
        self.n = n
        if width is None:
            self.w = self.width(L)  # bandwidth of L — number of buffered rows
        else:
            self.w = width
        # Circular buffer: row r of the inverse lives at C[r % w].
        self.C = np.zeros((self.w, n), dtype=dtype)
        # Grounded Laplacian: drop the first row/column to make L invertible.
        self.L1 = L[1:, 1:]
        self.init_solver(L)

    def init_solver(self, L):
        # Hook for subclasses: prepare factorizations/preconditioners.
        pass

    def solve(self, r):
        # Subclasses must solve L1 x = r (full-size right-hand side).
        raise nx.NetworkXError("Implement solver")

    def solve_inverse(self, r):
        # Subclasses must return row r of the inverse of L1.
        raise nx.NetworkXError("Implement solver")

    def get_rows(self, r1, r2):
        """Fill the buffer with rows r1..r2 of the inverse and return it."""
        for r in range(r1, r2 + 1):
            self.C[r % self.w, 1:] = self.solve_inverse(r)
        return self.C

    def get_row(self, r):
        """Compute and return a single row r of the inverse."""
        self.C[r % self.w, 1:] = self.solve_inverse(r)
        return self.C[r % self.w]

    def width(self, L):
        """Return the bandwidth of L: max spread of nonzeros within a row, +1."""
        m = 0
        for i, row in enumerate(L):
            w = 0
            x, y = np.nonzero(row)
            if len(y) > 0:
                v = y - i  # column offsets relative to the diagonal
                w = v.max() - v.min() + 1
            m = max(w, m)
        return m
80
+
81
+
82
class FullInverseLaplacian(InverseLaplacian):
    """Dense backend: precomputes the entire inverse of the grounded Laplacian."""

    def init_solver(self, L):
        # Invert the reduced Laplacian and embed it in a full-size matrix
        # whose first (grounded) row and column stay zero.
        self.IL = np.zeros(L.shape, dtype=self.dtype)
        self.IL[1:, 1:] = np.linalg.inv(self.L1.todense())

    def solve(self, rhs):
        # One dense mat-vec against the precomputed inverse.
        return self.IL @ rhs

    def solve_inverse(self, r):
        # Row r of the inverse, minus the grounded first column.
        return self.IL[r, 1:]
94
+
95
+
96
class SuperLUInverseLaplacian(InverseLaplacian):
    """Sparse-LU backend: factorizes the grounded Laplacian once with SuperLU."""

    def init_solver(self, L):
        import scipy as sp

        # Pre-factorize so every later solve is a cheap back-substitution.
        self.lusolve = sp.sparse.linalg.factorized(self.L1.tocsc())

    def solve(self, rhs):
        full = np.zeros(rhs.shape, dtype=self.dtype)
        full[1:] = self.lusolve(rhs[1:])
        return full

    def solve_inverse(self, r):
        # Solve against the r-th unit vector to obtain row r of the inverse.
        unit = np.zeros(self.n, dtype=self.dtype)
        unit[r] = 1
        return self.lusolve(unit[1:])
111
+
112
+
113
class CGInverseLaplacian(InverseLaplacian):
    """Iterative backend: conjugate gradient preconditioned with incomplete LU."""

    def init_solver(self, L):
        # scipy is published as a *module* global so solve/solve_inverse can
        # reference ``sp`` without re-importing.
        global sp
        import scipy as sp

        # Incomplete-LU preconditioner for the grounded Laplacian.
        ilu = sp.sparse.linalg.spilu(self.L1.tocsc())
        n = self.n - 1
        self.M = sp.sparse.linalg.LinearOperator(shape=(n, n), matvec=ilu.solve)

    def solve(self, rhs):
        s = np.zeros(rhs.shape, dtype=self.dtype)
        # atol=0 makes CG stop on the relative tolerance only.
        s[1:] = sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0]
        return s

    def solve_inverse(self, r):
        # Solve against the r-th unit vector to obtain row r of the inverse.
        rhs = np.zeros(self.n, self.dtype)
        rhs[r] = 1
        return sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0]
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/group.py ADDED
@@ -0,0 +1,786 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Group centrality measures."""
2
+ from copy import deepcopy
3
+
4
+ import networkx as nx
5
+ from networkx.algorithms.centrality.betweenness import (
6
+ _accumulate_endpoints,
7
+ _single_source_dijkstra_path_basic,
8
+ _single_source_shortest_path_basic,
9
+ )
10
+ from networkx.utils.decorators import not_implemented_for
11
+
12
+ __all__ = [
13
+ "group_betweenness_centrality",
14
+ "group_closeness_centrality",
15
+ "group_degree_centrality",
16
+ "group_in_degree_centrality",
17
+ "group_out_degree_centrality",
18
+ "prominent_group",
19
+ ]
20
+
21
+
22
@nx._dispatchable(edge_attrs="weight")
def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False):
    r"""Compute the group betweenness centrality for a group of nodes.

    Group betweenness centrality of a group of nodes $C$ is the sum of the
    fraction of all-pairs shortest paths that pass through any vertex in $C$

    .. math::

       c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}

    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
    shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of
    those paths passing through some node in group $C$. Note that
    $(s, t)$ are not members of the group ($V-C$ is the set of nodes
    in $V$ that are not in $C$).

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    C : list or set or list of lists or list of sets
        A group or a list of groups containing nodes which belong to G, for
        which group betweenness centrality is to be calculated.

    normalized : bool, optional (default=True)
        If True, group betweenness is normalized by `1/((|V|-|C|)(|V|-|C|-1))`
        where `|V|` is the number of nodes in G and `|C|` is the number of
        nodes in C.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        The weight of an edge is treated as the length or distance between
        the two sides.

    endpoints : bool, optional (default=False)
        If True include the endpoints in the shortest path counts.

    Raises
    ------
    NodeNotFound
        If node(s) in C are not present in G.

    Returns
    -------
    betweenness : list of floats or float
        If C is a single group then return a float. If C is a list with
        several groups then return a list of group betweenness centralities.

    See Also
    --------
    betweenness_centrality

    Notes
    -----
    Group betweenness centrality is described in [1]_ and its importance
    discussed in [3]_. The initial implementation of the algorithm is mentioned
    in [2]_. This function uses an improved algorithm presented in [4]_.

    The number of nodes in the group must be a maximum of n - 2 where `n`
    is the total number of nodes in the graph.

    For weighted graphs the edge weights must be greater than zero.
    Zero edge weights can produce an infinite number of equal length
    paths between pairs of nodes.

    The total number of paths between source and target is counted
    differently for directed and undirected graphs. Directed paths
    between "u" and "v" are counted as two possible paths (one each
    direction) while undirected paths between "u" and "v" are counted
    as one path. Said another way, the sum in the expression above is
    over all ``s != t`` for directed graphs and for ``s < t`` for
    undirected graphs.

    References
    ----------
    .. [1] M G Everett and S P Borgatti:
       The Centrality of Groups and Classes.
       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
       http://www.analytictech.com/borgatti/group_centrality.htm
    .. [2] Ulrik Brandes:
       On Variants of Shortest-Path Betweenness
       Centrality and their Generic Computation.
       Social Networks 30(2):136-145, 2008.
       http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.9610&rep=rep1&type=pdf
    .. [3] Sourav Medya et. al.:
       Group Centrality Maximization via Network Design.
       SIAM International Conference on Data Mining, SDM 2018, 126–134.
       https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf
    .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev.
       "Fast algorithm for successive computation of group betweenness centrality."
       https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709

    """
    GBC = []  # initialize betweenness
    list_of_groups = True
    # check whether C contains one or many groups: if any element of C is a
    # node of G, C is a single group and is wrapped in a list.
    if any(el in G for el in C):
        C = [C]
        list_of_groups = False
    set_v = {node for group in C for node in group}
    if set_v - G.nodes:  # element(s) of C not in G
        raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are in C but not in G.")

    # pre-processing: path-betweenness matrix PB, path counts sigma,
    # and shortest-path distances D (see [4]).
    PB, sigma, D = _group_preprocessing(G, set_v, weight)

    # the algorithm for each group
    for group in C:
        group = set(group)  # set of nodes in group
        # initialize the matrices of the sigma and the PB
        GBC_group = 0
        sigma_m = deepcopy(sigma)
        PB_m = deepcopy(PB)
        sigma_m_v = deepcopy(sigma_m)
        PB_m_v = deepcopy(PB_m)
        # Successively "absorb" each group member v, updating sigma and PB so
        # that paths through already-counted members are not double counted.
        for v in group:
            GBC_group += PB_m[v][v]
            for x in group:
                for y in group:
                    dxvy = 0
                    dxyv = 0
                    dvxy = 0
                    if not (
                        sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0
                    ):
                        # Fractions of shortest paths through the intermediate
                        # node (y, v, or x respectively) — eq. updates from [4].
                        if D[x][v] == D[x][y] + D[y][v]:
                            dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v]
                        if D[x][y] == D[x][v] + D[v][y]:
                            dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y]
                        if D[v][y] == D[v][x] + D[x][y]:
                            # NOTE(review): this line mixes the original
                            # ``sigma`` with the updated ``sigma_m`` unlike its
                            # two siblings above — verify against [4].
                            dvxy = sigma_m[v][x] * sigma[x][y] / sigma[v][y]
                    sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy)
                    PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy
                    if y != v:
                        PB_m_v[x][y] -= PB_m[x][v] * dxyv
                    if x != v:
                        PB_m_v[x][y] -= PB_m[v][y] * dvxy
            # Swap the working and scratch matrices for the next member.
            sigma_m, sigma_m_v = sigma_m_v, sigma_m
            PB_m, PB_m_v = PB_m_v, PB_m

        # endpoints
        v, c = len(G), len(group)
        if not endpoints:
            scale = 0
            # if the graph is connected then subtract the endpoints from
            # the count for all the nodes in the graph. else count how many
            # nodes are connected to the group's nodes and subtract that.
            if nx.is_directed(G):
                if nx.is_strongly_connected(G):
                    scale = c * (2 * v - c - 1)
            elif nx.is_connected(G):
                scale = c * (2 * v - c - 1)
            if scale == 0:
                for group_node1 in group:
                    for node in D[group_node1]:
                        if node != group_node1:
                            if node in group:
                                scale += 1
                            else:
                                scale += 2
            GBC_group -= scale

        # normalized
        if normalized:
            scale = 1 / ((v - c) * (v - c - 1))
            GBC_group *= scale

        # If undirected then count only the undirected edges
        elif not G.is_directed():
            GBC_group /= 2

        GBC.append(GBC_group)
    if list_of_groups:
        return GBC
    return GBC[0]
198
+
199
+
200
def _group_preprocessing(G, set_v, weight):
    """Compute the shared ingredients for group-betweenness algorithms.

    Returns ``(PB, sigma, D)`` where ``sigma[s][t]`` is the shortest-path
    count from s to t, ``D[s]`` the shortest-path distances from s, and
    ``PB`` the path-betweenness matrix restricted to the nodes in `set_v`.
    """
    sigma = {}
    delta = {}
    D = {}
    betweenness = dict.fromkeys(G, 0)
    for s in G:
        if weight is None:  # use BFS
            S, P, sigma[s], D[s] = _single_source_shortest_path_basic(G, s)
        else:  # use Dijkstra's algorithm
            S, P, sigma[s], D[s] = _single_source_dijkstra_path_basic(G, s, weight)
        betweenness, delta[s] = _accumulate_endpoints(betweenness, S, P, sigma[s], s)
        for i in delta[s]:  # add the paths from s to i and rescale sigma
            if s != i:
                delta[s][i] += 1
            if weight is not None:
                sigma[s][i] = sigma[s][i] / 2
    # building the path betweenness matrix only for nodes that appear in the group
    PB = dict.fromkeys(G)
    for group_node1 in set_v:
        PB[group_node1] = dict.fromkeys(G, 0.0)
        for group_node2 in set_v:
            if group_node2 not in D[group_node1]:
                continue
            for node in G:
                # if node is connected to the two group nodes then continue
                if group_node2 in D[node] and group_node1 in D[node]:
                    # Only accumulate when group_node1 lies on a shortest
                    # node -> group_node2 path.
                    if (
                        D[node][group_node2]
                        == D[node][group_node1] + D[group_node1][group_node2]
                    ):
                        PB[group_node1][group_node2] += (
                            delta[node][group_node2]
                            * sigma[node][group_node1]
                            * sigma[group_node1][group_node2]
                            / sigma[node][group_node2]
                        )
    return PB, sigma, D
237
+
238
+
239
@nx._dispatchable(edge_attrs="weight")
def prominent_group(
    G, k, weight=None, C=None, endpoints=False, normalized=True, greedy=False
):
    r"""Find the prominent group of size $k$ in graph $G$. The prominence of the
    group is evaluated by the group betweenness centrality.

    Group betweenness centrality of a group of nodes $C$ is the sum of the
    fraction of all-pairs shortest paths that pass through any vertex in $C$

    .. math::

       c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}

    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
    shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of
    those paths passing through some node in group $C$. Note that
    $(s, t)$ are not members of the group ($V-C$ is the set of nodes
    in $V$ that are not in $C$).

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    k : int
        The number of nodes in the group.

    normalized : bool, optional (default=True)
        If True, group betweenness is normalized by ``1/((|V|-|C|)(|V|-|C|-1))``
        where ``|V|`` is the number of nodes in G and ``|C|`` is the number of
        nodes in C.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        The weight of an edge is treated as the length or distance between
        the two sides.

    endpoints : bool, optional (default=False)
        If True include the endpoints in the shortest path counts.

    C : list or set, optional (default=None)
        list of nodes which won't be candidates of the prominent group.

    greedy : bool, optional (default=False)
        Using a naive greedy algorithm in order to find non-optimal prominent
        group. For scale free networks the results are negligibly below the
        optimal results.

    Raises
    ------
    NodeNotFound
        If node(s) in C are not present in G.

    Returns
    -------
    max_GBC : float
        The group betweenness centrality of the prominent group.

    max_group : list
        The list of nodes in the prominent group.

    See Also
    --------
    betweenness_centrality, group_betweenness_centrality

    Notes
    -----
    Group betweenness centrality is described in [1]_ and its importance
    discussed in [3]_. The algorithm is described in [2]_ and is based on
    techniques mentioned in [4]_.

    The number of nodes in the group must be a maximum of ``n - 2`` where
    ``n`` is the total number of nodes in the graph.

    For weighted graphs the edge weights must be greater than zero.
    Zero edge weights can produce an infinite number of equal length
    paths between pairs of nodes.

    The total number of paths between source and target is counted
    differently for directed and undirected graphs. Directed paths
    between "u" and "v" are counted as two possible paths (one each
    direction) while undirected paths between "u" and "v" are counted
    as one path. Said another way, the sum in the expression above is
    over all ``s != t`` for directed graphs and for ``s < t`` for
    undirected graphs.

    References
    ----------
    .. [1] M G Everett and S P Borgatti:
       The Centrality of Groups and Classes.
       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
       http://www.analytictech.com/borgatti/group_centrality.htm
    .. [2] Rami Puzis, Yuval Elovici, and Shlomi Dolev:
       "Finding the Most Prominent Group in Complex Networks"
       AI communications 20(4): 287-296, 2007.
       https://www.researchgate.net/profile/Rami_Puzis2/publication/220308855
    .. [3] Sourav Medya et. al.:
       Group Centrality Maximization via Network Design.
       SIAM International Conference on Data Mining, SDM 2018, 126–134.
       https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf
    .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev.
       "Fast algorithm for successive computation of group betweenness centrality."
       https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709
    """
    import numpy as np
    import pandas as pd

    if C is not None:
        C = set(C)
        if C - G.nodes:  # element(s) of C not in G
            raise nx.NodeNotFound(f"The node(s) {C - G.nodes} are in C but not in G.")
        nodes = list(G.nodes - C)
    else:
        nodes = list(G.nodes)
    # DF_tree is the branch-and-bound search tree; each tree node stores the
    # state (candidate list CL, betweenness matrix, GBC, members GM, sigma,
    # per-node contributions) of one partial group.
    DF_tree = nx.Graph()
    DF_tree.__networkx_cache__ = None  # Disable caching
    PB, sigma, D = _group_preprocessing(G, nodes, weight)
    betweenness = pd.DataFrame.from_dict(PB)
    if C is not None:
        for node in C:
            # remove from the betweenness all the nodes not part of the group
            betweenness.drop(index=node, inplace=True)
            betweenness.drop(columns=node, inplace=True)
    # Candidate list sorted by individual contribution (matrix diagonal).
    CL = [node for _, node in sorted(zip(np.diag(betweenness), nodes), reverse=True)]
    max_GBC = 0
    max_group = []
    DF_tree.add_node(
        1,
        CL=CL,
        betweenness=betweenness,
        GBC=0,
        GM=[],
        sigma=sigma,
        cont=dict(zip(nodes, np.diag(betweenness))),
    )

    # the algorithm: seed the root's optimistic bound (heuristic) with the k
    # largest contributions, then run depth-first branch and bound.
    DF_tree.nodes[1]["heu"] = 0
    for i in range(k):
        DF_tree.nodes[1]["heu"] += DF_tree.nodes[1]["cont"][DF_tree.nodes[1]["CL"][i]]
    max_GBC, DF_tree, max_group = _dfbnb(
        G, k, DF_tree, max_GBC, 1, D, max_group, nodes, greedy
    )

    v = len(G)
    if not endpoints:
        scale = 0
        # if the graph is connected then subtract the endpoints from
        # the count for all the nodes in the graph. else count how many
        # nodes are connected to the group's nodes and subtract that.
        if nx.is_directed(G):
            if nx.is_strongly_connected(G):
                scale = k * (2 * v - k - 1)
        elif nx.is_connected(G):
            scale = k * (2 * v - k - 1)
        if scale == 0:
            for group_node1 in max_group:
                for node in D[group_node1]:
                    if node != group_node1:
                        if node in max_group:
                            scale += 1
                        else:
                            scale += 2
        max_GBC -= scale

    # normalized
    if normalized:
        scale = 1 / ((v - k) * (v - k - 1))
        max_GBC *= scale

    # If undirected then count only the undirected edges
    elif not G.is_directed():
        max_GBC /= 2
    # Reported value is rounded to two decimal places.
    max_GBC = float("%.2f" % max_GBC)
    return max_GBC, max_group
413
+
414
+
415
def _dfbnb(G, k, DF_tree, max_GBC, root, D, max_group, nodes, greedy):
    """Depth-first branch-and-bound search over DF_tree for `prominent_group`.

    Expands the tree node `root` into a "plus" child (first candidate added
    to the group) and a "minus" child (first candidate excluded), recursing
    into the more promising child first. Returns the best
    ``(max_GBC, DF_tree, max_group)`` found so far.
    """
    # stopping condition - if we found a group of size k and with higher GBC then prune
    if len(DF_tree.nodes[root]["GM"]) == k and DF_tree.nodes[root]["GBC"] > max_GBC:
        return DF_tree.nodes[root]["GBC"], DF_tree, DF_tree.nodes[root]["GM"]
    # stopping condition - if the size of group members equal to k or there are less than
    # k - |GM| in the candidate list or the heuristic function plus the GBC is below the
    # maximal GBC found then prune
    if (
        len(DF_tree.nodes[root]["GM"]) == k
        or len(DF_tree.nodes[root]["CL"]) <= k - len(DF_tree.nodes[root]["GM"])
        or DF_tree.nodes[root]["GBC"] + DF_tree.nodes[root]["heu"] <= max_GBC
    ):
        return max_GBC, DF_tree, max_group

    # finding the heuristic of both children
    node_p, node_m, DF_tree = _heuristic(k, root, DF_tree, D, nodes, greedy)

    # finding the child with the bigger heuristic + GBC and expand
    # that node first; if greedy then only expand the plus node
    if greedy:
        max_GBC, DF_tree, max_group = _dfbnb(
            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
        )

    elif (
        DF_tree.nodes[node_p]["GBC"] + DF_tree.nodes[node_p]["heu"]
        > DF_tree.nodes[node_m]["GBC"] + DF_tree.nodes[node_m]["heu"]
    ):
        max_GBC, DF_tree, max_group = _dfbnb(
            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
        )
        max_GBC, DF_tree, max_group = _dfbnb(
            G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy
        )
    else:
        max_GBC, DF_tree, max_group = _dfbnb(
            G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy
        )
        max_GBC, DF_tree, max_group = _dfbnb(
            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
        )
    return max_GBC, DF_tree, max_group
457
+
458
+
459
def _heuristic(k, root, DF_tree, D, nodes, greedy):
    """Create the two children of `root` in DF_tree and compute their bounds.

    The "plus" child (``node_p``) adds the top candidate to the group and
    recomputes its sigma/betweenness matrices; the "minus" child (``node_m``)
    merely removes that candidate from the candidate list. Returns
    ``(node_p, node_m, DF_tree)``; ``node_m`` is None in greedy mode.
    """
    import numpy as np

    # This helper function adds two nodes to DF_tree - one left son and the
    # other right son, finds their heuristic, CL, GBC, and GM
    node_p = DF_tree.number_of_nodes() + 1
    node_m = DF_tree.number_of_nodes() + 2
    added_node = DF_tree.nodes[root]["CL"][0]  # best remaining candidate

    # adding the plus node
    DF_tree.add_nodes_from([(node_p, deepcopy(DF_tree.nodes[root]))])
    DF_tree.nodes[node_p]["GM"].append(added_node)
    DF_tree.nodes[node_p]["GBC"] += DF_tree.nodes[node_p]["cont"][added_node]
    root_node = DF_tree.nodes[root]
    # Update sigma and betweenness to discount paths through `added_node`
    # (same update rule as in group_betweenness_centrality).
    for x in nodes:
        for y in nodes:
            dxvy = 0
            dxyv = 0
            dvxy = 0
            if not (
                root_node["sigma"][x][y] == 0
                or root_node["sigma"][x][added_node] == 0
                or root_node["sigma"][added_node][y] == 0
            ):
                if D[x][added_node] == D[x][y] + D[y][added_node]:
                    dxyv = (
                        root_node["sigma"][x][y]
                        * root_node["sigma"][y][added_node]
                        / root_node["sigma"][x][added_node]
                    )
                if D[x][y] == D[x][added_node] + D[added_node][y]:
                    dxvy = (
                        root_node["sigma"][x][added_node]
                        * root_node["sigma"][added_node][y]
                        / root_node["sigma"][x][y]
                    )
                if D[added_node][y] == D[added_node][x] + D[x][y]:
                    dvxy = (
                        root_node["sigma"][added_node][x]
                        * root_node["sigma"][x][y]
                        / root_node["sigma"][added_node][y]
                    )
            DF_tree.nodes[node_p]["sigma"][x][y] = root_node["sigma"][x][y] * (1 - dxvy)
            # betweenness is a DataFrame; .loc[y, x] addresses entry [x][y].
            DF_tree.nodes[node_p]["betweenness"].loc[y, x] = (
                root_node["betweenness"][x][y] - root_node["betweenness"][x][y] * dxvy
            )
            if y != added_node:
                DF_tree.nodes[node_p]["betweenness"].loc[y, x] -= (
                    root_node["betweenness"][x][added_node] * dxyv
                )
            if x != added_node:
                DF_tree.nodes[node_p]["betweenness"].loc[y, x] -= (
                    root_node["betweenness"][added_node][y] * dvxy
                )

    # Refresh candidate list (sorted by updated contribution, members removed)
    # and the per-node contributions for the plus child.
    DF_tree.nodes[node_p]["CL"] = [
        node
        for _, node in sorted(
            zip(np.diag(DF_tree.nodes[node_p]["betweenness"]), nodes), reverse=True
        )
        if node not in DF_tree.nodes[node_p]["GM"]
    ]
    DF_tree.nodes[node_p]["cont"] = dict(
        zip(nodes, np.diag(DF_tree.nodes[node_p]["betweenness"]))
    )
    # Optimistic bound: sum of the largest remaining contributions.
    DF_tree.nodes[node_p]["heu"] = 0
    for i in range(k - len(DF_tree.nodes[node_p]["GM"])):
        DF_tree.nodes[node_p]["heu"] += DF_tree.nodes[node_p]["cont"][
            DF_tree.nodes[node_p]["CL"][i]
        ]

    # adding the minus node - don't insert the first node in the CL to GM
    # Insert minus node only if it isn't the greedy type algorithm
    if not greedy:
        DF_tree.add_nodes_from([(node_m, deepcopy(DF_tree.nodes[root]))])
        DF_tree.nodes[node_m]["CL"].pop(0)
        DF_tree.nodes[node_m]["cont"].pop(added_node)
        DF_tree.nodes[node_m]["heu"] = 0
        for i in range(k - len(DF_tree.nodes[node_m]["GM"])):
            DF_tree.nodes[node_m]["heu"] += DF_tree.nodes[node_m]["cont"][
                DF_tree.nodes[node_m]["CL"][i]
            ]
    else:
        node_m = None

    return node_p, node_m, DF_tree
545
+
546
+
547
@nx._dispatchable(edge_attrs="weight")
def group_closeness_centrality(G, S, weight=None):
    r"""Compute the group closeness centrality for a group of nodes.

    Group closeness centrality of a group of nodes $S$ is a measure
    of how close the group is to the other nodes in the graph.

    .. math::

       c_{close}(S) = \frac{|V-S|}{\sum_{v \in V-S} d_{S, v}}

       d_{S, v} = min_{u \in S} (d_{u, v})

    where $V$ is the set of nodes, $d_{S, v}$ is the distance of
    the group $S$ from $v$ defined as above. ($V-S$ is the set of nodes
    in $V$ that are not in $S$).

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    S : list or set
        S is a group of nodes which belong to G, for which group closeness
        centrality is to be calculated.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        The weight of an edge is treated as the length or distance between
        the two sides.

    Raises
    ------
    NodeNotFound
        If node(s) in S are not present in G.

    Returns
    -------
    closeness : float
        Group closeness centrality of the group S.

    See Also
    --------
    closeness_centrality

    Notes
    -----
    The measure was introduced in [1]_.
    The formula implemented here is described in [2]_.

    Higher values of closeness indicate greater centrality.

    It is assumed that 1 / 0 is 0 (required in the case of directed graphs,
    or when a shortest path length is 0).

    The number of nodes in the group must be a maximum of n - 1 where `n`
    is the total number of nodes in the graph.

    For directed graphs, the incoming distance is utilized here. To use the
    outward distance, act on `G.reverse()`.

    For weighted graphs the edge weights must be greater than zero.
    Zero edge weights can produce an infinite number of equal length
    paths between pairs of nodes.

    References
    ----------
    .. [1] M G Everett and S P Borgatti:
       The Centrality of Groups and Classes.
       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
       http://www.analytictech.com/borgatti/group_centrality.htm
    .. [2] J. Zhao et. al.:
       Measuring and Maximizing Group Closeness Centrality over
       Disk Resident Graphs.
       WWWConference Proceedings, 2014. 689-694.
       https://doi.org/10.1145/2567948.2579356
    """
    if G.is_directed():
        # Incoming distances are wanted, so operate on the reverse view.
        G = G.reverse()
    group = set(S)
    outside = set(G) - group
    # d(S, v) for every node reachable from the group.
    dist_from_group = nx.multi_source_dijkstra_path_length(G, group, weight=weight)
    # Unreachable nodes simply contribute nothing to the sum.
    total = sum(dist_from_group[v] for v in outside if v in dist_from_group)
    if total == 0:  # by convention 1 / 0 is treated as 0
        return 0
    return len(outside) / total
642
+
643
+
644
@nx._dispatchable
def group_degree_centrality(G, S):
    """Compute the group degree centrality for a group of nodes.

    Group degree centrality of a group of nodes $S$ is the fraction
    of non-group members connected to group members.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    S : list or set
        S is a group of nodes which belong to G, for which group degree
        centrality is to be calculated.

    Raises
    ------
    NetworkXError
        If node(s) in S are not in G.

    Returns
    -------
    centrality : float
        Group degree centrality of the group S.

    See Also
    --------
    degree_centrality
    group_in_degree_centrality
    group_out_degree_centrality

    Notes
    -----
    The measure was introduced in [1]_.

    The number of nodes in the group must be a maximum of n - 1 where `n`
    is the total number of nodes in the graph.

    References
    ----------
    .. [1] M G Everett and S P Borgatti:
       The Centrality of Groups and Classes.
       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
       http://www.analytictech.com/borgatti/group_centrality.htm
    """
    members = set(S)
    # Collect every neighbor of a group member, then discard the members
    # themselves to keep only outside nodes adjacent to the group.
    reached = set()
    for member in S:
        reached.update(G.neighbors(member))
    outside_neighbors = reached - members
    # Fraction of the |V| - |S| non-members that touch the group.
    return len(outside_neighbors) / (len(G.nodes()) - len(S))
693
+
694
+
695
@not_implemented_for("undirected")
@nx._dispatchable
def group_in_degree_centrality(G, S):
    """Compute the group in-degree centrality for a group of nodes.

    Group in-degree centrality of a group of nodes $S$ is the fraction
    of non-group members connected to group members by incoming edges.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    S : list or set
        S is a group of nodes which belong to G, for which group in-degree
        centrality is to be calculated.

    Returns
    -------
    centrality : float
        Group in-degree centrality of the group S.

    Raises
    ------
    NetworkXNotImplemented
        If G is undirected.

    NodeNotFound
        If node(s) in S are not in G.

    See Also
    --------
    degree_centrality
    group_degree_centrality
    group_out_degree_centrality

    Notes
    -----
    The number of nodes in the group must be a maximum of n - 1 where `n`
    is the total number of nodes in the graph.

    `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph,
    so for group in-degree centrality, the reverse graph is used.
    """
    # In-degree on G equals out-degree on the reversed graph.
    reversed_graph = G.reverse()
    return group_degree_centrality(reversed_graph, S)
740
+
741
+
742
@not_implemented_for("undirected")
@nx._dispatchable
def group_out_degree_centrality(G, S):
    """Compute the group out-degree centrality for a group of nodes.

    Group out-degree centrality of a group of nodes $S$ is the fraction
    of non-group members connected to group members by outgoing edges.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    S : list or set
        S is a group of nodes which belong to G, for which group in-degree
        centrality is to be calculated.

    Returns
    -------
    centrality : float
        Group out-degree centrality of the group S.

    Raises
    ------
    NetworkXNotImplemented
        If G is undirected.

    NodeNotFound
        If node(s) in S are not in G.

    See Also
    --------
    degree_centrality
    group_degree_centrality
    group_in_degree_centrality

    Notes
    -----
    The number of nodes in the group must be a maximum of n - 1 where `n`
    is the total number of nodes in the graph.

    `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph,
    so for group out-degree centrality, the graph itself is used.
    """
    # G.neighbors already follows outgoing edges, so no transformation needed.
    centrality = group_degree_centrality(G, S)
    return centrality
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/harmonic.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Functions for computing the harmonic centrality of a graph."""
2
+ from functools import partial
3
+
4
+ import networkx as nx
5
+
6
+ __all__ = ["harmonic_centrality"]
7
+
8
+
9
+ @nx._dispatchable(edge_attrs="distance")
10
+ def harmonic_centrality(G, nbunch=None, distance=None, sources=None):
11
+ r"""Compute harmonic centrality for nodes.
12
+
13
+ Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal
14
+ of the shortest path distances from all other nodes to `u`
15
+
16
+ .. math::
17
+
18
+ C(u) = \sum_{v \neq u} \frac{1}{d(v, u)}
19
+
20
+ where `d(v, u)` is the shortest-path distance between `v` and `u`.
21
+
22
+ If `sources` is given as an argument, the returned harmonic centrality
23
+ values are calculated as the sum of the reciprocals of the shortest
24
+ path distances from the nodes specified in `sources` to `u` instead
25
+ of from all nodes to `u`.
26
+
27
+ Notice that higher values indicate higher centrality.
28
+
29
+ Parameters
30
+ ----------
31
+ G : graph
32
+ A NetworkX graph
33
+
34
+ nbunch : container (default: all nodes in G)
35
+ Container of nodes for which harmonic centrality values are calculated.
36
+
37
+ sources : container (default: all nodes in G)
38
+ Container of nodes `v` over which reciprocal distances are computed.
39
+ Nodes not in `G` are silently ignored.
40
+
41
+ distance : edge attribute key, optional (default=None)
42
+ Use the specified edge attribute as the edge distance in shortest
43
+ path calculations. If `None`, then each edge will have distance equal to 1.
44
+
45
+ Returns
46
+ -------
47
+ nodes : dictionary
48
+ Dictionary of nodes with harmonic centrality as the value.
49
+
50
+ See Also
51
+ --------
52
+ betweenness_centrality, load_centrality, eigenvector_centrality,
53
+ degree_centrality, closeness_centrality
54
+
55
+ Notes
56
+ -----
57
+ If the 'distance' keyword is set to an edge attribute key then the
58
+ shortest-path length will be computed using Dijkstra's algorithm with
59
+ that edge attribute as the edge weight.
60
+
61
+ References
62
+ ----------
63
+ .. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality."
64
+ Internet Mathematics 10.3-4 (2014): 222-262.
65
+ """
66
+
67
+ nbunch = set(G.nbunch_iter(nbunch)) if nbunch is not None else set(G.nodes)
68
+ sources = set(G.nbunch_iter(sources)) if sources is not None else G.nodes
69
+
70
+ spl = partial(nx.shortest_path_length, G, weight=distance)
71
+ centrality = {u: 0 for u in nbunch}
72
+ for v in sources:
73
+ dist = spl(v)
74
+ for u in nbunch.intersection(dist):
75
+ d = dist[u]
76
+ if d == 0: # handle u == v and edges with 0 weight
77
+ continue
78
+ centrality[u] += 1 / d
79
+
80
+ return centrality
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/katz.py ADDED
@@ -0,0 +1,330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Katz centrality."""
2
+ import math
3
+
4
+ import networkx as nx
5
+ from networkx.utils import not_implemented_for
6
+
7
+ __all__ = ["katz_centrality", "katz_centrality_numpy"]
8
+
9
+
10
+ @not_implemented_for("multigraph")
11
+ @nx._dispatchable(edge_attrs="weight")
12
+ def katz_centrality(
13
+ G,
14
+ alpha=0.1,
15
+ beta=1.0,
16
+ max_iter=1000,
17
+ tol=1.0e-6,
18
+ nstart=None,
19
+ normalized=True,
20
+ weight=None,
21
+ ):
22
+ r"""Compute the Katz centrality for the nodes of the graph G.
23
+
24
+ Katz centrality computes the centrality for a node based on the centrality
25
+ of its neighbors. It is a generalization of the eigenvector centrality. The
26
+ Katz centrality for node $i$ is
27
+
28
+ .. math::
29
+
30
+ x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
31
+
32
+ where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$.
33
+
34
+ The parameter $\beta$ controls the initial centrality and
35
+
36
+ .. math::
37
+
38
+ \alpha < \frac{1}{\lambda_{\max}}.
39
+
40
+ Katz centrality computes the relative influence of a node within a
41
+ network by measuring the number of the immediate neighbors (first
42
+ degree nodes) and also all other nodes in the network that connect
43
+ to the node under consideration through these immediate neighbors.
44
+
45
+ Extra weight can be provided to immediate neighbors through the
46
+ parameter $\beta$. Connections made with distant neighbors
47
+ are, however, penalized by an attenuation factor $\alpha$ which
48
+ should be strictly less than the inverse largest eigenvalue of the
49
+ adjacency matrix in order for the Katz centrality to be computed
50
+ correctly. More information is provided in [1]_.
51
+
52
+ Parameters
53
+ ----------
54
+ G : graph
55
+ A NetworkX graph.
56
+
57
+ alpha : float, optional (default=0.1)
58
+ Attenuation factor
59
+
60
+ beta : scalar or dictionary, optional (default=1.0)
61
+ Weight attributed to the immediate neighborhood. If not a scalar, the
62
+ dictionary must have a value for every node.
63
+
64
+ max_iter : integer, optional (default=1000)
65
+ Maximum number of iterations in power method.
66
+
67
+ tol : float, optional (default=1.0e-6)
68
+ Error tolerance used to check convergence in power method iteration.
69
+
70
+ nstart : dictionary, optional
71
+ Starting value of Katz iteration for each node.
72
+
73
+ normalized : bool, optional (default=True)
74
+ If True normalize the resulting values.
75
+
76
+ weight : None or string, optional (default=None)
77
+ If None, all edge weights are considered equal.
78
+ Otherwise holds the name of the edge attribute used as weight.
79
+ In this measure the weight is interpreted as the connection strength.
80
+
81
+ Returns
82
+ -------
83
+ nodes : dictionary
84
+ Dictionary of nodes with Katz centrality as the value.
85
+
86
+ Raises
87
+ ------
88
+ NetworkXError
89
+ If the parameter `beta` is not a scalar but lacks a value for at least
90
+ one node
91
+
92
+ PowerIterationFailedConvergence
93
+ If the algorithm fails to converge to the specified tolerance
94
+ within the specified number of iterations of the power iteration
95
+ method.
96
+
97
+ Examples
98
+ --------
99
+ >>> import math
100
+ >>> G = nx.path_graph(4)
101
+ >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix
102
+ >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01)
103
+ >>> for n, c in sorted(centrality.items()):
104
+ ... print(f"{n} {c:.2f}")
105
+ 0 0.37
106
+ 1 0.60
107
+ 2 0.60
108
+ 3 0.37
109
+
110
+ See Also
111
+ --------
112
+ katz_centrality_numpy
113
+ eigenvector_centrality
114
+ eigenvector_centrality_numpy
115
+ :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
116
+ :func:`~networkx.algorithms.link_analysis.hits_alg.hits`
117
+
118
+ Notes
119
+ -----
120
+ Katz centrality was introduced by [2]_.
121
+
122
+ This algorithm it uses the power method to find the eigenvector
123
+ corresponding to the largest eigenvalue of the adjacency matrix of ``G``.
124
+ The parameter ``alpha`` should be strictly less than the inverse of largest
125
+ eigenvalue of the adjacency matrix for the algorithm to converge.
126
+ You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest
127
+ eigenvalue of the adjacency matrix.
128
+ The iteration will stop after ``max_iter`` iterations or an error tolerance of
129
+ ``number_of_nodes(G) * tol`` has been reached.
130
+
131
+ For strongly connected graphs, as $\alpha \to 1/\lambda_{\max}$, and $\beta > 0$,
132
+ Katz centrality approaches the results for eigenvector centrality.
133
+
134
+ For directed graphs this finds "left" eigenvectors which corresponds
135
+ to the in-edges in the graph. For out-edges Katz centrality,
136
+ first reverse the graph with ``G.reverse()``.
137
+
138
+ References
139
+ ----------
140
+ .. [1] Mark E. J. Newman:
141
+ Networks: An Introduction.
142
+ Oxford University Press, USA, 2010, p. 720.
143
+ .. [2] Leo Katz:
144
+ A New Status Index Derived from Sociometric Index.
145
+ Psychometrika 18(1):39–43, 1953
146
+ https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
147
+ """
148
+ if len(G) == 0:
149
+ return {}
150
+
151
+ nnodes = G.number_of_nodes()
152
+
153
+ if nstart is None:
154
+ # choose starting vector with entries of 0
155
+ x = {n: 0 for n in G}
156
+ else:
157
+ x = nstart
158
+
159
+ try:
160
+ b = dict.fromkeys(G, float(beta))
161
+ except (TypeError, ValueError, AttributeError) as err:
162
+ b = beta
163
+ if set(beta) != set(G):
164
+ raise nx.NetworkXError(
165
+ "beta dictionary must have a value for every node"
166
+ ) from err
167
+
168
+ # make up to max_iter iterations
169
+ for _ in range(max_iter):
170
+ xlast = x
171
+ x = dict.fromkeys(xlast, 0)
172
+ # do the multiplication y^T = Alpha * x^T A + Beta
173
+ for n in x:
174
+ for nbr in G[n]:
175
+ x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)
176
+ for n in x:
177
+ x[n] = alpha * x[n] + b[n]
178
+
179
+ # check convergence
180
+ error = sum(abs(x[n] - xlast[n]) for n in x)
181
+ if error < nnodes * tol:
182
+ if normalized:
183
+ # normalize vector
184
+ try:
185
+ s = 1.0 / math.hypot(*x.values())
186
+ except ZeroDivisionError:
187
+ s = 1.0
188
+ else:
189
+ s = 1
190
+ for n in x:
191
+ x[n] *= s
192
+ return x
193
+ raise nx.PowerIterationFailedConvergence(max_iter)
194
+
195
+
196
+ @not_implemented_for("multigraph")
197
+ @nx._dispatchable(edge_attrs="weight")
198
+ def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None):
199
+ r"""Compute the Katz centrality for the graph G.
200
+
201
+ Katz centrality computes the centrality for a node based on the centrality
202
+ of its neighbors. It is a generalization of the eigenvector centrality. The
203
+ Katz centrality for node $i$ is
204
+
205
+ .. math::
206
+
207
+ x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
208
+
209
+ where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$.
210
+
211
+ The parameter $\beta$ controls the initial centrality and
212
+
213
+ .. math::
214
+
215
+ \alpha < \frac{1}{\lambda_{\max}}.
216
+
217
+ Katz centrality computes the relative influence of a node within a
218
+ network by measuring the number of the immediate neighbors (first
219
+ degree nodes) and also all other nodes in the network that connect
220
+ to the node under consideration through these immediate neighbors.
221
+
222
+ Extra weight can be provided to immediate neighbors through the
223
+ parameter $\beta$. Connections made with distant neighbors
224
+ are, however, penalized by an attenuation factor $\alpha$ which
225
+ should be strictly less than the inverse largest eigenvalue of the
226
+ adjacency matrix in order for the Katz centrality to be computed
227
+ correctly. More information is provided in [1]_.
228
+
229
+ Parameters
230
+ ----------
231
+ G : graph
232
+ A NetworkX graph
233
+
234
+ alpha : float
235
+ Attenuation factor
236
+
237
+ beta : scalar or dictionary, optional (default=1.0)
238
+ Weight attributed to the immediate neighborhood. If not a scalar the
239
+ dictionary must have an value for every node.
240
+
241
+ normalized : bool
242
+ If True normalize the resulting values.
243
+
244
+ weight : None or string, optional
245
+ If None, all edge weights are considered equal.
246
+ Otherwise holds the name of the edge attribute used as weight.
247
+ In this measure the weight is interpreted as the connection strength.
248
+
249
+ Returns
250
+ -------
251
+ nodes : dictionary
252
+ Dictionary of nodes with Katz centrality as the value.
253
+
254
+ Raises
255
+ ------
256
+ NetworkXError
257
+ If the parameter `beta` is not a scalar but lacks a value for at least
258
+ one node
259
+
260
+ Examples
261
+ --------
262
+ >>> import math
263
+ >>> G = nx.path_graph(4)
264
+ >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix
265
+ >>> centrality = nx.katz_centrality_numpy(G, 1 / phi)
266
+ >>> for n, c in sorted(centrality.items()):
267
+ ... print(f"{n} {c:.2f}")
268
+ 0 0.37
269
+ 1 0.60
270
+ 2 0.60
271
+ 3 0.37
272
+
273
+ See Also
274
+ --------
275
+ katz_centrality
276
+ eigenvector_centrality_numpy
277
+ eigenvector_centrality
278
+ :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
279
+ :func:`~networkx.algorithms.link_analysis.hits_alg.hits`
280
+
281
+ Notes
282
+ -----
283
+ Katz centrality was introduced by [2]_.
284
+
285
+ This algorithm uses a direct linear solver to solve the above equation.
286
+ The parameter ``alpha`` should be strictly less than the inverse of largest
287
+ eigenvalue of the adjacency matrix for there to be a solution.
288
+ You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest
289
+ eigenvalue of the adjacency matrix.
290
+
291
+ For strongly connected graphs, as $\alpha \to 1/\lambda_{\max}$, and $\beta > 0$,
292
+ Katz centrality approaches the results for eigenvector centrality.
293
+
294
+ For directed graphs this finds "left" eigenvectors which corresponds
295
+ to the in-edges in the graph. For out-edges Katz centrality,
296
+ first reverse the graph with ``G.reverse()``.
297
+
298
+ References
299
+ ----------
300
+ .. [1] Mark E. J. Newman:
301
+ Networks: An Introduction.
302
+ Oxford University Press, USA, 2010, p. 173.
303
+ .. [2] Leo Katz:
304
+ A New Status Index Derived from Sociometric Index.
305
+ Psychometrika 18(1):39–43, 1953
306
+ https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
307
+ """
308
+ import numpy as np
309
+
310
+ if len(G) == 0:
311
+ return {}
312
+ try:
313
+ nodelist = beta.keys()
314
+ if set(nodelist) != set(G):
315
+ raise nx.NetworkXError("beta dictionary must have a value for every node")
316
+ b = np.array(list(beta.values()), dtype=float)
317
+ except AttributeError:
318
+ nodelist = list(G)
319
+ try:
320
+ b = np.ones((len(nodelist), 1)) * beta
321
+ except (TypeError, ValueError, AttributeError) as err:
322
+ raise nx.NetworkXError("beta must be a number") from err
323
+
324
+ A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight).todense().T
325
+ n = A.shape[0]
326
+ centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b).squeeze()
327
+
328
+ # Normalize: rely on truediv to cast to float, then tolist to make Python numbers
329
+ norm = np.sign(sum(centrality)) * np.linalg.norm(centrality) if normalized else 1
330
+ return dict(zip(nodelist, (centrality / norm).tolist()))
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/laplacian.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Laplacian centrality measures.
3
+ """
4
+ import networkx as nx
5
+
6
+ __all__ = ["laplacian_centrality"]
7
+
8
+
9
+ @nx._dispatchable(edge_attrs="weight")
10
+ def laplacian_centrality(
11
+ G, normalized=True, nodelist=None, weight="weight", walk_type=None, alpha=0.95
12
+ ):
13
+ r"""Compute the Laplacian centrality for nodes in the graph `G`.
14
+
15
+ The Laplacian Centrality of a node ``i`` is measured by the drop in the
16
+ Laplacian Energy after deleting node ``i`` from the graph. The Laplacian Energy
17
+ is the sum of the squared eigenvalues of a graph's Laplacian matrix.
18
+
19
+ .. math::
20
+
21
+ C_L(u_i,G) = \frac{(\Delta E)_i}{E_L (G)} = \frac{E_L (G)-E_L (G_i)}{E_L (G)}
22
+
23
+ E_L (G) = \sum_{i=0}^n \lambda_i^2
24
+
25
+ Where $E_L (G)$ is the Laplacian energy of graph `G`,
26
+ E_L (G_i) is the Laplacian energy of graph `G` after deleting node ``i``
27
+ and $\lambda_i$ are the eigenvalues of `G`'s Laplacian matrix.
28
+ This formula shows the normalized value. Without normalization,
29
+ the numerator on the right side is returned.
30
+
31
+ Parameters
32
+ ----------
33
+ G : graph
34
+ A networkx graph
35
+
36
+ normalized : bool (default = True)
37
+ If True the centrality score is scaled so the sum over all nodes is 1.
38
+ If False the centrality score for each node is the drop in Laplacian
39
+ energy when that node is removed.
40
+
41
+ nodelist : list, optional (default = None)
42
+ The rows and columns are ordered according to the nodes in nodelist.
43
+ If nodelist is None, then the ordering is produced by G.nodes().
44
+
45
+ weight: string or None, optional (default=`weight`)
46
+ Optional parameter `weight` to compute the Laplacian matrix.
47
+ The edge data key used to compute each value in the matrix.
48
+ If None, then each edge has weight 1.
49
+
50
+ walk_type : string or None, optional (default=None)
51
+ Optional parameter `walk_type` used when calling
52
+ :func:`directed_laplacian_matrix <networkx.directed_laplacian_matrix>`.
53
+ One of ``"random"``, ``"lazy"``, or ``"pagerank"``. If ``walk_type=None``
54
+ (the default), then a value is selected according to the properties of `G`:
55
+ - ``walk_type="random"`` if `G` is strongly connected and aperiodic
56
+ - ``walk_type="lazy"`` if `G` is strongly connected but not aperiodic
57
+ - ``walk_type="pagerank"`` for all other cases.
58
+
59
+ alpha : real (default = 0.95)
60
+ Optional parameter `alpha` used when calling
61
+ :func:`directed_laplacian_matrix <networkx.directed_laplacian_matrix>`.
62
+ (1 - alpha) is the teleportation probability used with pagerank.
63
+
64
+ Returns
65
+ -------
66
+ nodes : dictionary
67
+ Dictionary of nodes with Laplacian centrality as the value.
68
+
69
+ Examples
70
+ --------
71
+ >>> G = nx.Graph()
72
+ >>> edges = [(0, 1, 4), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 2), (4, 5, 1)]
73
+ >>> G.add_weighted_edges_from(edges)
74
+ >>> sorted((v, f"{c:0.2f}") for v, c in laplacian_centrality(G).items())
75
+ [(0, '0.70'), (1, '0.90'), (2, '0.28'), (3, '0.22'), (4, '0.26'), (5, '0.04')]
76
+
77
+ Notes
78
+ -----
79
+ The algorithm is implemented based on [1]_ with an extension to directed graphs
80
+ using the ``directed_laplacian_matrix`` function.
81
+
82
+ Raises
83
+ ------
84
+ NetworkXPointlessConcept
85
+ If the graph `G` is the null graph.
86
+ ZeroDivisionError
87
+ If the graph `G` has no edges (is empty) and normalization is requested.
88
+
89
+ References
90
+ ----------
91
+ .. [1] Qi, X., Fuller, E., Wu, Q., Wu, Y., and Zhang, C.-Q. (2012).
92
+ Laplacian centrality: A new centrality measure for weighted networks.
93
+ Information Sciences, 194:240-253.
94
+ https://math.wvu.edu/~cqzhang/Publication-files/my-paper/INS-2012-Laplacian-W.pdf
95
+
96
+ See Also
97
+ --------
98
+ :func:`~networkx.linalg.laplacianmatrix.directed_laplacian_matrix`
99
+ :func:`~networkx.linalg.laplacianmatrix.laplacian_matrix`
100
+ """
101
+ import numpy as np
102
+ import scipy as sp
103
+
104
+ if len(G) == 0:
105
+ raise nx.NetworkXPointlessConcept("null graph has no centrality defined")
106
+ if G.size(weight=weight) == 0:
107
+ if normalized:
108
+ raise ZeroDivisionError("graph with no edges has zero full energy")
109
+ return {n: 0 for n in G}
110
+
111
+ if nodelist is not None:
112
+ nodeset = set(G.nbunch_iter(nodelist))
113
+ if len(nodeset) != len(nodelist):
114
+ raise nx.NetworkXError("nodelist has duplicate nodes or nodes not in G")
115
+ nodes = nodelist + [n for n in G if n not in nodeset]
116
+ else:
117
+ nodelist = nodes = list(G)
118
+
119
+ if G.is_directed():
120
+ lap_matrix = nx.directed_laplacian_matrix(G, nodes, weight, walk_type, alpha)
121
+ else:
122
+ lap_matrix = nx.laplacian_matrix(G, nodes, weight).toarray()
123
+
124
+ full_energy = np.power(sp.linalg.eigh(lap_matrix, eigvals_only=True), 2).sum()
125
+
126
+ # calculate laplacian centrality
127
+ laplace_centralities_dict = {}
128
+ for i, node in enumerate(nodelist):
129
+ # remove row and col i from lap_matrix
130
+ all_but_i = list(np.arange(lap_matrix.shape[0]))
131
+ all_but_i.remove(i)
132
+ A_2 = lap_matrix[all_but_i, :][:, all_but_i]
133
+
134
+ # Adjust diagonal for removed row
135
+ new_diag = lap_matrix.diagonal() - abs(lap_matrix[:, i])
136
+ np.fill_diagonal(A_2, new_diag[all_but_i])
137
+
138
+ if len(all_but_i) > 0: # catches degenerate case of single node
139
+ new_energy = np.power(sp.linalg.eigh(A_2, eigvals_only=True), 2).sum()
140
+ else:
141
+ new_energy = 0.0
142
+
143
+ lapl_cent = full_energy - new_energy
144
+ if normalized:
145
+ lapl_cent = lapl_cent / full_energy
146
+
147
+ laplace_centralities_dict[node] = float(lapl_cent)
148
+
149
+ return laplace_centralities_dict
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/load.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Load centrality."""
2
+ from operator import itemgetter
3
+
4
+ import networkx as nx
5
+
6
+ __all__ = ["load_centrality", "edge_load_centrality"]
7
+
8
+
9
+ @nx._dispatchable(edge_attrs="weight")
10
+ def newman_betweenness_centrality(G, v=None, cutoff=None, normalized=True, weight=None):
11
+ """Compute load centrality for nodes.
12
+
13
+ The load centrality of a node is the fraction of all shortest
14
+ paths that pass through that node.
15
+
16
+ Parameters
17
+ ----------
18
+ G : graph
19
+ A networkx graph.
20
+
21
+ normalized : bool, optional (default=True)
22
+ If True the betweenness values are normalized by b=b/(n-1)(n-2) where
23
+ n is the number of nodes in G.
24
+
25
+ weight : None or string, optional (default=None)
26
+ If None, edge weights are ignored.
27
+ Otherwise holds the name of the edge attribute used as weight.
28
+ The weight of an edge is treated as the length or distance between the two sides.
29
+
30
+ cutoff : bool, optional (default=None)
31
+ If specified, only consider paths of length <= cutoff.
32
+
33
+ Returns
34
+ -------
35
+ nodes : dictionary
36
+ Dictionary of nodes with centrality as the value.
37
+
38
+ See Also
39
+ --------
40
+ betweenness_centrality
41
+
42
+ Notes
43
+ -----
44
+ Load centrality is slightly different than betweenness. It was originally
45
+ introduced by [2]_. For this load algorithm see [1]_.
46
+
47
+ References
48
+ ----------
49
+ .. [1] Mark E. J. Newman:
50
+ Scientific collaboration networks. II.
51
+ Shortest paths, weighted networks, and centrality.
52
+ Physical Review E 64, 016132, 2001.
53
+ http://journals.aps.org/pre/abstract/10.1103/PhysRevE.64.016132
54
+ .. [2] Kwang-Il Goh, Byungnam Kahng and Doochul Kim
55
+ Universal behavior of Load Distribution in Scale-Free Networks.
56
+ Physical Review Letters 87(27):1–4, 2001.
57
+ https://doi.org/10.1103/PhysRevLett.87.278701
58
+ """
59
+ if v is not None: # only one node
60
+ betweenness = 0.0
61
+ for source in G:
62
+ ubetween = _node_betweenness(G, source, cutoff, False, weight)
63
+ betweenness += ubetween[v] if v in ubetween else 0
64
+ if normalized:
65
+ order = G.order()
66
+ if order <= 2:
67
+ return betweenness # no normalization b=0 for all nodes
68
+ betweenness *= 1.0 / ((order - 1) * (order - 2))
69
+ else:
70
+ betweenness = {}.fromkeys(G, 0.0)
71
+ for source in betweenness:
72
+ ubetween = _node_betweenness(G, source, cutoff, False, weight)
73
+ for vk in ubetween:
74
+ betweenness[vk] += ubetween[vk]
75
+ if normalized:
76
+ order = G.order()
77
+ if order <= 2:
78
+ return betweenness # no normalization b=0 for all nodes
79
+ scale = 1.0 / ((order - 1) * (order - 2))
80
+ for v in betweenness:
81
+ betweenness[v] *= scale
82
+ return betweenness # all nodes
83
+
84
+
85
+ def _node_betweenness(G, source, cutoff=False, normalized=True, weight=None):
86
+ """Node betweenness_centrality helper:
87
+
88
+ See betweenness_centrality for what you probably want.
89
+ This actually computes "load" and not betweenness.
90
+ See https://networkx.lanl.gov/ticket/103
91
+
92
+ This calculates the load of each node for paths from a single source.
93
+ (The fraction of number of shortests paths from source that go
94
+ through each node.)
95
+
96
+ To get the load for a node you need to do all-pairs shortest paths.
97
+
98
+ If weight is not None then use Dijkstra for finding shortest paths.
99
+ """
100
+ # get the predecessor and path length data
101
+ if weight is None:
102
+ (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True)
103
+ else:
104
+ (pred, length) = nx.dijkstra_predecessor_and_distance(G, source, cutoff, weight)
105
+
106
+ # order the nodes by path length
107
+ onodes = [(l, vert) for (vert, l) in length.items()]
108
+ onodes.sort()
109
+ onodes[:] = [vert for (l, vert) in onodes if l > 0]
110
+
111
+ # initialize betweenness
112
+ between = {}.fromkeys(length, 1.0)
113
+
114
+ while onodes:
115
+ v = onodes.pop()
116
+ if v in pred:
117
+ num_paths = len(pred[v]) # Discount betweenness if more than
118
+ for x in pred[v]: # one shortest path.
119
+ if x == source: # stop if hit source because all remaining v
120
+ break # also have pred[v]==[source]
121
+ between[x] += between[v] / num_paths
122
+ # remove source
123
+ for v in between:
124
+ between[v] -= 1
125
+ # rescale to be between 0 and 1
126
+ if normalized:
127
+ l = len(between)
128
+ if l > 2:
129
+ # scale by 1/the number of possible paths
130
+ scale = 1 / ((l - 1) * (l - 2))
131
+ for v in between:
132
+ between[v] *= scale
133
+ return between
134
+
135
+
136
+ load_centrality = newman_betweenness_centrality
137
+
138
+
139
+ @nx._dispatchable
140
+ def edge_load_centrality(G, cutoff=False):
141
+ """Compute edge load.
142
+
143
+ WARNING: This concept of edge load has not been analysed
144
+ or discussed outside of NetworkX that we know of.
145
+ It is based loosely on load_centrality in the sense that
146
+ it counts the number of shortest paths which cross each edge.
147
+ This function is for demonstration and testing purposes.
148
+
149
+ Parameters
150
+ ----------
151
+ G : graph
152
+ A networkx graph
153
+
154
+ cutoff : bool, optional (default=False)
155
+ If specified, only consider paths of length <= cutoff.
156
+
157
+ Returns
158
+ -------
159
+ A dict keyed by edge 2-tuple to the number of shortest paths
160
+ which use that edge. Where more than one path is shortest
161
+ the count is divided equally among paths.
162
+ """
163
+ betweenness = {}
164
+ for u, v in G.edges():
165
+ betweenness[(u, v)] = 0.0
166
+ betweenness[(v, u)] = 0.0
167
+
168
+ for source in G:
169
+ ubetween = _edge_betweenness(G, source, cutoff=cutoff)
170
+ for e, ubetweenv in ubetween.items():
171
+ betweenness[e] += ubetweenv # cumulative total
172
+ return betweenness
173
+
174
+
175
+ def _edge_betweenness(G, source, nodes=None, cutoff=False):
176
+ """Edge betweenness helper."""
177
+ # get the predecessor data
178
+ (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True)
179
+ # order the nodes by path length
180
+ onodes = [n for n, d in sorted(length.items(), key=itemgetter(1))]
181
+ # initialize betweenness, doesn't account for any edge weights
182
+ between = {}
183
+ for u, v in G.edges(nodes):
184
+ between[(u, v)] = 1.0
185
+ between[(v, u)] = 1.0
186
+
187
+ while onodes: # work through all paths
188
+ v = onodes.pop()
189
+ if v in pred:
190
+ # Discount betweenness if more than one shortest path.
191
+ num_paths = len(pred[v])
192
+ for w in pred[v]:
193
+ if w in pred:
194
+ # Discount betweenness, mult path
195
+ num_paths = len(pred[w])
196
+ for x in pred[w]:
197
+ between[(w, x)] += between[(v, w)] / num_paths
198
+ between[(x, w)] += between[(w, v)] / num_paths
199
+ return between
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/percolation.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Percolation centrality measures."""
2
+
3
+ import networkx as nx
4
+ from networkx.algorithms.centrality.betweenness import (
5
+ _single_source_dijkstra_path_basic as dijkstra,
6
+ )
7
+ from networkx.algorithms.centrality.betweenness import (
8
+ _single_source_shortest_path_basic as shortest_path,
9
+ )
10
+
11
+ __all__ = ["percolation_centrality"]
12
+
13
+
14
+ @nx._dispatchable(node_attrs="attribute", edge_attrs="weight")
15
+ def percolation_centrality(G, attribute="percolation", states=None, weight=None):
16
+ r"""Compute the percolation centrality for nodes.
17
+
18
+ Percolation centrality of a node $v$, at a given time, is defined
19
+ as the proportion of ‘percolated paths’ that go through that node.
20
+
21
+ This measure quantifies relative impact of nodes based on their
22
+ topological connectivity, as well as their percolation states.
23
+
24
+ Percolation states of nodes are used to depict network percolation
25
+ scenarios (such as during infection transmission in a social network
26
+ of individuals, spreading of computer viruses on computer networks, or
27
+ transmission of disease over a network of towns) over time. In this
28
+ measure usually the percolation state is expressed as a decimal
29
+ between 0.0 and 1.0.
30
+
31
+ When all nodes are in the same percolated state this measure is
32
+ equivalent to betweenness centrality.
33
+
34
+ Parameters
35
+ ----------
36
+ G : graph
37
+ A NetworkX graph.
38
+
39
+ attribute : None or string, optional (default='percolation')
40
+ Name of the node attribute to use for percolation state, used
41
+ if `states` is None. If a node does not set the attribute the
42
+ state of that node will be set to the default value of 1.
43
+ If all nodes do not have the attribute all nodes will be set to
44
+ 1 and the centrality measure will be equivalent to betweenness centrality.
45
+
46
+ states : None or dict, optional (default=None)
47
+ Specify percolation states for the nodes, nodes as keys states
48
+ as values.
49
+
50
+ weight : None or string, optional (default=None)
51
+ If None, all edge weights are considered equal.
52
+ Otherwise holds the name of the edge attribute used as weight.
53
+ The weight of an edge is treated as the length or distance between the two sides.
54
+
55
+
56
+ Returns
57
+ -------
58
+ nodes : dictionary
59
+ Dictionary of nodes with percolation centrality as the value.
60
+
61
+ See Also
62
+ --------
63
+ betweenness_centrality
64
+
65
+ Notes
66
+ -----
67
+ The algorithm is from Mahendra Piraveenan, Mikhail Prokopenko, and
68
+ Liaquat Hossain [1]_
69
+ Pair dependencies are calculated and accumulated using [2]_
70
+
71
+ For weighted graphs the edge weights must be greater than zero.
72
+ Zero edge weights can produce an infinite number of equal length
73
+ paths between pairs of nodes.
74
+
75
+ References
76
+ ----------
77
+ .. [1] Mahendra Piraveenan, Mikhail Prokopenko, Liaquat Hossain
78
+ Percolation Centrality: Quantifying Graph-Theoretic Impact of Nodes
79
+ during Percolation in Networks
80
+ http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0053095
81
+ .. [2] Ulrik Brandes:
82
+ A Faster Algorithm for Betweenness Centrality.
83
+ Journal of Mathematical Sociology 25(2):163-177, 2001.
84
+ https://doi.org/10.1080/0022250X.2001.9990249
85
+ """
86
+ percolation = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
87
+
88
+ nodes = G
89
+
90
+ if states is None:
91
+ states = nx.get_node_attributes(nodes, attribute, default=1)
92
+
93
+ # sum of all percolation states
94
+ p_sigma_x_t = 0.0
95
+ for v in states.values():
96
+ p_sigma_x_t += v
97
+
98
+ for s in nodes:
99
+ # single source shortest paths
100
+ if weight is None: # use BFS
101
+ S, P, sigma, _ = shortest_path(G, s)
102
+ else: # use Dijkstra's algorithm
103
+ S, P, sigma, _ = dijkstra(G, s, weight)
104
+ # accumulation
105
+ percolation = _accumulate_percolation(
106
+ percolation, S, P, sigma, s, states, p_sigma_x_t
107
+ )
108
+
109
+ n = len(G)
110
+
111
+ for v in percolation:
112
+ percolation[v] *= 1 / (n - 2)
113
+
114
+ return percolation
115
+
116
+
117
+ def _accumulate_percolation(percolation, S, P, sigma, s, states, p_sigma_x_t):
118
+ delta = dict.fromkeys(S, 0)
119
+ while S:
120
+ w = S.pop()
121
+ coeff = (1 + delta[w]) / sigma[w]
122
+ for v in P[w]:
123
+ delta[v] += sigma[v] * coeff
124
+ if w != s:
125
+ # percolation weight
126
+ pw_s_w = states[s] / (p_sigma_x_t - states[w])
127
+ percolation[w] += delta[w] * pw_s_w
128
+ return percolation
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/reaching.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Functions for computing reaching centrality of a node or a graph."""
2
+
3
+ import networkx as nx
4
+ from networkx.utils import pairwise
5
+
6
+ __all__ = ["global_reaching_centrality", "local_reaching_centrality"]
7
+
8
+
9
+ def _average_weight(G, path, weight=None):
10
+ """Returns the average weight of an edge in a weighted path.
11
+
12
+ Parameters
13
+ ----------
14
+ G : graph
15
+ A networkx graph.
16
+
17
+ path: list
18
+ A list of vertices that define the path.
19
+
20
+ weight : None or string, optional (default=None)
21
+ If None, edge weights are ignored. Then the average weight of an edge
22
+ is assumed to be the multiplicative inverse of the length of the path.
23
+ Otherwise holds the name of the edge attribute used as weight.
24
+ """
25
+ path_length = len(path) - 1
26
+ if path_length <= 0:
27
+ return 0
28
+ if weight is None:
29
+ return 1 / path_length
30
+ total_weight = sum(G.edges[i, j][weight] for i, j in pairwise(path))
31
+ return total_weight / path_length
32
+
33
+
34
@nx._dispatchable(edge_attrs="weight")
def global_reaching_centrality(G, weight=None, normalized=True):
    """Returns the global reaching centrality of a directed graph.

    The *global reaching centrality* of a weighted directed graph is the
    average over all nodes of the difference between the local reaching
    centrality of the node and the greatest local reaching centrality of
    any node in the graph [1]_. For more information on the local
    reaching centrality, see :func:`local_reaching_centrality`.
    Informally, the local reaching centrality is the proportion of the
    graph that is reachable from the neighbors of the node.

    Parameters
    ----------
    G : DiGraph
        A networkx DiGraph.

    weight : None or string, optional (default=None)
        Attribute to use for edge weights. If ``None``, each edge weight
        is assumed to be one. A higher weight implies a stronger
        connection between nodes and a *shorter* path length.

    normalized : bool, optional (default=True)
        Whether to normalize the edge weights by the total sum of edge
        weights.

    Returns
    -------
    h : float
        The global reaching centrality of the graph.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edge(1, 2)
    >>> G.add_edge(1, 3)
    >>> nx.global_reaching_centrality(G)
    1.0
    >>> G.add_edge(3, 2)
    >>> nx.global_reaching_centrality(G)
    0.75

    See also
    --------
    local_reaching_centrality

    References
    ----------
    .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek.
       "Hierarchy Measure for Complex Networks."
       *PLoS ONE* 7.3 (2012): e33799.
       https://doi.org/10.1371/journal.pone.0033799
    """
    if nx.is_negatively_weighted(G, weight=weight):
        raise nx.NetworkXError("edge weights must be positive")
    total_weight = G.size(weight=weight)
    if total_weight <= 0:
        raise nx.NetworkXError("Size of G must be positive")

    # Provided weights express connection *strength* (higher = closer),
    # whereas the shortest-path routines treat the weight as a *distance*
    # (higher = farther), so the weights must be inverted before computing
    # paths. When weight is None the faster unweighted algorithm is used
    # as-is.
    if weight is None:
        shortest_paths = nx.shortest_path(G)
    else:

        def as_distance(u, v, d):
            return total_weight / d.get(weight, 1)

        shortest_paths = nx.shortest_path(G, weight=as_distance)

    # TODO This can be trivially parallelized.
    local_centralities = [
        local_reaching_centrality(
            G, node, paths=paths, weight=weight, normalized=normalized
        )
        for node, paths in shortest_paths.items()
    ]

    top = max(local_centralities)
    return sum(top - c for c in local_centralities) / (len(G) - 1)
120
+
121
+
122
@nx._dispatchable(edge_attrs="weight")
def local_reaching_centrality(G, v, paths=None, weight=None, normalized=True):
    """Returns the local reaching centrality of a node in a directed
    graph.

    The *local reaching centrality* of a node in a directed graph is the
    proportion of other nodes reachable from that node [1]_.

    Parameters
    ----------
    G : DiGraph
        A NetworkX DiGraph.

    v : node
        A node in the directed graph `G`.

    paths : dictionary (default=None)
        If this is not `None` it must be a dictionary representation
        of single-source shortest paths, as computed by, for example,
        :func:`networkx.shortest_path` with source node `v`. Use this
        keyword argument if you intend to invoke this function many
        times but don't want the paths to be recomputed each time.

    weight : None or string, optional (default=None)
        Attribute to use for edge weights. If `None`, each edge weight
        is assumed to be one. A higher weight implies a stronger
        connection between nodes and a *shorter* path length.

    normalized : bool, optional (default=True)
        Whether to normalize the edge weights by the total sum of edge
        weights.

    Returns
    -------
    h : float
        The local reaching centrality of the node ``v`` in the graph
        ``G``.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edges_from([(1, 2), (1, 3)])
    >>> nx.local_reaching_centrality(G, 3)
    0.0
    >>> G.add_edge(3, 2)
    >>> nx.local_reaching_centrality(G, 3)
    0.5

    See also
    --------
    global_reaching_centrality

    References
    ----------
    .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek.
       "Hierarchy Measure for Complex Networks."
       *PLoS ONE* 7.3 (2012): e33799.
       https://doi.org/10.1371/journal.pone.0033799
    """
    if paths is None:
        if nx.is_negatively_weighted(G, weight=weight):
            raise nx.NetworkXError("edge weights must be positive")
        total_weight = G.size(weight=weight)
        if total_weight <= 0:
            raise nx.NetworkXError("Size of G must be positive")
        if weight is None:
            paths = nx.shortest_path(G, source=v)
        else:
            # Weights are connection strengths; invert them into
            # distances for the shortest-path computation.
            def as_distance(u, w, d):
                return total_weight / d.get(weight, 1)

            paths = nx.shortest_path(G, source=v, weight=as_distance)
    # In the unweighted directed case the measure reduces to the fraction
    # of other nodes reachable from ``v``.
    if weight is None and G.is_directed():
        return (len(paths) - 1) / (len(G) - 1)
    norm = G.size(weight=weight) / G.size() if normalized and weight is not None else 1
    # TODO This can be trivially parallelized.
    total = sum(_average_weight(G, path, weight=weight) for path in paths.values())
    return (total / norm) / (len(G) - 1)
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/second_order.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Copyright (c) 2015 – Thomson Licensing, SAS
2
+
3
+ Redistribution and use in source and binary forms, with or without
4
+ modification, are permitted (subject to the limitations in the
5
+ disclaimer below) provided that the following conditions are met:
6
+
7
+ * Redistributions of source code must retain the above copyright
8
+ notice, this list of conditions and the following disclaimer.
9
+
10
+ * Redistributions in binary form must reproduce the above copyright
11
+ notice, this list of conditions and the following disclaimer in the
12
+ documentation and/or other materials provided with the distribution.
13
+
14
+ * Neither the name of Thomson Licensing, or Technicolor, nor the names
15
+ of its contributors may be used to endorse or promote products derived
16
+ from this software without specific prior written permission.
17
+
18
+ NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
19
+ GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
20
+ HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
21
+ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
27
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
28
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
29
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
30
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
+ """
32
+
33
+ import networkx as nx
34
+ from networkx.utils import not_implemented_for
35
+
36
+ # Authors: Erwan Le Merrer (erwan.lemerrer@technicolor.com)
37
+
38
+ __all__ = ["second_order_centrality"]
39
+
40
+
41
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def second_order_centrality(G, weight="weight"):
    """Compute the second order centrality for nodes of G.

    The second order centrality of a given node is the standard deviation of
    the return times to that node of a perpetual random walk on G:

    Parameters
    ----------
    G : graph
        A NetworkX connected and undirected graph.

    weight : string or None, optional (default="weight")
        The name of an edge attribute that holds the numerical value
        used as a weight. If None then each edge has weight 1.

    Returns
    -------
    nodes : dictionary
        Dictionary keyed by node with second order centrality as the value.

    Examples
    --------
    >>> G = nx.star_graph(10)
    >>> soc = nx.second_order_centrality(G)
    >>> print(sorted(soc.items(), key=lambda x: x[1])[0][0])  # pick first id
    0

    Raises
    ------
    NetworkXException
        If the graph G is empty, non connected or has negative weights.

    See Also
    --------
    betweenness_centrality

    Notes
    -----
    Lower values of second order centrality indicate higher centrality.

    The algorithm is from Kermarrec, Le Merrer, Sericola and Trédan [1]_.

    This code implements the analytical version of the algorithm, i.e.,
    there is no simulation of a random walk process involved. The random walk
    is here unbiased (corresponding to eq 6 of the paper [1]_), thus the
    centrality values are the standard deviations for random walk return times
    on the transformed input graph G (equal in-degree at each nodes by adding
    self-loops).

    Complexity of this implementation, made to run locally on a single machine,
    is O(n^3), with n the size of G, which makes it viable only for small
    graphs.

    References
    ----------
    .. [1] Anne-Marie Kermarrec, Erwan Le Merrer, Bruno Sericola, Gilles Trédan
       "Second order centrality: Distributed assessment of nodes criticity in
       complex networks", Elsevier Computer Communications 34(5):619-628, 2011.
    """
    import numpy as np

    n = len(G)

    # Validate preconditions: the analytical formulas assume a non-empty,
    # connected graph with non-negative edge weights.
    if n == 0:
        raise nx.NetworkXException("Empty graph.")
    if not nx.is_connected(G):
        raise nx.NetworkXException("Non connected graph.")
    if any(d.get(weight, 0) < 0 for u, v, d in G.edges(data=True)):
        raise nx.NetworkXException("Graph has negative edge weights.")

    # balancing G for Metropolis-Hastings random walks
    # Self-loops pad each node's weighted in-degree up to the maximum so
    # that the walk is unbiased (eq 6 of the paper) on the transformed graph.
    G = nx.DiGraph(G)
    in_deg = dict(G.in_degree(weight=weight))
    d_max = max(in_deg.values())
    for i, deg in in_deg.items():
        if deg < d_max:
            G.add_edge(i, i, weight=d_max - deg)

    P = nx.to_numpy_array(G)
    P /= P.sum(axis=1)[:, np.newaxis]  # to transition probability matrix

    def _Qj(P, j):
        # Transition matrix with all moves into node j removed: the walk
        # is effectively absorbed at j.
        P = P.copy()
        P[:, j] = 0
        return P

    # M[:, i] holds the expected hitting quantities for target node i,
    # obtained by solving (I - Q_i) x = 1 (eq 3).
    M = np.empty([n, n])

    for i in range(n):
        M[:, i] = np.linalg.solve(
            np.identity(n) - _Qj(P, i), np.ones([n, 1])[:, 0]
        )  # eq 3

    # Standard deviation of return times per node (eq 6).
    return dict(
        zip(
            G.nodes,
            (float(np.sqrt(2 * np.sum(M[:, i]) - n * (n + 1))) for i in range(n)),
        )
    )  # eq 6
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/subgraph_alg.py ADDED
@@ -0,0 +1,339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Subraph centrality and communicability betweenness.
3
+ """
4
+ import networkx as nx
5
+ from networkx.utils import not_implemented_for
6
+
7
+ __all__ = [
8
+ "subgraph_centrality_exp",
9
+ "subgraph_centrality",
10
+ "communicability_betweenness_centrality",
11
+ "estrada_index",
12
+ ]
13
+
14
+
15
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def subgraph_centrality_exp(G):
    r"""Returns the subgraph centrality for each node of G.

    Subgraph centrality of a node `n` is the sum of weighted closed
    walks of all lengths starting and ending at node `n`. The weights
    decrease with path length. Each closed walk is associated with a
    connected subgraph ([1]_).

    Parameters
    ----------
    G: graph

    Returns
    -------
    nodes:dictionary
        Dictionary of nodes with subgraph centrality as the value.

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    See Also
    --------
    subgraph_centrality:
        Alternative algorithm of the subgraph centrality for each node of G.

    Notes
    -----
    This version of the algorithm exponentiates the adjacency matrix.

    The subgraph centrality of a node `u` in G can be found using
    the matrix exponential of the adjacency matrix of G [1]_,

    .. math::

        SC(u)=(e^A)_{uu} .

    References
    ----------
    .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
       "Subgraph centrality in complex networks",
       Physical Review E 71, 056103 (2005).
       https://arxiv.org/abs/cond-mat/0504730

    Examples
    --------
    (Example from [1]_)
    >>> G = nx.Graph(
    ...     [
    ...         (1, 2),
    ...         (1, 5),
    ...         (1, 8),
    ...         (2, 3),
    ...         (2, 8),
    ...         (3, 4),
    ...         (3, 6),
    ...         (4, 5),
    ...         (4, 7),
    ...         (5, 6),
    ...         (6, 7),
    ...         (7, 8),
    ...     ]
    ... )
    >>> sc = nx.subgraph_centrality_exp(G)
    >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)])
    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
    """
    # This implementation exponentiates the adjacency matrix directly.
    import scipy as sp

    nodes = list(G)  # fixes the row/column ordering of the matrix
    adj = nx.to_numpy_array(G, nodes)
    adj[adj != 0.0] = 1  # binarize: edge weights are ignored by this measure
    # SC(u) is the u-th diagonal entry of e^A.
    diagonal = sp.linalg.expm(adj).diagonal()
    return dict(zip(nodes, map(float, diagonal)))
97
+
98
+
99
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def subgraph_centrality(G):
    r"""Returns subgraph centrality for each node in G.

    Subgraph centrality of a node `n` is the sum of weighted closed
    walks of all lengths starting and ending at node `n`. The weights
    decrease with path length. Each closed walk is associated with a
    connected subgraph ([1]_).

    Parameters
    ----------
    G: graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with subgraph centrality as the value.

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    See Also
    --------
    subgraph_centrality_exp:
        Alternative algorithm of the subgraph centrality for each node of G.

    Notes
    -----
    This version of the algorithm computes eigenvalues and eigenvectors
    of the adjacency matrix.

    Subgraph centrality of a node `u` in G can be found using
    a spectral decomposition of the adjacency matrix [1]_,

    .. math::

       SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}},

    where `v_j` is an eigenvector of the adjacency matrix `A` of G
    corresponding to the eigenvalue `\lambda_j`.

    Examples
    --------
    (Example from [1]_)
    >>> G = nx.Graph(
    ...     [
    ...         (1, 2),
    ...         (1, 5),
    ...         (1, 8),
    ...         (2, 3),
    ...         (2, 8),
    ...         (3, 4),
    ...         (3, 6),
    ...         (4, 5),
    ...         (4, 7),
    ...         (5, 6),
    ...         (6, 7),
    ...         (7, 8),
    ...     ]
    ... )
    >>> sc = nx.subgraph_centrality(G)
    >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)])
    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']

    References
    ----------
    .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
       "Subgraph centrality in complex networks",
       Physical Review E 71, 056103 (2005).
       https://arxiv.org/abs/cond-mat/0504730

    """
    import numpy as np

    nodes = list(G)  # fixes the row/column ordering of the matrix
    adj = nx.to_numpy_array(G, nodes)
    adj[np.nonzero(adj)] = 1  # binarize: edge weights are ignored by this measure
    # Spectral decomposition of the (symmetric) adjacency matrix.
    eigenvalues, eigenvectors = np.linalg.eigh(adj)
    # SC(u) = sum_j (v_j[u])^2 * e^{lambda_j}, evaluated for all u at once.
    centrality_values = (eigenvectors**2) @ np.exp(eigenvalues)
    return dict(zip(nodes, map(float, centrality_values)))
188
+
189
+
190
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def communicability_betweenness_centrality(G):
    r"""Returns subgraph communicability for all pairs of nodes in G.

    Communicability betweenness measure makes use of the number of walks
    connecting every pair of nodes as the basis of a betweenness centrality
    measure.

    Parameters
    ----------
    G: graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with communicability betweenness as the value.

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    Notes
    -----
    Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges,
    and `A` denote the adjacency matrix of `G`.

    Let `G(r)=(V,E(r))` be the graph resulting from
    removing all edges connected to node `r` but not the node itself.

    The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros
    only in row and column `r`.

    The subgraph betweenness of a node `r` is [1]_

    .. math::

        \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}},
        p\neq q, q\neq r,

    where
    `G_{prq}=(e^{A}_{pq} - (e^{A+E(r)})_{pq}`  is the number of walks
    involving node r,
    `G_{pq}=(e^{A})_{pq}` is the number of closed walks starting
    at node `p` and ending at node `q`,
    and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the
    number of terms in the sum.

    The resulting `\omega_{r}` takes values between zero and one.
    The lower bound cannot be attained for a connected
    graph, and the upper bound is attained in the star graph.

    References
    ----------
    .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano,
       "Communicability Betweenness in Complex Networks"
       Physica A 388 (2009) 764-774.
       https://arxiv.org/abs/0905.4102

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
    >>> cbc = nx.communicability_betweenness_centrality(G)
    >>> print([f"{node} {cbc[node]:0.2f}" for node in sorted(cbc)])
    ['0 0.03', '1 0.45', '2 0.51', '3 0.45', '4 0.40', '5 0.19', '6 0.03']
    """
    import numpy as np
    import scipy as sp

    nodelist = list(G)  # ordering of nodes in matrix
    n = len(nodelist)
    A = nx.to_numpy_array(G, nodelist)
    # convert to 0-1 matrix
    A[np.nonzero(A)] = 1
    expA = sp.linalg.expm(A)
    mapping = dict(zip(nodelist, range(n)))
    cbc = {}
    # For each node v: zero out v's row and column in A (giving G(v)),
    # compare walk counts with and without v, then restore A in place.
    # A is deliberately mutated and restored rather than copied per node.
    for v in G:
        # remove row and col of node v
        i = mapping[v]
        row = A[i, :].copy()
        col = A[:, i].copy()
        A[i, :] = 0
        A[:, i] = 0
        # B[p, q] is the fraction of p->q walks that pass through v.
        B = (expA - sp.linalg.expm(A)) / expA
        # sum with row/col of node v and diag set to zero
        B[i, :] = 0
        B[:, i] = 0
        B -= np.diag(np.diag(B))
        cbc[v] = float(B.sum())
        # put row and col back
        A[i, :] = row
        A[:, i] = col
    # rescale when more than two nodes
    order = len(cbc)
    if order > 2:
        # C = (n-1)^2 - (n-1): the number of (p, q) terms in the sum.
        scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0))
        cbc = {node: value * scale for node, value in cbc.items()}
    return cbc
291
+
292
+
293
@nx._dispatchable
def estrada_index(G):
    r"""Returns the Estrada index of the graph G.

    The Estrada Index is a topological index of folding or 3D "compactness" ([1]_).

    Parameters
    ----------
    G: graph

    Returns
    -------
    estrada index: float

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    Notes
    -----
    Let `G=(V,E)` be a simple undirected graph with `n` nodes and let
    `\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}`
    be a non-increasing ordering of the eigenvalues of its adjacency
    matrix `A`. The Estrada index is ([1]_, [2]_)

    .. math::
        EE(G)=\sum_{j=1}^n e^{\lambda _j}.

    References
    ----------
    .. [1] E. Estrada, "Characterization of 3D molecular structure",
       Chem. Phys. Lett. 319, 713 (2000).
       https://doi.org/10.1016/S0009-2614(00)00158-5
    .. [2] José Antonio de la Peñaa, Ivan Gutman, Juan Rada,
       "Estimating the Estrada index",
       Linear Algebra and its Applications. 427, 1 (2007).
       https://doi.org/10.1016/j.laa.2007.06.020

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
    >>> ei = nx.estrada_index(G)
    >>> print(f"{ei:0.5}")
    20.55
    """
    # EE(G) equals the trace of e^A, i.e. the sum over all nodes of the
    # per-node subgraph centralities.
    centralities = subgraph_centrality(G)
    return sum(centralities.values())
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-310.pyc ADDED
Binary file (2.74 kB). View file
 
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py ADDED
@@ -0,0 +1,780 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import networkx as nx
4
+
5
+
6
def weighted_G():
    """Build the small weighted graph shared by the weighted tests."""
    weighted_edges = [
        (0, 1, 3),
        (0, 2, 2),
        (0, 3, 6),
        (0, 4, 4),
        (1, 3, 5),
        (1, 5, 5),
        (2, 4, 1),
        (3, 4, 2),
        (3, 5, 1),
        (4, 5, 4),
    ]
    G = nx.Graph()
    G.add_weighted_edges_from(weighted_edges)
    return G
19
+
20
+
21
+ class TestBetweennessCentrality:
22
+ def test_K5(self):
23
+ """Betweenness centrality: K5"""
24
+ G = nx.complete_graph(5)
25
+ b = nx.betweenness_centrality(G, weight=None, normalized=False)
26
+ b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
27
+ for n in sorted(G):
28
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
29
+
30
+ def test_K5_endpoints(self):
31
+ """Betweenness centrality: K5 endpoints"""
32
+ G = nx.complete_graph(5)
33
+ b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
34
+ b_answer = {0: 4.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0}
35
+ for n in sorted(G):
36
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
37
+ # normalized = True case
38
+ b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
39
+ b_answer = {0: 0.4, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4}
40
+ for n in sorted(G):
41
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
42
+
43
+ def test_P3_normalized(self):
44
+ """Betweenness centrality: P3 normalized"""
45
+ G = nx.path_graph(3)
46
+ b = nx.betweenness_centrality(G, weight=None, normalized=True)
47
+ b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
48
+ for n in sorted(G):
49
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
50
+
51
+ def test_P3(self):
52
+ """Betweenness centrality: P3"""
53
+ G = nx.path_graph(3)
54
+ b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
55
+ b = nx.betweenness_centrality(G, weight=None, normalized=False)
56
+ for n in sorted(G):
57
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
58
+
59
+ def test_sample_from_P3(self):
60
+ """Betweenness centrality: P3 sample"""
61
+ G = nx.path_graph(3)
62
+ b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
63
+ b = nx.betweenness_centrality(G, k=3, weight=None, normalized=False, seed=1)
64
+ for n in sorted(G):
65
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
66
+ b = nx.betweenness_centrality(G, k=2, weight=None, normalized=False, seed=1)
67
+ # python versions give different results with same seed
68
+ b_approx1 = {0: 0.0, 1: 1.5, 2: 0.0}
69
+ b_approx2 = {0: 0.0, 1: 0.75, 2: 0.0}
70
+ for n in sorted(G):
71
+ assert b[n] in (b_approx1[n], b_approx2[n])
72
+
73
+ def test_P3_endpoints(self):
74
+ """Betweenness centrality: P3 endpoints"""
75
+ G = nx.path_graph(3)
76
+ b_answer = {0: 2.0, 1: 3.0, 2: 2.0}
77
+ b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
78
+ for n in sorted(G):
79
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
80
+ # normalized = True case
81
+ b_answer = {0: 2 / 3, 1: 1.0, 2: 2 / 3}
82
+ b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
83
+ for n in sorted(G):
84
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
85
+
86
+ def test_krackhardt_kite_graph(self):
87
+ """Betweenness centrality: Krackhardt kite graph"""
88
+ G = nx.krackhardt_kite_graph()
89
+ b_answer = {
90
+ 0: 1.667,
91
+ 1: 1.667,
92
+ 2: 0.000,
93
+ 3: 7.333,
94
+ 4: 0.000,
95
+ 5: 16.667,
96
+ 6: 16.667,
97
+ 7: 28.000,
98
+ 8: 16.000,
99
+ 9: 0.000,
100
+ }
101
+ for b in b_answer:
102
+ b_answer[b] /= 2
103
+ b = nx.betweenness_centrality(G, weight=None, normalized=False)
104
+ for n in sorted(G):
105
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
106
+
107
+ def test_krackhardt_kite_graph_normalized(self):
108
+ """Betweenness centrality: Krackhardt kite graph normalized"""
109
+ G = nx.krackhardt_kite_graph()
110
+ b_answer = {
111
+ 0: 0.023,
112
+ 1: 0.023,
113
+ 2: 0.000,
114
+ 3: 0.102,
115
+ 4: 0.000,
116
+ 5: 0.231,
117
+ 6: 0.231,
118
+ 7: 0.389,
119
+ 8: 0.222,
120
+ 9: 0.000,
121
+ }
122
+ b = nx.betweenness_centrality(G, weight=None, normalized=True)
123
+ for n in sorted(G):
124
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
125
+
126
+ def test_florentine_families_graph(self):
127
+ """Betweenness centrality: Florentine families graph"""
128
+ G = nx.florentine_families_graph()
129
+ b_answer = {
130
+ "Acciaiuoli": 0.000,
131
+ "Albizzi": 0.212,
132
+ "Barbadori": 0.093,
133
+ "Bischeri": 0.104,
134
+ "Castellani": 0.055,
135
+ "Ginori": 0.000,
136
+ "Guadagni": 0.255,
137
+ "Lamberteschi": 0.000,
138
+ "Medici": 0.522,
139
+ "Pazzi": 0.000,
140
+ "Peruzzi": 0.022,
141
+ "Ridolfi": 0.114,
142
+ "Salviati": 0.143,
143
+ "Strozzi": 0.103,
144
+ "Tornabuoni": 0.092,
145
+ }
146
+
147
+ b = nx.betweenness_centrality(G, weight=None, normalized=True)
148
+ for n in sorted(G):
149
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
150
+
151
+ def test_les_miserables_graph(self):
152
+ """Betweenness centrality: Les Miserables graph"""
153
+ G = nx.les_miserables_graph()
154
+ b_answer = {
155
+ "Napoleon": 0.000,
156
+ "Myriel": 0.177,
157
+ "MlleBaptistine": 0.000,
158
+ "MmeMagloire": 0.000,
159
+ "CountessDeLo": 0.000,
160
+ "Geborand": 0.000,
161
+ "Champtercier": 0.000,
162
+ "Cravatte": 0.000,
163
+ "Count": 0.000,
164
+ "OldMan": 0.000,
165
+ "Valjean": 0.570,
166
+ "Labarre": 0.000,
167
+ "Marguerite": 0.000,
168
+ "MmeDeR": 0.000,
169
+ "Isabeau": 0.000,
170
+ "Gervais": 0.000,
171
+ "Listolier": 0.000,
172
+ "Tholomyes": 0.041,
173
+ "Fameuil": 0.000,
174
+ "Blacheville": 0.000,
175
+ "Favourite": 0.000,
176
+ "Dahlia": 0.000,
177
+ "Zephine": 0.000,
178
+ "Fantine": 0.130,
179
+ "MmeThenardier": 0.029,
180
+ "Thenardier": 0.075,
181
+ "Cosette": 0.024,
182
+ "Javert": 0.054,
183
+ "Fauchelevent": 0.026,
184
+ "Bamatabois": 0.008,
185
+ "Perpetue": 0.000,
186
+ "Simplice": 0.009,
187
+ "Scaufflaire": 0.000,
188
+ "Woman1": 0.000,
189
+ "Judge": 0.000,
190
+ "Champmathieu": 0.000,
191
+ "Brevet": 0.000,
192
+ "Chenildieu": 0.000,
193
+ "Cochepaille": 0.000,
194
+ "Pontmercy": 0.007,
195
+ "Boulatruelle": 0.000,
196
+ "Eponine": 0.011,
197
+ "Anzelma": 0.000,
198
+ "Woman2": 0.000,
199
+ "MotherInnocent": 0.000,
200
+ "Gribier": 0.000,
201
+ "MmeBurgon": 0.026,
202
+ "Jondrette": 0.000,
203
+ "Gavroche": 0.165,
204
+ "Gillenormand": 0.020,
205
+ "Magnon": 0.000,
206
+ "MlleGillenormand": 0.048,
207
+ "MmePontmercy": 0.000,
208
+ "MlleVaubois": 0.000,
209
+ "LtGillenormand": 0.000,
210
+ "Marius": 0.132,
211
+ "BaronessT": 0.000,
212
+ "Mabeuf": 0.028,
213
+ "Enjolras": 0.043,
214
+ "Combeferre": 0.001,
215
+ "Prouvaire": 0.000,
216
+ "Feuilly": 0.001,
217
+ "Courfeyrac": 0.005,
218
+ "Bahorel": 0.002,
219
+ "Bossuet": 0.031,
220
+ "Joly": 0.002,
221
+ "Grantaire": 0.000,
222
+ "MotherPlutarch": 0.000,
223
+ "Gueulemer": 0.005,
224
+ "Babet": 0.005,
225
+ "Claquesous": 0.005,
226
+ "Montparnasse": 0.004,
227
+ "Toussaint": 0.000,
228
+ "Child1": 0.000,
229
+ "Child2": 0.000,
230
+ "Brujon": 0.000,
231
+ "MmeHucheloup": 0.000,
232
+ }
233
+
234
+ b = nx.betweenness_centrality(G, weight=None, normalized=True)
235
+ for n in sorted(G):
236
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
237
+
238
+ def test_ladder_graph(self):
239
+ """Betweenness centrality: Ladder graph"""
240
+ G = nx.Graph() # ladder_graph(3)
241
+ G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
242
+ b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667}
243
+ for b in b_answer:
244
+ b_answer[b] /= 2
245
+ b = nx.betweenness_centrality(G, weight=None, normalized=False)
246
+ for n in sorted(G):
247
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
248
+
249
+ def test_disconnected_path(self):
250
+ """Betweenness centrality: disconnected path"""
251
+ G = nx.Graph()
252
+ nx.add_path(G, [0, 1, 2])
253
+ nx.add_path(G, [3, 4, 5, 6])
254
+ b_answer = {0: 0, 1: 1, 2: 0, 3: 0, 4: 2, 5: 2, 6: 0}
255
+ b = nx.betweenness_centrality(G, weight=None, normalized=False)
256
+ for n in sorted(G):
257
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
258
+
259
+ def test_disconnected_path_endpoints(self):
260
+ """Betweenness centrality: disconnected path endpoints"""
261
+ G = nx.Graph()
262
+ nx.add_path(G, [0, 1, 2])
263
+ nx.add_path(G, [3, 4, 5, 6])
264
+ b_answer = {0: 2, 1: 3, 2: 2, 3: 3, 4: 5, 5: 5, 6: 3}
265
+ b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
266
+ for n in sorted(G):
267
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
268
+ # normalized = True case
269
+ b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
270
+ for n in sorted(G):
271
+ assert b[n] == pytest.approx(b_answer[n] / 21, abs=1e-7)
272
+
273
+ def test_directed_path(self):
274
+ """Betweenness centrality: directed path"""
275
+ G = nx.DiGraph()
276
+ nx.add_path(G, [0, 1, 2])
277
+ b = nx.betweenness_centrality(G, weight=None, normalized=False)
278
+ b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
279
+ for n in sorted(G):
280
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
281
+
282
+ def test_directed_path_normalized(self):
283
+ """Betweenness centrality: directed path normalized"""
284
+ G = nx.DiGraph()
285
+ nx.add_path(G, [0, 1, 2])
286
+ b = nx.betweenness_centrality(G, weight=None, normalized=True)
287
+ b_answer = {0: 0.0, 1: 0.5, 2: 0.0}
288
+ for n in sorted(G):
289
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
290
+
291
+
292
+ class TestWeightedBetweennessCentrality:
293
+ def test_K5(self):
294
+ """Weighted betweenness centrality: K5"""
295
+ G = nx.complete_graph(5)
296
+ b = nx.betweenness_centrality(G, weight="weight", normalized=False)
297
+ b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
298
+ for n in sorted(G):
299
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
300
+
301
+ def test_P3_normalized(self):
302
+ """Weighted betweenness centrality: P3 normalized"""
303
+ G = nx.path_graph(3)
304
+ b = nx.betweenness_centrality(G, weight="weight", normalized=True)
305
+ b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
306
+ for n in sorted(G):
307
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
308
+
309
+ def test_P3(self):
310
+ """Weighted betweenness centrality: P3"""
311
+ G = nx.path_graph(3)
312
+ b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
313
+ b = nx.betweenness_centrality(G, weight="weight", normalized=False)
314
+ for n in sorted(G):
315
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
316
+
317
+ def test_krackhardt_kite_graph(self):
318
+ """Weighted betweenness centrality: Krackhardt kite graph"""
319
+ G = nx.krackhardt_kite_graph()
320
+ b_answer = {
321
+ 0: 1.667,
322
+ 1: 1.667,
323
+ 2: 0.000,
324
+ 3: 7.333,
325
+ 4: 0.000,
326
+ 5: 16.667,
327
+ 6: 16.667,
328
+ 7: 28.000,
329
+ 8: 16.000,
330
+ 9: 0.000,
331
+ }
332
+ for b in b_answer:
333
+ b_answer[b] /= 2
334
+
335
+ b = nx.betweenness_centrality(G, weight="weight", normalized=False)
336
+
337
+ for n in sorted(G):
338
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
339
+
340
+ def test_krackhardt_kite_graph_normalized(self):
341
+ """Weighted betweenness centrality:
342
+ Krackhardt kite graph normalized
343
+ """
344
+ G = nx.krackhardt_kite_graph()
345
+ b_answer = {
346
+ 0: 0.023,
347
+ 1: 0.023,
348
+ 2: 0.000,
349
+ 3: 0.102,
350
+ 4: 0.000,
351
+ 5: 0.231,
352
+ 6: 0.231,
353
+ 7: 0.389,
354
+ 8: 0.222,
355
+ 9: 0.000,
356
+ }
357
+ b = nx.betweenness_centrality(G, weight="weight", normalized=True)
358
+
359
+ for n in sorted(G):
360
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
361
+
362
+ def test_florentine_families_graph(self):
363
+ """Weighted betweenness centrality:
364
+ Florentine families graph"""
365
+ G = nx.florentine_families_graph()
366
+ b_answer = {
367
+ "Acciaiuoli": 0.000,
368
+ "Albizzi": 0.212,
369
+ "Barbadori": 0.093,
370
+ "Bischeri": 0.104,
371
+ "Castellani": 0.055,
372
+ "Ginori": 0.000,
373
+ "Guadagni": 0.255,
374
+ "Lamberteschi": 0.000,
375
+ "Medici": 0.522,
376
+ "Pazzi": 0.000,
377
+ "Peruzzi": 0.022,
378
+ "Ridolfi": 0.114,
379
+ "Salviati": 0.143,
380
+ "Strozzi": 0.103,
381
+ "Tornabuoni": 0.092,
382
+ }
383
+
384
+ b = nx.betweenness_centrality(G, weight="weight", normalized=True)
385
+ for n in sorted(G):
386
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
387
+
388
+ def test_les_miserables_graph(self):
389
+ """Weighted betweenness centrality: Les Miserables graph"""
390
+ G = nx.les_miserables_graph()
391
+ b_answer = {
392
+ "Napoleon": 0.000,
393
+ "Myriel": 0.177,
394
+ "MlleBaptistine": 0.000,
395
+ "MmeMagloire": 0.000,
396
+ "CountessDeLo": 0.000,
397
+ "Geborand": 0.000,
398
+ "Champtercier": 0.000,
399
+ "Cravatte": 0.000,
400
+ "Count": 0.000,
401
+ "OldMan": 0.000,
402
+ "Valjean": 0.454,
403
+ "Labarre": 0.000,
404
+ "Marguerite": 0.009,
405
+ "MmeDeR": 0.000,
406
+ "Isabeau": 0.000,
407
+ "Gervais": 0.000,
408
+ "Listolier": 0.000,
409
+ "Tholomyes": 0.066,
410
+ "Fameuil": 0.000,
411
+ "Blacheville": 0.000,
412
+ "Favourite": 0.000,
413
+ "Dahlia": 0.000,
414
+ "Zephine": 0.000,
415
+ "Fantine": 0.114,
416
+ "MmeThenardier": 0.046,
417
+ "Thenardier": 0.129,
418
+ "Cosette": 0.075,
419
+ "Javert": 0.193,
420
+ "Fauchelevent": 0.026,
421
+ "Bamatabois": 0.080,
422
+ "Perpetue": 0.000,
423
+ "Simplice": 0.001,
424
+ "Scaufflaire": 0.000,
425
+ "Woman1": 0.000,
426
+ "Judge": 0.000,
427
+ "Champmathieu": 0.000,
428
+ "Brevet": 0.000,
429
+ "Chenildieu": 0.000,
430
+ "Cochepaille": 0.000,
431
+ "Pontmercy": 0.023,
432
+ "Boulatruelle": 0.000,
433
+ "Eponine": 0.023,
434
+ "Anzelma": 0.000,
435
+ "Woman2": 0.000,
436
+ "MotherInnocent": 0.000,
437
+ "Gribier": 0.000,
438
+ "MmeBurgon": 0.026,
439
+ "Jondrette": 0.000,
440
+ "Gavroche": 0.285,
441
+ "Gillenormand": 0.024,
442
+ "Magnon": 0.005,
443
+ "MlleGillenormand": 0.036,
444
+ "MmePontmercy": 0.005,
445
+ "MlleVaubois": 0.000,
446
+ "LtGillenormand": 0.015,
447
+ "Marius": 0.072,
448
+ "BaronessT": 0.004,
449
+ "Mabeuf": 0.089,
450
+ "Enjolras": 0.003,
451
+ "Combeferre": 0.000,
452
+ "Prouvaire": 0.000,
453
+ "Feuilly": 0.004,
454
+ "Courfeyrac": 0.001,
455
+ "Bahorel": 0.007,
456
+ "Bossuet": 0.028,
457
+ "Joly": 0.000,
458
+ "Grantaire": 0.036,
459
+ "MotherPlutarch": 0.000,
460
+ "Gueulemer": 0.025,
461
+ "Babet": 0.015,
462
+ "Claquesous": 0.042,
463
+ "Montparnasse": 0.050,
464
+ "Toussaint": 0.011,
465
+ "Child1": 0.000,
466
+ "Child2": 0.000,
467
+ "Brujon": 0.002,
468
+ "MmeHucheloup": 0.034,
469
+ }
470
+
471
+ b = nx.betweenness_centrality(G, weight="weight", normalized=True)
472
+ for n in sorted(G):
473
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
474
+
475
+ def test_ladder_graph(self):
476
+ """Weighted betweenness centrality: Ladder graph"""
477
+ G = nx.Graph() # ladder_graph(3)
478
+ G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
479
+ b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667}
480
+ for b in b_answer:
481
+ b_answer[b] /= 2
482
+ b = nx.betweenness_centrality(G, weight="weight", normalized=False)
483
+ for n in sorted(G):
484
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
485
+
486
+ def test_G(self):
487
+ """Weighted betweenness centrality: G"""
488
+ G = weighted_G()
489
+ b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0}
490
+ b = nx.betweenness_centrality(G, weight="weight", normalized=False)
491
+ for n in sorted(G):
492
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
493
+
494
+ def test_G2(self):
495
+ """Weighted betweenness centrality: G2"""
496
+ G = nx.DiGraph()
497
+ G.add_weighted_edges_from(
498
+ [
499
+ ("s", "u", 10),
500
+ ("s", "x", 5),
501
+ ("u", "v", 1),
502
+ ("u", "x", 2),
503
+ ("v", "y", 1),
504
+ ("x", "u", 3),
505
+ ("x", "v", 5),
506
+ ("x", "y", 2),
507
+ ("y", "s", 7),
508
+ ("y", "v", 6),
509
+ ]
510
+ )
511
+
512
+ b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0}
513
+
514
+ b = nx.betweenness_centrality(G, weight="weight", normalized=False)
515
+ for n in sorted(G):
516
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
517
+
518
+ def test_G3(self):
519
+ """Weighted betweenness centrality: G3"""
520
+ G = nx.MultiGraph(weighted_G())
521
+ es = list(G.edges(data=True))[::2] # duplicate every other edge
522
+ G.add_edges_from(es)
523
+ b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0}
524
+ b = nx.betweenness_centrality(G, weight="weight", normalized=False)
525
+ for n in sorted(G):
526
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
527
+
528
+ def test_G4(self):
529
+ """Weighted betweenness centrality: G4"""
530
+ G = nx.MultiDiGraph()
531
+ G.add_weighted_edges_from(
532
+ [
533
+ ("s", "u", 10),
534
+ ("s", "x", 5),
535
+ ("s", "x", 6),
536
+ ("u", "v", 1),
537
+ ("u", "x", 2),
538
+ ("v", "y", 1),
539
+ ("v", "y", 1),
540
+ ("x", "u", 3),
541
+ ("x", "v", 5),
542
+ ("x", "y", 2),
543
+ ("x", "y", 3),
544
+ ("y", "s", 7),
545
+ ("y", "v", 6),
546
+ ("y", "v", 6),
547
+ ]
548
+ )
549
+
550
+ b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0}
551
+
552
+ b = nx.betweenness_centrality(G, weight="weight", normalized=False)
553
+ for n in sorted(G):
554
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
555
+
556
+
557
+ class TestEdgeBetweennessCentrality:
558
+ def test_K5(self):
559
+ """Edge betweenness centrality: K5"""
560
+ G = nx.complete_graph(5)
561
+ b = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
562
+ b_answer = dict.fromkeys(G.edges(), 1)
563
+ for n in sorted(G.edges()):
564
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
565
+
566
+ def test_normalized_K5(self):
567
+ """Edge betweenness centrality: K5"""
568
+ G = nx.complete_graph(5)
569
+ b = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
570
+ b_answer = dict.fromkeys(G.edges(), 1 / 10)
571
+ for n in sorted(G.edges()):
572
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
573
+
574
+ def test_C4(self):
575
+ """Edge betweenness centrality: C4"""
576
+ G = nx.cycle_graph(4)
577
+ b = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
578
+ b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2}
579
+ for n in sorted(G.edges()):
580
+ assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7)
581
+
582
+ def test_P4(self):
583
+ """Edge betweenness centrality: P4"""
584
+ G = nx.path_graph(4)
585
+ b = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
586
+ b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
587
+ for n in sorted(G.edges()):
588
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
589
+
590
+ def test_normalized_P4(self):
591
+ """Edge betweenness centrality: P4"""
592
+ G = nx.path_graph(4)
593
+ b = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
594
+ b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
595
+ for n in sorted(G.edges()):
596
+ assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7)
597
+
598
+ def test_balanced_tree(self):
599
+ """Edge betweenness centrality: balanced tree"""
600
+ G = nx.balanced_tree(r=2, h=2)
601
+ b = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
602
+ b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6}
603
+ for n in sorted(G.edges()):
604
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
605
+
606
+
607
+ class TestWeightedEdgeBetweennessCentrality:
608
+ def test_K5(self):
609
+ """Edge betweenness centrality: K5"""
610
+ G = nx.complete_graph(5)
611
+ b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
612
+ b_answer = dict.fromkeys(G.edges(), 1)
613
+ for n in sorted(G.edges()):
614
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
615
+
616
+ def test_C4(self):
617
+ """Edge betweenness centrality: C4"""
618
+ G = nx.cycle_graph(4)
619
+ b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
620
+ b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2}
621
+ for n in sorted(G.edges()):
622
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
623
+
624
+ def test_P4(self):
625
+ """Edge betweenness centrality: P4"""
626
+ G = nx.path_graph(4)
627
+ b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
628
+ b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
629
+ for n in sorted(G.edges()):
630
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
631
+
632
+ def test_balanced_tree(self):
633
+ """Edge betweenness centrality: balanced tree"""
634
+ G = nx.balanced_tree(r=2, h=2)
635
+ b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
636
+ b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6}
637
+ for n in sorted(G.edges()):
638
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
639
+
640
+ def test_weighted_graph(self):
641
+ """Edge betweenness centrality: weighted"""
642
+ eList = [
643
+ (0, 1, 5),
644
+ (0, 2, 4),
645
+ (0, 3, 3),
646
+ (0, 4, 2),
647
+ (1, 2, 4),
648
+ (1, 3, 1),
649
+ (1, 4, 3),
650
+ (2, 4, 5),
651
+ (3, 4, 4),
652
+ ]
653
+ G = nx.Graph()
654
+ G.add_weighted_edges_from(eList)
655
+ b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
656
+ b_answer = {
657
+ (0, 1): 0.0,
658
+ (0, 2): 1.0,
659
+ (0, 3): 2.0,
660
+ (0, 4): 1.0,
661
+ (1, 2): 2.0,
662
+ (1, 3): 3.5,
663
+ (1, 4): 1.5,
664
+ (2, 4): 1.0,
665
+ (3, 4): 0.5,
666
+ }
667
+ for n in sorted(G.edges()):
668
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
669
+
670
+ def test_normalized_weighted_graph(self):
671
+ """Edge betweenness centrality: normalized weighted"""
672
+ eList = [
673
+ (0, 1, 5),
674
+ (0, 2, 4),
675
+ (0, 3, 3),
676
+ (0, 4, 2),
677
+ (1, 2, 4),
678
+ (1, 3, 1),
679
+ (1, 4, 3),
680
+ (2, 4, 5),
681
+ (3, 4, 4),
682
+ ]
683
+ G = nx.Graph()
684
+ G.add_weighted_edges_from(eList)
685
+ b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True)
686
+ b_answer = {
687
+ (0, 1): 0.0,
688
+ (0, 2): 1.0,
689
+ (0, 3): 2.0,
690
+ (0, 4): 1.0,
691
+ (1, 2): 2.0,
692
+ (1, 3): 3.5,
693
+ (1, 4): 1.5,
694
+ (2, 4): 1.0,
695
+ (3, 4): 0.5,
696
+ }
697
+ norm = len(G) * (len(G) - 1) / 2
698
+ for n in sorted(G.edges()):
699
+ assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7)
700
+
701
+ def test_weighted_multigraph(self):
702
+ """Edge betweenness centrality: weighted multigraph"""
703
+ eList = [
704
+ (0, 1, 5),
705
+ (0, 1, 4),
706
+ (0, 2, 4),
707
+ (0, 3, 3),
708
+ (0, 3, 3),
709
+ (0, 4, 2),
710
+ (1, 2, 4),
711
+ (1, 3, 1),
712
+ (1, 3, 2),
713
+ (1, 4, 3),
714
+ (1, 4, 4),
715
+ (2, 4, 5),
716
+ (3, 4, 4),
717
+ (3, 4, 4),
718
+ ]
719
+ G = nx.MultiGraph()
720
+ G.add_weighted_edges_from(eList)
721
+ b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
722
+ b_answer = {
723
+ (0, 1, 0): 0.0,
724
+ (0, 1, 1): 0.5,
725
+ (0, 2, 0): 1.0,
726
+ (0, 3, 0): 0.75,
727
+ (0, 3, 1): 0.75,
728
+ (0, 4, 0): 1.0,
729
+ (1, 2, 0): 2.0,
730
+ (1, 3, 0): 3.0,
731
+ (1, 3, 1): 0.0,
732
+ (1, 4, 0): 1.5,
733
+ (1, 4, 1): 0.0,
734
+ (2, 4, 0): 1.0,
735
+ (3, 4, 0): 0.25,
736
+ (3, 4, 1): 0.25,
737
+ }
738
+ for n in sorted(G.edges(keys=True)):
739
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
740
+
741
+ def test_normalized_weighted_multigraph(self):
742
+ """Edge betweenness centrality: normalized weighted multigraph"""
743
+ eList = [
744
+ (0, 1, 5),
745
+ (0, 1, 4),
746
+ (0, 2, 4),
747
+ (0, 3, 3),
748
+ (0, 3, 3),
749
+ (0, 4, 2),
750
+ (1, 2, 4),
751
+ (1, 3, 1),
752
+ (1, 3, 2),
753
+ (1, 4, 3),
754
+ (1, 4, 4),
755
+ (2, 4, 5),
756
+ (3, 4, 4),
757
+ (3, 4, 4),
758
+ ]
759
+ G = nx.MultiGraph()
760
+ G.add_weighted_edges_from(eList)
761
+ b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True)
762
+ b_answer = {
763
+ (0, 1, 0): 0.0,
764
+ (0, 1, 1): 0.5,
765
+ (0, 2, 0): 1.0,
766
+ (0, 3, 0): 0.75,
767
+ (0, 3, 1): 0.75,
768
+ (0, 4, 0): 1.0,
769
+ (1, 2, 0): 2.0,
770
+ (1, 3, 0): 3.0,
771
+ (1, 3, 1): 0.0,
772
+ (1, 4, 0): 1.5,
773
+ (1, 4, 1): 0.0,
774
+ (2, 4, 0): 1.0,
775
+ (3, 4, 0): 0.25,
776
+ (3, 4, 1): 0.25,
777
+ }
778
+ norm = len(G) * (len(G) - 1) / 2
779
+ for n in sorted(G.edges(keys=True)):
780
+ assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7)
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py ADDED
@@ -0,0 +1,340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import networkx as nx
4
+
5
+
6
+ class TestSubsetBetweennessCentrality:
7
+ def test_K5(self):
8
+ """Betweenness Centrality Subset: K5"""
9
+ G = nx.complete_graph(5)
10
+ b = nx.betweenness_centrality_subset(
11
+ G, sources=[0], targets=[1, 3], weight=None
12
+ )
13
+ b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
14
+ for n in sorted(G):
15
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
16
+
17
+ def test_P5_directed(self):
18
+ """Betweenness Centrality Subset: P5 directed"""
19
+ G = nx.DiGraph()
20
+ nx.add_path(G, range(5))
21
+ b_answer = {0: 0, 1: 1, 2: 1, 3: 0, 4: 0, 5: 0}
22
+ b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None)
23
+ for n in sorted(G):
24
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
25
+
26
+ def test_P5(self):
27
+ """Betweenness Centrality Subset: P5"""
28
+ G = nx.Graph()
29
+ nx.add_path(G, range(5))
30
+ b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0, 4: 0, 5: 0}
31
+ b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None)
32
+ for n in sorted(G):
33
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
34
+
35
+ def test_P5_multiple_target(self):
36
+ """Betweenness Centrality Subset: P5 multiple target"""
37
+ G = nx.Graph()
38
+ nx.add_path(G, range(5))
39
+ b_answer = {0: 0, 1: 1, 2: 1, 3: 0.5, 4: 0, 5: 0}
40
+ b = nx.betweenness_centrality_subset(
41
+ G, sources=[0], targets=[3, 4], weight=None
42
+ )
43
+ for n in sorted(G):
44
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
45
+
46
+ def test_box(self):
47
+ """Betweenness Centrality Subset: box"""
48
+ G = nx.Graph()
49
+ G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
50
+ b_answer = {0: 0, 1: 0.25, 2: 0.25, 3: 0}
51
+ b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None)
52
+ for n in sorted(G):
53
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
54
+
55
+ def test_box_and_path(self):
56
+ """Betweenness Centrality Subset: box and path"""
57
+ G = nx.Graph()
58
+ G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)])
59
+ b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0, 5: 0}
60
+ b = nx.betweenness_centrality_subset(
61
+ G, sources=[0], targets=[3, 4], weight=None
62
+ )
63
+ for n in sorted(G):
64
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
65
+
66
+ def test_box_and_path2(self):
67
+ """Betweenness Centrality Subset: box and path multiple target"""
68
+ G = nx.Graph()
69
+ G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)])
70
+ b_answer = {0: 0, 1: 1.0, 2: 0.5, 20: 0.5, 3: 0.5, 4: 0}
71
+ b = nx.betweenness_centrality_subset(
72
+ G, sources=[0], targets=[3, 4], weight=None
73
+ )
74
+ for n in sorted(G):
75
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
76
+
77
+ def test_diamond_multi_path(self):
78
+ """Betweenness Centrality Subset: Diamond Multi Path"""
79
+ G = nx.Graph()
80
+ G.add_edges_from(
81
+ [
82
+ (1, 2),
83
+ (1, 3),
84
+ (1, 4),
85
+ (1, 5),
86
+ (1, 10),
87
+ (10, 11),
88
+ (11, 12),
89
+ (12, 9),
90
+ (2, 6),
91
+ (3, 6),
92
+ (4, 6),
93
+ (5, 7),
94
+ (7, 8),
95
+ (6, 8),
96
+ (8, 9),
97
+ ]
98
+ )
99
+ b = nx.betweenness_centrality_subset(G, sources=[1], targets=[9], weight=None)
100
+
101
+ expected_b = {
102
+ 1: 0,
103
+ 2: 1.0 / 10,
104
+ 3: 1.0 / 10,
105
+ 4: 1.0 / 10,
106
+ 5: 1.0 / 10,
107
+ 6: 3.0 / 10,
108
+ 7: 1.0 / 10,
109
+ 8: 4.0 / 10,
110
+ 9: 0,
111
+ 10: 1.0 / 10,
112
+ 11: 1.0 / 10,
113
+ 12: 1.0 / 10,
114
+ }
115
+
116
+ for n in sorted(G):
117
+ assert b[n] == pytest.approx(expected_b[n], abs=1e-7)
118
+
119
+ def test_normalized_p2(self):
120
+ """
121
+ Betweenness Centrality Subset: Normalized P2
122
+ if n <= 2: no normalization, betweenness centrality should be 0 for all nodes.
123
+ """
124
+ G = nx.Graph()
125
+ nx.add_path(G, range(2))
126
+ b_answer = {0: 0, 1: 0.0}
127
+ b = nx.betweenness_centrality_subset(
128
+ G, sources=[0], targets=[1], normalized=True, weight=None
129
+ )
130
+ for n in sorted(G):
131
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
132
+
133
+ def test_normalized_P5_directed(self):
134
+ """Betweenness Centrality Subset: Normalized Directed P5"""
135
+ G = nx.DiGraph()
136
+ nx.add_path(G, range(5))
137
+ b_answer = {0: 0, 1: 1.0 / 12.0, 2: 1.0 / 12.0, 3: 0, 4: 0, 5: 0}
138
+ b = nx.betweenness_centrality_subset(
139
+ G, sources=[0], targets=[3], normalized=True, weight=None
140
+ )
141
+ for n in sorted(G):
142
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
143
+
144
+ def test_weighted_graph(self):
145
+ """Betweenness Centrality Subset: Weighted Graph"""
146
+ G = nx.DiGraph()
147
+ G.add_edge(0, 1, weight=3)
148
+ G.add_edge(0, 2, weight=2)
149
+ G.add_edge(0, 3, weight=6)
150
+ G.add_edge(0, 4, weight=4)
151
+ G.add_edge(1, 3, weight=5)
152
+ G.add_edge(1, 5, weight=5)
153
+ G.add_edge(2, 4, weight=1)
154
+ G.add_edge(3, 4, weight=2)
155
+ G.add_edge(3, 5, weight=1)
156
+ G.add_edge(4, 5, weight=4)
157
+ b_answer = {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.0}
158
+ b = nx.betweenness_centrality_subset(
159
+ G, sources=[0], targets=[5], normalized=False, weight="weight"
160
+ )
161
+ for n in sorted(G):
162
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
163
+
164
+
165
+ class TestEdgeSubsetBetweennessCentrality:
166
+ def test_K5(self):
167
+ """Edge betweenness subset centrality: K5"""
168
+ G = nx.complete_graph(5)
169
+ b = nx.edge_betweenness_centrality_subset(
170
+ G, sources=[0], targets=[1, 3], weight=None
171
+ )
172
+ b_answer = dict.fromkeys(G.edges(), 0)
173
+ b_answer[(0, 3)] = b_answer[(0, 1)] = 0.5
174
+ for n in sorted(G.edges()):
175
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
176
+
177
+ def test_P5_directed(self):
178
+ """Edge betweenness subset centrality: P5 directed"""
179
+ G = nx.DiGraph()
180
+ nx.add_path(G, range(5))
181
+ b_answer = dict.fromkeys(G.edges(), 0)
182
+ b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1
183
+ b = nx.edge_betweenness_centrality_subset(
184
+ G, sources=[0], targets=[3], weight=None
185
+ )
186
+ for n in sorted(G.edges()):
187
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
188
+
189
+ def test_P5(self):
190
+ """Edge betweenness subset centrality: P5"""
191
+ G = nx.Graph()
192
+ nx.add_path(G, range(5))
193
+ b_answer = dict.fromkeys(G.edges(), 0)
194
+ b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5
195
+ b = nx.edge_betweenness_centrality_subset(
196
+ G, sources=[0], targets=[3], weight=None
197
+ )
198
+ for n in sorted(G.edges()):
199
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
200
+
201
+ def test_P5_multiple_target(self):
202
+ """Edge betweenness subset centrality: P5 multiple target"""
203
+ G = nx.Graph()
204
+ nx.add_path(G, range(5))
205
+ b_answer = dict.fromkeys(G.edges(), 0)
206
+ b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1
207
+ b_answer[(3, 4)] = 0.5
208
+ b = nx.edge_betweenness_centrality_subset(
209
+ G, sources=[0], targets=[3, 4], weight=None
210
+ )
211
+ for n in sorted(G.edges()):
212
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
213
+
214
+ def test_box(self):
215
+ """Edge betweenness subset centrality: box"""
216
+ G = nx.Graph()
217
+ G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
218
+ b_answer = dict.fromkeys(G.edges(), 0)
219
+ b_answer[(0, 1)] = b_answer[(0, 2)] = 0.25
220
+ b_answer[(1, 3)] = b_answer[(2, 3)] = 0.25
221
+ b = nx.edge_betweenness_centrality_subset(
222
+ G, sources=[0], targets=[3], weight=None
223
+ )
224
+ for n in sorted(G.edges()):
225
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
226
+
227
+ def test_box_and_path(self):
228
+ """Edge betweenness subset centrality: box and path"""
229
+ G = nx.Graph()
230
+ G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)])
231
+ b_answer = dict.fromkeys(G.edges(), 0)
232
+ b_answer[(0, 1)] = b_answer[(0, 2)] = 0.5
233
+ b_answer[(1, 3)] = b_answer[(2, 3)] = 0.5
234
+ b_answer[(3, 4)] = 0.5
235
+ b = nx.edge_betweenness_centrality_subset(
236
+ G, sources=[0], targets=[3, 4], weight=None
237
+ )
238
+ for n in sorted(G.edges()):
239
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
240
+
241
+ def test_box_and_path2(self):
242
+ """Edge betweenness subset centrality: box and path multiple target"""
243
+ G = nx.Graph()
244
+ G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)])
245
+ b_answer = dict.fromkeys(G.edges(), 0)
246
+ b_answer[(0, 1)] = 1.0
247
+ b_answer[(1, 20)] = b_answer[(3, 20)] = 0.5
248
+ b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5
249
+ b_answer[(3, 4)] = 0.5
250
+ b = nx.edge_betweenness_centrality_subset(
251
+ G, sources=[0], targets=[3, 4], weight=None
252
+ )
253
+ for n in sorted(G.edges()):
254
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
255
+
256
+ def test_diamond_multi_path(self):
257
+ """Edge betweenness subset centrality: Diamond Multi Path"""
258
+ G = nx.Graph()
259
+ G.add_edges_from(
260
+ [
261
+ (1, 2),
262
+ (1, 3),
263
+ (1, 4),
264
+ (1, 5),
265
+ (1, 10),
266
+ (10, 11),
267
+ (11, 12),
268
+ (12, 9),
269
+ (2, 6),
270
+ (3, 6),
271
+ (4, 6),
272
+ (5, 7),
273
+ (7, 8),
274
+ (6, 8),
275
+ (8, 9),
276
+ ]
277
+ )
278
+ b_answer = dict.fromkeys(G.edges(), 0)
279
+ b_answer[(8, 9)] = 0.4
280
+ b_answer[(6, 8)] = b_answer[(7, 8)] = 0.2
281
+ b_answer[(2, 6)] = b_answer[(3, 6)] = b_answer[(4, 6)] = 0.2 / 3.0
282
+ b_answer[(1, 2)] = b_answer[(1, 3)] = b_answer[(1, 4)] = 0.2 / 3.0
283
+ b_answer[(5, 7)] = 0.2
284
+ b_answer[(1, 5)] = 0.2
285
+ b_answer[(9, 12)] = 0.1
286
+ b_answer[(11, 12)] = b_answer[(10, 11)] = b_answer[(1, 10)] = 0.1
287
+ b = nx.edge_betweenness_centrality_subset(
288
+ G, sources=[1], targets=[9], weight=None
289
+ )
290
+ for n in G.edges():
291
+ sort_n = tuple(sorted(n))
292
+ assert b[n] == pytest.approx(b_answer[sort_n], abs=1e-7)
293
+
294
+ def test_normalized_p1(self):
295
+ """
296
+ Edge betweenness subset centrality: P1
297
+ if n <= 1: no normalization b=0 for all nodes
298
+ """
299
+ G = nx.Graph()
300
+ nx.add_path(G, range(1))
301
+ b_answer = dict.fromkeys(G.edges(), 0)
302
+ b = nx.edge_betweenness_centrality_subset(
303
+ G, sources=[0], targets=[0], normalized=True, weight=None
304
+ )
305
+ for n in G.edges():
306
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
307
+
308
+ def test_normalized_P5_directed(self):
309
+ """Edge betweenness subset centrality: Normalized Directed P5"""
310
+ G = nx.DiGraph()
311
+ nx.add_path(G, range(5))
312
+ b_answer = dict.fromkeys(G.edges(), 0)
313
+ b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.05
314
+ b = nx.edge_betweenness_centrality_subset(
315
+ G, sources=[0], targets=[3], normalized=True, weight=None
316
+ )
317
+ for n in G.edges():
318
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
319
+
320
+ def test_weighted_graph(self):
321
+ """Edge betweenness subset centrality: Weighted Graph"""
322
+ G = nx.DiGraph()
323
+ G.add_edge(0, 1, weight=3)
324
+ G.add_edge(0, 2, weight=2)
325
+ G.add_edge(0, 3, weight=6)
326
+ G.add_edge(0, 4, weight=4)
327
+ G.add_edge(1, 3, weight=5)
328
+ G.add_edge(1, 5, weight=5)
329
+ G.add_edge(2, 4, weight=1)
330
+ G.add_edge(3, 4, weight=2)
331
+ G.add_edge(3, 5, weight=1)
332
+ G.add_edge(4, 5, weight=4)
333
+ b_answer = dict.fromkeys(G.edges(), 0)
334
+ b_answer[(0, 2)] = b_answer[(2, 4)] = b_answer[(4, 5)] = 0.5
335
+ b_answer[(0, 3)] = b_answer[(3, 5)] = 0.5
336
+ b = nx.edge_betweenness_centrality_subset(
337
+ G, sources=[0], targets=[5], normalized=False, weight="weight"
338
+ )
339
+ for n in G.edges():
340
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for closeness centrality.
3
+ """
4
+ import pytest
5
+
6
+ import networkx as nx
7
+
8
+
9
+ class TestClosenessCentrality:
10
+ @classmethod
11
+ def setup_class(cls):
12
+ cls.K = nx.krackhardt_kite_graph()
13
+ cls.P3 = nx.path_graph(3)
14
+ cls.P4 = nx.path_graph(4)
15
+ cls.K5 = nx.complete_graph(5)
16
+
17
+ cls.C4 = nx.cycle_graph(4)
18
+ cls.T = nx.balanced_tree(r=2, h=2)
19
+ cls.Gb = nx.Graph()
20
+ cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
21
+
22
+ F = nx.florentine_families_graph()
23
+ cls.F = F
24
+
25
+ cls.LM = nx.les_miserables_graph()
26
+
27
+ # Create random undirected, unweighted graph for testing incremental version
28
+ cls.undirected_G = nx.fast_gnp_random_graph(n=100, p=0.6, seed=123)
29
+ cls.undirected_G_cc = nx.closeness_centrality(cls.undirected_G)
30
+
31
+ def test_wf_improved(self):
32
+ G = nx.union(self.P4, nx.path_graph([4, 5, 6]))
33
+ c = nx.closeness_centrality(G)
34
+ cwf = nx.closeness_centrality(G, wf_improved=False)
35
+ res = {0: 0.25, 1: 0.375, 2: 0.375, 3: 0.25, 4: 0.222, 5: 0.333, 6: 0.222}
36
+ wf_res = {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5, 4: 0.667, 5: 1.0, 6: 0.667}
37
+ for n in G:
38
+ assert c[n] == pytest.approx(res[n], abs=1e-3)
39
+ assert cwf[n] == pytest.approx(wf_res[n], abs=1e-3)
40
+
41
+ def test_digraph(self):
42
+ G = nx.path_graph(3, create_using=nx.DiGraph())
43
+ c = nx.closeness_centrality(G)
44
+ cr = nx.closeness_centrality(G.reverse())
45
+ d = {0: 0.0, 1: 0.500, 2: 0.667}
46
+ dr = {0: 0.667, 1: 0.500, 2: 0.0}
47
+ for n in sorted(self.P3):
48
+ assert c[n] == pytest.approx(d[n], abs=1e-3)
49
+ assert cr[n] == pytest.approx(dr[n], abs=1e-3)
50
+
51
+ def test_k5_closeness(self):
52
+ c = nx.closeness_centrality(self.K5)
53
+ d = {0: 1.000, 1: 1.000, 2: 1.000, 3: 1.000, 4: 1.000}
54
+ for n in sorted(self.K5):
55
+ assert c[n] == pytest.approx(d[n], abs=1e-3)
56
+
57
+ def test_p3_closeness(self):
58
+ c = nx.closeness_centrality(self.P3)
59
+ d = {0: 0.667, 1: 1.000, 2: 0.667}
60
+ for n in sorted(self.P3):
61
+ assert c[n] == pytest.approx(d[n], abs=1e-3)
62
+
63
+ def test_krackhardt_closeness(self):
64
+ c = nx.closeness_centrality(self.K)
65
+ d = {
66
+ 0: 0.529,
67
+ 1: 0.529,
68
+ 2: 0.500,
69
+ 3: 0.600,
70
+ 4: 0.500,
71
+ 5: 0.643,
72
+ 6: 0.643,
73
+ 7: 0.600,
74
+ 8: 0.429,
75
+ 9: 0.310,
76
+ }
77
+ for n in sorted(self.K):
78
+ assert c[n] == pytest.approx(d[n], abs=1e-3)
79
+
80
+ def test_florentine_families_closeness(self):
81
+ c = nx.closeness_centrality(self.F)
82
+ d = {
83
+ "Acciaiuoli": 0.368,
84
+ "Albizzi": 0.483,
85
+ "Barbadori": 0.4375,
86
+ "Bischeri": 0.400,
87
+ "Castellani": 0.389,
88
+ "Ginori": 0.333,
89
+ "Guadagni": 0.467,
90
+ "Lamberteschi": 0.326,
91
+ "Medici": 0.560,
92
+ "Pazzi": 0.286,
93
+ "Peruzzi": 0.368,
94
+ "Ridolfi": 0.500,
95
+ "Salviati": 0.389,
96
+ "Strozzi": 0.4375,
97
+ "Tornabuoni": 0.483,
98
+ }
99
+ for n in sorted(self.F):
100
+ assert c[n] == pytest.approx(d[n], abs=1e-3)
101
+
102
+ def test_les_miserables_closeness(self):
103
+ c = nx.closeness_centrality(self.LM)
104
+ d = {
105
+ "Napoleon": 0.302,
106
+ "Myriel": 0.429,
107
+ "MlleBaptistine": 0.413,
108
+ "MmeMagloire": 0.413,
109
+ "CountessDeLo": 0.302,
110
+ "Geborand": 0.302,
111
+ "Champtercier": 0.302,
112
+ "Cravatte": 0.302,
113
+ "Count": 0.302,
114
+ "OldMan": 0.302,
115
+ "Valjean": 0.644,
116
+ "Labarre": 0.394,
117
+ "Marguerite": 0.413,
118
+ "MmeDeR": 0.394,
119
+ "Isabeau": 0.394,
120
+ "Gervais": 0.394,
121
+ "Listolier": 0.341,
122
+ "Tholomyes": 0.392,
123
+ "Fameuil": 0.341,
124
+ "Blacheville": 0.341,
125
+ "Favourite": 0.341,
126
+ "Dahlia": 0.341,
127
+ "Zephine": 0.341,
128
+ "Fantine": 0.461,
129
+ "MmeThenardier": 0.461,
130
+ "Thenardier": 0.517,
131
+ "Cosette": 0.478,
132
+ "Javert": 0.517,
133
+ "Fauchelevent": 0.402,
134
+ "Bamatabois": 0.427,
135
+ "Perpetue": 0.318,
136
+ "Simplice": 0.418,
137
+ "Scaufflaire": 0.394,
138
+ "Woman1": 0.396,
139
+ "Judge": 0.404,
140
+ "Champmathieu": 0.404,
141
+ "Brevet": 0.404,
142
+ "Chenildieu": 0.404,
143
+ "Cochepaille": 0.404,
144
+ "Pontmercy": 0.373,
145
+ "Boulatruelle": 0.342,
146
+ "Eponine": 0.396,
147
+ "Anzelma": 0.352,
148
+ "Woman2": 0.402,
149
+ "MotherInnocent": 0.398,
150
+ "Gribier": 0.288,
151
+ "MmeBurgon": 0.344,
152
+ "Jondrette": 0.257,
153
+ "Gavroche": 0.514,
154
+ "Gillenormand": 0.442,
155
+ "Magnon": 0.335,
156
+ "MlleGillenormand": 0.442,
157
+ "MmePontmercy": 0.315,
158
+ "MlleVaubois": 0.308,
159
+ "LtGillenormand": 0.365,
160
+ "Marius": 0.531,
161
+ "BaronessT": 0.352,
162
+ "Mabeuf": 0.396,
163
+ "Enjolras": 0.481,
164
+ "Combeferre": 0.392,
165
+ "Prouvaire": 0.357,
166
+ "Feuilly": 0.392,
167
+ "Courfeyrac": 0.400,
168
+ "Bahorel": 0.394,
169
+ "Bossuet": 0.475,
170
+ "Joly": 0.394,
171
+ "Grantaire": 0.358,
172
+ "MotherPlutarch": 0.285,
173
+ "Gueulemer": 0.463,
174
+ "Babet": 0.463,
175
+ "Claquesous": 0.452,
176
+ "Montparnasse": 0.458,
177
+ "Toussaint": 0.402,
178
+ "Child1": 0.342,
179
+ "Child2": 0.342,
180
+ "Brujon": 0.380,
181
+ "MmeHucheloup": 0.353,
182
+ }
183
+ for n in sorted(self.LM):
184
+ assert c[n] == pytest.approx(d[n], abs=1e-3)
185
+
186
+ def test_weighted_closeness(self):
187
+ edges = [
188
+ ("s", "u", 10),
189
+ ("s", "x", 5),
190
+ ("u", "v", 1),
191
+ ("u", "x", 2),
192
+ ("v", "y", 1),
193
+ ("x", "u", 3),
194
+ ("x", "v", 5),
195
+ ("x", "y", 2),
196
+ ("y", "s", 7),
197
+ ("y", "v", 6),
198
+ ]
199
+ XG = nx.Graph()
200
+ XG.add_weighted_edges_from(edges)
201
+ c = nx.closeness_centrality(XG, distance="weight")
202
+ d = {"y": 0.200, "x": 0.286, "s": 0.138, "u": 0.235, "v": 0.200}
203
+ for n in sorted(XG):
204
+ assert c[n] == pytest.approx(d[n], abs=1e-3)
205
+
206
+ #
207
+ # Tests for incremental closeness centrality.
208
+ #
209
+ @staticmethod
210
+ def pick_add_edge(g):
211
+ u = nx.utils.arbitrary_element(g)
212
+ possible_nodes = set(g.nodes())
213
+ neighbors = list(g.neighbors(u)) + [u]
214
+ possible_nodes.difference_update(neighbors)
215
+ v = nx.utils.arbitrary_element(possible_nodes)
216
+ return (u, v)
217
+
218
+ @staticmethod
219
+ def pick_remove_edge(g):
220
+ u = nx.utils.arbitrary_element(g)
221
+ possible_nodes = list(g.neighbors(u))
222
+ v = nx.utils.arbitrary_element(possible_nodes)
223
+ return (u, v)
224
+
225
+ def test_directed_raises(self):
226
+ with pytest.raises(nx.NetworkXNotImplemented):
227
+ dir_G = nx.gn_graph(n=5)
228
+ prev_cc = None
229
+ edge = self.pick_add_edge(dir_G)
230
+ insert = True
231
+ nx.incremental_closeness_centrality(dir_G, edge, prev_cc, insert)
232
+
233
+ def test_wrong_size_prev_cc_raises(self):
234
+ with pytest.raises(nx.NetworkXError):
235
+ G = self.undirected_G.copy()
236
+ edge = self.pick_add_edge(G)
237
+ insert = True
238
+ prev_cc = self.undirected_G_cc.copy()
239
+ prev_cc.pop(0)
240
+ nx.incremental_closeness_centrality(G, edge, prev_cc, insert)
241
+
242
+ def test_wrong_nodes_prev_cc_raises(self):
243
+ with pytest.raises(nx.NetworkXError):
244
+ G = self.undirected_G.copy()
245
+ edge = self.pick_add_edge(G)
246
+ insert = True
247
+ prev_cc = self.undirected_G_cc.copy()
248
+ num_nodes = len(prev_cc)
249
+ prev_cc.pop(0)
250
+ prev_cc[num_nodes] = 0.5
251
+ nx.incremental_closeness_centrality(G, edge, prev_cc, insert)
252
+
253
+ def test_zero_centrality(self):
254
+ G = nx.path_graph(3)
255
+ prev_cc = nx.closeness_centrality(G)
256
+ edge = self.pick_remove_edge(G)
257
+ test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=False)
258
+ G.remove_edges_from([edge])
259
+ real_cc = nx.closeness_centrality(G)
260
+ shared_items = set(test_cc.items()) & set(real_cc.items())
261
+ assert len(shared_items) == len(real_cc)
262
+ assert 0 in test_cc.values()
263
+
264
+ def test_incremental(self):
265
+ # Check that incremental and regular give same output
266
+ G = self.undirected_G.copy()
267
+ prev_cc = None
268
+ for i in range(5):
269
+ if i % 2 == 0:
270
+ # Remove an edge
271
+ insert = False
272
+ edge = self.pick_remove_edge(G)
273
+ else:
274
+ # Add an edge
275
+ insert = True
276
+ edge = self.pick_add_edge(G)
277
+
278
+ # start = timeit.default_timer()
279
+ test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insert)
280
+ # inc_elapsed = (timeit.default_timer() - start)
281
+ # print(f"incremental time: {inc_elapsed}")
282
+
283
+ if insert:
284
+ G.add_edges_from([edge])
285
+ else:
286
+ G.remove_edges_from([edge])
287
+
288
+ # start = timeit.default_timer()
289
+ real_cc = nx.closeness_centrality(G)
290
+ # reg_elapsed = (timeit.default_timer() - start)
291
+ # print(f"regular time: {reg_elapsed}")
292
+ # Example output:
293
+ # incremental time: 0.208
294
+ # regular time: 0.276
295
+ # incremental time: 0.00683
296
+ # regular time: 0.260
297
+ # incremental time: 0.0224
298
+ # regular time: 0.278
299
+ # incremental time: 0.00804
300
+ # regular time: 0.208
301
+ # incremental time: 0.00947
302
+ # regular time: 0.188
303
+
304
+ assert set(test_cc.items()) == set(real_cc.items())
305
+
306
+ prev_cc = test_cc
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import networkx as nx
4
+ from networkx import approximate_current_flow_betweenness_centrality as approximate_cfbc
5
+ from networkx import edge_current_flow_betweenness_centrality as edge_current_flow
6
+
7
+ np = pytest.importorskip("numpy")
8
+ pytest.importorskip("scipy")
9
+
10
+
11
+ class TestFlowBetweennessCentrality:
12
+ def test_K4_normalized(self):
13
+ """Betweenness centrality: K4"""
14
+ G = nx.complete_graph(4)
15
+ b = nx.current_flow_betweenness_centrality(G, normalized=True)
16
+ b_answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}
17
+ for n in sorted(G):
18
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
19
+ G.add_edge(0, 1, weight=0.5, other=0.3)
20
+ b = nx.current_flow_betweenness_centrality(G, normalized=True, weight=None)
21
+ for n in sorted(G):
22
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
23
+ wb_answer = {0: 0.2222222, 1: 0.2222222, 2: 0.30555555, 3: 0.30555555}
24
+ b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="weight")
25
+ for n in sorted(G):
26
+ assert b[n] == pytest.approx(wb_answer[n], abs=1e-7)
27
+ wb_answer = {0: 0.2051282, 1: 0.2051282, 2: 0.33974358, 3: 0.33974358}
28
+ b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="other")
29
+ for n in sorted(G):
30
+ assert b[n] == pytest.approx(wb_answer[n], abs=1e-7)
31
+
32
+ def test_K4(self):
33
+ """Betweenness centrality: K4"""
34
+ G = nx.complete_graph(4)
35
+ for solver in ["full", "lu", "cg"]:
36
+ b = nx.current_flow_betweenness_centrality(
37
+ G, normalized=False, solver=solver
38
+ )
39
+ b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
40
+ for n in sorted(G):
41
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
42
+
43
+ def test_P4_normalized(self):
44
+ """Betweenness centrality: P4 normalized"""
45
+ G = nx.path_graph(4)
46
+ b = nx.current_flow_betweenness_centrality(G, normalized=True)
47
+ b_answer = {0: 0, 1: 2.0 / 3, 2: 2.0 / 3, 3: 0}
48
+ for n in sorted(G):
49
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
50
+
51
+ def test_P4(self):
52
+ """Betweenness centrality: P4"""
53
+ G = nx.path_graph(4)
54
+ b = nx.current_flow_betweenness_centrality(G, normalized=False)
55
+ b_answer = {0: 0, 1: 2, 2: 2, 3: 0}
56
+ for n in sorted(G):
57
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
58
+
59
+ def test_star(self):
60
+ """Betweenness centrality: star"""
61
+ G = nx.Graph()
62
+ nx.add_star(G, ["a", "b", "c", "d"])
63
+ b = nx.current_flow_betweenness_centrality(G, normalized=True)
64
+ b_answer = {"a": 1.0, "b": 0.0, "c": 0.0, "d": 0.0}
65
+ for n in sorted(G):
66
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
67
+
68
+ def test_solvers2(self):
69
+ """Betweenness centrality: alternate solvers"""
70
+ G = nx.complete_graph(4)
71
+ for solver in ["full", "lu", "cg"]:
72
+ b = nx.current_flow_betweenness_centrality(
73
+ G, normalized=False, solver=solver
74
+ )
75
+ b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
76
+ for n in sorted(G):
77
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
78
+
79
+
80
+ class TestApproximateFlowBetweennessCentrality:
81
+ def test_K4_normalized(self):
82
+ "Approximate current-flow betweenness centrality: K4 normalized"
83
+ G = nx.complete_graph(4)
84
+ b = nx.current_flow_betweenness_centrality(G, normalized=True)
85
+ epsilon = 0.1
86
+ ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
87
+ for n in sorted(G):
88
+ np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
89
+
90
+ def test_K4(self):
91
+ "Approximate current-flow betweenness centrality: K4"
92
+ G = nx.complete_graph(4)
93
+ b = nx.current_flow_betweenness_centrality(G, normalized=False)
94
+ epsilon = 0.1
95
+ ba = approximate_cfbc(G, normalized=False, epsilon=0.5 * epsilon)
96
+ for n in sorted(G):
97
+ np.testing.assert_allclose(b[n], ba[n], atol=epsilon * len(G) ** 2)
98
+
99
+ def test_star(self):
100
+ "Approximate current-flow betweenness centrality: star"
101
+ G = nx.Graph()
102
+ nx.add_star(G, ["a", "b", "c", "d"])
103
+ b = nx.current_flow_betweenness_centrality(G, normalized=True)
104
+ epsilon = 0.1
105
+ ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
106
+ for n in sorted(G):
107
+ np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
108
+
109
+ def test_grid(self):
110
+ "Approximate current-flow betweenness centrality: 2d grid"
111
+ G = nx.grid_2d_graph(4, 4)
112
+ b = nx.current_flow_betweenness_centrality(G, normalized=True)
113
+ epsilon = 0.1
114
+ ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
115
+ for n in sorted(G):
116
+ np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
117
+
118
+ def test_seed(self):
119
+ G = nx.complete_graph(4)
120
+ b = approximate_cfbc(G, normalized=False, epsilon=0.05, seed=1)
121
+ b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
122
+ for n in sorted(G):
123
+ np.testing.assert_allclose(b[n], b_answer[n], atol=0.1)
124
+
125
+ def test_solvers(self):
126
+ "Approximate current-flow betweenness centrality: solvers"
127
+ G = nx.complete_graph(4)
128
+ epsilon = 0.1
129
+ for solver in ["full", "lu", "cg"]:
130
+ b = approximate_cfbc(
131
+ G, normalized=False, solver=solver, epsilon=0.5 * epsilon
132
+ )
133
+ b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
134
+ for n in sorted(G):
135
+ np.testing.assert_allclose(b[n], b_answer[n], atol=epsilon)
136
+
137
+ def test_lower_kmax(self):
138
+ G = nx.complete_graph(4)
139
+ with pytest.raises(nx.NetworkXError, match="Increase kmax or epsilon"):
140
+ nx.approximate_current_flow_betweenness_centrality(G, kmax=4)
141
+
142
+
143
+ class TestWeightedFlowBetweennessCentrality:
144
+ pass
145
+
146
+
147
+ class TestEdgeFlowBetweennessCentrality:
148
+ def test_K4(self):
149
+ """Edge flow betweenness centrality: K4"""
150
+ G = nx.complete_graph(4)
151
+ b = edge_current_flow(G, normalized=True)
152
+ b_answer = dict.fromkeys(G.edges(), 0.25)
153
+ for (s, t), v1 in b_answer.items():
154
+ v2 = b.get((s, t), b.get((t, s)))
155
+ assert v1 == pytest.approx(v2, abs=1e-7)
156
+
157
+ def test_K4_normalized(self):
158
+ """Edge flow betweenness centrality: K4"""
159
+ G = nx.complete_graph(4)
160
+ b = edge_current_flow(G, normalized=False)
161
+ b_answer = dict.fromkeys(G.edges(), 0.75)
162
+ for (s, t), v1 in b_answer.items():
163
+ v2 = b.get((s, t), b.get((t, s)))
164
+ assert v1 == pytest.approx(v2, abs=1e-7)
165
+
166
+ def test_C4(self):
167
+ """Edge flow betweenness centrality: C4"""
168
+ G = nx.cycle_graph(4)
169
+ b = edge_current_flow(G, normalized=False)
170
+ b_answer = {(0, 1): 1.25, (0, 3): 1.25, (1, 2): 1.25, (2, 3): 1.25}
171
+ for (s, t), v1 in b_answer.items():
172
+ v2 = b.get((s, t), b.get((t, s)))
173
+ assert v1 == pytest.approx(v2, abs=1e-7)
174
+
175
+ def test_P4(self):
176
+ """Edge betweenness centrality: P4"""
177
+ G = nx.path_graph(4)
178
+ b = edge_current_flow(G, normalized=False)
179
+ b_answer = {(0, 1): 1.5, (1, 2): 2.0, (2, 3): 1.5}
180
+ for (s, t), v1 in b_answer.items():
181
+ v2 = b.get((s, t), b.get((t, s)))
182
+ assert v1 == pytest.approx(v2, abs=1e-7)
183
+
184
+
185
+ @pytest.mark.parametrize(
186
+ "centrality_func",
187
+ (
188
+ nx.current_flow_betweenness_centrality,
189
+ nx.edge_current_flow_betweenness_centrality,
190
+ nx.approximate_current_flow_betweenness_centrality,
191
+ ),
192
+ )
193
+ def test_unconnected_graphs_betweenness_centrality(centrality_func):
194
+ G = nx.Graph([(1, 2), (3, 4)])
195
+ G.add_node(5)
196
+ with pytest.raises(nx.NetworkXError, match="Graph not connected"):
197
+ centrality_func(G)
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ pytest.importorskip("numpy")
4
+ pytest.importorskip("scipy")
5
+
6
+ import networkx as nx
7
+ from networkx import edge_current_flow_betweenness_centrality as edge_current_flow
8
+ from networkx import (
9
+ edge_current_flow_betweenness_centrality_subset as edge_current_flow_subset,
10
+ )
11
+
12
+
13
+ class TestFlowBetweennessCentrality:
14
+ def test_K4_normalized(self):
15
+ """Betweenness centrality: K4"""
16
+ G = nx.complete_graph(4)
17
+ b = nx.current_flow_betweenness_centrality_subset(
18
+ G, list(G), list(G), normalized=True
19
+ )
20
+ b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
21
+ for n in sorted(G):
22
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
23
+
24
+ def test_K4(self):
25
+ """Betweenness centrality: K4"""
26
+ G = nx.complete_graph(4)
27
+ b = nx.current_flow_betweenness_centrality_subset(
28
+ G, list(G), list(G), normalized=True
29
+ )
30
+ b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
31
+ for n in sorted(G):
32
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
33
+ # test weighted network
34
+ G.add_edge(0, 1, weight=0.5, other=0.3)
35
+ b = nx.current_flow_betweenness_centrality_subset(
36
+ G, list(G), list(G), normalized=True, weight=None
37
+ )
38
+ for n in sorted(G):
39
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
40
+ b = nx.current_flow_betweenness_centrality_subset(
41
+ G, list(G), list(G), normalized=True
42
+ )
43
+ b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
44
+ for n in sorted(G):
45
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
46
+ b = nx.current_flow_betweenness_centrality_subset(
47
+ G, list(G), list(G), normalized=True, weight="other"
48
+ )
49
+ b_answer = nx.current_flow_betweenness_centrality(
50
+ G, normalized=True, weight="other"
51
+ )
52
+ for n in sorted(G):
53
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
54
+
55
+ def test_P4_normalized(self):
56
+ """Betweenness centrality: P4 normalized"""
57
+ G = nx.path_graph(4)
58
+ b = nx.current_flow_betweenness_centrality_subset(
59
+ G, list(G), list(G), normalized=True
60
+ )
61
+ b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
62
+ for n in sorted(G):
63
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
64
+
65
+ def test_P4(self):
66
+ """Betweenness centrality: P4"""
67
+ G = nx.path_graph(4)
68
+ b = nx.current_flow_betweenness_centrality_subset(
69
+ G, list(G), list(G), normalized=True
70
+ )
71
+ b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
72
+ for n in sorted(G):
73
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
74
+
75
+ def test_star(self):
76
+ """Betweenness centrality: star"""
77
+ G = nx.Graph()
78
+ nx.add_star(G, ["a", "b", "c", "d"])
79
+ b = nx.current_flow_betweenness_centrality_subset(
80
+ G, list(G), list(G), normalized=True
81
+ )
82
+ b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
83
+ for n in sorted(G):
84
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
85
+
86
+
87
+ # class TestWeightedFlowBetweennessCentrality():
88
+ # pass
89
+
90
+
91
+ class TestEdgeFlowBetweennessCentrality:
92
+ def test_K4_normalized(self):
93
+ """Betweenness centrality: K4"""
94
+ G = nx.complete_graph(4)
95
+ b = edge_current_flow_subset(G, list(G), list(G), normalized=True)
96
+ b_answer = edge_current_flow(G, normalized=True)
97
+ for (s, t), v1 in b_answer.items():
98
+ v2 = b.get((s, t), b.get((t, s)))
99
+ assert v1 == pytest.approx(v2, abs=1e-7)
100
+
101
+ def test_K4(self):
102
+ """Betweenness centrality: K4"""
103
+ G = nx.complete_graph(4)
104
+ b = edge_current_flow_subset(G, list(G), list(G), normalized=False)
105
+ b_answer = edge_current_flow(G, normalized=False)
106
+ for (s, t), v1 in b_answer.items():
107
+ v2 = b.get((s, t), b.get((t, s)))
108
+ assert v1 == pytest.approx(v2, abs=1e-7)
109
+ # test weighted network
110
+ G.add_edge(0, 1, weight=0.5, other=0.3)
111
+ b = edge_current_flow_subset(G, list(G), list(G), normalized=False, weight=None)
112
+ # weight is None => same as unweighted network
113
+ for (s, t), v1 in b_answer.items():
114
+ v2 = b.get((s, t), b.get((t, s)))
115
+ assert v1 == pytest.approx(v2, abs=1e-7)
116
+
117
+ b = edge_current_flow_subset(G, list(G), list(G), normalized=False)
118
+ b_answer = edge_current_flow(G, normalized=False)
119
+ for (s, t), v1 in b_answer.items():
120
+ v2 = b.get((s, t), b.get((t, s)))
121
+ assert v1 == pytest.approx(v2, abs=1e-7)
122
+
123
+ b = edge_current_flow_subset(
124
+ G, list(G), list(G), normalized=False, weight="other"
125
+ )
126
+ b_answer = edge_current_flow(G, normalized=False, weight="other")
127
+ for (s, t), v1 in b_answer.items():
128
+ v2 = b.get((s, t), b.get((t, s)))
129
+ assert v1 == pytest.approx(v2, abs=1e-7)
130
+
131
+ def test_C4(self):
132
+ """Edge betweenness centrality: C4"""
133
+ G = nx.cycle_graph(4)
134
+ b = edge_current_flow_subset(G, list(G), list(G), normalized=True)
135
+ b_answer = edge_current_flow(G, normalized=True)
136
+ for (s, t), v1 in b_answer.items():
137
+ v2 = b.get((s, t), b.get((t, s)))
138
+ assert v1 == pytest.approx(v2, abs=1e-7)
139
+
140
+ def test_P4(self):
141
+ """Edge betweenness centrality: P4"""
142
+ G = nx.path_graph(4)
143
+ b = edge_current_flow_subset(G, list(G), list(G), normalized=True)
144
+ b_answer = edge_current_flow(G, normalized=True)
145
+ for (s, t), v1 in b_answer.items():
146
+ v2 = b.get((s, t), b.get((t, s)))
147
+ assert v1 == pytest.approx(v2, abs=1e-7)
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ pytest.importorskip("numpy")
4
+ pytest.importorskip("scipy")
5
+
6
+ import networkx as nx
7
+
8
+
9
+ class TestFlowClosenessCentrality:
10
+ def test_K4(self):
11
+ """Closeness centrality: K4"""
12
+ G = nx.complete_graph(4)
13
+ b = nx.current_flow_closeness_centrality(G)
14
+ b_answer = {0: 2.0 / 3, 1: 2.0 / 3, 2: 2.0 / 3, 3: 2.0 / 3}
15
+ for n in sorted(G):
16
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
17
+
18
+ def test_P4(self):
19
+ """Closeness centrality: P4"""
20
+ G = nx.path_graph(4)
21
+ b = nx.current_flow_closeness_centrality(G)
22
+ b_answer = {0: 1.0 / 6, 1: 1.0 / 4, 2: 1.0 / 4, 3: 1.0 / 6}
23
+ for n in sorted(G):
24
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
25
+
26
+ def test_star(self):
27
+ """Closeness centrality: star"""
28
+ G = nx.Graph()
29
+ nx.add_star(G, ["a", "b", "c", "d"])
30
+ b = nx.current_flow_closeness_centrality(G)
31
+ b_answer = {"a": 1.0 / 3, "b": 0.6 / 3, "c": 0.6 / 3, "d": 0.6 / 3}
32
+ for n in sorted(G):
33
+ assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
34
+
35
+ def test_current_flow_closeness_centrality_not_connected(self):
36
+ G = nx.Graph()
37
+ G.add_nodes_from([1, 2, 3])
38
+ with pytest.raises(nx.NetworkXError):
39
+ nx.current_flow_closeness_centrality(G)
40
+
41
+
42
+ class TestWeightedFlowClosenessCentrality:
43
+ pass
parrot/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit tests for degree centrality.
3
+ """
4
+
5
+ import pytest
6
+
7
+ import networkx as nx
8
+
9
+
10
+ class TestDegreeCentrality:
11
+ def setup_method(self):
12
+ self.K = nx.krackhardt_kite_graph()
13
+ self.P3 = nx.path_graph(3)
14
+ self.K5 = nx.complete_graph(5)
15
+
16
+ F = nx.Graph() # Florentine families
17
+ F.add_edge("Acciaiuoli", "Medici")
18
+ F.add_edge("Castellani", "Peruzzi")
19
+ F.add_edge("Castellani", "Strozzi")
20
+ F.add_edge("Castellani", "Barbadori")
21
+ F.add_edge("Medici", "Barbadori")
22
+ F.add_edge("Medici", "Ridolfi")
23
+ F.add_edge("Medici", "Tornabuoni")
24
+ F.add_edge("Medici", "Albizzi")
25
+ F.add_edge("Medici", "Salviati")
26
+ F.add_edge("Salviati", "Pazzi")
27
+ F.add_edge("Peruzzi", "Strozzi")
28
+ F.add_edge("Peruzzi", "Bischeri")
29
+ F.add_edge("Strozzi", "Ridolfi")
30
+ F.add_edge("Strozzi", "Bischeri")
31
+ F.add_edge("Ridolfi", "Tornabuoni")
32
+ F.add_edge("Tornabuoni", "Guadagni")
33
+ F.add_edge("Albizzi", "Ginori")
34
+ F.add_edge("Albizzi", "Guadagni")
35
+ F.add_edge("Bischeri", "Guadagni")
36
+ F.add_edge("Guadagni", "Lamberteschi")
37
+ self.F = F
38
+
39
+ G = nx.DiGraph()
40
+ G.add_edge(0, 5)
41
+ G.add_edge(1, 5)
42
+ G.add_edge(2, 5)
43
+ G.add_edge(3, 5)
44
+ G.add_edge(4, 5)
45
+ G.add_edge(5, 6)
46
+ G.add_edge(5, 7)
47
+ G.add_edge(5, 8)
48
+ self.G = G
49
+
50
+ def test_degree_centrality_1(self):
51
+ d = nx.degree_centrality(self.K5)
52
+ exact = dict(zip(range(5), [1] * 5))
53
+ for n, dc in d.items():
54
+ assert exact[n] == pytest.approx(dc, abs=1e-7)
55
+
56
+ def test_degree_centrality_2(self):
57
+ d = nx.degree_centrality(self.P3)
58
+ exact = {0: 0.5, 1: 1, 2: 0.5}
59
+ for n, dc in d.items():
60
+ assert exact[n] == pytest.approx(dc, abs=1e-7)
61
+
62
+ def test_degree_centrality_3(self):
63
+ d = nx.degree_centrality(self.K)
64
+ exact = {
65
+ 0: 0.444,
66
+ 1: 0.444,
67
+ 2: 0.333,
68
+ 3: 0.667,
69
+ 4: 0.333,
70
+ 5: 0.556,
71
+ 6: 0.556,
72
+ 7: 0.333,
73
+ 8: 0.222,
74
+ 9: 0.111,
75
+ }
76
+ for n, dc in d.items():
77
+ assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7)
78
+
79
+ def test_degree_centrality_4(self):
80
+ d = nx.degree_centrality(self.F)
81
+ names = sorted(self.F.nodes())
82
+ dcs = [
83
+ 0.071,
84
+ 0.214,
85
+ 0.143,
86
+ 0.214,
87
+ 0.214,
88
+ 0.071,
89
+ 0.286,
90
+ 0.071,
91
+ 0.429,
92
+ 0.071,
93
+ 0.214,
94
+ 0.214,
95
+ 0.143,
96
+ 0.286,
97
+ 0.214,
98
+ ]
99
+ exact = dict(zip(names, dcs))
100
+ for n, dc in d.items():
101
+ assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7)
102
+
103
+ def test_indegree_centrality(self):
104
+ d = nx.in_degree_centrality(self.G)
105
+ exact = {
106
+ 0: 0.0,
107
+ 1: 0.0,
108
+ 2: 0.0,
109
+ 3: 0.0,
110
+ 4: 0.0,
111
+ 5: 0.625,
112
+ 6: 0.125,
113
+ 7: 0.125,
114
+ 8: 0.125,
115
+ }
116
+ for n, dc in d.items():
117
+ assert exact[n] == pytest.approx(dc, abs=1e-7)
118
+
119
+ def test_outdegree_centrality(self):
120
+ d = nx.out_degree_centrality(self.G)
121
+ exact = {
122
+ 0: 0.125,
123
+ 1: 0.125,
124
+ 2: 0.125,
125
+ 3: 0.125,
126
+ 4: 0.125,
127
+ 5: 0.375,
128
+ 6: 0.0,
129
+ 7: 0.0,
130
+ 8: 0.0,
131
+ }
132
+ for n, dc in d.items():
133
+ assert exact[n] == pytest.approx(dc, abs=1e-7)
134
+
135
+ def test_small_graph_centrality(self):
136
+ G = nx.empty_graph(create_using=nx.DiGraph)
137
+ assert {} == nx.degree_centrality(G)
138
+ assert {} == nx.out_degree_centrality(G)
139
+ assert {} == nx.in_degree_centrality(G)
140
+
141
+ G = nx.empty_graph(1, create_using=nx.DiGraph)
142
+ assert {0: 1} == nx.degree_centrality(G)
143
+ assert {0: 1} == nx.out_degree_centrality(G)
144
+ assert {0: 1} == nx.in_degree_centrality(G)