text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def randmio_und(R, itr, seed=None):
    '''
    This function randomizes an undirected network, while preserving the
    degree distribution. The function does not preserve the strength
    distribution in weighted networks.

    Parameters
    ----------
    R : NxN np.ndarray
        undirected binary/weighted connection matrix
    itr : int
        rewiring parameter. Each edge is rewired approximately itr times.
    seed : hashable, optional
        If None (default), use the np.random's global random state to generate random numbers.
        Otherwise, use a new np.random.RandomState instance seeded with the given value.

    Returns
    -------
    R : NxN np.ndarray
        randomized network
    eff : int
        number of actual rewirings carried out
    '''
    if not np.all(R == R.T):
        raise BCTParamError("Input must be undirected")

    rng = get_rng(seed)
    R = R.copy()
    n = len(R)
    # edge list taken from the lower triangle (each undirected edge once)
    i, j = np.where(np.tril(R))
    k = len(i)  # number of edges
    itr *= k    # total number of rewiring iterations

    # maximum number of rewiring attempts per iteration
    max_attempts = np.round(n * k / (n * (n - 1)))
    # actual number of successful rewirings
    eff = 0

    for it in range(int(itr)):
        att = 0
        while att <= max_attempts:  # while not rewired
            while True:
                # draw two distinct edges (a-b) and (c-d) at random
                e1, e2 = rng.randint(k, size=(2,))
                while e1 == e2:
                    e2 = rng.randint(k)
                a = i[e1]
                b = j[e1]
                c = i[e2]
                d = j[e2]

                if a != c and a != d and b != c and b != d:
                    break  # all 4 vertices must be different

            if rng.random_sample() > .5:
                # np.where may return read-only views; make them writable
                i.setflags(write=True)
                j.setflags(write=True)
                i[e2] = d
                j[e2] = c  # flip edge c-d with 50% probability
                c = i[e2]
                d = j[e2]  # to explore all potential rewirings

            # rewiring condition: the new edges a-d and c-b must not exist yet
            if not (R[a, d] or R[c, b]):
                # swap a-b, c-d for a-d, c-b (both triangles kept symmetric)
                R[a, d] = R[a, b]
                R[a, b] = 0
                R[d, a] = R[b, a]
                R[b, a] = 0
                R[c, b] = R[c, d]
                R[c, d] = 0
                R[b, c] = R[d, c]
                R[d, c] = 0

                j.setflags(write=True)
                j[e1] = d
                j[e2] = b  # reassign edge indices
                eff += 1
                break
            att += 1

    return R, eff
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def randmio_und_signed(R, itr, seed=None):
    '''
    This function randomizes an undirected weighted network with positive
    and negative weights, while simultaneously preserving the degree
    distribution of positive and negative weights. The function does not
    preserve the strength distribution in weighted networks.

    Parameters
    ----------
    R : NxN np.ndarray
        undirected binary/weighted connection matrix
    itr : int
        rewiring parameter. Each edge is rewired approximately itr times.
    seed : hashable, optional
        If None (default), use the np.random's global random state to generate random numbers.
        Otherwise, use a new np.random.RandomState instance seeded with the given value.

    Returns
    -------
    R : NxN np.ndarray
        randomized network
    eff : int
        number of actual rewirings carried out
    '''
    rng = get_rng(seed)
    R = R.copy()
    n = len(R)

    itr *= int(n * (n - 1) / 2)

    # maximum number of rewiring attempts per iteration
    max_attempts = int(np.round(n / 2))
    # actual number of successful rewirings
    eff = 0

    for it in range(int(itr)):
        att = 0
        while att <= max_attempts:
            a, b, c, d = pick_four_unique_nodes_quickly(n, rng)

            r0_ab = R[a, b]
            r0_cd = R[c, d]
            r0_ad = R[a, d]
            r0_cb = R[c, b]

            # rewiring condition: swapping must preserve the sign of every
            # node's incident weights (a-b/c-d share a sign, a-d/c-b share
            # the opposite sign)
            if (np.sign(r0_ab) == np.sign(r0_cd) and
                    np.sign(r0_ad) == np.sign(r0_cb) and
                    np.sign(r0_ab) != np.sign(r0_ad)):

                # swap weights of (a-b, c-d) with (a-d, c-b), symmetrically
                R[a, d] = R[d, a] = r0_ab
                R[a, b] = R[b, a] = r0_ad

                R[c, b] = R[b, c] = r0_cd
                R[c, d] = R[d, c] = r0_cb

                eff += 1
                break
            att += 1

    return R, eff
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def evaluate_generative_model(A, Atgt, D, eta, gamma=None,
        model_type='matching', model_var='powerlaw', epsilon=1e-6, seed=None):
    '''
    Generates synthetic networks with parameters provided and evaluates their
    energy function. The energy function is defined as in Betzel et al. 2016.
    Basically it takes the Kolmogorov-Smirnov statistics of 4 network
    measures; comparing the degree distributions, clustering coefficients,
    betweenness centrality, and Euclidean distances between connected regions.

    The energy is globally low if the synthetic network matches the target.
    Energy is defined as the maximum difference across the four statistics.

    Parameters
    ----------
    A : NxN np.ndarray
        seed adjacency matrix passed to the generative model
    Atgt : NxN np.ndarray
        target adjacency matrix whose statistics are to be matched
    D : NxN np.ndarray
        matrix of distances between node pairs
    eta : np.ndarray
        sequence of eta parameter values, one synthetic network per value
    gamma : np.ndarray | None
        sequence of gamma parameter values (must be compatible with eta);
        passed through to generative_model
    model_type, model_var, epsilon, seed
        passed through to generative_model unchanged

    Returns
    -------
    energy : nB x 1 np.ndarray
        maximum KS statistic across the four measures, one entry per
        parameter combination
    '''
    m = np.size(np.where(Atgt.flat)) // 2  # number of edges in the target
    n = len(Atgt)

    # target distributions of the four benchmark statistics
    xk = np.sum(Atgt, axis=1)       # degrees
    xc = clustering_coef_bu(Atgt)   # clustering coefficients
    xb = betweenness_bin(Atgt)      # betweenness centrality
    xe = D[np.triu(Atgt, 1) > 0]    # distances between connected regions

    B = generative_model(A, D, m, eta, gamma, model_type=model_type,
                         model_var=model_var, epsilon=epsilon, copy=True,
                         seed=seed)

    # if eta != gamma then an error is thrown within generative model
    nB = len(eta)

    # ensure B always has a third (parameter) axis
    if nB == 1:
        B = np.reshape(B, np.append(np.shape(B), 1))

    K = np.zeros((nB, 4))

    def kstats(x, y):
        # Kolmogorov-Smirnov statistic between the empirical CDFs of x and y
        bin_edges = np.concatenate([[-np.inf],
                                    np.sort(np.concatenate((x, y))),
                                    [np.inf]])

        bin_x, _ = np.histogram(x, bin_edges)
        bin_y, _ = np.histogram(y, bin_edges)

        sum_x = np.cumsum(bin_x) / np.sum(bin_x)
        sum_y = np.cumsum(bin_y) / np.sum(bin_y)

        cdfsamp_x = sum_x[:-1]
        cdfsamp_y = sum_y[:-1]

        delta_cdf = np.abs(cdfsamp_x - cdfsamp_y)
        return np.max(delta_cdf)

    for ib in range(nB):
        Bc = B[:, :, ib]
        yk = np.sum(Bc, axis=1)
        yc = clustering_coef_bu(Bc)
        yb = betweenness_bin(Bc)
        ye = D[np.triu(Bc, 1) > 0]

        K[ib, 0] = kstats(xk, yk)
        K[ib, 1] = kstats(xc, yc)
        K[ib, 2] = kstats(xb, yb)
        K[ib, 3] = kstats(xe, ye)

    return np.max(K, axis=1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diversity_coef_sign(W, ci):
    '''
    The Shannon-entropy based diversity coefficient measures the diversity
    of intermodular connections of individual nodes and ranges from 0 to 1.

    Parameters
    ----------
    W : NxN np.ndarray
        undirected connection matrix with positive and negative weights
    ci : Nx1 np.ndarray
        community affiliation vector

    Returns
    -------
    Hpos : Nx1 np.ndarray
        diversity coefficient based on positive connections
    Hneg : Nx1 np.ndarray
        diversity coefficient based on negative connections
    '''
    n = len(W)
    # relabel communities to contiguous integers 1..m
    _, ci = np.unique(ci, return_inverse=True)
    ci += 1
    m = np.max(ci)

    def shannon(w):
        # per-node strength and its split across modules
        strength = np.sum(w, axis=1)
        per_module = np.column_stack(
            [np.sum(w[:, ci == mod], axis=1) for mod in range(1, m + 1)])
        p = per_module / np.tile(strength, (m, 1)).T
        p[np.isnan(p)] = 0
        # zero probabilities contribute 0 to the entropy; setting them to 1
        # makes p*log(p) vanish without producing log(0)
        p[np.logical_not(p)] = 1
        return -np.sum(p * np.log(p), axis=1) / np.log(m)

    # explicitly ignore compiler warning for division by zero
    with np.errstate(invalid='ignore'):
        Hpos = shannon(W * (W > 0))
        Hneg = shannon(-W * (W < 0))

    return Hpos, Hneg
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edge_betweenness_bin(G):
    '''
    Edge betweenness centrality is the fraction of all shortest paths in
    the network that contain a given edge. Edges with high values of
    betweenness centrality participate in a large number of shortest paths.

    Parameters
    ----------
    G : NxN np.ndarray
        binary directed/undirected connection matrix

    Returns
    -------
    EBC : NxN np.ndarray
        edge betweenness centrality matrix
    BC : Nx1 np.ndarray
        node betweenness centrality vector

    Notes
    -----
    Betweenness centrality may be normalised to the range [0,1] as
    BC/[(N-1)(N-2)], where N is the number of nodes in the network.
    '''
    n = len(G)
    BC = np.zeros((n,))  # vertex betweenness
    EBC = np.zeros((n, n))  # edge betweenness

    for u in range(n):
        # breadth-first search from source u
        D = np.zeros((n,))
        D[u] = 1  # distance from u (here: "reached" flag)
        NP = np.zeros((n,))
        NP[u] = 1  # number of paths from u
        P = np.zeros((n, n))  # predecessors
        Q = np.zeros((n,), dtype=int)  # indices
        q = n - 1  # order of non-increasing distance
        Gu = G.copy()
        V = np.array([u])
        while V.size:
            Gu[:, V] = 0  # remove remaining in-edges
            for v in V:
                Q[q] = v
                q -= 1
                W, = np.where(Gu[v, :])  # neighbors of V
                for w in W:
                    if D[w]:
                        NP[w] += NP[v]  # NP(u->w) sum of old and new
                        P[w, v] = 1  # v is a predecessor
                    else:
                        D[w] = 1
                        NP[w] = NP[v]  # NP(u->v) = NP of new path
                        P[w, v] = 1  # v is a predecessor
            # next BFS frontier: nodes reachable from the current one
            V, = np.where(np.any(Gu[V, :], axis=0))
        if np.any(np.logical_not(D)):  # if some vertices unreachable
            Q[:q], = np.where(np.logical_not(D))  # ...these are first in line

        # back-propagation of dependencies (Brandes-style accumulation)
        DP = np.zeros((n,))  # dependency
        for w in Q[:n - 1]:
            BC[w] += DP[w]
            for v in np.where(P[w, :])[0]:
                # fraction of shortest u->w paths passing through edge v->w
                DPvw = (1 + DP[w]) * NP[v] / NP[w]
                DP[v] += DPvw
                EBC[v, w] += DPvw

    return EBC, BC
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def erange(CIJ):
    '''
    Shortcuts are central edges which significantly reduce the
    characteristic path length in the network.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed connection matrix

    Returns
    -------
    Erange : NxN np.ndarray
        range for each edge, i.e. the length of the shortest path from i to j
        for edge c(i,j) after the edge has been removed from the graph
    eta : float
        average range for the entire graph
    Eshort : NxN np.ndarray
        entries are ones for shortcut edges
    fs : float
        fraction of shortcuts in the graph

    Notes
    -----
    Follows the treatment of 'shortcuts' by Duncan Watts
    '''
    N = len(CIJ)
    K = np.size(np.where(CIJ)[1])  # number of edges
    Erange = np.zeros((N, N))
    i, j = np.where(CIJ)

    # remove each edge in turn and measure the remaining i->j distance
    for c in range(len(i)):
        CIJcut = CIJ.copy()
        CIJcut[i[c], j[c]] = 0
        R, D = reachdist(CIJcut)
        Erange[i[c], j[c]] = D[i[c], j[c]]

    # average range (ignore Inf)
    eta = (np.sum(Erange[np.logical_and(Erange > 0, Erange < np.inf)]) /
           len(Erange[np.logical_and(Erange > 0, Erange < np.inf)]))

    # Original entries of D are ones, thus entries of Erange
    # must be two or greater.
    # If Erange(i,j) > 2, then the edge is a shortcut.
    # 'fs' is the fraction of shortcuts over the entire graph.
    Eshort = Erange > 2
    # BUGFIX: len(np.where(Eshort)) was always the number of dimensions (2),
    # not the number of shortcut edges; count the True entries instead.
    fs = np.sum(Eshort) / K

    return Erange, eta, Eshort, fs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def module_degree_zscore(W, ci, flag=0):
    '''
    The within-module degree z-score is a within-module version of degree
    centrality.

    Parameters
    ----------
    W : NxN np.ndarray
        binary/weighted directed/undirected connection matrix
    ci : Nx1 np.array_like
        community affiliation vector
    flag : int
        Graph type. 0: undirected graph (default)
                    1: directed graph in degree
                    2: directed graph out degree
                    3: directed graph in and out degree

    Returns
    -------
    Z : Nx1 np.ndarray
        within-module degree Z-score
    '''
    # relabel communities to contiguous integers 1..max
    _, ci = np.unique(ci, return_inverse=True)
    ci += 1

    if flag == 2:
        W = W.copy().T          # out-degree: work on the transpose
    elif flag == 3:
        W = W.copy()
        W = W + W.T             # in+out degree

    n = len(W)
    Z = np.zeros((n,))
    for module in range(1, int(np.max(ci)) + 1):
        members = (ci == module)
        # degree of each member restricted to its own module
        k_within = np.sum(W[np.ix_(members, members)], axis=1)
        Z[np.where(members)] = (k_within - np.mean(k_within)) / np.std(k_within)

    # modules with zero degree variance produce nan; report 0 instead
    Z[np.where(np.isnan(Z))] = 0
    return Z
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pagerank_centrality(A, d, falff=None):
    '''
    The PageRank centrality is a variant of eigenvector centrality. This
    function computes the PageRank centrality of each vertex in a graph.

    Formally, PageRank is defined as the stationary distribution achieved
    by instantiating a Markov chain on a graph. The PageRank centrality of
    a given vertex, then, is proportional to the number of steps (or amount
    of time) spent at that vertex as a result of such a process.

    The PageRank index gets modified by the addition of a damping factor,
    d. In terms of a Markov chain, the damping factor specifies the
    fraction of the time that a random walker will transition to one of its
    current state's neighbors. The remaining fraction of the time the
    walker is restarted at a random vertex. A common value for the damping
    factor is d = 0.85.

    Parameters
    ----------
    A : NxN np.ndarray
        adjacency matrix
    d : float
        damping factor (see description)
    falff : Nx1 np.ndarray | None
        Initial page rank probability, non-negative values. Default value is
        None. If not specified, a naive bayesian prior is used.

    Returns
    -------
    r : Nx1 np.ndarray
        vectors of page rankings

    Notes
    -----
    Note: The algorithm will work well for smaller matrices (number of
    nodes around 1000 or less)
    '''
    from scipy import linalg

    N = len(A)
    # restart distribution: uniform unless an explicit prior is given
    if falff is None:
        prior = np.ones((N,)) / N
    else:
        prior = falff / np.sum(falff)

    # column sums act as out-strengths; avoid division by zero for sinks
    strength = np.sum(A, axis=0)
    strength[strength == 0] = 1
    Dinv = np.diag(1 / strength)

    # solve (I - d A D^-1) r = (1 - d) prior for the stationary vector
    lhs = np.eye(N) - d * np.dot(A, Dinv)
    rhs = (1 - d) * prior
    r = linalg.solve(lhs, rhs)
    r /= np.sum(r)
    return r
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subgraph_centrality(CIJ):
    '''
    The subgraph centrality of a node is a weighted sum of closed walks of
    different lengths in the network starting and ending at the node. This
    function returns a vector of subgraph centralities for each node of the
    network.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary adjacency matrix

    Returns
    -------
    Cs : Nx1 np.ndarray
        subgraph centrality
    '''
    from scipy import linalg

    lambdas, vectors = linalg.eig(CIJ)  # eigendecomposition of the adjacency
    # sum over eigenmodes of (component squared) * exp(eigenvalue);
    # only the real part is meaningful for a real adjacency matrix
    Cs = np.real(np.dot(vectors * vectors, np.exp(lambdas)))
    return Cs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def invert(W, copy=True):
    '''
    Inverts elementwise the weights in an input connection matrix.
    In other words, change the from the matrix of internode strengths to the
    matrix of internode distances.

    If copy is not set, this function will *modify W in place.*

    Parameters
    ----------
    W : np.ndarray
        weighted connectivity matrix
    copy : bool
        if True, returns a copy of the matrix. Otherwise, modifies the matrix
        in place. Default value=True.

    Returns
    -------
    W : np.ndarray
        inverted connectivity matrix
    '''
    if copy:
        W = W.copy()
    # invert only the nonzero entries; zeros (absent edges) stay zero
    nonzero = W != 0
    W[nonzero] = 1. / W[nonzero]
    return W
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ci2ls(ci):
    '''
    Convert from a community index vector to a 2D python list of modules
    The list is a pure python list, not requiring numpy.

    Parameters
    ----------
    ci : Nx1 np.ndarray
        the community index vector

    Returns
    -------
    ls : listof(list)
        pure python list of modules; module membership is zero-indexed
        node numbers
    '''
    if not np.size(ci):
        return ci  # list is empty
    # relabel communities to contiguous integers 1..max
    _, ci = np.unique(ci, return_inverse=True)
    ci += 1
    nr_modules = int(max(ci))
    ls = [[] for _ in range(nr_modules)]
    for node, label in enumerate(ci):
        ls[label - 1].append(node)
    return ls
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ls2ci(ls, zeroindexed=False):
    '''
    Convert from a 2D python list of modules to a community index vector.
    The list is a pure python list, not requiring numpy.

    Parameters
    ----------
    ls : listof(list)
        pure python list with lowest value zero-indexed
        (regardless of value of zeroindexed parameter)
    zeroindexed : bool
        If True, ci uses zero-indexing (lowest value is 0). Defaults to False.

    Returns
    -------
    ci : Nx1 np.ndarray
        community index vector
    '''
    if ls is None or np.size(ls) == 0:
        return ()  # list is empty

    nr_nodes = sum(len(module) for module in ls)
    ci = np.zeros((nr_nodes,), dtype=int)
    offset = 0 if zeroindexed else 1
    for label, module in enumerate(ls):
        for node in module:
            ci[node] = label + offset
    return ci
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _safe_squeeze(arr, *args, **kwargs):
""" numpy.squeeze will reduce a 1-item array down to a zero-dimensional "array", which is not necessarily desirable. This function does the squeeze operation, but ensures that there is at least 1 dimension in the output. """ |
out = np.squeeze(arr, *args, **kwargs)
if np.ndim(out) == 0:
out = out.reshape((1,))
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modularity_und_sign(W, ci, qtype='sta'):
    '''
    This function simply calculates the signed modularity for a given
    partition. It does not do automatic partition generation right now.

    Parameters
    ----------
    W : NxN np.ndarray
        undirected weighted/binary connection matrix with positive and
        negative weights
    ci : Nx1 np.ndarray
        community partition
    qtype : str
        modularity type. Can be 'sta' (default), 'pos', 'smp', 'gja', 'neg'.
        See Rubinov and Sporns (2011) for a description.

    Returns
    -------
    ci : Nx1 np.ndarray
        the partition which was input (for consistency of the API)
    Q : float
        maximized modularity metric

    Notes
    -----
    uses a deterministic algorithm
    '''
    n = len(W)
    # relabel communities to contiguous integers 1..max
    _, ci = np.unique(ci, return_inverse=True)
    ci += 1

    # split the network into its positive and negative parts
    Wpos = W * (W > 0)
    Wneg = -W * (W < 0)
    spos = np.sum(Wpos)  # total positive weight
    sneg = np.sum(Wneg)  # total negative weight

    # node-to-module strengths for each sign
    Knm_pos = np.zeros((n, n))
    Knm_neg = np.zeros((n, n))
    for mod in range(int(np.max(ci))):
        Knm_pos[:, mod] = np.sum(Wpos[:, ci == mod + 1], axis=1)
        Knm_neg[:, mod] = np.sum(Wneg[:, ci == mod + 1], axis=1)
    Kpos = np.sum(Knm_pos, axis=1)  # positive node strength
    Kneg = np.sum(Knm_neg, axis=1)  # negative node strength

    # scaling constants for the chosen modularity variant
    if qtype == 'smp':
        dpos, dneg = 1 / spos, 1 / sneg          # dQ=dQ0/s0-dQ1/s1
    elif qtype == 'gja':
        dpos = dneg = 1 / (spos + sneg)          # dQ=(dQ0-dQ1)/(s0+s1)
    elif qtype == 'sta':
        dpos, dneg = 1 / spos, 1 / (spos + sneg)  # dQ=dQ0/s0-dQ1/(s0+s1)
    elif qtype == 'pos':
        dpos, dneg = 1 / spos, 0                  # dQ=dQ0/s0
    elif qtype == 'neg':
        dpos, dneg = 0, 1 / sneg                  # dQ=-dQ1/s1
    else:
        raise KeyError('modularity type unknown')

    if not spos:  # adjust for absent positive weights
        spos, dpos = 1, 0
    if not sneg:  # adjust for absent negative weights
        sneg, dneg = 1, 0

    # within-module mask and signed modularity contributions
    membership = np.tile(ci, (n, 1))
    within = membership == membership.T
    qpos = (Wpos - np.outer(Kpos, Kpos) / spos) * within
    qneg = (Wneg - np.outer(Kneg, Kneg) / sneg) * within
    q = dpos * np.sum(qpos) - dneg * np.sum(qneg)
    return ci, q
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def partition_distance(cx, cy):
    '''
    This function quantifies the distance between pairs of community
    partitions with information theoretic measures.

    Parameters
    ----------
    cx : Nx1 np.ndarray
        community affiliation vector X
    cy : Nx1 np.ndarray
        community affiliation vector Y

    Returns
    -------
    VIn : Nx1 np.ndarray
        normalized variation of information
    MIn : Nx1 np.ndarray
        normalized mutual information

    Notes
    -----
    (Definitions:
        VIn = [H(X) + H(Y) - 2MI(X,Y)]/log(n)
        MIn = 2MI(X,Y)/[H(X)+H(Y)]
    where H is entropy, MI is mutual information and n is number of nodes)
    '''
    n = np.size(cx)
    # relabel partitions, and build the joint partition via complex labels
    _, cx = np.unique(cx, return_inverse=True)
    _, cy = np.unique(cy, return_inverse=True)
    _, cxy = np.unique(cx + cy * 1j, return_inverse=True)

    cx += 1
    cy += 1
    cxy += 1

    def entropy_of(labels):
        # marginal (or joint) entropy from the label histogram
        p = np.histogram(labels, bins=np.max(labels))[0] / n
        return -np.sum(p * np.log(p))

    Hx = entropy_of(cx)
    Hy = entropy_of(cy)
    Hxy = entropy_of(cxy)

    VIn = (2 * Hxy - Hx - Hy) / np.log(n)
    MIn = 2 * (Hx + Hy - Hxy) / (Hx + Hy)
    return VIn, MIn
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def breadth(CIJ, source):
    '''
    Implementation of breadth-first search.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed/undirected connection matrix
    source : int
        source vertex

    Returns
    -------
    distance : Nx1 np.ndarray
        vector of distances between source and ith vertex (the source entry
        itself records the length of a walk returning to the source)
    branch : Nx1 np.ndarray
        vertex that precedes i in the breadth-first search (-1 for source)

    Notes
    -----
    Breadth-first search tree does not contain all paths (or all
    shortest paths), but allows the determination of at least one path with
    minimum distance. The entire graph is explored, starting from source
    vertex 'source'.
    '''
    n = len(CIJ)

    # colors: white (unvisited), gray (frontier), black (done)
    WHITE, GRAY, BLACK = 0, 1, 2

    color = np.zeros((n,))
    distance = np.inf * np.ones((n,))
    branch = np.zeros((n,))

    # start on vertex source
    color[source] = GRAY
    distance[source] = 0
    branch[source] = -1
    queue = [source]

    # keep going until the entire graph is explored
    while queue:
        u = queue.pop(0)
        neighbors, = np.where(CIJ[u, :])
        for v in neighbors:
            # this allows the source distance itself to be recorded
            if distance[v] == 0:
                distance[v] = distance[u] + 1
            if color[v] == WHITE:
                color[v] = GRAY
                distance[v] = distance[u] + 1
                branch[v] = u
                queue.append(v)
        color[u] = BLACK

    return distance, branch
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def charpath(D, include_diagonal=False, include_infinite=True):
    '''
    The characteristic path length is the average shortest path length in
    the network. The global efficiency is the average inverse shortest path
    length in the network.

    Parameters
    ----------
    D : NxN np.ndarray
        distance matrix
    include_diagonal : bool
        If True, include the weights on the diagonal. Default value is False.
    include_infinite : bool
        If True, include infinite distances in calculation

    Returns
    -------
    lambda : float
        characteristic path length
    efficiency : float
        global efficiency
    ecc : Nx1 np.ndarray
        eccentricity at each vertex
    radius : float
        radius of graph
    diameter : float
        diameter of graph

    Notes
    -----
    The input distance matrix may be obtained with any of the distance
    functions, e.g. distance_bin, distance_wei.

    Characteristic path length is calculated as the global mean of
    the distance matrix D, excluding any 'Infs' and optionally excluding
    distances on the main diagonal.
    '''
    D = D.copy()

    # mask out excluded entries with nan so they drop out of the means
    if not include_diagonal:
        np.fill_diagonal(D, np.nan)
    if not include_infinite:
        D[np.isinf(D)] = np.nan

    finite_entries = D[np.logical_not(np.isnan(D))].ravel()

    # characteristic path length: mean of the retained entries of D
    lambda_ = np.mean(finite_entries)
    # global efficiency: mean of the inverse retained entries of D
    efficiency = np.mean(1 / finite_entries)
    # eccentricity for each vertex, ignoring masked entries
    ecc = np.array(np.ma.masked_where(np.isnan(D), D).max(axis=1))
    radius = np.min(ecc)
    diameter = np.max(ecc)

    return lambda_, efficiency, ecc, radius, diameter
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cycprob(Pq):
    '''
    Cycles are paths which begin and end at the same node. Cycle
    probability for path length d, is the fraction of all paths of length
    d-1 that may be extended to form cycles of length d.

    Parameters
    ----------
    Pq : NxNxQ np.ndarray
        Path matrix with Pq[i,j,q] = number of paths from i to j of length q.
        Produced by findpaths()

    Returns
    -------
    fcyc : Qx1 np.ndarray
        fraction of all paths that are cycles for each path length q
    pcyc : Qx1 np.ndarray
        probability that a non-cyclic path of length q-1 can be extended to
        form a cycle of length q for each path length q
    '''
    Q = np.size(Pq, axis=2)

    # note: fcyc[1] must be zero, as there cannot be cycles of length 1
    fcyc = np.zeros(Q)
    for q in range(Q):
        total_paths = np.sum(Pq[:, :, q])
        if total_paths > 0:
            fcyc[q] = np.sum(np.diag(Pq[:, :, q])) / total_paths

    # note: pcyc[0] is not defined (set to zero)
    # note: pcyc[2] is equal to the fraction of reciprocal connections
    # note: there are no non-cyclic paths of length N and no cycles of len N+1
    pcyc = np.zeros(Q)
    for q in range(1, Q):
        # non-cyclic paths of length q-1
        # BUGFIX: the denominator must be the parenthesized difference
        # (total - cyclic), and the numerator counts cycles of length q,
        # matching the BCT reference implementation.
        noncyclic_prev = (np.sum(Pq[:, :, q - 1]) -
                          np.sum(np.diag(Pq[:, :, q - 1])))
        if noncyclic_prev > 0:
            pcyc[q] = np.sum(np.diag(Pq[:, :, q])) / noncyclic_prev

    return fcyc, pcyc
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def distance_wei_floyd(adjacency, transform=None):
    """
    Computes the topological length of the shortest possible path
    connecting every pair of nodes in the network.

    Parameters
    ----------
    adjacency : (N x N) array_like
        Weighted/unweighted, directed/undirected connection weight/length
        array
    transform : str, optional
        If `adjacency` is a connection weight array, specify a transform to
        map input connection weights to connection lengths. Options include
        ['log', 'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is
        `1/adjacency`. Default: None

    Returns
    -------
    SPL : (N x N) ndarray
        Weighted/unweighted shortest path-length array. If `adjacency` is a
        directed graph, then `SPL` is not symmetric
    hops : (N x N) ndarray
        Number of edges in the shortest path array. If `adjacency` is
        unweighted, `SPL` and `hops` are identical.
    Pmat : (N x N) ndarray
        Element `[i,j]` of this array indicates the next node in the
        shortest path between `i` and `j`. This array is used as an input
        argument for function `retrieve_shortest_path()`.

    Notes
    -----
    There may be more than one shortest path between any pair of nodes in
    the network. Non-unique shortest paths are termed shortest path
    degeneracies; when the shortest path is degenerate, the elements of
    `Pmat` correspond to the first shortest path discovered by the
    algorithm.

    Originally written in Matlab by Andrea Avena-Koenigsberger (IU, 2012)

    References
    ----------
    .. [1] Floyd, R. W. (1962). Algorithm 97: shortest path.
       Communications of the ACM, 5(6), 345.
    .. [2] Roy, B. (1959). Transitivite et connexite. Comptes Rendus
       Hebdomadaires Des Seances De L Academie Des Sciences, 249(2), 216-218.
    .. [3] Warshall, S. (1962). A theorem on boolean matrices.
       Journal of the ACM (JACM), 9(1), 11-12.
    .. [4] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm
    """
    # map connection weights to lengths, if requested
    if transform is not None:
        if transform == 'log':
            if np.logical_or(adjacency > 1, adjacency < 0).any():
                raise ValueError("Connection strengths must be in the " +
                                 "interval [0,1) to use the transform " +
                                 "-log(w_ij).")
            SPL = -np.log(adjacency)
        elif transform == 'inv':
            SPL = 1. / adjacency
        else:
            raise ValueError("Unexpected transform type. Only 'log' and " +
                             "'inv' are accepted")
    else:
        SPL = adjacency.copy().astype('float')
        SPL[SPL == 0] = np.inf  # absent edges have infinite length

    n = adjacency.shape[1]

    flag_find_paths = True
    hops = np.array(adjacency != 0).astype('float')  # 1 hop per direct edge
    # Pmat[i, j] starts as j (the direct successor)
    Pmat = np.repeat(np.atleast_2d(np.arange(0, n)), n, 0)

    # Floyd-Warshall relaxation over every intermediate node k
    for k in range(n):
        # candidate path lengths routed through k: SPL[i, k] + SPL[k, j]
        i2k_k2j = np.repeat(SPL[:, [k]], n, 1) + np.repeat(SPL[[k], :], n, 0)

        if flag_find_paths:
            # pairs improved by routing through k: update hop counts and
            # predecessor bookkeeping before SPL itself is overwritten
            path = SPL > i2k_k2j
            i, j = np.where(path)
            hops[path] = hops[i, k] + hops[k, j]
            Pmat[path] = Pmat[i, k]

        SPL = np.min(np.stack([SPL, i2k_k2j], 2), 2)

    I = np.eye(n) > 0
    SPL[I] = 0  # self-distance is zero by convention

    if flag_find_paths:
        hops[I], Pmat[I] = 0, 0

    return SPL, hops, Pmat
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def findwalks(CIJ):
    '''
    Walks are sequences of linked nodes, that may visit a single node more
    than once. This function finds the number of walks of a given length,
    between any two nodes.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed/undirected connection matrix

    Returns
    -------
    Wq : NxNxQ np.ndarray
        Wq[i,j,q] is the number of walks from i to j of length q
    twalk : int
        total number of walks found
    wlq : Qx1 np.ndarray
        walk length distribution as a function of q

    Notes
    -----
    Wq grows very quickly for larger N,K,q. Weights are discarded.
    '''
    CIJ = binarize(CIJ, copy=True)
    n = len(CIJ)
    Wq = np.zeros((n, n, n))
    CIJpwr = CIJ.copy()
    Wq[:, :, 1] = CIJ
    # BUGFIX: iterate q = 2..n-1 so Wq[:, :, q] holds CIJ**q (walks of
    # length q); the previous range(n) overwrote Wq[:, :, 1] with CIJ**3
    # and stored CIJ**2 at index 0.
    for q in range(2, n):
        CIJpwr = np.dot(CIJpwr, CIJ)
        Wq[:, :, q] = CIJpwr

    twalk = np.sum(Wq)  # total number of walks
    wlq = np.sum(np.sum(Wq, axis=0), axis=0)  # walk length distribution
    return Wq, twalk, wlq
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mean_first_passage_time(adjacency):
    """
    Calculates mean first passage time of `adjacency`

    The first passage time from i to j is the expected number of steps it
    takes a random walker starting at node i to arrive for the first time
    at node j. The mean first passage time is not a symmetric measure:
    `mfpt(i,j)` may be different from `mfpt(j,i)`.

    Parameters
    ----------
    adjacency : (N x N) array_like
        Weighted/unweighted, direct/undirected connection weight/length
        array

    Returns
    -------
    MFPT : (N x N) ndarray
        Pairwise mean first passage time array

    References
    ----------
    .. [1] Goni, J., Avena-Koenigsberger, A., de Mendizabal, N. V., van den
       Heuvel, M. P., Betzel, R. F., & Sporns, O. (2013). Exploring the
       morphospace of communication efficiency in complex networks. PLoS
       One, 8(3), e58070.
    """
    # row-normalize into a transition probability matrix
    P = np.linalg.solve(np.diag(np.sum(adjacency, axis=1)), adjacency)

    n = len(P)
    evals, evecs = np.linalg.eig(P.T)

    # locate the eigenvalue closest to 1 (the stationary distribution)
    gap = np.abs(evals - 1)
    index = np.where(gap == gap.min())[0]

    if gap[index] > 10e-3:
        raise ValueError("Cannot find eigenvalue of 1. Minimum eigenvalue " +
                         "value is {0}. Tolerance was ".format(gap[index]+1) +
                         "set at 10e-3.")

    # stationary distribution, replicated along the rows
    w = evecs[:, index].T
    w = w / np.sum(w)
    W = np.real(np.repeat(w, n, 0))

    # fundamental matrix of the ergodic chain
    identity = np.eye(n)
    Z = np.linalg.inv(identity - P + W)

    # Kemeny-Snell formula: mfpt[i, j] = (Z[j, j] - Z[i, j]) / w[j]
    mfpt = (np.repeat(np.atleast_2d(np.diag(Z)), n, 0) - Z) / W

    return mfpt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def teachers_round(x):
    '''
    Do rounding such that .5 always rounds to 1, and not bankers rounding.
    This is for compatibility with matlab functions, and ease of testing.
    '''
    frac = x % 1  # Python modulo keeps frac in [0, 1) for negative x too
    rounds_up = (x > 0 and frac >= 0.5) or (x < 0 and frac > 0.5)
    return int(np.ceil(x)) if rounds_up else int(np.floor(x))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dummyvar(cis, return_sparse=False):
    '''
    This is an efficient implementation of matlab's "dummyvar" command
    using sparse matrices.

    input: partitions, NxM array-like containing M partitions of N nodes
        into <=N distinct communities

    output: dummyvar, an NxR matrix containing R column variables (indicator
        variables) with N entries, where R is the total number of communities
        summed across each of the M partitions.

        i.e.
        r = sum((len(unique(partitions[i])) for i in range(m)))

    Note: return_sparse is accepted for API compatibility but the dense
    array is always returned.
    '''
    import scipy.sparse as sp

    # num_rows is not affected by partition indexes
    n = np.size(cis, axis=0)
    m = np.size(cis, axis=1)
    # BUGFIX: total number of communities across partitions. The previous
    # np.sum(...) over a generator relied on a deprecated numpy fallback,
    # and np.max(len(...)) was a no-op wrapper around a scalar.
    r = sum(len(np.unique(cis[:, i])) for i in range(m))
    nnz = np.prod(cis.shape)

    # sort each column; s_cis[:, i] is column i of cis in sorted order
    ix = np.argsort(cis, axis=0)
    s_cis = cis[ix][:, range(m), range(m)]

    # True wherever a new community label starts within a sorted column
    mask = np.hstack((((True,),) * m, (s_cis[:-1, :] != s_cis[1:, :]).T))
    indptr, = np.where(mask.flat)
    indptr = np.append(indptr, nnz)

    # one CSC column per community; row indices come from the argsort
    dv = sp.csc_matrix((np.repeat((1,), nnz), ix.T.flat, indptr), shape=(n, r))
    return dv.toarray()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def assortativity_bin(CIJ, flag=0):
'''
The assortativity coefficient is a correlation coefficient between the
degrees of all nodes on two opposite ends of a link. A positive
assortativity coefficient indicates that nodes tend to link to other
nodes with the same or similar degree.
Parameters
----------
CIJ : NxN np.ndarray
binary directed/undirected connection matrix
flag : int
0 : undirected graph; degree/degree correlation
1 : directed graph; out-degree/in-degree correlation
2 : directed graph; in-degree/out-degree correlation
3 : directed graph; out-degree/out-degree correlation
4 : directed graph; in-degree/in-degreen correlation
Returns
-------
r : float
assortativity coefficient
Notes
-----
The function accepts weighted networks, but all connection
weights are ignored. The main diagonal should be empty. For flag 1
the function computes the directed assortativity described in Rubinov
and Sporns (2010) NeuroImage.
'''
# select the endpoint degree vectors for every edge according to flag
if flag == 0: # undirected version
    deg = degrees_und(CIJ)
    # upper triangle only, so each undirected edge is counted once
    i, j = np.where(np.triu(CIJ, 1) > 0)
    K = len(i)
    degi = deg[i]
    degj = deg[j]
else: # directed version
    id, od, deg = degrees_dir(CIJ)
    i, j = np.where(CIJ > 0)
    K = len(i)
    if flag == 1:
        degi = od[i]
        degj = id[j]
    elif flag == 2:
        degi = id[i]
        degj = od[j]
    elif flag == 3:
        degi = od[i]
        degj = od[j]
    elif flag == 4:
        degi = id[i]
        degj = id[j]
    else:
        raise ValueError('Flag must be 0-4')
# compute assortativity
# correlation of endpoint degrees in moment form:
# term1 = mean(degi*degj), term2 = mean(.5*(degi+degj))^2,
# term3 = mean(.5*(degi^2+degj^2))
term1 = np.sum(degi * degj) / K
term2 = np.square(np.sum(.5 * (degi + degj)) / K)
term3 = np.sum(.5 * (degi * degi + degj * degj)) / K
r = (term1 - term2) / (term3 - term2)
return r
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def kcore_bd(CIJ, k, peel=False):
'''
The k-core is the largest subnetwork comprising nodes of degree at
least k. This function computes the k-core for a given binary directed
connection matrix by recursively peeling off nodes with degree lower
than k, until no such nodes remain.
Parameters
----------
CIJ : NxN np.ndarray
binary directed adjacency matrix
k : int
level of k-core
peel : bool
If True, additionally calculates peelorder and peellevel. Defaults to
False.
Returns
-------
CIJkcore : NxN np.ndarray
connection matrix of the k-core. This matrix only contains nodes of
degree at least k.
kn : int
size of k-core
peelorder : Nx1 np.ndarray
indices in the order in which they were peeled away during k-core
decomposition. only returned if peel is specified.
peellevel : Nx1 np.ndarray
corresponding level - nodes in at the same level have been peeled
away at the same time. only return if peel is specified
Notes
-----
'peelorder' and 'peellevel' are similar to the k-core sub-shells
described in Modha and Singh (2010).
'''
# peelorder/peellevel are only tracked when requested
if peel:
    peelorder, peellevel = ([], [])
# NOTE(review): 'iter' shadows the builtin of the same name
iter = 0
CIJkcore = CIJ.copy()
while True:
    id, od, deg = degrees_dir(CIJkcore) # get degrees of matrix
    # find nodes with degree <k
    ff, = np.where(np.logical_and(deg < k, deg > 0))
    if ff.size == 0:
        break # if none found -> stop
    # else peel away found nodes
    iter += 1
    CIJkcore[ff, :] = 0
    CIJkcore[:, ff] = 0
    if peel:
        peelorder.append(ff)
    if peel:
        # all nodes removed during this pass share the same peel level
        peellevel.append(iter * np.ones((len(ff),)))
# deg is from the final iteration: only k-core members have degree > 0
kn = np.sum(deg > 0)
if peel:
    return CIJkcore, kn, peelorder, peellevel
else:
    return CIJkcore, kn
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def kcore_bu(CIJ, k, peel=False):
'''
The k-core is the largest subnetwork comprising nodes of degree at
least k. This function computes the k-core for a given binary
undirected connection matrix by recursively peeling off nodes with
degree lower than k, until no such nodes remain.
Parameters
----------
CIJ : NxN np.ndarray
binary undirected connection matrix
k : int
level of k-core
peel : bool
If True, additionally calculates peelorder and peellevel. Defaults to
False.
Returns
-------
CIJkcore : NxN np.ndarray
connection matrix of the k-core. This matrix only contains nodes of
degree at least k.
kn : int
size of k-core
peelorder : Nx1 np.ndarray
indices in the order in which they were peeled away during k-core
decomposition. only returned if peel is specified.
peellevel : Nx1 np.ndarray
corresponding level - nodes in at the same level have been peeled
away at the same time. only return if peel is specified
Notes
-----
'peelorder' and 'peellevel' are similar to the k-core sub-shells
described in Modha and Singh (2010).
'''
if peel:
peelorder, peellevel = ([], [])
iter = 0
CIJkcore = CIJ.copy()
while True:
deg = degrees_und(CIJkcore) # get degrees of matrix
# find nodes with degree <k
ff, = np.where(np.logical_and(deg < k, deg > 0))
if ff.size == 0:
break # if none found -> stop
# else peel away found nodes
iter += 1
CIJkcore[ff, :] = 0
CIJkcore[:, ff] = 0
if peel:
peelorder.append(ff)
if peel:
peellevel.append(iter * np.ones((len(ff),)))
kn = np.sum(deg > 0)
if peel:
return CIJkcore, kn, peelorder, peellevel
else:
return CIJkcore, kn |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def score_wu(CIJ, s):
'''
The s-core is the largest subnetwork comprising nodes of strength at
least s. This function computes the s-core for a given weighted
undirected connection matrix. Computation is analogous to the more
widely used k-core, but is based on node strengths instead of node
degrees.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
s : float
level of s-core. Note that s can take on any fractional value.
Returns
-------
CIJscore : NxN np.ndarray
connection matrix of the s-core. This matrix contains only nodes with
a strength of at least s.
sn : int
size of s-core
'''
CIJscore = CIJ.copy()
while True:
str = strengths_und(CIJscore) # get strengths of matrix
# find nodes with strength <s
ff, = np.where(np.logical_and(str < s, str > 0))
if ff.size == 0:
break # if none found -> stop
# else peel away found nodes
CIJscore[ff, :] = 0
CIJscore[:, ff] = 0
sn = np.sum(str > 0)
return CIJscore, sn |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_pad_index(self, array):
"""Find padding index. Args: array (list):
integer list. Returns: idx: padding index. Examples: 2 """ |
# EAFP: list.index raises ValueError when pad_value is absent,
# meaning the row is unpadded (full length).
try:
    return list(array).index(self.pad_value)
except ValueError:
    return len(array)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_length(self, y):
"""Get true length of y. Args: y (list):
padded list. Returns: lens: true length of y. Examples: [1, 2, 3] """ |
lens = [self.find_pad_index(row) for row in y]
return lens |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_idx_to_name(self, y, lens):
"""Convert label index to name. Args: y (list):
label index list. lens (list):
true length of y. Returns: y: label name list. Examples: [['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']] """ |
y = [[self.id2label[idx] for idx in row[:l]]
for row, l in zip(y, lens)]
return y |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, X, y):
"""Predict sequences. Args: X (list):
input data. y (list):
tags. Returns: y_true: true sequences. y_pred: predicted sequences. """ |
y_pred = self.model.predict_on_batch(X)
# reduce dimension.
# collapse one-hot / probability vectors to label indices
y_true = np.argmax(y, -1)
y_pred = np.argmax(y_pred, -1)
# trim padding using the true sequence lengths derived from y_true
lens = self.get_length(y_true)
y_true = self.convert_idx_to_name(y_true, lens)
y_pred = self.convert_idx_to_name(y_pred, lens)
return y_true, y_pred
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def score(self, y_true, y_pred):
"""Calculate f1 score. Args: y_true (list):
true sequences. y_pred (list):
predicted sequences. Returns: score: f1 score. """ |
# entity-level (chunk) F1, printed alongside a per-type report
score = f1_score(y_true, y_pred)
print(' - f1: {:04.2f}'.format(score * 100))
print(classification_report(y_true, y_pred, digits=4))
return score
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_entities(seq, suffix=False):
"""Gets entities from sequence. Args: seq (list):
sequence of labels. Returns: list: list of (chunk_type, chunk_start, chunk_end). Example: [('PER', 0, 1), ('LOC', 3, 3)] """ |
# for nested list
# flatten a list of sentences, inserting 'O' between them so chunks
# cannot span sentence boundaries
if any(isinstance(s, list) for s in seq):
    seq = [item for sublist in seq for item in sublist + ['O']]
prev_tag = 'O'
prev_type = ''
begin_offset = 0
chunks = []
# trailing 'O' sentinel flushes a chunk that ends on the last token
for i, chunk in enumerate(seq + ['O']):
    if suffix:
        # labels look like 'LOC-B': type first, tag last
        tag = chunk[-1]
        type_ = chunk.split('-')[0]
    else:
        # labels look like 'B-LOC': tag first, type last
        tag = chunk[0]
        type_ = chunk.split('-')[-1]
    if end_of_chunk(prev_tag, tag, prev_type, type_):
        # close the chunk that ended at the previous position
        chunks.append((prev_type, begin_offset, i-1))
    if start_of_chunk(prev_tag, tag, prev_type, type_):
        begin_offset = i
    prev_tag = tag
    prev_type = type_
return chunks
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def end_of_chunk(prev_tag, tag, prev_type, type_):
"""Checks if a chunk ended between the previous and current word. Args: prev_tag: previous chunk tag. tag: current chunk tag. prev_type: previous type. type_: current type. Returns: chunk_end: boolean. """ |
chunk_end = False
if prev_tag == 'E': chunk_end = True
if prev_tag == 'S': chunk_end = True
if prev_tag == 'B' and tag == 'B': chunk_end = True
if prev_tag == 'B' and tag == 'S': chunk_end = True
if prev_tag == 'B' and tag == 'O': chunk_end = True
if prev_tag == 'I' and tag == 'B': chunk_end = True
if prev_tag == 'I' and tag == 'S': chunk_end = True
if prev_tag == 'I' and tag == 'O': chunk_end = True
if prev_tag != 'O' and prev_tag != '.' and prev_type != type_:
chunk_end = True
return chunk_end |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_of_chunk(prev_tag, tag, prev_type, type_):
"""Checks if a chunk started between the previous and current word. Args: prev_tag: previous chunk tag. tag: current chunk tag. prev_type: previous type. type_: current type. Returns: chunk_start: boolean. """ |
chunk_start = False
if tag == 'B': chunk_start = True
if tag == 'S': chunk_start = True
if prev_tag == 'E' and tag == 'E': chunk_start = True
if prev_tag == 'E' and tag == 'I': chunk_start = True
if prev_tag == 'S' and tag == 'E': chunk_start = True
if prev_tag == 'S' and tag == 'I': chunk_start = True
if prev_tag == 'O' and tag == 'E': chunk_start = True
if prev_tag == 'O' and tag == 'I': chunk_start = True
if tag != 'O' and tag != '.' and prev_type != type_:
chunk_start = True
return chunk_start |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def f1_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the F1 score. The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. Returns: score : float. Example: 0.50 """ |
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
nb_true = len(true_entities)
p = nb_correct / nb_pred if nb_pred > 0 else 0
r = nb_correct / nb_true if nb_true > 0 else 0
score = 2 * p * r / (p + r) if p + r > 0 else 0
return score |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def precision_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the precision. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample. The best value is 1 and the worst value is 0. Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. Returns: score : float. Example: 0.50 """ |
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
score = nb_correct / nb_pred if nb_pred > 0 else 0
return score |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def recall_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the recall. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The best value is 1 and the worst value is 0. Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. Returns: score : float. Example: 0.50 """ |
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_true = len(true_entities)
score = nb_correct / nb_true if nb_true > 0 else 0
return score |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def classification_report(y_true, y_pred, digits=2, suffix=False):
"""Build a text report showing the main classification metrics. Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a classifier. digits : int. Number of digits for formatting output floating point values. Returns: report : string. Text summary of the precision, recall, F1 score for each class. Examples: precision recall f1-score support <BLANKLINE> MISC 0.00 0.00 0.00 1 PER 1.00 1.00 1.00 1 <BLANKLINE> micro avg 0.50 0.50 0.50 2 macro avg 0.50 0.50 0.50 2 <BLANKLINE> """ |
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
name_width = 0
# group entity spans by type: d1 holds truth, d2 holds predictions
d1 = defaultdict(set)
d2 = defaultdict(set)
for e in true_entities:
    d1[e[0]].add((e[1], e[2]))
    name_width = max(name_width, len(e[0]))
for e in pred_entities:
    d2[e[0]].add((e[1], e[2]))
last_line_heading = 'macro avg'
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
report = head_fmt.format(u'', *headers, width=width)
report += u'\n\n'
row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
ps, rs, f1s, s = [], [], [], []
# per-type rows. NOTE(review): the loop variables shadow the outer
# true_entities/pred_entities sets; harmless, since the averages below
# recompute from y_true/y_pred.
for type_name, true_entities in d1.items():
    pred_entities = d2[type_name]
    nb_correct = len(true_entities & pred_entities)
    nb_pred = len(pred_entities)
    nb_true = len(true_entities)
    p = nb_correct / nb_pred if nb_pred > 0 else 0
    r = nb_correct / nb_true if nb_true > 0 else 0
    f1 = 2 * p * r / (p + r) if p + r > 0 else 0
    report += row_fmt.format(*[type_name, p, r, f1, nb_true], width=width, digits=digits)
    ps.append(p)
    rs.append(r)
    f1s.append(f1)
    s.append(nb_true)
report += u'\n'
# compute averages
# micro avg pools all entities; the 'macro avg' row is actually a
# support-weighted mean (np.average with weights=s)
report += row_fmt.format('micro avg',
                         precision_score(y_true, y_pred, suffix=suffix),
                         recall_score(y_true, y_pred, suffix=suffix),
                         f1_score(y_true, y_pred, suffix=suffix),
                         np.sum(s),
                         width=width, digits=digits)
report += row_fmt.format(last_line_heading,
                         np.average(ps, weights=s),
                         np.average(rs, weights=s),
                         np.average(f1s, weights=s),
                         np.sum(s),
                         width=width, digits=digits)
return report
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _timedelta_to_seconds(td):
"""Convert a datetime.timedelta object into a seconds interval for rotating file ouput. :param td: datetime.timedelta :return: time in seconds :rtype: int """ |
# accept plain numbers (seconds) as well as timedelta instances
if isinstance(td, numbers.Real):
    td = datetime.timedelta(seconds=td)
# NOTE(review): returns a float, although the docstring advertises int
return td.total_seconds()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getLogger(name=None, **kwargs):
"""Build a logger with the given name. :param name: The name for the logger. This is usually the module name, ``__name__``. :type name: string """ |
# memoize one adapter per logger name in the module-level _LOGGERS map
adapter = _LOGGERS.get(name)
if not adapter:
    # NOTE(jd) Keep using the `adapter' variable here because so it's not
    # collected by Python since _LOGGERS contains only a weakref
    adapter = KeywordArgumentAdapter(logging.getLogger(name), kwargs)
    _LOGGERS[name] = adapter
return adapter
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup(level=logging.WARNING, outputs=[output.STDERR], program_name=None, capture_warnings=True):
"""Setup Python logging. This will setup basic handlers for Python logging. :param level: Root log level. :param outputs: Iterable of outputs to log to. :param program_name: The name of the program. Auto-detected if not set. :param capture_warnings: Capture warnings from the `warnings' module. """ |
root_logger = logging.getLogger(None)
# Remove all handlers so repeated setup() calls do not stack outputs
for handler in list(root_logger.handlers):
    root_logger.removeHandler(handler)
# Add configured handlers
for out in outputs:
    # remember what was requested so lookup failures are reported
    # accurately (the old code formatted the failed lookup result,
    # always producing "Output None is not available")
    requested = out
    if isinstance(out, str):
        out = output.preconfigured.get(out)
    if out is None:
        raise RuntimeError("Output {} is not available".format(requested))
    out.add_to_logger(root_logger)
root_logger.setLevel(level)
program_logger = logging.getLogger(program_name)
def logging_excepthook(exc_type, value, tb):
    # route uncaught exceptions through logging before the process exits
    program_logger.critical(
        "".join(traceback.format_exception(exc_type, value, tb)))
sys.excepthook = logging_excepthook
if capture_warnings:
    logging.captureWarnings(True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_default_log_levels(loggers_and_log_levels):
"""Set default log levels for some loggers. :param loggers_and_log_levels: List of tuple (logger name, level). """ |
# accept numeric levels or (possibly lowercase) level names
for name, level in loggers_and_log_levels:
    if isinstance(level, str):
        level = level.upper()
    logging.getLogger(name).setLevel(level)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_swag_from_ctx(ctx):
"""Creates SWAG client from the current context.""" |
# translate the CLI context's backend selection into SWAG options
swag_opts = {}
if ctx.type == 'file':
    swag_opts = {
        'swag.type': 'file',
        'swag.data_dir': ctx.data_dir,
        'swag.data_file': ctx.data_file
    }
elif ctx.type == 's3':
    swag_opts = {
        'swag.type': 's3',
        'swag.bucket_name': ctx.bucket_name,
        'swag.data_file': ctx.data_file,
        'swag.region': ctx.region
    }
elif ctx.type == 'dynamodb':
    swag_opts = {
        'swag.type': 'dynamodb',
        'swag.region': ctx.region
    }
return SWAGManager(**parse_swag_config_options(swag_opts))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def file(ctx, data_dir, data_file):
"""Use the File SWAG Backend""" |
# Only fill in values not already set on the context (outer CLI args win).
# BUG FIX: the first check used to test the nonexistent ctx.file attribute
# instead of ctx.data_file (compare the sibling s3 command).
if not ctx.data_file:
    ctx.data_file = data_file
if not ctx.data_dir:
    ctx.data_dir = data_dir
ctx.type = 'file'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def s3(ctx, bucket_name, data_file, region):
"""Use the S3 SWAG backend.""" |
# only fill in values not already set on the context (outer CLI args win)
if not ctx.data_file:
    ctx.data_file = data_file
if not ctx.bucket_name:
    ctx.bucket_name = bucket_name
if not ctx.region:
    ctx.region = region
ctx.type = 's3'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(ctx):
"""List SWAG account info.""" |
# listing is only implemented for the 'accounts' namespace
if ctx.namespace != 'accounts':
    click.echo(
        click.style('Only account data is available for listing.', fg='red')
    )
    return
swag = create_swag_from_ctx(ctx)
accounts = swag.get_all()
# two-column table: name and id
_table = [[result['name'], result.get('id')] for result in accounts]
click.echo(
    tabulate(_table, headers=["Account Name", "Account Number"])
)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_service(ctx, name):
"""Retrieve accounts pertaining to named service.""" |
swag = create_swag_from_ctx(ctx)
# tabulate name/id for every account with the named service enabled
rows = [[account['name'], account.get('id')]
        for account in swag.get_service_enabled(name)]
click.echo(
    tabulate(rows, headers=["Account Name", "Account Number"])
)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def migrate(ctx, start_version, end_version):
"""Transition from one SWAG schema to another.""" |
# migration is only supported for the file backend
if ctx.type == 'file':
    if ctx.data_file:
        file_path = ctx.data_file
    else:
        # BUG FIX: fall back to the data directory. The old code joined on
        # ctx.data_file, which is empty on this branch; mirrors propagate().
        file_path = os.path.join(ctx.data_dir, ctx.namespace + '.json')
    # todo make this more like alembic and determine/load versions automatically
    with open(file_path, 'r') as f:
        data = json.loads(f.read())
    data = run_migration(data, start_version, end_version)
    with open(file_path, 'w') as f:
        f.write(json.dumps(data))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def propagate(ctx):
"""Transfers SWAG data from one backend to another""" |
data = []
# source backend: only the file type is supported here
if ctx.type == 'file':
    if ctx.data_file:
        file_path = ctx.data_file
    else:
        file_path = os.path.join(ctx.data_dir, ctx.namespace + '.json')
    with open(file_path, 'r') as f:
        data = json.loads(f.read())
# destination backend is hard-coded to dynamodb
swag_opts = {
    'swag.type': 'dynamodb'
}
swag = SWAGManager(**parse_swag_config_options(swag_opts))
for item in data:
    # presumably a throttle to stay under DynamoDB write capacity -- TODO confirm
    time.sleep(2)
    swag.create(item, dry_run=ctx.dry_run)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(ctx, data):
"""Create a new SWAG item.""" |
swag = create_swag_from_ctx(ctx)
# `data` is an open file handle holding a JSON list of items
data = json.loads(data.read())
for account in data:
    swag.create(account, dry_run=ctx.dry_run)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deploy_service(ctx, path, name, regions, disabled):
"""Deploys a new service JSON to multiple accounts. NAME is the service name you wish to deploy.""" |
enabled = False if disabled else True
swag = create_swag_from_ctx(ctx)
# `path` is a JMESPath filter selecting the target accounts
accounts = swag.get_all(search_filter=path)
log.debug('Searching for accounts. Found: {} JMESPath: `{}`'.format(len(accounts), path))
for a in accounts:
    try:
        # skip accounts that already carry this service
        if not swag.get_service(name, search_filter="[?id=='{id}']".format(id=a['id'])):
            log.info('Found an account to update. AccountName: {name} AccountNumber: {number}'.format(name=a['name'], number=a['id']))
            # one status entry per requested region
            status = []
            for region in regions:
                status.append(
                    {
                        'enabled': enabled,
                        'region': region
                    }
                )
            a['services'].append(
                {
                    'name': name,
                    'status': status
                }
            )
            swag.update(a, dry_run=ctx.dry_run)
    except InvalidSWAGDataException as e:
        # best-effort: log data-quality problems and continue with the rest
        log.warning('Found a data quality issue. AccountName: {name} AccountNumber: {number}'.format(name=a['name'], number=a['id']))
log.info('Service has been deployed to all matching accounts.')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def seed_aws_data(ctx, data):
"""Seeds SWAG from a list of known AWS accounts.""" |
swag = create_swag_from_ctx(ctx)
# NOTE(review): the loop below rebinds `data`; the input file handle is
# fully consumed by json.loads first, so the shadowing is harmless
for k, v in json.loads(data.read()).items():
    for account in v['accounts']:
        data = {
            'description': 'This is an AWS owned account used for {}'.format(k),
            'id': account['account_id'],
            'contacts': [],
            'owner': 'aws',
            'provider': 'aws',
            'sensitive': False,
            'email': 'support@amazon.com',
            'name': k + '-' + account['region']
        }
        click.echo(click.style(
            'Seeded Account. AccountName: {}'.format(data['name']), fg='green')
        )
        swag.create(data, dry_run=ctx.dry_run)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def seed_aws_organization(ctx, owner):
"""Seeds SWAG from an AWS organization.""" |
swag = create_swag_from_ctx(ctx)
# ids already present in SWAG, used to skip duplicates
accounts = swag.get_all()
_ids = [result.get('id') for result in accounts]
client = boto3.client('organizations')
paginator = client.get_paginator('list_accounts')
response_iterator = paginator.paginate()
count = 0
for response in response_iterator:
    for account in response['Accounts']:
        if account['Id'] in _ids:
            click.echo(click.style(
                'Ignoring Duplicate Account. AccountId: {} already exists in SWAG'.format(account['Id']), fg='yellow')
            )
            continue
        # map the organization account state onto SWAG's status vocabulary
        if account['Status'] == 'SUSPENDED':
            status = 'deprecated'
        else:
            status = 'created'
        data = {
            'id': account['Id'],
            'name': account['Name'],
            'description': 'Account imported from AWS organization.',
            'email': account['Email'],
            'owner': owner,
            'provider': 'aws',
            'contacts': [],
            'sensitive': False,
            'status': [{'region': 'all', 'status': status}]
        }
        click.echo(click.style(
            'Seeded Account. AccountName: {}'.format(data['name']), fg='green')
        )
        count += 1
        swag.create(data, dry_run=ctx.dry_run)
click.echo('Seeded {} accounts to SWAG.'.format(count))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_file(client, bucket, data_file):
"""Tries to load JSON data from S3.""" |
logger.debug('Loading item from s3. Bucket: {bucket} Key: {key}'.format(
    bucket=bucket,
    key=data_file
))
# If the file doesn't exist, then return an empty dict:
try:
    data = _get_from_s3(client, bucket, data_file)
except ClientError as ce:
    if ce.response['Error']['Code'] == 'NoSuchKey':
        return {}
    else:
        # any other S3 failure is propagated to the caller
        raise ce
# boto returns bytes on Python 3; decode before parsing
if sys.version_info > (3,):
    data = data.decode('utf-8')
return json.loads(data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_file(client, bucket, data_file, items, dry_run=None):
"""Tries to write JSON data to data file in S3.""" |
logger.debug('Writing {number_items} items to s3. Bucket: {bucket} Key: {key}'.format(
    number_items=len(items),
    bucket=bucket,
    key=data_file
))
# dry runs log the intent but skip the write (implicitly returns None)
if not dry_run:
    return _put_to_s3(client, bucket, data_file, json.dumps(items))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self, item, dry_run=None):
"""Creates a new item in file.""" |
logger.debug('Creating new item. Item: {item} Path: {data_file}'.format(
    item=item,
    data_file=self.data_file
))
# read-modify-write: load the whole file, append, then write it back
items = load_file(self.client, self.bucket_name, self.data_file)
items = append_item(self.namespace, self.version, item, items)
save_file(self.client, self.bucket_name, self.data_file, items, dry_run=dry_run)
return item
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def health_check(self):
"""Uses head object to make sure the file exists in S3.""" |
logger.debug('Health Check on S3 file for: {namespace}'.format(
namespace=self.namespace
))
try:
self.client.head_object(Bucket=self.bucket_name, Key=self.data_file)
return True
except ClientError as e:
logger.debug('Error encountered with S3. Assume unhealthy') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def health_check(self):
"""Gets a single item to determine if Dynamo is functioning.""" |
logger.debug('Health Check on Table: {namespace}'.format(
    namespace=self.namespace
))
# a full table read doubles as the liveness probe
try:
    self.get_all()
    return True
except ClientError as e:
    logger.exception(e)
    logger.error('Error encountered with Database. Assume unhealthy')
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_swag_config_options(config):
"""Ensures that options passed to the backend are valid.""" |
options = {}
for key, val in config.items():
if key.startswith('swag.backend.'):
options[key[12:]] = val
if key.startswith('swag.'):
options[key[5:]] = val
if options.get('type') == 's3':
return S3OptionsSchema(strict=True).load(options).data
elif options.get('type') == 'dynamodb':
return DynamoDBOptionsSchema(strict=True).load(options).data
else:
return FileOptionsSchema(strict=True).load(options).data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deprecated(message):
"""Deprecated function decorator.""" |
def wrapper(fn):
def deprecated_method(*args, **kargs):
warnings.warn(message, DeprecationWarning, 2)
return fn(*args, **kargs)
# TODO: use decorator ? functools.wrapper ?
deprecated_method.__name__ = fn.__name__
deprecated_method.__doc__ = "%s\n\n%s" % (message, fn.__doc__)
return deprecated_method
return wrapper |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_sub_dict(sub_dict, dictionary):
"""Legacy filter for determining if a given dict is present.""" |
for key in sub_dict.keys():
if key not in dictionary:
return False
if (type(sub_dict[key]) is not dict) and (sub_dict[key] != dictionary[key]):
return False
if (type(sub_dict[key]) is dict) and (not is_sub_dict(sub_dict[key], dictionary[key])):
return False
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_by_name(account_name, bucket, region='us-west-2', json_path='accounts.json', alias=None):
"""Given an account name, attempts to retrieve associated account info.""" |
# scan all accounts; only AWS-type accounts are considered
for account in get_all_accounts(bucket, region, json_path)['accounts']:
    if 'aws' in account['type']:
        if account['name'] == account_name:
            return account
        elif alias:
            # fall back to matching any of the account's aliases
            # NOTE(review): assumes every aws account has an 'alias' key
            for a in account['alias']:
                if a == account_name:
                    return account
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all_accounts(bucket, region='us-west-2', json_path='accounts.json', **filters):
    """Fetches all the accounts from SWAG.

    Any keyword filters are matched against each account via
    ``is_sub_dict``.  Returns a v1-shaped ``{'accounts': [...]}`` dict.
    """
    swag = SWAGManager(**parse_swag_config_options({
        'swag.type': 's3',
        'swag.bucket_name': bucket,
        'swag.bucket_region': region,
        'swag.data_file': json_path,
        'swag.schema_version': 1,
    }))
    matching = [account for account in swag.get_all()['accounts']
                if is_sub_dict(filters, account)]
    return {'accounts': matching}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_file(data_file):
    """Tries to load JSON from data file.

    Returns the decoded object, or an empty list when the file content is
    not valid JSON.  A missing file still raises ``OSError``.
    """
    with open(data_file, 'r', encoding='utf-8') as f:
        contents = f.read()
    try:
        return json.loads(contents)
    except JSONDecodeError:
        return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_file(data_file, data, dry_run=None):
    """Writes JSON data to data file.

    No-op when *dry_run* is truthy.  The historical Python 2 branch was
    unreachable (``open(..., encoding=...)`` is Python 3 only) and has
    been removed.
    """
    if dry_run:
        return
    with open(data_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(data))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def health_check(self):
    """Checks to make sure the file is there.

    Returns True when the configured data file exists on disk.
    """
    logger.debug('Health Check on file for: {namespace}'.format(namespace=self.namespace))
    return os.path.isfile(self.data_file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def configure(self, *args, **kwargs):
    """Configures a SWAG manager. Overrides existing configuration."""
    self.version = kwargs['schema_version']
    self.namespace = kwargs['namespace']
    # NOTE: the backend is constructed BEFORE 'schema_context' is popped,
    # so backends still receive it in their kwargs (original order kept).
    backend_factory = get(kwargs['type'])
    self.backend = backend_factory(*args, **kwargs)
    self.context = kwargs.pop('schema_context', {})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self, item, dry_run=None):
    """Create a new item in backend after schema validation."""
    validated = validate(item, version=self.version, context=self.context)
    return self.backend.create(validated, dry_run=dry_run)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, item, dry_run=None):
    """Delete an item in backend.

    The item is forwarded to the configured backend untouched.
    """
    backend = self.backend
    return backend.delete(item, dry_run=dry_run)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, item, dry_run=None):
    """Update an item in backend after schema validation."""
    validated = validate(item, version=self.version, context=self.context)
    return self.backend.update(validated, dry_run=dry_run)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all(self, search_filter=None):
    """Fetch all data from backend, optionally filtered by a JMESPath
    expression."""
    items = self.backend.get_all()
    if not items:
        # Preserve the historical empty shape for each schema version.
        return {self.namespace: []} if self.version == 1 else []
    if search_filter:
        return jmespath.search(search_filter, items)
    return items
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_service_enabled(self, name, accounts_list=None, search_filter=None, region=None):
    """Get a list of accounts where a service has been enabled.

    Uses *accounts_list* when supplied, otherwise fetches accounts via
    ``get_all``; *region* narrows v2 status checks to one region (or
    'all').
    """
    accounts = accounts_list if accounts_list else self.get_all(search_filter=search_filter)
    if self.version == 1:
        accounts = accounts['accounts']

    enabled = []
    for account in accounts:
        # Narrow the service lookup to this single account.
        if self.version == 1:
            account_filter = "accounts[?id=='{id}']".format(id=account['id'])
        else:
            account_filter = "[?id=='{id}']".format(id=account['id'])
        service = self.get_service(name, search_filter=account_filter)

        if self.version == 1:
            # v1 carries a bare enabled flag; no region information exists.
            if service:
                service = service['enabled']
        else:
            if not region:
                service_filter = "status[?enabled]"
            else:
                service_filter = "status[?(region=='{region}' || region=='all') && enabled]".format(region=region)
            service = jmespath.search(service_filter, service)

        if service:
            enabled.append(account)
    return enabled
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_service(self, name, search_filter):
    """Fetch service metadata for *name* within the filtered accounts."""
    data = self.get(search_filter)
    if self.version == 1:
        return jmespath.search("service.{name}".format(name=name), data)
    return one(jmespath.search("services[?name=='{}']".format(name), data))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_service_name(self, name, search_filter):
    """Fetch account name as referenced by a particular service."""
    matches = jmespath.search(
        "services[?name=='{}'].metadata.name".format(name),
        self.get(search_filter))
    return one(matches)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_by_name(self, name, alias=None):
    """Fetch all accounts with name specified, optionally include aliases."""
    # Default: exact name match only.
    query = "[?name=='{}']".format(name)
    if alias:
        # Alias-aware queries differ per schema version: v1 nests under
        # 'accounts' and calls the field 'alias'; v2 uses 'aliases'.
        if self.version == 1:
            query = "accounts[?name=='{name}' || contains(alias, '{name}')]".format(name=name)
        elif self.version == 2:
            query = "[?name=='{name}' || contains(aliases, '{name}')]".format(name=name)
    return self.get_all(query)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_migration(data, version_start, version_end):
    """Runs migration against a data set.

    Supports v1->v2 upgrades and v2->v1 downgrades; any other version
    pair yields an empty list.
    """
    migrated = []
    if version_start == 1 and version_end == 2:
        # v1 wraps its accounts in a top-level dict.
        migrated = [v2.upgrade(item) for item in data['accounts']]
    if version_start == 2 and version_end == 1:
        migrated = {'accounts': [v2.downgrade(item) for item in data]}
    return migrated
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_type(self, data):
    """Performs field validation against the schema context.

    Values supplied to SWAGManager via the ``swag.schema_context``
    config constrain 'type', 'environment' and 'owner'; an empty context
    entry means any value is accepted for that field.
    """
    for field in ('type', 'environment', 'owner'):
        allowed_values = self.context.get(field)
        if allowed_values and data.get(field) not in allowed_values:
            raise ValidationError('Must be one of {}'.format(allowed_values), field_names=field)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_account_status(self, data):
    """Performs field validation for account_status.

    If any region is not deleted, account_status cannot be 'deleted'.
    """
    account_status = data.get('account_status')
    for region in data.get('status'):
        if account_status == 'deleted' and region['status'] != 'deleted':
            raise ValidationError('Account Status cannot be "deleted" if a region is not "deleted"')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_regions_schema(self, data):
    """Performs field validation for regions.

    'regions' must be a dict keyed by region name whose values satisfy
    ``RegionSchema``.
    """
    schema = RegionSchema()
    for region_name, region_data in data.get('regions', {}).items():
        errors = schema.validate(region_data)
        if errors:
            raise ValidationError(errors)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def coord_to_dimension(coord):
    """Converts an iris coordinate to a HoloViews dimension.

    Time-reference coordinates get a date value formatter; all others
    carry their unit string.
    """
    if coord.units.is_time_reference():
        kwargs = {'value_format': get_date_format(coord)}
    else:
        kwargs = {'unit': str(coord.units)}
    return Dimension(coord.name(), **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_coords(coord):
    """Sort key for DimCoords: dates and pressure levels first,
    longitude and latitude last, in the correct order."""
    import iris
    priority = {'T': -2, 'Z': -1, 'X': 1, 'Y': 2}
    axis = iris.util.guess_coord_axis(coord)
    # Unknown axes sort between time/level (negative) and lon/lat (positive).
    return (priority.get(axis, 0), coord and coord.name())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def values(cls, dataset, dim, expanded=True, flat=True, compute=True):
    """Returns an array of the values along the supplied dimension."""
    dim = dataset.get_dimension(dim, strict=True)
    if dim in dataset.vdims:
        # Value arrays are canonicalized against the cube's coord order.
        coord_names = [c.name() for c in dataset.data.dim_coords]
        arr = cls.canonicalize(dataset, dataset.data.copy().data, coord_names)
        return arr.T.flatten() if flat else arr
    if expanded:
        arr = cls.coords(dataset, dim.name, expanded=True)
        return arr.T.flatten() if flat else arr
    return cls.coords(dataset, dim.name, ordered=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def groupby(cls, dataset, dims, container_type=HoloMap, group_type=None, **kwargs):
    """Groups the data by one or more dimensions.

    Returns a container indexed by the grouped dimensions containing
    slices of the cube wrapped in *group_type*, making it easy to break a
    high-dimensional dataset into smaller viewable chunks.
    """
    import iris
    if not isinstance(dims, list):
        dims = [dims]
    dims = [dataset.get_dimension(d, strict=True) for d in dims]
    constraint_names = [d.name for d in dims]
    slice_dims = [d for d in dataset.kdims if d not in dims]

    # Update the kwargs appropriately for Element group types.
    group_kwargs = {}
    group_type = dict if group_type == 'raw' else group_type
    if issubclass(group_type, Element):
        group_kwargs.update(util.get_param_values(dataset))
        group_kwargs['kdims'] = slice_dims
    group_kwargs.update(kwargs)
    drop_dim = any(d not in group_kwargs['kdims'] for d in slice_dims)

    unique_coords = product(*[cls.values(dataset, d, expanded=False) for d in dims])
    grouped = []
    for key in unique_coords:
        constraint = iris.Constraint(**dict(zip(constraint_names, key)))
        extracted = dataset.data.extract(constraint)
        if drop_dim:
            # Convert to columns so dropped dimensions are discarded.
            extracted = group_type(extracted, kdims=slice_dims,
                                   vdims=dataset.vdims).columns()
        grouped.append((key, group_type(extracted, **group_kwargs)))

    if issubclass(container_type, NdMapping):
        with item_check(False), sorted_context(False):
            return container_type(grouped, kdims=dims)
    return container_type(grouped)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def concat_dim(cls, datasets, dim, vdims):
    """Concatenates datasets along one dimension."""
    import iris
    from iris.experimental.equalise_cubes import equalise_attributes
    cubes = []
    for key, cube in datasets.items():
        cube = cube.copy()
        # Tag each cube with its position along the new dimension.
        cube.add_aux_coord(iris.coords.DimCoord([key], var_name=dim.name))
        cubes.append(cube)
    cube_list = iris.cube.CubeList(cubes)
    equalise_attributes(cube_list)
    return cube_list.merge_cube()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range(cls, dataset, dimension):
    """Computes the (NaN-ignoring) min/max range along a dimension."""
    dim = dataset.get_dimension(dimension, strict=True)
    vals = dataset.dimension_values(dim.name, False)
    return (np.nanmin(vals), np.nanmax(vals))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def redim(cls, dataset, dimensions):
    """Rename coords on the Cube.

    *dimensions* maps old names to Dimension objects; both the cube name
    and matching dim_coords are renamed on a copy.
    """
    cube = dataset.data.copy()
    for old_name, new_dim in dimensions.items():
        if old_name == cube.name():
            cube.rename(new_dim.name)
        for coord in cube.dim_coords:
            if old_name == coord.name():
                coord.rename(new_dim.name)
    return cube
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def length(cls, dataset):
    """Returns the total number of samples in the dataset.

    Computed as the product of all dim_coord lengths.  Uses ``np.prod``
    because the ``np.product`` alias was removed in NumPy 2.0.
    """
    return np.prod([len(d.points) for d in dataset.data.coords(dim_coords=True)], dtype=np.intp)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_dimension(cls, columns, dimension, dim_pos, values, vdim):
    """Adding value dimensions is not currently supported by the iris
    interface; adding key dimensions is not possible on dense
    interfaces."""
    if vdim:
        raise NotImplementedError
    raise Exception("Cannot add key dimension to a dense representation.")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select_to_constraint(cls, dataset, selection):
    """Transform a selection dictionary to an iris Constraint.

    Slices and tuples become half-open interval predicates over cell
    points; a (None, None) tuple is ignored.
    """
    import iris

    def make_interval(start, end):
        def within(cell):
            return start <= cell.point < end
        return within

    constraint_kwargs = {}
    for dim, constraint in selection.items():
        if isinstance(constraint, slice):
            constraint = (constraint.start, constraint.stop)
        if isinstance(constraint, tuple):
            if constraint == (None, None):
                continue
            constraint = make_interval(*constraint)
        dim = dataset.get_dimension(dim, strict=True)
        constraint_kwargs[dim.name] = constraint
    return iris.Constraint(**constraint_kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select(cls, dataset, selection_mask=None, **selection):
    """Apply a selection to the data.

    Returns a scalar when an indexed selection collapses all dims,
    otherwise a cube with any collapsed dimensions restored as length-1
    axes.
    """
    import iris
    constraint = cls.select_to_constraint(dataset, selection)
    pre_coords = [c.name() for c in dataset.data.dim_coords]
    indexed = cls.indexed(dataset, selection)
    extracted = dataset.data.extract(constraint)
    if indexed and not extracted.dim_coords:
        return extracted.data.item()
    post_coords = [c.name() for c in extracted.dim_coords]
    # Restore any dimension the extraction collapsed to a scalar coord.
    for name in (c for c in pre_coords if c not in post_coords):
        extracted = iris.util.new_axis(extracted, name)
    return extracted
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_to_geotype(element, crs=None):
    """Converts a HoloViews element type to the equivalent GeoViews
    element if given a coordinate reference system."""
    if crs is None or isinstance(element, _Element):
        return element
    # Look up the GeoViews counterpart by class name.
    geotype = getattr(gv_element, type(element).__name__, None)
    if geotype is None:
        return element
    return geotype(element, crs=crs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_crs(op, element, **kwargs):
    """Converts any elements in the input to their equivalent geotypes
    if given a coordinate reference system."""
    crs = kwargs.get('crs')
    return element.map(lambda e: convert_to_geotype(e, crs), Element)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_geographic(element, kdims=None):
    """Utility to determine whether the supplied element, optionally a
    subset of its key dimensions, represents a geographic coordinate
    system."""
    if isinstance(element, (Overlay, NdOverlay)):
        # An overlay is geographic if any contained element is.
        return any(element.traverse(is_geographic, [_Element]))

    if kdims:
        kdims = [element.get_dimension(d) for d in kdims]
    else:
        kdims = element.kdims

    if len(kdims) != 2 and not isinstance(element, (Graph, Nodes)):
        return False
    if isinstance(element.data, geographic_types) or isinstance(element, (WMTS, Feature)):
        return True
    if isinstance(element, _Element):
        return kdims == element.kdims and element.crs
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def geoms(self, scale=None, bounds=None, as_element=True):
""" Returns the geometries held by the Feature. Parameters scale: str Scale of the geometry to return expressed as string. Available scales depends on the Feature type. NaturalEarthFeature: '10m', '50m', '110m' GSHHSFeature: 'auto', 'coarse', 'low', 'intermediate', 'high', 'full' bounds: tuple Tuple of a bounding region to query for geometries in as_element: boolean Whether to wrap the geometries in an element Returns ------- geometries: Polygons/Path Polygons or Path object wrapping around returned geometries """ |
feature = self.data
if scale is not None:
feature = feature.with_scale(scale)
if bounds:
extent = (bounds[0], bounds[2], bounds[1], bounds[3])
else:
extent = None
geoms = [g for g in feature.intersecting_geometries(extent) if g is not None]
if not as_element:
return geoms
elif not geoms or 'Polygon' in geoms[0].geom_type:
return Polygons(geoms, crs=feature.crs)
elif 'Point' in geoms[0].geom_type:
return Points(geoms, crs=feature.crs)
else:
return Path(geoms, crs=feature.crs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_shapefile(cls, shapefile, *args, **kwargs):
    """Loads a shapefile from disk and optionally merges it with a
    dataset.

    All additional arguments are forwarded to ``from_records``; see that
    method for the full signature.

    Returns
    -------
    shapes: Polygons or Path object containing the geometries.
    """
    reader = Reader(shapefile)
    return cls.from_records(reader.records(), *args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_records(cls, records, dataset=None, on=None, value=None, index=[], drop_missing=False, element=None, **kwargs):
    """Load data from ``cartopy.io.shapereader.Record`` objects,
    optionally merging with a dataset to form a choropleth.

    With only *records*, geometries get no associated values.  With a
    *dataset*, *on* maps record attribute names to dataset dimensions,
    *value* picks the value dimension to colour by, *index* names one or
    more key dimensions, and *drop_missing* drops shapes absent from the
    dataset.

    Returns
    -------
    shapes: Polygons or Path object containing the geometries.
    """
    if dataset is not None and not on:
        raise ValueError('To merge dataset with shapes mapping '
                         'must define attribute(s) to merge on.')
    if util.pd and isinstance(dataset, util.pd.DataFrame):
        dataset = Dataset(dataset)

    # Normalize 'on' to a {record_attribute: dimension} mapping.
    if not isinstance(on, (dict, list)):
        on = [on]
    if on and not isinstance(on, dict):
        on = {o: o for o in on}
    if not isinstance(index, list):
        index = [index]

    # Resolve index names to dataset dimensions where possible.
    kdims = []
    for ind in index:
        dim = dataset.get_dimension(ind) if dataset else None
        kdims.append(dim if dim else Dimension(ind))

    ddims = []
    if dataset:
        vdims = [dataset.get_dimension(value)] if value else dataset.vdims
        ddims = dataset.dimensions()
        if None in vdims:
            raise ValueError('Value dimension %s not found '
                             'in dataset dimensions %s' % (value, ddims))
    else:
        vdims = []

    data = []
    for i, rec in enumerate(records):
        geom = {}
        if dataset:
            # Match this record's attributes against the dataset.
            selection = {dim: rec.attributes.get(attr, None)
                         for attr, dim in on.items()}
            row = dataset.select(**selection)
            if len(row):
                values = {k: v[0] for k, v in row.iloc[0].columns().items()}
            elif drop_missing:
                continue
            else:
                values = {vd.name: np.nan for vd in vdims}
            geom.update(values)
        if index:
            for kdim in kdims:
                if kdim in ddims and len(row):
                    k = row[kdim.name][0]
                elif kdim.name in rec.attributes:
                    k = rec.attributes[kdim.name]
                else:
                    k = None
                geom[kdim.name] = k
        geom['geometry'] = rec.geometry
        data.append(geom)

    if element is None:
        # Infer the element type from the first geometry, defaulting to
        # Polygons when nothing was collected.
        if data and data[0]:
            element = Polygons if isinstance(data[0]['geometry'], poly_types) else Path
        else:
            element = Polygons
    return element(data, vdims=kdims+vdims, **kwargs).opts(color=value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cb_plot(cb, plot=None):
    """Finds the subplot with the corresponding stream."""
    plot = plot or cb.plot
    if not isinstance(plot, GeoOverlayPlot):
        return plot
    # Recurse into subplots and keep those whose streams match the
    # callback and are currently triggering.
    candidates = [get_cb_plot(cb, sp) for sp in plot.subplots.values()]
    candidates = [p for p in candidates
                  if any(s in cb.streams and getattr(s, '_triggering', False)
                         for s in p.streams)]
    return candidates[0] if candidates else plot
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def skip(cb, msg, attributes):
    """Skips applying transforms if data is not geographic."""
    if not all(attr in msg for attr in attributes):
        return True
    plot = get_cb_plot(cb)
    geographic = getattr(plot, 'geographic', False)
    return not geographic or not hasattr(plot.current_frame, 'crs')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def project_ranges(cb, msg, attributes):
    """Projects ranges supplied by a callback from the plot projection
    back into the element's coordinate reference system."""
    if skip(cb, msg, attributes):
        return msg
    plot = get_cb_plot(cb)
    x0, x1 = msg.get('x_range', (0, 1000))
    y0, y1 = msg.get('y_range', (0, 1000))
    x0, y0, x1, y1 = project_extents((x0, y0, x1, y1), plot.projection,
                                     plot.current_frame.crs)
    projected = {'x_range': (x0, x1), 'y_range': (y0, y1)}
    # Only return the keys the caller asked for.
    return {k: v for k, v in projected.items() if k in attributes}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.