repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
pysal/giddy | giddy/rank.py | Tau._calc | python | def _calc(self, x, y):
x = np.array(x)
y = np.array(y)
n = len(y)
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
vals = y[perm]
ExtraY = 0
ExtraX = 0
ACount = 0
BCount = 0
CCount = 0
DCount = 0
ECount = 0
DCount = 0
Concordant = 0
Discordant = 0
# ids for left child
li = [None] * (n - 1)
# ids for right child
ri = [None] * (n - 1)
# number of left descendants for a node
ld = np.zeros(n)
# number of values equal to value i
nequal = np.zeros(n)
for i in range(1, n):
NumBefore = 0
NumEqual = 1
root = 0
x0 = x[perm[i - 1]]
y0 = y[perm[i - 1]]
x1 = x[perm[i]]
y1 = y[perm[i]]
if x0 != x1:
DCount = 0
ECount = 1
else:
if y0 == y1:
ECount += 1
else:
DCount += ECount
ECount = 1
root = 0
inserting = True
while inserting:
current = y[perm[i]]
if current > y[perm[root]]:
# right branch
NumBefore += 1 + ld[root] + nequal[root]
if ri[root] is None:
# insert as right child to root
ri[root] = i
inserting = False
else:
root = ri[root]
elif current < y[perm[root]]:
# increment number of left descendants
ld[root] += 1
if li[root] is None:
# insert as left child to root
li[root] = i
inserting = False
else:
root = li[root]
elif current == y[perm[root]]:
NumBefore += ld[root]
NumEqual += nequal[root] + 1
nequal[root] += 1
inserting = False
ACount = NumBefore - DCount
BCount = NumEqual - ECount
CCount = i - (ACount + BCount + DCount + ECount - 1)
ExtraY += DCount
ExtraX += BCount
Concordant += ACount
Discordant += CCount
cd = Concordant + Discordant
num = Concordant - Discordant
tau = num / np.sqrt((cd + ExtraX) * (cd + ExtraY))
v = (4. * n + 10) / (9. * n * (n - 1))
z = tau / np.sqrt(v)
pval = erfc(np.abs(z) / 1.4142136) # follow scipy
return tau, pval, Concordant, Discordant, ExtraX, ExtraY | List based implementation of binary tree algorithm for concordance
measure after :cite:`Christensen2005`. | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/rank.py#L171-L261 | null | class Tau:
"""
Kendall's Tau is based on a comparison of the number of pairs of n
observations that have concordant ranks between two variables.
Parameters
----------
x : array
(n, ), first variable.
y : array
(n, ), second variable.
Attributes
----------
tau : float
The classic Tau statistic.
tau_p : float
asymptotic p-value.
Notes
-----
Modification of algorithm suggested by :cite:`Christensen2005`.PySAL/giddy
implementation uses a list based representation of a binary tree for
the accumulation of the concordance measures. Ties are handled by this
implementation (in other words, if there are ties in either x, or y, or
both, the calculation returns Tau_b, if no ties classic Tau is returned.)
Examples
--------
>>> from scipy.stats import kendalltau
>>> from giddy.rank import Tau
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> kt = Tau(x1,x2)
>>> kt.tau
-0.47140452079103173
>>> kt.tau_p
0.24821309157521476
>>> tau, p = kendalltau(x1,x2)
>>> tau
-0.4714045207910316
>>> p
0.2827454599327748
"""
def __init__(self, x, y):
res = self._calc(x, y)
self.tau = res[0]
self.tau_p = res[1]
self.concordant = res[2]
self.discordant = res[3]
self.extraX = res[4]
self.extraY = res[5]
|
pysal/giddy | giddy/ergodic.py | steady_state | python | def steady_state(P):
v, d = la.eig(np.transpose(P))
d = np.array(d)
# for a regular P maximum eigenvalue will be 1
mv = max(v)
# find its position
i = v.tolist().index(mv)
row = abs(d[:, i])
# normalize eigenvector corresponding to the eigenvalue 1
return row / sum(row) | Calculates the steady state probability vector for a regular Markov
transition matrix P.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, ), steady state distribution.
Examples
--------
Taken from :cite:`Kemeny1967`. Land of Oz example where the states are
Rain, Nice and Snow, so there is 25 percent chance that if it
rained in Oz today, it will snow tomorrow, while if it snowed today in
Oz there is a 50 percent chance of snow again tomorrow and a 25
percent chance of a nice day (nice, like when the witch with the monkeys
is melting).
>>> import numpy as np
>>> from giddy.ergodic import steady_state
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> steady_state(p)
array([0.4, 0.2, 0.4])
Thus, the long run distribution for Oz is to have 40 percent of the
days classified as Rain, 20 percent as Nice, and 40 percent as Snow
(states are mutually exclusive). | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/ergodic.py#L12-L59 | null | """
Summary measures for ergodic Markov chains
"""
__author__ = "Sergio J. Rey <sjsrey@gmail.com>"
__all__ = ['steady_state', 'fmpt', 'var_fmpt']
import numpy as np
import numpy.linalg as la
def fmpt(P):
"""
Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : array
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
array([[2.5 , 4. , 3.33333333],
[2.66666667, 5. , 2.66666667],
[3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
k = P.shape[0]
A = np.zeros_like(P)
ss = steady_state(P).reshape(k, 1)
for i in range(k):
A[:, i] = ss
A = A.transpose()
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
A_diag = np.diag(A)
A_diag = A_diag + (A_diag == 0)
D = np.diag(1. / A_diag)
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
return np.array(M)
def var_fmpt(P):
"""
Variances of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, k), elements are the variances for the number of intervals
required for a chain starting in state i to first enter state j.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import var_fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> vfm=var_fmpt(p)
>>> vfm
array([[ 5.58333333, 12. , 6.88888889],
[ 6.22222222, 12. , 6.22222222],
[ 6.88888889, 12. , 5.58333333]])
Notes
-----
Uses formulation (and examples on p. 83) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
A = P ** 1000
n, k = A.shape
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
D = np.diag(1. / np.diag(A))
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
ZM = Z * M
ZMdg = np.diag(np.diag(ZM))
W = M * (2 * Zdg * D - I) + 2 * (ZM - E * ZMdg)
return np.array(W - np.multiply(M, M))
|
pysal/giddy | giddy/ergodic.py | fmpt | python | def fmpt(P):
P = np.matrix(P)
k = P.shape[0]
A = np.zeros_like(P)
ss = steady_state(P).reshape(k, 1)
for i in range(k):
A[:, i] = ss
A = A.transpose()
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
A_diag = np.diag(A)
A_diag = A_diag + (A_diag == 0)
D = np.diag(1. / A_diag)
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
return np.array(M) | Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : array
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
array([[2.5 , 4. , 3.33333333],
[2.66666667, 5. , 2.66666667],
[3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in :cite:`Kemeny1967`. | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/ergodic.py#L62-L118 | [
"def steady_state(P):\n \"\"\"\n Calculates the steady state probability vector for a regular Markov\n transition matrix P.\n\n Parameters\n ----------\n P : array\n (k, k), an ergodic Markov transition probability matrix.\n\n Returns\n -------\n : array\n (k, ), steady state distribution.\n\n Examples\n --------\n Taken from :cite:`Kemeny1967`. Land of Oz example where the states are\n Rain, Nice and Snow, so there is 25 percent chance that if it\n rained in Oz today, it will snow tomorrow, while if it snowed today in\n Oz there is a 50 percent chance of snow again tomorrow and a 25\n percent chance of a nice day (nice, like when the witch with the monkeys\n is melting).\n\n >>> import numpy as np\n >>> from giddy.ergodic import steady_state\n >>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])\n >>> steady_state(p)\n array([0.4, 0.2, 0.4])\n\n Thus, the long run distribution for Oz is to have 40 percent of the\n days classified as Rain, 20 percent as Nice, and 40 percent as Snow\n (states are mutually exclusive).\n\n \"\"\"\n\n v, d = la.eig(np.transpose(P))\n d = np.array(d)\n\n # for a regular P maximum eigenvalue will be 1\n mv = max(v)\n # find its position\n i = v.tolist().index(mv)\n\n row = abs(d[:, i])\n\n # normalize eigenvector corresponding to the eigenvalue 1\n return row / sum(row)\n"
] | """
Summary measures for ergodic Markov chains
"""
__author__ = "Sergio J. Rey <sjsrey@gmail.com>"
__all__ = ['steady_state', 'fmpt', 'var_fmpt']
import numpy as np
import numpy.linalg as la
def steady_state(P):
"""
Calculates the steady state probability vector for a regular Markov
transition matrix P.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, ), steady state distribution.
Examples
--------
Taken from :cite:`Kemeny1967`. Land of Oz example where the states are
Rain, Nice and Snow, so there is 25 percent chance that if it
rained in Oz today, it will snow tomorrow, while if it snowed today in
Oz there is a 50 percent chance of snow again tomorrow and a 25
percent chance of a nice day (nice, like when the witch with the monkeys
is melting).
>>> import numpy as np
>>> from giddy.ergodic import steady_state
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> steady_state(p)
array([0.4, 0.2, 0.4])
Thus, the long run distribution for Oz is to have 40 percent of the
days classified as Rain, 20 percent as Nice, and 40 percent as Snow
(states are mutually exclusive).
"""
v, d = la.eig(np.transpose(P))
d = np.array(d)
# for a regular P maximum eigenvalue will be 1
mv = max(v)
# find its position
i = v.tolist().index(mv)
row = abs(d[:, i])
# normalize eigenvector corresponding to the eigenvalue 1
return row / sum(row)
def var_fmpt(P):
"""
Variances of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, k), elements are the variances for the number of intervals
required for a chain starting in state i to first enter state j.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import var_fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> vfm=var_fmpt(p)
>>> vfm
array([[ 5.58333333, 12. , 6.88888889],
[ 6.22222222, 12. , 6.22222222],
[ 6.88888889, 12. , 5.58333333]])
Notes
-----
Uses formulation (and examples on p. 83) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
A = P ** 1000
n, k = A.shape
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
D = np.diag(1. / np.diag(A))
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
ZM = Z * M
ZMdg = np.diag(np.diag(ZM))
W = M * (2 * Zdg * D - I) + 2 * (ZM - E * ZMdg)
return np.array(W - np.multiply(M, M))
|
pysal/giddy | giddy/ergodic.py | var_fmpt | python | def var_fmpt(P):
P = np.matrix(P)
A = P ** 1000
n, k = A.shape
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
D = np.diag(1. / np.diag(A))
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
ZM = Z * M
ZMdg = np.diag(np.diag(ZM))
W = M * (2 * Zdg * D - I) + 2 * (ZM - E * ZMdg)
return np.array(W - np.multiply(M, M)) | Variances of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, k), elements are the variances for the number of intervals
required for a chain starting in state i to first enter state j.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import var_fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> vfm=var_fmpt(p)
>>> vfm
array([[ 5.58333333, 12. , 6.88888889],
[ 6.22222222, 12. , 6.22222222],
[ 6.88888889, 12. , 5.58333333]])
Notes
-----
Uses formulation (and examples on p. 83) in :cite:`Kemeny1967`. | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/ergodic.py#L121-L167 | null | """
Summary measures for ergodic Markov chains
"""
__author__ = "Sergio J. Rey <sjsrey@gmail.com>"
__all__ = ['steady_state', 'fmpt', 'var_fmpt']
import numpy as np
import numpy.linalg as la
def steady_state(P):
"""
Calculates the steady state probability vector for a regular Markov
transition matrix P.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, ), steady state distribution.
Examples
--------
Taken from :cite:`Kemeny1967`. Land of Oz example where the states are
Rain, Nice and Snow, so there is 25 percent chance that if it
rained in Oz today, it will snow tomorrow, while if it snowed today in
Oz there is a 50 percent chance of snow again tomorrow and a 25
percent chance of a nice day (nice, like when the witch with the monkeys
is melting).
>>> import numpy as np
>>> from giddy.ergodic import steady_state
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> steady_state(p)
array([0.4, 0.2, 0.4])
Thus, the long run distribution for Oz is to have 40 percent of the
days classified as Rain, 20 percent as Nice, and 40 percent as Snow
(states are mutually exclusive).
"""
v, d = la.eig(np.transpose(P))
d = np.array(d)
# for a regular P maximum eigenvalue will be 1
mv = max(v)
# find its position
i = v.tolist().index(mv)
row = abs(d[:, i])
# normalize eigenvector corresponding to the eigenvalue 1
return row / sum(row)
def fmpt(P):
"""
Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : array
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
array([[2.5 , 4. , 3.33333333],
[2.66666667, 5. , 2.66666667],
[3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
k = P.shape[0]
A = np.zeros_like(P)
ss = steady_state(P).reshape(k, 1)
for i in range(k):
A[:, i] = ss
A = A.transpose()
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
A_diag = np.diag(A)
A_diag = A_diag + (A_diag == 0)
D = np.diag(1. / A_diag)
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
return np.array(M)
|
pysal/giddy | giddy/directional.py | Rose.permute | python | def permute(self, permutations=99, alternative='two.sided'):
rY = self.Y.copy()
idxs = np.arange(len(rY))
counts = np.zeros((permutations, len(self.counts)))
for m in range(permutations):
np.random.shuffle(idxs)
res = self._calc(rY[idxs, :], self.w, self.k)
counts[m] = res['counts']
self.counts_perm = counts
self.larger_perm = np.array(
[(counts[:, i] >= self.counts[i]).sum() for i in range(self.k)])
self.smaller_perm = np.array(
[(counts[:, i] <= self.counts[i]).sum() for i in range(self.k)])
self.expected_perm = counts.mean(axis=0)
self.alternative = alternative
# pvalue logic
# if P is the proportion that are as large for a one sided test (larger
# than), then
# p=P.
#
# For a two-tailed test, if P < .5, p = 2 * P, else, p = 2(1-P)
# Source: Rayner, J. C. W., O. Thas, and D. J. Best. 2009. "Appendix B:
# Parametric Bootstrap P-Values." In Smooth Tests of Goodness of Fit,
# 247. John Wiley and Sons.
# Note that the larger and smaller counts would be complements (except
# for the shared equality, for
# a given bin in the circular histogram. So we only need one of them.
# We report two-sided p-values for each bin as the default
# since a priori there could # be different alternatives for each bin
# depending on the problem at hand.
alt = alternative.upper()
if alt == 'TWO.SIDED':
P = (self.larger_perm + 1) / (permutations + 1.)
mask = P < 0.5
self.p = mask * 2 * P + (1 - mask) * 2 * (1 - P)
elif alt == 'POSITIVE':
# NE, SW sectors are higher, NW, SE are lower
POS = _POS8
if self.k == 4:
POS = _POS4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = POS * L + (1 - POS) * S
self.p = P
elif alt == 'NEGATIVE':
# NE, SW sectors are lower, NW, SE are higher
NEG = _NEG8
if self.k == 4:
NEG = _NEG4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = NEG * L + (1 - NEG) * S
self.p = P
else:
print(('Bad option for alternative: %s.' % alternative)) | Generate ransom spatial permutations for inference on LISA vectors.
Parameters
----------
permutations : int, optional
Number of random permutations of observations.
alternative : string, optional
Type of alternative to form in generating p-values.
Options are: `two-sided` which tests for difference between observed
counts and those obtained from the permutation distribution;
`positive` which tests the alternative that the focal unit and its
lag move in the same direction over time; `negative` which tests
that the focal unit and its lag move in opposite directions over
the interval. | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/directional.py#L227-L300 | [
"def _calc(self, Y, w, k):\n wY = weights.lag_spatial(w, Y)\n dx = Y[:, -1] - Y[:, 0]\n dy = wY[:, -1] - wY[:, 0]\n self.wY = wY\n self.Y = Y\n r = np.sqrt(dx * dx + dy * dy)\n theta = np.arctan2(dy, dx)\n neg = theta < 0.0\n utheta = theta * (1 - neg) + neg * (2 * np.pi + theta)\n counts, bins = np.histogram(utheta, self.cuts)\n results = {}\n results['counts'] = counts\n results['theta'] = theta\n results['bins'] = bins\n results['r'] = r\n results['lag'] = wY\n results['dx'] = dx\n results['dy'] = dy\n return results\n"
] | class Rose(object):
"""
Rose diagram based inference for directional LISAs.
For n units with LISA values at two points in time, the Rose class provides
the LISA vectors, their visualization, and computationally based inference.
Parameters
----------
Y : array (n,2)
Columns correspond to end-point time periods to calculate LISA vectors for n object.
w : PySAL W
Spatial weights object.
k : int
Number of circular sectors in rose diagram.
Attributes
----------
cuts : (k, 1) ndarray
Radian cuts for rose diagram (circular histogram).
counts: (k, 1) ndarray
Number of vectors contained in each sector.
r : (n, 1) ndarray
Vector lengths.
theta : (n,1) ndarray
Signed radians for observed LISA vectors.
If self.permute is called the following attributes are available:
alternative : string
Form of the specified alternative hypothesis ['two-sided'(default) |
'positive' | 'negative']
counts_perm : (permutations, k) ndarray
Counts obtained for each sector for every permutation
expected_perm : (k, 1) ndarray
Average number of counts for each sector taken over all permutations.
p : (k, 1) ndarray
Psuedo p-values for the observed sector counts under the specified alternative.
larger_perm : (k, 1) ndarray
Number of times realized counts are as large as observed sector count.
smaller_perm : (k, 1) ndarray
Number of times realized counts are as small as observed sector count.
"""
def __init__(self, Y, w, k=8):
"""
Calculation of rose diagram for local indicators of spatial
association.
Parameters
----------
Y : (n, 2) ndarray
Variable observed on n spatial units over 2 time periods
w : W
Spatial weights object.
k : int
number of circular sectors in rose diagram (the default is 8).
Notes
-----
Based on :cite:`Rey2011`.
Examples
--------
Constructing data for illustration of directional LISA analytics.
Data is for the 48 lower US states over the period 1969-2009 and
includes per capita income normalized to the national average.
Load comma delimited data file in and convert to a numpy array
>>> import libpysal
>>> from giddy.directional import Rose
>>> import matplotlib.pyplot as plt
>>> file_path = libpysal.examples.get_path("spi_download.csv")
>>> f=open(file_path,'r')
>>> lines=f.readlines()
>>> f.close()
>>> lines=[line.strip().split(",") for line in lines]
>>> names=[line[2] for line in lines[1:-5]]
>>> data=np.array([list(map(int,line[3:])) for line in lines[1:-5]])
Bottom of the file has regional data which we don't need for this
example so we will subset only those records that match a state name
>>> sids=list(range(60))
>>> out=['"United States 3/"',
... '"Alaska 3/"',
... '"District of Columbia"',
... '"Hawaii 3/"',
... '"New England"',
... '"Mideast"',
... '"Great Lakes"',
... '"Plains"',
... '"Southeast"',
... '"Southwest"',
... '"Rocky Mountain"',
... '"Far West 3/"']
>>> snames=[name for name in names if name not in out]
>>> sids=[names.index(name) for name in snames]
>>> states=data[sids,:]
>>> us=data[0]
>>> years=np.arange(1969,2009)
Now we convert state incomes to express them relative to the national
average
>>> rel=states/(us*1.)
Create our contiguity matrix from an external GAL file and row
standardize the resulting weights
>>> gal=libpysal.io.open(libpysal.examples.get_path('states48.gal'))
>>> w=gal.read()
>>> w.transform='r'
Take the first and last year of our income data as the interval to do
the directional directional analysis
>>> Y=rel[:,[0,-1]]
Set the random seed generator which is used in the permutation based
inference for the rose diagram so that we can replicate our example
results
>>> np.random.seed(100)
Call the rose function to construct the directional histogram for the
dynamic LISA statistics. We will use four circular sectors for our
histogram
>>> r4=Rose(Y,w,k=4)
What are the cut-offs for our histogram - in radians
>>> r4.cuts
array([0. , 1.57079633, 3.14159265, 4.71238898, 6.28318531])
How many vectors fell in each sector
>>> r4.counts
array([32, 5, 9, 2])
We can test whether these counts are different than what would be
expected if there was no association between the movement of the
focal unit and its spatial lag.
To do so we call the `permute` method of the object
>>> r4.permute()
and then inspect the `p` attibute:
>>> r4.p
array([0.04, 0. , 0.02, 0. ])
Repeat the exercise but now for 8 rather than 4 sectors
>>> r8 = Rose(Y, w, k=8)
>>> r8.counts
array([19, 13, 3, 2, 7, 2, 1, 1])
>>> r8.permute()
>>> r8.p
array([0.86, 0.08, 0.16, 0. , 0.02, 0.2 , 0.56, 0. ])
The default is a two-sided alternative. There is an option for a
directional alternative reflecting positive co-movement of the focal
series with its spatial lag. In this case the number of vectors in
quadrants I and III should be much larger than expected, while the
counts of vectors falling in quadrants II and IV should be much lower
than expected.
>>> r8.permute(alternative='positive')
>>> r8.p
array([0.51, 0.04, 0.28, 0.02, 0.01, 0.14, 0.57, 0.03])
Finally, there is a second directional alternative for examining the
hypothesis that the focal unit and its lag move in opposite directions.
>>> r8.permute(alternative='negative')
>>> r8.p
array([0.69, 0.99, 0.92, 1. , 1. , 0.97, 0.74, 1. ])
We can call the plot method to visualize directional LISAs as a
rose diagram conditional on the starting relative income:
>>> fig1, _ = r8.plot(attribute=Y[:,0])
>>> plt.show(fig1)
"""
self.Y = Y
self.w = w
self.k = k
self.permtuations = 0
self.sw = 2 * np.pi / self.k
self.cuts = np.arange(0.0, 2 * np.pi + self.sw, self.sw)
observed = self._calc(Y, w, k)
self.theta = observed['theta']
self.bins = observed['bins']
self.counts = observed['counts']
self.r = observed['r']
self.lag = observed['lag']
self._dx = observed['dx']
self._dy = observed['dy']
def _calc(self, Y, w, k):
wY = weights.lag_spatial(w, Y)
dx = Y[:, -1] - Y[:, 0]
dy = wY[:, -1] - wY[:, 0]
self.wY = wY
self.Y = Y
r = np.sqrt(dx * dx + dy * dy)
theta = np.arctan2(dy, dx)
neg = theta < 0.0
utheta = theta * (1 - neg) + neg * (2 * np.pi + theta)
counts, bins = np.histogram(utheta, self.cuts)
results = {}
results['counts'] = counts
results['theta'] = theta
results['bins'] = bins
results['r'] = r
results['lag'] = wY
results['dx'] = dx
results['dy'] = dy
return results
@_requires('splot')
def plot(self, attribute=None, ax=None, **kwargs):
"""
Plot the rose diagram.
Parameters
----------
attribute : (n,) ndarray, optional
Variable to specify colors of the colorbars.
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None. Note, this axis should have a polar projection.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_rose
fig, ax = dynamic_lisa_rose(self, attribute=attribute,
ax=ax, **kwargs)
return fig, ax
def plot_origin(self): # TODO add attribute option to color vectors
"""
Plot vectors of positional transition of LISA values starting
from the same origin.
"""
import matplotlib.cm as cm
import matplotlib.pyplot as plt
ax = plt.subplot(111)
xlim = [self._dx.min(), self._dx.max()]
ylim = [self._dy.min(), self._dy.max()]
for x, y in zip(self._dx, self._dy):
xs = [0, x]
ys = [0, y]
plt.plot(xs, ys, '-b') # TODO change this to scale with attribute
plt.axis('equal')
plt.xlim(xlim)
plt.ylim(ylim)
@_requires('splot')
def plot_vectors(self, arrows=True):
"""
Plot vectors of positional transition of LISA values
within quadrant in scatterplot in a polar plot.
Parameters
----------
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None.
arrows : boolean, optional
If True show arrowheads of vectors. Default =True
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_vectors
fig, ax = dynamic_lisa_vectors(self, arrows=arrows)
return fig, ax
|
pysal/giddy | giddy/directional.py | Rose.plot | python | def plot(self, attribute=None, ax=None, **kwargs):
from splot.giddy import dynamic_lisa_rose
fig, ax = dynamic_lisa_rose(self, attribute=attribute,
ax=ax, **kwargs)
return fig, ax | Plot the rose diagram.
Parameters
----------
attribute : (n,) ndarray, optional
Variable to specify colors of the colorbars.
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None. Note, this axis should have a polar projection.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/directional.py#L324-L351 | null | class Rose(object):
"""
Rose diagram based inference for directional LISAs.
For n units with LISA values at two points in time, the Rose class provides
the LISA vectors, their visualization, and computationally based inference.
Parameters
----------
Y : array (n,2)
Columns correspond to end-point time periods to calculate LISA vectors for n object.
w : PySAL W
Spatial weights object.
k : int
Number of circular sectors in rose diagram.
Attributes
----------
cuts : (k, 1) ndarray
Radian cuts for rose diagram (circular histogram).
counts: (k, 1) ndarray
Number of vectors contained in each sector.
r : (n, 1) ndarray
Vector lengths.
theta : (n,1) ndarray
Signed radians for observed LISA vectors.
If self.permute is called the following attributes are available:
alternative : string
Form of the specified alternative hypothesis ['two-sided'(default) |
'positive' | 'negative']
counts_perm : (permutations, k) ndarray
Counts obtained for each sector for every permutation
expected_perm : (k, 1) ndarray
Average number of counts for each sector taken over all permutations.
p : (k, 1) ndarray
Psuedo p-values for the observed sector counts under the specified alternative.
larger_perm : (k, 1) ndarray
Number of times realized counts are as large as observed sector count.
smaller_perm : (k, 1) ndarray
Number of times realized counts are as small as observed sector count.
"""
def __init__(self, Y, w, k=8):
"""
Calculation of rose diagram for local indicators of spatial
association.
Parameters
----------
Y : (n, 2) ndarray
Variable observed on n spatial units over 2 time periods
w : W
Spatial weights object.
k : int
number of circular sectors in rose diagram (the default is 8).
Notes
-----
Based on :cite:`Rey2011`.
Examples
--------
Constructing data for illustration of directional LISA analytics.
Data is for the 48 lower US states over the period 1969-2009 and
includes per capita income normalized to the national average.
Load comma delimited data file in and convert to a numpy array
>>> import libpysal
>>> from giddy.directional import Rose
>>> import matplotlib.pyplot as plt
>>> file_path = libpysal.examples.get_path("spi_download.csv")
>>> f=open(file_path,'r')
>>> lines=f.readlines()
>>> f.close()
>>> lines=[line.strip().split(",") for line in lines]
>>> names=[line[2] for line in lines[1:-5]]
>>> data=np.array([list(map(int,line[3:])) for line in lines[1:-5]])
Bottom of the file has regional data which we don't need for this
example so we will subset only those records that match a state name
>>> sids=list(range(60))
>>> out=['"United States 3/"',
... '"Alaska 3/"',
... '"District of Columbia"',
... '"Hawaii 3/"',
... '"New England"',
... '"Mideast"',
... '"Great Lakes"',
... '"Plains"',
... '"Southeast"',
... '"Southwest"',
... '"Rocky Mountain"',
... '"Far West 3/"']
>>> snames=[name for name in names if name not in out]
>>> sids=[names.index(name) for name in snames]
>>> states=data[sids,:]
>>> us=data[0]
>>> years=np.arange(1969,2009)
Now we convert state incomes to express them relative to the national
average
>>> rel=states/(us*1.)
Create our contiguity matrix from an external GAL file and row
standardize the resulting weights
>>> gal=libpysal.io.open(libpysal.examples.get_path('states48.gal'))
>>> w=gal.read()
>>> w.transform='r'
Take the first and last year of our income data as the interval to do
the directional directional analysis
>>> Y=rel[:,[0,-1]]
Set the random seed generator which is used in the permutation based
inference for the rose diagram so that we can replicate our example
results
>>> np.random.seed(100)
Call the rose function to construct the directional histogram for the
dynamic LISA statistics. We will use four circular sectors for our
histogram
>>> r4=Rose(Y,w,k=4)
What are the cut-offs for our histogram - in radians
>>> r4.cuts
array([0. , 1.57079633, 3.14159265, 4.71238898, 6.28318531])
How many vectors fell in each sector
>>> r4.counts
array([32, 5, 9, 2])
We can test whether these counts are different than what would be
expected if there was no association between the movement of the
focal unit and its spatial lag.
To do so we call the `permute` method of the object
>>> r4.permute()
and then inspect the `p` attibute:
>>> r4.p
array([0.04, 0. , 0.02, 0. ])
Repeat the exercise but now for 8 rather than 4 sectors
>>> r8 = Rose(Y, w, k=8)
>>> r8.counts
array([19, 13, 3, 2, 7, 2, 1, 1])
>>> r8.permute()
>>> r8.p
array([0.86, 0.08, 0.16, 0. , 0.02, 0.2 , 0.56, 0. ])
The default is a two-sided alternative. There is an option for a
directional alternative reflecting positive co-movement of the focal
series with its spatial lag. In this case the number of vectors in
quadrants I and III should be much larger than expected, while the
counts of vectors falling in quadrants II and IV should be much lower
than expected.
>>> r8.permute(alternative='positive')
>>> r8.p
array([0.51, 0.04, 0.28, 0.02, 0.01, 0.14, 0.57, 0.03])
Finally, there is a second directional alternative for examining the
hypothesis that the focal unit and its lag move in opposite directions.
>>> r8.permute(alternative='negative')
>>> r8.p
array([0.69, 0.99, 0.92, 1. , 1. , 0.97, 0.74, 1. ])
We can call the plot method to visualize directional LISAs as a
rose diagram conditional on the starting relative income:
>>> fig1, _ = r8.plot(attribute=Y[:,0])
>>> plt.show(fig1)
"""
self.Y = Y
self.w = w
self.k = k
self.permtuations = 0
self.sw = 2 * np.pi / self.k
self.cuts = np.arange(0.0, 2 * np.pi + self.sw, self.sw)
observed = self._calc(Y, w, k)
self.theta = observed['theta']
self.bins = observed['bins']
self.counts = observed['counts']
self.r = observed['r']
self.lag = observed['lag']
self._dx = observed['dx']
self._dy = observed['dy']
def permute(self, permutations=99, alternative='two.sided'):
"""
Generate ransom spatial permutations for inference on LISA vectors.
Parameters
----------
permutations : int, optional
Number of random permutations of observations.
alternative : string, optional
Type of alternative to form in generating p-values.
Options are: `two-sided` which tests for difference between observed
counts and those obtained from the permutation distribution;
`positive` which tests the alternative that the focal unit and its
lag move in the same direction over time; `negative` which tests
that the focal unit and its lag move in opposite directions over
the interval.
"""
rY = self.Y.copy()
idxs = np.arange(len(rY))
counts = np.zeros((permutations, len(self.counts)))
for m in range(permutations):
np.random.shuffle(idxs)
res = self._calc(rY[idxs, :], self.w, self.k)
counts[m] = res['counts']
self.counts_perm = counts
self.larger_perm = np.array(
[(counts[:, i] >= self.counts[i]).sum() for i in range(self.k)])
self.smaller_perm = np.array(
[(counts[:, i] <= self.counts[i]).sum() for i in range(self.k)])
self.expected_perm = counts.mean(axis=0)
self.alternative = alternative
# pvalue logic
# if P is the proportion that are as large for a one sided test (larger
# than), then
# p=P.
#
# For a two-tailed test, if P < .5, p = 2 * P, else, p = 2(1-P)
# Source: Rayner, J. C. W., O. Thas, and D. J. Best. 2009. "Appendix B:
# Parametric Bootstrap P-Values." In Smooth Tests of Goodness of Fit,
# 247. John Wiley and Sons.
# Note that the larger and smaller counts would be complements (except
# for the shared equality, for
# a given bin in the circular histogram. So we only need one of them.
# We report two-sided p-values for each bin as the default
# since a priori there could # be different alternatives for each bin
# depending on the problem at hand.
alt = alternative.upper()
if alt == 'TWO.SIDED':
P = (self.larger_perm + 1) / (permutations + 1.)
mask = P < 0.5
self.p = mask * 2 * P + (1 - mask) * 2 * (1 - P)
elif alt == 'POSITIVE':
# NE, SW sectors are higher, NW, SE are lower
POS = _POS8
if self.k == 4:
POS = _POS4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = POS * L + (1 - POS) * S
self.p = P
elif alt == 'NEGATIVE':
# NE, SW sectors are lower, NW, SE are higher
NEG = _NEG8
if self.k == 4:
NEG = _NEG4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = NEG * L + (1 - NEG) * S
self.p = P
else:
print(('Bad option for alternative: %s.' % alternative))
def _calc(self, Y, w, k):
wY = weights.lag_spatial(w, Y)
dx = Y[:, -1] - Y[:, 0]
dy = wY[:, -1] - wY[:, 0]
self.wY = wY
self.Y = Y
r = np.sqrt(dx * dx + dy * dy)
theta = np.arctan2(dy, dx)
neg = theta < 0.0
utheta = theta * (1 - neg) + neg * (2 * np.pi + theta)
counts, bins = np.histogram(utheta, self.cuts)
results = {}
results['counts'] = counts
results['theta'] = theta
results['bins'] = bins
results['r'] = r
results['lag'] = wY
results['dx'] = dx
results['dy'] = dy
return results
@_requires('splot')
def plot_origin(self): # TODO add attribute option to color vectors
"""
Plot vectors of positional transition of LISA values starting
from the same origin.
"""
import matplotlib.cm as cm
import matplotlib.pyplot as plt
ax = plt.subplot(111)
xlim = [self._dx.min(), self._dx.max()]
ylim = [self._dy.min(), self._dy.max()]
for x, y in zip(self._dx, self._dy):
xs = [0, x]
ys = [0, y]
plt.plot(xs, ys, '-b') # TODO change this to scale with attribute
plt.axis('equal')
plt.xlim(xlim)
plt.ylim(ylim)
@_requires('splot')
def plot_vectors(self, arrows=True):
"""
Plot vectors of positional transition of LISA values
within quadrant in scatterplot in a polar plot.
Parameters
----------
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None.
arrows : boolean, optional
If True show arrowheads of vectors. Default =True
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_vectors
fig, ax = dynamic_lisa_vectors(self, arrows=arrows)
return fig, ax
|
pysal/giddy | giddy/directional.py | Rose.plot_origin | python | def plot_origin(self): # TODO add attribute option to color vectors
import matplotlib.cm as cm
import matplotlib.pyplot as plt
ax = plt.subplot(111)
xlim = [self._dx.min(), self._dx.max()]
ylim = [self._dy.min(), self._dy.max()]
for x, y in zip(self._dx, self._dy):
xs = [0, x]
ys = [0, y]
plt.plot(xs, ys, '-b') # TODO change this to scale with attribute
plt.axis('equal')
plt.xlim(xlim)
plt.ylim(ylim) | Plot vectors of positional transition of LISA values starting
from the same origin. | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/directional.py#L353-L369 | null | class Rose(object):
"""
Rose diagram based inference for directional LISAs.
For n units with LISA values at two points in time, the Rose class provides
the LISA vectors, their visualization, and computationally based inference.
Parameters
----------
Y : array (n,2)
Columns correspond to end-point time periods to calculate LISA vectors for n object.
w : PySAL W
Spatial weights object.
k : int
Number of circular sectors in rose diagram.
Attributes
----------
cuts : (k, 1) ndarray
Radian cuts for rose diagram (circular histogram).
counts: (k, 1) ndarray
Number of vectors contained in each sector.
r : (n, 1) ndarray
Vector lengths.
theta : (n,1) ndarray
Signed radians for observed LISA vectors.
If self.permute is called the following attributes are available:
alternative : string
Form of the specified alternative hypothesis ['two-sided'(default) |
'positive' | 'negative']
counts_perm : (permutations, k) ndarray
Counts obtained for each sector for every permutation
expected_perm : (k, 1) ndarray
Average number of counts for each sector taken over all permutations.
p : (k, 1) ndarray
Psuedo p-values for the observed sector counts under the specified alternative.
larger_perm : (k, 1) ndarray
Number of times realized counts are as large as observed sector count.
smaller_perm : (k, 1) ndarray
Number of times realized counts are as small as observed sector count.
"""
def __init__(self, Y, w, k=8):
"""
Calculation of rose diagram for local indicators of spatial
association.
Parameters
----------
Y : (n, 2) ndarray
Variable observed on n spatial units over 2 time periods
w : W
Spatial weights object.
k : int
number of circular sectors in rose diagram (the default is 8).
Notes
-----
Based on :cite:`Rey2011`.
Examples
--------
Constructing data for illustration of directional LISA analytics.
Data is for the 48 lower US states over the period 1969-2009 and
includes per capita income normalized to the national average.
Load comma delimited data file in and convert to a numpy array
>>> import libpysal
>>> from giddy.directional import Rose
>>> import matplotlib.pyplot as plt
>>> file_path = libpysal.examples.get_path("spi_download.csv")
>>> f=open(file_path,'r')
>>> lines=f.readlines()
>>> f.close()
>>> lines=[line.strip().split(",") for line in lines]
>>> names=[line[2] for line in lines[1:-5]]
>>> data=np.array([list(map(int,line[3:])) for line in lines[1:-5]])
Bottom of the file has regional data which we don't need for this
example so we will subset only those records that match a state name
>>> sids=list(range(60))
>>> out=['"United States 3/"',
... '"Alaska 3/"',
... '"District of Columbia"',
... '"Hawaii 3/"',
... '"New England"',
... '"Mideast"',
... '"Great Lakes"',
... '"Plains"',
... '"Southeast"',
... '"Southwest"',
... '"Rocky Mountain"',
... '"Far West 3/"']
>>> snames=[name for name in names if name not in out]
>>> sids=[names.index(name) for name in snames]
>>> states=data[sids,:]
>>> us=data[0]
>>> years=np.arange(1969,2009)
Now we convert state incomes to express them relative to the national
average
>>> rel=states/(us*1.)
Create our contiguity matrix from an external GAL file and row
standardize the resulting weights
>>> gal=libpysal.io.open(libpysal.examples.get_path('states48.gal'))
>>> w=gal.read()
>>> w.transform='r'
Take the first and last year of our income data as the interval to do
the directional directional analysis
>>> Y=rel[:,[0,-1]]
Set the random seed generator which is used in the permutation based
inference for the rose diagram so that we can replicate our example
results
>>> np.random.seed(100)
Call the rose function to construct the directional histogram for the
dynamic LISA statistics. We will use four circular sectors for our
histogram
>>> r4=Rose(Y,w,k=4)
What are the cut-offs for our histogram - in radians
>>> r4.cuts
array([0. , 1.57079633, 3.14159265, 4.71238898, 6.28318531])
How many vectors fell in each sector
>>> r4.counts
array([32, 5, 9, 2])
We can test whether these counts are different than what would be
expected if there was no association between the movement of the
focal unit and its spatial lag.
To do so we call the `permute` method of the object
>>> r4.permute()
and then inspect the `p` attibute:
>>> r4.p
array([0.04, 0. , 0.02, 0. ])
Repeat the exercise but now for 8 rather than 4 sectors
>>> r8 = Rose(Y, w, k=8)
>>> r8.counts
array([19, 13, 3, 2, 7, 2, 1, 1])
>>> r8.permute()
>>> r8.p
array([0.86, 0.08, 0.16, 0. , 0.02, 0.2 , 0.56, 0. ])
The default is a two-sided alternative. There is an option for a
directional alternative reflecting positive co-movement of the focal
series with its spatial lag. In this case the number of vectors in
quadrants I and III should be much larger than expected, while the
counts of vectors falling in quadrants II and IV should be much lower
than expected.
>>> r8.permute(alternative='positive')
>>> r8.p
array([0.51, 0.04, 0.28, 0.02, 0.01, 0.14, 0.57, 0.03])
Finally, there is a second directional alternative for examining the
hypothesis that the focal unit and its lag move in opposite directions.
>>> r8.permute(alternative='negative')
>>> r8.p
array([0.69, 0.99, 0.92, 1. , 1. , 0.97, 0.74, 1. ])
We can call the plot method to visualize directional LISAs as a
rose diagram conditional on the starting relative income:
>>> fig1, _ = r8.plot(attribute=Y[:,0])
>>> plt.show(fig1)
"""
self.Y = Y
self.w = w
self.k = k
self.permtuations = 0
self.sw = 2 * np.pi / self.k
self.cuts = np.arange(0.0, 2 * np.pi + self.sw, self.sw)
observed = self._calc(Y, w, k)
self.theta = observed['theta']
self.bins = observed['bins']
self.counts = observed['counts']
self.r = observed['r']
self.lag = observed['lag']
self._dx = observed['dx']
self._dy = observed['dy']
def permute(self, permutations=99, alternative='two.sided'):
"""
Generate ransom spatial permutations for inference on LISA vectors.
Parameters
----------
permutations : int, optional
Number of random permutations of observations.
alternative : string, optional
Type of alternative to form in generating p-values.
Options are: `two-sided` which tests for difference between observed
counts and those obtained from the permutation distribution;
`positive` which tests the alternative that the focal unit and its
lag move in the same direction over time; `negative` which tests
that the focal unit and its lag move in opposite directions over
the interval.
"""
rY = self.Y.copy()
idxs = np.arange(len(rY))
counts = np.zeros((permutations, len(self.counts)))
for m in range(permutations):
np.random.shuffle(idxs)
res = self._calc(rY[idxs, :], self.w, self.k)
counts[m] = res['counts']
self.counts_perm = counts
self.larger_perm = np.array(
[(counts[:, i] >= self.counts[i]).sum() for i in range(self.k)])
self.smaller_perm = np.array(
[(counts[:, i] <= self.counts[i]).sum() for i in range(self.k)])
self.expected_perm = counts.mean(axis=0)
self.alternative = alternative
# pvalue logic
# if P is the proportion that are as large for a one sided test (larger
# than), then
# p=P.
#
# For a two-tailed test, if P < .5, p = 2 * P, else, p = 2(1-P)
# Source: Rayner, J. C. W., O. Thas, and D. J. Best. 2009. "Appendix B:
# Parametric Bootstrap P-Values." In Smooth Tests of Goodness of Fit,
# 247. John Wiley and Sons.
# Note that the larger and smaller counts would be complements (except
# for the shared equality, for
# a given bin in the circular histogram. So we only need one of them.
# We report two-sided p-values for each bin as the default
# since a priori there could # be different alternatives for each bin
# depending on the problem at hand.
alt = alternative.upper()
if alt == 'TWO.SIDED':
P = (self.larger_perm + 1) / (permutations + 1.)
mask = P < 0.5
self.p = mask * 2 * P + (1 - mask) * 2 * (1 - P)
elif alt == 'POSITIVE':
# NE, SW sectors are higher, NW, SE are lower
POS = _POS8
if self.k == 4:
POS = _POS4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = POS * L + (1 - POS) * S
self.p = P
elif alt == 'NEGATIVE':
# NE, SW sectors are lower, NW, SE are higher
NEG = _NEG8
if self.k == 4:
NEG = _NEG4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = NEG * L + (1 - NEG) * S
self.p = P
else:
print(('Bad option for alternative: %s.' % alternative))
def _calc(self, Y, w, k):
wY = weights.lag_spatial(w, Y)
dx = Y[:, -1] - Y[:, 0]
dy = wY[:, -1] - wY[:, 0]
self.wY = wY
self.Y = Y
r = np.sqrt(dx * dx + dy * dy)
theta = np.arctan2(dy, dx)
neg = theta < 0.0
utheta = theta * (1 - neg) + neg * (2 * np.pi + theta)
counts, bins = np.histogram(utheta, self.cuts)
results = {}
results['counts'] = counts
results['theta'] = theta
results['bins'] = bins
results['r'] = r
results['lag'] = wY
results['dx'] = dx
results['dy'] = dy
return results
@_requires('splot')
def plot(self, attribute=None, ax=None, **kwargs):
"""
Plot the rose diagram.
Parameters
----------
attribute : (n,) ndarray, optional
Variable to specify colors of the colorbars.
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None. Note, this axis should have a polar projection.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_rose
fig, ax = dynamic_lisa_rose(self, attribute=attribute,
ax=ax, **kwargs)
return fig, ax
@_requires('splot')
def plot_vectors(self, arrows=True):
"""
Plot vectors of positional transition of LISA values
within quadrant in scatterplot in a polar plot.
Parameters
----------
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None.
arrows : boolean, optional
If True show arrowheads of vectors. Default =True
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_vectors
fig, ax = dynamic_lisa_vectors(self, arrows=arrows)
return fig, ax
|
pysal/giddy | giddy/directional.py | Rose.plot_vectors | python | def plot_vectors(self, arrows=True):
from splot.giddy import dynamic_lisa_vectors
fig, ax = dynamic_lisa_vectors(self, arrows=arrows)
return fig, ax | Plot vectors of positional transition of LISA values
within quadrant in scatterplot in a polar plot.
Parameters
----------
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None.
arrows : boolean, optional
If True show arrowheads of vectors. Default =True
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/directional.py#L372-L400 | null | class Rose(object):
"""
Rose diagram based inference for directional LISAs.
For n units with LISA values at two points in time, the Rose class provides
the LISA vectors, their visualization, and computationally based inference.
Parameters
----------
Y : array (n,2)
Columns correspond to end-point time periods to calculate LISA vectors for n object.
w : PySAL W
Spatial weights object.
k : int
Number of circular sectors in rose diagram.
Attributes
----------
cuts : (k, 1) ndarray
Radian cuts for rose diagram (circular histogram).
counts: (k, 1) ndarray
Number of vectors contained in each sector.
r : (n, 1) ndarray
Vector lengths.
theta : (n,1) ndarray
Signed radians for observed LISA vectors.
If self.permute is called the following attributes are available:
alternative : string
Form of the specified alternative hypothesis ['two-sided'(default) |
'positive' | 'negative']
counts_perm : (permutations, k) ndarray
Counts obtained for each sector for every permutation
expected_perm : (k, 1) ndarray
Average number of counts for each sector taken over all permutations.
p : (k, 1) ndarray
Psuedo p-values for the observed sector counts under the specified alternative.
larger_perm : (k, 1) ndarray
Number of times realized counts are as large as observed sector count.
smaller_perm : (k, 1) ndarray
Number of times realized counts are as small as observed sector count.
"""
def __init__(self, Y, w, k=8):
"""
Calculation of rose diagram for local indicators of spatial
association.
Parameters
----------
Y : (n, 2) ndarray
Variable observed on n spatial units over 2 time periods
w : W
Spatial weights object.
k : int
number of circular sectors in rose diagram (the default is 8).
Notes
-----
Based on :cite:`Rey2011`.
Examples
--------
Constructing data for illustration of directional LISA analytics.
Data is for the 48 lower US states over the period 1969-2009 and
includes per capita income normalized to the national average.
Load comma delimited data file in and convert to a numpy array
>>> import libpysal
>>> from giddy.directional import Rose
>>> import matplotlib.pyplot as plt
>>> file_path = libpysal.examples.get_path("spi_download.csv")
>>> f=open(file_path,'r')
>>> lines=f.readlines()
>>> f.close()
>>> lines=[line.strip().split(",") for line in lines]
>>> names=[line[2] for line in lines[1:-5]]
>>> data=np.array([list(map(int,line[3:])) for line in lines[1:-5]])
Bottom of the file has regional data which we don't need for this
example so we will subset only those records that match a state name
>>> sids=list(range(60))
>>> out=['"United States 3/"',
... '"Alaska 3/"',
... '"District of Columbia"',
... '"Hawaii 3/"',
... '"New England"',
... '"Mideast"',
... '"Great Lakes"',
... '"Plains"',
... '"Southeast"',
... '"Southwest"',
... '"Rocky Mountain"',
... '"Far West 3/"']
>>> snames=[name for name in names if name not in out]
>>> sids=[names.index(name) for name in snames]
>>> states=data[sids,:]
>>> us=data[0]
>>> years=np.arange(1969,2009)
Now we convert state incomes to express them relative to the national
average
>>> rel=states/(us*1.)
Create our contiguity matrix from an external GAL file and row
standardize the resulting weights
>>> gal=libpysal.io.open(libpysal.examples.get_path('states48.gal'))
>>> w=gal.read()
>>> w.transform='r'
Take the first and last year of our income data as the interval to do
the directional directional analysis
>>> Y=rel[:,[0,-1]]
Set the random seed generator which is used in the permutation based
inference for the rose diagram so that we can replicate our example
results
>>> np.random.seed(100)
Call the rose function to construct the directional histogram for the
dynamic LISA statistics. We will use four circular sectors for our
histogram
>>> r4=Rose(Y,w,k=4)
What are the cut-offs for our histogram - in radians
>>> r4.cuts
array([0. , 1.57079633, 3.14159265, 4.71238898, 6.28318531])
How many vectors fell in each sector
>>> r4.counts
array([32, 5, 9, 2])
We can test whether these counts are different than what would be
expected if there was no association between the movement of the
focal unit and its spatial lag.
To do so we call the `permute` method of the object
>>> r4.permute()
and then inspect the `p` attibute:
>>> r4.p
array([0.04, 0. , 0.02, 0. ])
Repeat the exercise but now for 8 rather than 4 sectors
>>> r8 = Rose(Y, w, k=8)
>>> r8.counts
array([19, 13, 3, 2, 7, 2, 1, 1])
>>> r8.permute()
>>> r8.p
array([0.86, 0.08, 0.16, 0. , 0.02, 0.2 , 0.56, 0. ])
The default is a two-sided alternative. There is an option for a
directional alternative reflecting positive co-movement of the focal
series with its spatial lag. In this case the number of vectors in
quadrants I and III should be much larger than expected, while the
counts of vectors falling in quadrants II and IV should be much lower
than expected.
>>> r8.permute(alternative='positive')
>>> r8.p
array([0.51, 0.04, 0.28, 0.02, 0.01, 0.14, 0.57, 0.03])
Finally, there is a second directional alternative for examining the
hypothesis that the focal unit and its lag move in opposite directions.
>>> r8.permute(alternative='negative')
>>> r8.p
array([0.69, 0.99, 0.92, 1. , 1. , 0.97, 0.74, 1. ])
We can call the plot method to visualize directional LISAs as a
rose diagram conditional on the starting relative income:
>>> fig1, _ = r8.plot(attribute=Y[:,0])
>>> plt.show(fig1)
"""
self.Y = Y
self.w = w
self.k = k
self.permtuations = 0
self.sw = 2 * np.pi / self.k
self.cuts = np.arange(0.0, 2 * np.pi + self.sw, self.sw)
observed = self._calc(Y, w, k)
self.theta = observed['theta']
self.bins = observed['bins']
self.counts = observed['counts']
self.r = observed['r']
self.lag = observed['lag']
self._dx = observed['dx']
self._dy = observed['dy']
def permute(self, permutations=99, alternative='two.sided'):
"""
Generate ransom spatial permutations for inference on LISA vectors.
Parameters
----------
permutations : int, optional
Number of random permutations of observations.
alternative : string, optional
Type of alternative to form in generating p-values.
Options are: `two-sided` which tests for difference between observed
counts and those obtained from the permutation distribution;
`positive` which tests the alternative that the focal unit and its
lag move in the same direction over time; `negative` which tests
that the focal unit and its lag move in opposite directions over
the interval.
"""
rY = self.Y.copy()
idxs = np.arange(len(rY))
counts = np.zeros((permutations, len(self.counts)))
for m in range(permutations):
np.random.shuffle(idxs)
res = self._calc(rY[idxs, :], self.w, self.k)
counts[m] = res['counts']
self.counts_perm = counts
self.larger_perm = np.array(
[(counts[:, i] >= self.counts[i]).sum() for i in range(self.k)])
self.smaller_perm = np.array(
[(counts[:, i] <= self.counts[i]).sum() for i in range(self.k)])
self.expected_perm = counts.mean(axis=0)
self.alternative = alternative
# pvalue logic
# if P is the proportion that are as large for a one sided test (larger
# than), then
# p=P.
#
# For a two-tailed test, if P < .5, p = 2 * P, else, p = 2(1-P)
# Source: Rayner, J. C. W., O. Thas, and D. J. Best. 2009. "Appendix B:
# Parametric Bootstrap P-Values." In Smooth Tests of Goodness of Fit,
# 247. John Wiley and Sons.
# Note that the larger and smaller counts would be complements (except
# for the shared equality, for
# a given bin in the circular histogram. So we only need one of them.
# We report two-sided p-values for each bin as the default
# since a priori there could # be different alternatives for each bin
# depending on the problem at hand.
alt = alternative.upper()
if alt == 'TWO.SIDED':
P = (self.larger_perm + 1) / (permutations + 1.)
mask = P < 0.5
self.p = mask * 2 * P + (1 - mask) * 2 * (1 - P)
elif alt == 'POSITIVE':
# NE, SW sectors are higher, NW, SE are lower
POS = _POS8
if self.k == 4:
POS = _POS4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = POS * L + (1 - POS) * S
self.p = P
elif alt == 'NEGATIVE':
# NE, SW sectors are lower, NW, SE are higher
NEG = _NEG8
if self.k == 4:
NEG = _NEG4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = NEG * L + (1 - NEG) * S
self.p = P
else:
print(('Bad option for alternative: %s.' % alternative))
def _calc(self, Y, w, k):
wY = weights.lag_spatial(w, Y)
dx = Y[:, -1] - Y[:, 0]
dy = wY[:, -1] - wY[:, 0]
self.wY = wY
self.Y = Y
r = np.sqrt(dx * dx + dy * dy)
theta = np.arctan2(dy, dx)
neg = theta < 0.0
utheta = theta * (1 - neg) + neg * (2 * np.pi + theta)
counts, bins = np.histogram(utheta, self.cuts)
results = {}
results['counts'] = counts
results['theta'] = theta
results['bins'] = bins
results['r'] = r
results['lag'] = wY
results['dx'] = dx
results['dy'] = dy
return results
@_requires('splot')
def plot(self, attribute=None, ax=None, **kwargs):
"""
Plot the rose diagram.
Parameters
----------
attribute : (n,) ndarray, optional
Variable to specify colors of the colorbars.
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None. Note, this axis should have a polar projection.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_rose
fig, ax = dynamic_lisa_rose(self, attribute=attribute,
ax=ax, **kwargs)
return fig, ax
def plot_origin(self): # TODO add attribute option to color vectors
"""
Plot vectors of positional transition of LISA values starting
from the same origin.
"""
import matplotlib.cm as cm
import matplotlib.pyplot as plt
ax = plt.subplot(111)
xlim = [self._dx.min(), self._dx.max()]
ylim = [self._dy.min(), self._dy.max()]
for x, y in zip(self._dx, self._dy):
xs = [0, x]
ys = [0, y]
plt.plot(xs, ys, '-b') # TODO change this to scale with attribute
plt.axis('equal')
plt.xlim(xlim)
plt.ylim(ylim)
@_requires('splot')
|
pysal/giddy | giddy/util.py | shuffle_matrix | python | def shuffle_matrix(X, ids):
np.random.shuffle(ids)
return X[ids, :][:, ids] | Random permutation of rows and columns of a matrix
Parameters
----------
X : array
(k, k), array to be permutated.
ids : array
range (k, ).
Returns
-------
X : array
(k, k) with rows and columns randomly shuffled.
Examples
--------
>>> import numpy as np
>>> from giddy.util import shuffle_matrix
>>> X=np.arange(16)
>>> X.shape=(4,4)
>>> np.random.seed(10)
>>> shuffle_matrix(X,list(range(4)))
array([[10, 8, 11, 9],
[ 2, 0, 3, 1],
[14, 12, 15, 13],
[ 6, 4, 7, 5]]) | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/util.py#L9-L40 | null | """
Utilities for the spatial dynamics module.
"""
__all__ = ['shuffle_matrix', 'get_lower']
import numpy as np
def get_lower(matrix):
    """
    Flattens the lower part of an n x n matrix into an n*(n-1)/2 x 1 vector.

    Parameters
    ----------
    matrix : array
        (n, n) numpy array, a distance matrix.

    Returns
    -------
    lowvec : array
        numpy array, the lower half of the distance matrix flattened into
        a vector of length n*(n-1)/2.

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.util import get_lower
    >>> test = np.array([[0,1,2,3],[1,0,1,2],[2,1,0,1],[4,2,1,0]])
    >>> lower = get_lower(test)
    >>> lower
    array([[1],
           [2],
           [1],
           [4],
           [2],
           [1]])
    """
    # Vectorized replacement for the former O(n^2) Python double loop:
    # np.tril_indices(n, -1) walks the strictly-lower triangle in the
    # same row-major (i > j) order, so the output ordering is unchanged.
    n = matrix.shape[0]
    rows, cols = np.tril_indices(n, k=-1)
    lowvec = matrix[rows, cols].reshape(-1, 1)
    return lowvec
|
def get_lower(matrix):
    """
    Flatten the strictly-lower triangle of an n x n matrix into a column
    vector of length n*(n-1)/2.

    Parameters
    ----------
    matrix : array
        (n, n) numpy array, a distance matrix.

    Returns
    -------
    lowvec : array
        (n*(n-1)/2, 1) numpy array holding the below-diagonal entries in
        row-major order.

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.util import get_lower
    >>> test = np.array([[0,1,2,3],[1,0,1,2],[2,1,0,1],[4,2,1,0]])
    >>> get_lower(test)
    array([[1],
           [2],
           [1],
           [4],
           [2],
           [1]])
    """
    n = matrix.shape[0]
    # j < i selects exactly the strictly-lower triangle, row by row.
    lower_entries = [matrix[i, j] for i in range(n) for j in range(i)]
    half = n * (n - 1) / 2
    return np.reshape(np.array(lower_entries), (int(half), 1))
Utilities for the spatial dynamics module.
"""
__all__ = ['shuffle_matrix', 'get_lower']
import numpy as np
def shuffle_matrix(X, ids):
    """
    Randomly permute the rows and columns of a matrix (same permutation
    on both axes).

    Parameters
    ----------
    X : array
        (k, k), array to be permutated.
    ids : array
        range (k, ).  Shuffled in place.

    Returns
    -------
    X : array
        (k, k) with rows and columns randomly shuffled.

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.util import shuffle_matrix
    >>> X = np.arange(16)
    >>> X.shape = (4, 4)
    >>> np.random.seed(10)
    >>> shuffle_matrix(X, list(range(4)))
    array([[10,  8, 11,  9],
           [ 2,  0,  3,  1],
           [14, 12, 15, 13],
           [ 6,  4,  7,  5]])
    """
    np.random.shuffle(ids)
    # Reorder rows first, then columns, with the same shuffled index.
    permuted_rows = X[ids, :]
    return permuted_rows[:, ids]
|
def markov_mobility(p, measure="P", ini=None):
    """
    Markov-based mobility index.

    Parameters
    ----------
    p : array
        (k, k), Markov transition probability matrix.
    measure : string
        Index to compute:
        if measure = "P",
        :math:`M_{P} = \\frac{m-\\sum_{i=1}^m P_{ii}}{m-1}`;
        if measure = "D",
        :math:`M_{D} = 1 - |\\det(P)|`;
        if measure = "L2",
        :math:`M_{L2} = 1 - |\\lambda_2|`,
        where :math:`\\lambda_2` is the second largest eigenvalue of
        :math:`P`;
        if measure = "B1",
        :math:`M_{B1} = \\frac{m-m \\sum_{i=1}^m \\pi_i P_{ii}}{m-1}`,
        where :math:`\\pi` is the initial income distribution;
        if measure = "B2",
        :math:`M_{B2} = \\frac{1}{m-1} \\sum_{i=1}^m \\sum_{j=1}^m
        \\pi_i P_{ij} |i-j|`.
    ini : array
        (k,), initial distribution; only used when measure is "B1" or
        "B2".  Defaults to the uniform distribution.

    Returns
    -------
    mobi : float
        Mobility value.

    Raises
    ------
    ValueError
        If `measure` is not one of "P", "D", "L2", "B1", "B2".

    Notes
    -----
    The mobility indices are based on :cite:`Formby:2004fk`.
    """
    p = np.array(p)
    k = p.shape[1]
    if measure == "P":
        # Shorrock1: average off-diagonal mass.
        t = np.trace(p)
        mobi = (k - t) / (k - 1)
    elif measure == "D":
        # Shorrock2: determinant-based.
        mobi = 1 - abs(la.det(p))
    elif measure == "L2":
        # Sommers and Conlisk: second largest eigenvalue modulus.
        w, v = la.eig(p)
        eigen_value_abs = abs(w)
        mobi = 1 - np.sort(eigen_value_abs)[-2]
    elif measure == "B1":
        if ini is None:
            ini = 1.0 / k * np.ones(k)
        mobi = (k - k * np.sum(ini * np.diag(p))) / (k - 1)
    elif measure == "B2":
        mobi = 0
        if ini is None:
            ini = 1.0 / k * np.ones(k)
        for i in range(k):
            for j in range(k):
                mobi = mobi + ini[i] * p[i, j] * abs(i - j)
        mobi = mobi / (k - 1)
    else:
        # Previously an unknown measure fell through and raised an opaque
        # UnboundLocalError on `mobi`; fail fast with a clear message.
        raise ValueError(
            "measure must be one of 'P', 'D', 'L2', 'B1', 'B2'; "
            "got %r" % measure)
    return mobi
Income mobility measures.
"""
__author__ = "Wei Kang <weikang9009@gmail.com>, Sergio J. Rey <sjsrey@gmail.com>"
__all__ = ["markov_mobility"]
import numpy as np
import numpy.linalg as la
|
def chi2(T1, T2):
    r"""
    chi-squared test of difference between two transition matrices.

    Parameters
    ----------
    T1 : array
        (k, k), matrix of transitions (counts).
    T2 : array
        (k, k), matrix of transitions (counts) to use to form the
        probabilities under the null.

    Returns
    -------
    : tuple
        (3 elements).
        (chi2 value, pvalue, degrees of freedom).

    Notes
    -----
    Second matrix is used to form the probabilities under the null.
    Marginal sums from first matrix are distributed across these
    probabilities under the null. In other words the observed transitions
    are taken from T1 while the expected transitions are formed as

    .. math::

        E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}

    Degrees of freedom corrected for any rows in either T1 or T2 that
    have zero total transitions.
    """
    # np.matrix is deprecated; the original diag()/matrix products are
    # replaced with equivalent ndarray broadcasting.
    T1 = np.asarray(T1)
    T2 = np.asarray(T2)
    rs2 = T2.sum(axis=1)
    rs1 = T1.sum(axis=1)
    # Rows with no transitions are dropped from the degrees of freedom.
    dof1 = (rs1 > 0).sum()
    dof2 = (rs2 > 0).sum()
    dof = (dof1 - 1) * (dof2 - 1)
    rs2 = rs2 + (rs2 == 0)  # guard against division by zero
    # Expected counts: T1's row margins spread over T2's row probabilities.
    E = rs1[:, np.newaxis] * (T2 / rs2[:, np.newaxis])
    num = (T1 - E) * (T1 - E)
    E = E + (E == 0)  # avoid 0/0 for structurally empty cells
    stat = (num / E).sum()
    pvalue = 1 - stats.chi2.cdf(stat, dof)
    return stat, pvalue, dof
Markov based methods for spatial dynamics.
"""
__author__ = "Sergio J. Rey <sjsrey@gmail.com>, Wei Kang <weikang9009@gmail.com>"
__all__ = ["Markov", "LISA_Markov", "Spatial_Markov", "kullback",
"prais", "homogeneity", "FullRank_Markov", "sojourn_time",
"GeoRank_Markov"]
import numpy as np
from .ergodic import fmpt
from .ergodic import steady_state as STEADY_STATE
from .components import Graph
from scipy import stats
from scipy.stats import rankdata
from operator import gt
from libpysal import weights
from esda.moran import Moran_Local
import mapclassify as mc
import itertools
# TT predefines the LISA transition types.
# TT[i, j] is the transition type from quadrant i (period 0) to quadrant
# j (period 1).  A one-based offset is used, so row/column 0 of TT are
# ignored.
TT = np.zeros((5, 5), int)
c = 1
for i in range(1, 5):
    for j in range(1, 5):
        TT[i, j] = c
        c += 1

# MOVE_TYPES maps a (quadrant0, quadrant1, sig0, sig1) key to the move
# type of a LISA transition, filtered on the significance of the LISA
# end points (True = significant in that period).
# e.g. (1, 3, True, False) is a significant LISA in quadrant 1 in period
# 0 that moved to quadrant 3 in period 1 where it was not significant.
MOVE_TYPES = {}
cases = (True, False)
sig_keys = [(c0, c1) for c0 in cases for c1 in cases]
for block, sig_key in enumerate(sig_keys):
    # Each significance combination occupies its own bank of 16 codes.
    c = 1 + block * 16
    for i in range(1, 5):
        for j in range(1, 5):
            MOVE_TYPES[(i, j, sig_key[0], sig_key[1])] = c
            c += 1
class Markov(object):
    """
    Classic Markov transition matrices.

    Parameters
    ----------
    class_ids : array
        (n, t), one row per observation, one column per time period,
        recording the state of each observation in each period.
    classes : array, optional
        (k, 1), all different classes (bins) of the matrix.  Inferred
        from ``class_ids`` when not supplied.

    Attributes
    ----------
    p : array
        (k, k), transition probability matrix.
    steady_state : array
        (k, ), ergodic distribution.
    transitions : array
        (k, k), count of transitions between each state i and j.

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.markov import Markov
    >>> c = [['b','a','c'],['c','c','a'],['c','b','c']]
    >>> c.extend([['a','a','b'], ['a','b','c']])
    >>> c = np.array(c)
    >>> m = Markov(c)
    >>> m.classes.tolist()
    ['a', 'b', 'c']
    >>> m.p
    array([[0.25      , 0.5       , 0.25      ],
           [0.33333333, 0.        , 0.66666667],
           [0.33333333, 0.33333333, 0.33333333]])
    >>> m.steady_state
    array([0.30769231, 0.28846154, 0.40384615])
    """

    def __init__(self, class_ids, classes=None):
        # Derive the state space from the data unless given explicitly.
        if classes is None:
            self.classes = np.unique(class_ids)
        else:
            self.classes = classes
        n, t = class_ids.shape
        k = len(self.classes)
        lookup = self.classes.tolist()
        counts = np.zeros((k, k))
        # Tally moves between every pair of consecutive time periods.
        for period in range(t - 1):
            origin = class_ids[:, period]
            destination = class_ids[:, period + 1]
            for start in np.unique(origin):
                arrivals = destination[origin == start]
                row = lookup.index(start)
                for stop in np.unique(arrivals):
                    col = lookup.index(stop)
                    counts[row, col] += (arrivals == stop).sum()
        self.transitions = counts
        # Row-normalise; empty rows keep a unit denominator so they
        # remain all-zero instead of producing NaNs.
        row_sum = counts.sum(axis=1)
        self.p = np.dot(np.diag(1 / (row_sum + (row_sum == 0))), counts)

    @property
    def steady_state(self):
        # Lazily computed and cached on first access.
        if not hasattr(self, '_steady_state'):
            self._steady_state = STEADY_STATE(self.p)
        return self._steady_state
class Spatial_Markov(object):
"""
Markov transitions conditioned on the value of the spatial lag.
Parameters
----------
y : array
(n, t), one row per observation, one column per state of
each observation, with as many columns as time periods.
w : W
spatial weights object.
k : integer, optional
number of classes (quantiles) for input time series y.
Default is 4. If discrete=True, k is determined
endogenously.
m : integer, optional
number of classes (quantiles) for the spatial lags of
regional time series. Default is 4. If discrete=True,
m is determined endogenously.
permutations : int, optional
number of permutations for use in randomization based
inference (the default is 0).
fixed : bool, optional
If true, discretization are taken over the entire n*t
pooled series and cutoffs can be user-defined. If
cutoffs and lag_cutoffs are not given, quantiles are
used. If false, quantiles are taken each time period
over n. Default is True.
discrete : bool, optional
If true, categorical spatial lags which are most common
categories of neighboring observations serve as the
conditioning and fixed is ignored; if false, weighted
averages of neighboring observations are used. Default is
false.
cutoffs : array, optional
users can specify the discretization cutoffs for
continuous time series. Default is None, meaning that
quantiles will be used for the discretization.
lag_cutoffs : array, optional
users can specify the discretization cutoffs for the
spatial lags of continuous time series. Default is
None, meaning that quantiles will be used for the
discretization.
variable_name : string
name of variable.
Attributes
----------
class_ids : array
(n, t), discretized series if y is continuous. Otherwise
it is identical to y.
classes : array
(k, 1), all different classes (bins).
lclass_ids : array
(n, t), spatial lag series.
lclasses : array
(k, 1), all different classes (bins) for
spatial lags.
p : array
(k, k), transition probability matrix for a-spatial
Markov.
s : array
(k, 1), ergodic distribution for a-spatial Markov.
transitions : array
(k, k), counts of transitions between each state i and j
for a-spatial Markov.
T : array
(k, k, k), counts of transitions for each conditional
Markov. T[0] is the matrix of transitions for
observations with lags in the 0th quantile; T[k-1] is the
transitions for the observations with lags in the k-1th.
P : array
(k, k, k), transition probability matrix for spatial
Markov first dimension is the conditioned on the lag.
S : array
(k, k), steady state distributions for spatial Markov.
Each row is a conditional steady_state.
F : array
(k, k, k),first mean passage times.
First dimension is conditioned on the lag.
shtest : list
(k elements), each element of the list is a tuple for a
multinomial difference test between the steady state
distribution from a conditional distribution versus the
overall steady state distribution: first element of the
tuple is the chi2 value, second its p-value and the third
the degrees of freedom.
chi2 : list
(k elements), each element of the list is a tuple for a
chi-squared test of the difference between the
conditional transition matrix against the overall
transition matrix: first element of the tuple is the chi2
value, second its p-value and the third the degrees of
freedom.
x2 : float
sum of the chi2 values for each of the conditional tests.
Has an asymptotic chi2 distribution with k(k-1)(k-1)
degrees of freedom. Under the null that transition
probabilities are spatially homogeneous.
(see chi2 above)
x2_dof : int
degrees of freedom for homogeneity test.
x2_pvalue : float
pvalue for homogeneity test based on analytic.
distribution
x2_rpvalue : float
(if permutations>0)
pseudo p-value for x2 based on random spatial
permutations of the rows of the original transitions.
x2_realizations : array
(permutations,1), the values of x2 for the random
permutations.
Q : float
Chi-square test of homogeneity across lag classes based
on :cite:`Bickenbach2003`.
Q_p_value : float
p-value for Q.
LR : float
Likelihood ratio statistic for homogeneity across lag
classes based on :cite:`Bickenbach2003`.
LR_p_value : float
p-value for LR.
dof_hom : int
degrees of freedom for LR and Q, corrected for 0 cells.
Notes
-----
Based on :cite:`Rey2001`.
The shtest and chi2 tests should be used with caution as they are based on
classic theory assuming random transitions. The x2 based test is
preferable since it simulates the randomness under the null. It is an
experimental test requiring further analysis.
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov
>>> import numpy as np
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform = 'r'
Now we create a `Spatial_Markov` instance for the continuous relative per
capita income time series for 48 US lower states 1929-2009. The current
implementation allows users to classify the continuous incomes in a more
flexible way.
(1) Global quintiles to discretize the income data (k=5), and global
quintiles to discretize the spatial lags of incomes (m=5).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=5, variable_name='rpci')
We can examine the cutoffs for the incomes and cutoffs for the spatial lags
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.88973386, 0.95891917, 1.01469758, 1.1183566 ])
Obviously, they are slightly different.
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.96341463 0.0304878 0.00609756 0. 0. ]
[0.06040268 0.83221477 0.10738255 0. 0. ]
[0. 0.14 0.74 0.12 0. ]
[0. 0.03571429 0.32142857 0.57142857 0.07142857]
[0. 0. 0. 0.16666667 0.83333333]]
[[0.79831933 0.16806723 0.03361345 0. 0. ]
[0.0754717 0.88207547 0.04245283 0. 0. ]
[0.00537634 0.06989247 0.8655914 0.05913978 0. ]
[0. 0. 0.06372549 0.90196078 0.03431373]
[0. 0. 0. 0.19444444 0.80555556]]
[[0.84693878 0.15306122 0. 0. 0. ]
[0.08133971 0.78947368 0.1291866 0. 0. ]
[0.00518135 0.0984456 0.79274611 0.0984456 0.00518135]
[0. 0. 0.09411765 0.87058824 0.03529412]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.8852459 0.09836066 0. 0.01639344 0. ]
[0.03875969 0.81395349 0.13953488 0. 0.00775194]
[0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ]
[0. 0.02339181 0.12865497 0.75438596 0.09356725]
[0. 0. 0. 0.09661836 0.90338164]]
[[0.33333333 0.66666667 0. 0. 0. ]
[0.0483871 0.77419355 0.16129032 0.01612903 0. ]
[0.01149425 0.16091954 0.74712644 0.08045977 0. ]
[0. 0.01036269 0.06217617 0.89637306 0.03108808]
[0. 0. 0. 0.02352941 0.97647059]]
The probability of a poor state remaining poor is 0.963 if their
neighbors are in the 1st quintile and 0.798 if their neighbors are
in the 2nd quintile. The probability of a rich economy remaining
rich is 0.976 if their neighbors are in the 5th quintile, but if their
neighbors are in the 4th quintile this drops to 0.903.
The global transition probability matrix is estimated:
>>> print(sm.p)
[[0.91461837 0.07503234 0.00905563 0.00129366 0. ]
[0.06570302 0.82654402 0.10512484 0.00131406 0.00131406]
[0.00520833 0.10286458 0.79427083 0.09505208 0.00260417]
[0. 0.00913838 0.09399478 0.84856397 0.04830287]
[0. 0. 0. 0.06217617 0.93782383]]
The Q and likelihood ratio statistics are both significant indicating
the dynamics are not homogeneous across the lag classes:
>>> "%.3f"%sm.LR
'170.659'
>>> "%.3f"%sm.Q
'200.624'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
60
The long run distribution for states with poor (rich) neighbors has
0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in
the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the
fourth and 0.029 (0.337) in the fifth quintile.
>>> sm.S
array([[0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278],
[0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356],
[0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ],
[0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013],
[0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]])
States with incomes in the first quintile with neighbors in the
first quintile return to the first quartile after 2.298 years, after
leaving the first quintile. They enter the fourth quintile after
80.810 years after leaving the first quintile, on average.
Poor states within neighbors in the fourth quintile return to the
first quintile, on average, after 12.88 years, and would enter the
fourth quintile after 28.473 years.
>>> for f in sm.F:
... print(f)
...
[[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143]
[ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286]
[ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429]
[ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762]
[ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]]
[[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197]
[ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ]
[ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482]
[ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314]
[ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]]
[[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026]
[ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693]
[ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594]
[ 83.72288828 39.06666667 14.3 3.44668119 76.36702977]
[ 93.52288828 48.86666667 24.1 9.8 8.79255406]]
[[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142]
[ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423]
[117.76777159 23.03735526 3.94436301 15.0843986 43.57927247]
[127.89752089 32.4393006 14.56853107 4.44831643 31.63099455]
[138.24752089 42.7893006 24.91853107 10.35 4.05613474]]
[[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826]
[ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826]
[ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217]
[127.1407767 48.74107143 33.29605263 3.91777427 83.52173913]
[169.6407767 91.24107143 75.79605263 42.5 2.96521739]]
(2) Global quintiles to discretize the income data (k=5), and global
quartiles to discretize the spatial lags of incomes (m=4).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=4, variable_name='rpci')
We can also examine the cutoffs for the incomes and cutoffs for the spatial
lags:
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.91440247, 0.98583079, 1.08698351])
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.95708955 0.03544776 0.00746269 0. 0. ]
[0.05825243 0.83980583 0.10194175 0. 0. ]
[0. 0.1294964 0.76258993 0.10791367 0. ]
[0. 0.01538462 0.18461538 0.72307692 0.07692308]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.7421875 0.234375 0.0234375 0. 0. ]
[0.08550186 0.85130112 0.06319703 0. 0. ]
[0.00865801 0.06926407 0.86147186 0.05627706 0.004329 ]
[0. 0. 0.05363985 0.92337165 0.02298851]
[0. 0. 0. 0.13432836 0.86567164]]
[[0.95145631 0.04854369 0. 0. 0. ]
[0.06 0.79 0.145 0. 0.005 ]
[0.00358423 0.10394265 0.7921147 0.09677419 0.00358423]
[0. 0.01630435 0.13586957 0.75543478 0.0923913 ]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.16666667 0.66666667 0. 0.16666667 0. ]
[0.03488372 0.80232558 0.15116279 0.01162791 0. ]
[0.00840336 0.13445378 0.70588235 0.1512605 0. ]
[0. 0.01171875 0.08203125 0.87109375 0.03515625]
[0. 0. 0. 0.03434343 0.96565657]]
We now obtain 4 5*5 spatial lag conditioned transition probability
matrices instead of 5 as in case (1).
The Q and likelihood ratio statistics are still both significant.
>>> "%.3f"%sm.LR
'172.105'
>>> "%.3f"%sm.Q
'321.128'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
45
(3) We can also set the cutoffs for relative incomes and their
spatial lags manually.
For example, we want the defining cutoffs to be [0.8, 0.9, 1, 1.2],
meaning that relative incomes:
2.1 smaller than 0.8 : class 0
2.2 between 0.8 and 0.9: class 1
2.3 between 0.9 and 1.0 : class 2
2.4 between 1.0 and 1.2: class 3
2.5 larger than 1.2: class 4
>>> cc = np.array([0.8, 0.9, 1, 1.2])
>>> sm = Spatial_Markov(rpci, w, cutoffs=cc, lag_cutoffs=cc, variable_name='rpci')
>>> sm.cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.k
5
>>> sm.lag_cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.96703297 0.03296703 0. 0. 0. ]
[0.10638298 0.68085106 0.21276596 0. 0. ]
[0. 0.14285714 0.7755102 0.08163265 0. ]
[0. 0. 0.5 0.5 0. ]
[0. 0. 0. 0. 0. ]]
[[0.88636364 0.10606061 0.00757576 0. 0. ]
[0.04402516 0.89308176 0.06289308 0. 0. ]
[0. 0.05882353 0.8627451 0.07843137 0. ]
[0. 0. 0.13846154 0.86153846 0. ]
[0. 0. 0. 0. 1. ]]
[[0.78082192 0.17808219 0.02739726 0.01369863 0. ]
[0.03488372 0.90406977 0.05813953 0.00290698 0. ]
[0. 0.05919003 0.84735202 0.09034268 0.00311526]
[0. 0. 0.05811623 0.92985972 0.01202405]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.82692308 0.15384615 0. 0.01923077 0. ]
[0.0703125 0.7890625 0.125 0.015625 0. ]
[0.00295858 0.06213018 0.82248521 0.10946746 0.00295858]
[0. 0.00185529 0.07606679 0.88497217 0.03710575]
[0. 0. 0. 0.07803468 0.92196532]]
[[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0.06666667 0.9 0.03333333 0. ]
[0. 0. 0.05660377 0.90566038 0.03773585]
[0. 0. 0. 0.03932584 0.96067416]]
(4) Spatial_Markov also accept discrete time series and calculate
categorical spatial lags on which several transition probability matrices
are conditioned.
Let's still use the US state income time series to demonstrate. We first
discretize them into categories and then pass them to Spatial_Markov.
>>> import mapclassify as mc
>>> y = mc.Quantiles(rpci.flatten(), k=5).yb.reshape(rpci.shape)
>>> np.random.seed(5)
>>> sm = Spatial_Markov(y, w, discrete=True, variable_name='discretized rpci')
>>> sm.k
5
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.94787645 0.04440154 0.00772201 0. 0. ]
[0.08333333 0.81060606 0.10606061 0. 0. ]
[0. 0.12765957 0.79787234 0.07446809 0. ]
[0. 0.02777778 0.22222222 0.66666667 0.08333333]
[0. 0. 0. 0.33333333 0.66666667]]
[[0.888 0.096 0.016 0. 0. ]
[0.06049822 0.84341637 0.09608541 0. 0. ]
[0.00666667 0.10666667 0.81333333 0.07333333 0. ]
[0. 0. 0.08527132 0.86821705 0.04651163]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.65217391 0.32608696 0.02173913 0. 0. ]
[0.07446809 0.80851064 0.11170213 0. 0.00531915]
[0.01071429 0.1 0.76428571 0.11785714 0.00714286]
[0. 0.00552486 0.09392265 0.86187845 0.03867403]
[0. 0. 0. 0.13157895 0.86842105]]
[[0.91935484 0.06451613 0. 0.01612903 0. ]
[0.06796117 0.90291262 0.02912621 0. 0. ]
[0. 0.05755396 0.87769784 0.0647482 0. ]
[0. 0.02150538 0.10752688 0.80107527 0.06989247]
[0. 0. 0. 0.08064516 0.91935484]]
[[0.81818182 0.18181818 0. 0. 0. ]
[0.01754386 0.70175439 0.26315789 0.01754386 0. ]
[0. 0.14285714 0.73333333 0.12380952 0. ]
[0. 0.0042735 0.06837607 0.89316239 0.03418803]
[0. 0. 0. 0.03891051 0.96108949]]
"""
def __init__(self, y, w, k=4, m=4, permutations=0, fixed=True,
discrete=False, cutoffs=None, lag_cutoffs=None,
variable_name=None):
y = np.asarray(y)
self.fixed = fixed
self.discrete = discrete
self.cutoffs = cutoffs
self.m = m
self.lag_cutoffs = lag_cutoffs
self.variable_name = variable_name
if discrete:
merged = list(itertools.chain.from_iterable(y))
classes = np.unique(merged)
self.classes = classes
self.k = len(classes)
self.m = self.k
label_dict = dict(zip(classes, range(self.k)))
y_int = []
for yi in y:
y_int.append(list(map(label_dict.get, yi)))
self.class_ids = np.array(y_int)
self.lclass_ids = self.class_ids
else:
self.class_ids, self.cutoffs, self.k = self._maybe_classify(
y, k=k, cutoffs=self.cutoffs)
self.classes = np.arange(self.k)
classic = Markov(self.class_ids)
self.p = classic.p
self.transitions = classic.transitions
self.T, self.P = self._calc(y, w)
if permutations:
nrp = np.random.permutation
counter = 0
x2_realizations = np.zeros((permutations, 1))
for perm in range(permutations):
T, P = self._calc(nrp(y), w)
x2 = [chi2(T[i], self.transitions)[0] for i in range(self.k)]
x2s = sum(x2)
x2_realizations[perm] = x2s
if x2s >= self.x2:
counter += 1
self.x2_rpvalue = (counter + 1.0) / (permutations + 1.)
self.x2_realizations = x2_realizations
@property
def s(self):
if not hasattr(self, '_s'):
self._s = STEADY_STATE(self.p)
return self._s
@property
def S(self):
if not hasattr(self, '_S'):
S = np.zeros_like(self.p)
for i, p in enumerate(self.P):
S[i] = STEADY_STATE(p)
self._S = np.asarray(S)
return self._S
@property
def F(self):
if not hasattr(self, '_F'):
F = np.zeros_like(self.P)
for i, p in enumerate(self.P):
F[i] = fmpt(np.asmatrix(p))
self._F = np.asarray(F)
return self._F
# bickenbach and bode tests
@property
def ht(self):
if not hasattr(self, '_ht'):
self._ht = homogeneity(self.T)
return self._ht
@property
def Q(self):
if not hasattr(self, '_Q'):
self._Q = self.ht.Q
return self._Q
@property
def Q_p_value(self):
self._Q_p_value = self.ht.Q_p_value
return self._Q_p_value
@property
def LR(self):
self._LR = self.ht.LR
return self._LR
@property
def LR_p_value(self):
self._LR_p_value = self.ht.LR_p_value
return self._LR_p_value
@property
def dof_hom(self):
self._dof_hom = self.ht.dof
return self._dof_hom
# shtests
@property
def shtest(self):
if not hasattr(self, '_shtest'):
self._shtest = self._mn_test()
return self._shtest
@property
def chi2(self):
if not hasattr(self, '_chi2'):
self._chi2 = self._chi2_test()
return self._chi2
@property
def x2(self):
if not hasattr(self, '_x2'):
self._x2 = sum([c[0] for c in self.chi2])
return self._x2
@property
def x2_pvalue(self):
if not hasattr(self, '_x2_pvalue'):
self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof)
return self._x2_pvalue
@property
def x2_dof(self):
if not hasattr(self, '_x2_dof'):
k = self.k
self._x2_dof = k * (k - 1) * (k - 1)
return self._x2_dof
def _calc(self, y, w):
'''Helper to estimate spatial lag conditioned Markov transition
probability matrices based on maximum likelihood techniques.
'''
if self.discrete:
self.lclass_ids = weights.lag_categorical(w, self.class_ids,
ties="tryself")
else:
ly = weights.lag_spatial(w, y)
self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
ly, self.m, self.lag_cutoffs)
self.lclasses = np.arange(self.m)
T = np.zeros((self.m, self.k, self.k))
n, t = y.shape
for t1 in range(t - 1):
t2 = t1 + 1
for i in range(n):
T[self.lclass_ids[i, t1], self.class_ids[i, t1],
self.class_ids[i, t2]] += 1
P = np.zeros_like(T)
for i, mat in enumerate(T):
row_sum = mat.sum(axis=1)
row_sum = row_sum + (row_sum == 0)
p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
P[i] = p_i
return T, P
def _mn_test(self):
"""
helper to calculate tests of differences between steady state
distributions from the conditional and overall distributions.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [self._ssmnp_test(
self.s, self.S[i], self.T[i].sum()) for i in rn]
return mat
def _ssmnp_test(self, p1, p2, nt):
"""
Steady state multinomial probability difference test.
Arguments
---------
p1 : array
(k, ), first steady state probability distribution.
p1 : array
(k, ), second steady state probability distribution.
nt : int
number of transitions to base the test on.
Returns
-------
tuple
(3 elements)
(chi2 value, pvalue, degrees of freedom)
"""
o = nt * p2
e = nt * p1
d = np.multiply((o - e), (o - e))
d = d / e
chi2 = d.sum()
pvalue = 1 - stats.chi2.cdf(chi2, self.k - 1)
return (chi2, pvalue, self.k - 1)
def _chi2_test(self):
    """Helper to calculate tests of differences between the conditional
    transition matrices and the overall transitions matrix."""
    n_lag_classes = self.T.shape[0]
    return [chi2(self.T[i], self.transitions)
            for i in range(n_lag_classes)]
def summary(self, file_name=None):
    """
    A summary method to call the Markov homogeneity test to test for
    temporally lagged spatial dependence.

    To learn more about the properties of the tests, refer to
    :cite:`Rey2016a` and :cite:`Kang2018`.
    """
    state_labels = ["C%d" % i for i in range(self.k)]
    # NOTE(review): lag labels are generated from self.k; if the number of
    # lag classes (self.m) can differ from self.k this may mislabel the
    # regimes -- confirm against the constructor.
    lag_labels = ["LAG%d" % i for i in range(self.k)]
    ht = homogeneity(self.T, class_names=state_labels,
                     regime_names=lag_labels)
    title = "Spatial Markov Test"
    if self.variable_name:
        title = "%s: %s" % (title, self.variable_name)
    kwargs = {"title": title}
    if file_name:
        kwargs["file_name"] = file_name
    ht.summary(**kwargs)
def _maybe_classify(self, y, k, cutoffs):
    '''Helper method for classifying continuous data.

    Returns a triple (class id array shaped like y, cutoffs, k).
    '''
    rows, cols = y.shape
    if cutoffs is not None:
        # user supplied breakpoints: close the final interval at +inf
        bins = np.array(list(cutoffs) + [np.inf])
        yb = mc.User_Defined(y.flatten(), np.array(bins)).yb.reshape(
            y.shape)
        return yb, bins[:-1], len(bins)
    if self.fixed:
        # pooled classification: quantiles computed over all periods at once
        mcyb = mc.Quantiles(y.flatten(), k=k)
        bins = mcyb.bins
        return mcyb.yb.reshape(y.shape), bins[:-1], len(bins)
    # time-varying classification: quantiles recomputed for every period
    yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in
                   np.arange(cols)]).transpose()
    return yb, None, k
class LISA_Markov(Markov):
    """
    Markov for Local Indicators of Spatial Association

    Parameters
    ----------
    y : array
        (n, t), n cross-sectional units observed over t time
        periods.
    w : W
        spatial weights object.
    permutations : int, optional
        number of permutations used to determine LISA
        significance (the default is 0).
    significance_level : float, optional
        significance level (two-sided) for filtering
        significant LISA endpoints in a transition (the
        default is 0.05).
    geoda_quads : bool
        If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4.
        If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4.
        (the default is False).

    Attributes
    ----------
    chi_2 : tuple
        (3 elements)
        (chi square test statistic, p-value, degrees of freedom) for
        test that dynamics of y are independent of dynamics of wy.
    classes : array
        (4, 1)
        1=HH, 2=LH, 3=LL, 4=HL (own, lag)
        1=HH, 2=LL, 3=LH, 4=HL (own, lag) (if geoda_quads=True)
    expected_t : array
        (4, 4), expected number of transitions under the null that
        dynamics of y are independent of dynamics of wy.
    move_types : matrix
        (n, t-1), integer values indicating which type of LISA
        transition occurred (q1 is quadrant in period 1, q2 is
        quadrant in period 2).

        .. table:: Move Types

            == ==  =========
            q1 q2  move_type
            == ==  =========
            1  1   1
            1  2   2
            1  3   3
            1  4   4
            2  1   5
            2  2   6
            2  3   7
            2  4   8
            3  1   9
            3  2   10
            3  3   11
            3  4   12
            4  1   13
            4  2   14
            4  3   15
            4  4   16
            == ==  =========

    p : array
        (k, k), transition probability matrix.
    p_values : matrix
        (n, t), LISA p-values for each end point (if permutations >
        0).
    significant_moves : matrix
        (n, t-1), integer values indicating the type and
        significance of a LISA transition. st = 1 if
        significant in period t, else st = 0 (if permutations >
        0).

        .. table:: Significant Moves1

            ===============  ===================
            (s1, s2)         move_type
            ===============  ===================
            (1, 1)           [1, 16]
            (1, 0)           [17, 32]
            (0, 1)           [33, 48]
            (0, 0)           [49, 64]
            ===============  ===================

        .. table:: Significant Moves2

            == == == ==  =========
            q1 q2 s1 s2  move_type
            == == == ==  =========
            1  1  1  1   1
            1  2  1  1   2
            1  3  1  1   3
            1  4  1  1   4
            2  1  1  1   5
            2  2  1  1   6
            2  3  1  1   7
            2  4  1  1   8
            3  1  1  1   9
            3  2  1  1   10
            3  3  1  1   11
            3  4  1  1   12
            4  1  1  1   13
            4  2  1  1   14
            4  3  1  1   15
            4  4  1  1   16
            1  1  1  0   17
            1  2  1  0   18
            .  .  .  .   .
            .  .  .  .   .
            4  3  1  0   31
            4  4  1  0   32
            1  1  0  1   33
            1  2  0  1   34
            .  .  .  .   .
            .  .  .  .   .
            4  3  0  1   47
            4  4  0  1   48
            1  1  0  0   49
            1  2  0  0   50
            .  .  .  .   .
            .  .  .  .   .
            4  3  0  0   63
            4  4  0  0   64
            == == == ==  =========

    steady_state : array
        (k, ), ergodic distribution.
    transitions : array
        (4, 4), count of transitions between each state i and j.
    spillover : array
        (n, 1) binary array, locations that were not part of a
        cluster in period 1 but joined a prexisting cluster in
        period 2.

    Examples
    --------
    >>> import libpysal
    >>> import numpy as np
    >>> from giddy.markov import LISA_Markov
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> years = list(range(1929, 2010))
    >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
    >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
    >>> lm = LISA_Markov(pci,w)
    >>> lm.classes
    array([1, 2, 3, 4])
    >>> lm.steady_state
    array([0.28561505, 0.14190226, 0.40493672, 0.16754598])
    >>> lm.transitions
    array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
           [4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
           [5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
           [3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])
    >>> lm.p
    array([[0.92985458, 0.03763901, 0.00342173, 0.02908469],
           [0.07481752, 0.85766423, 0.06569343, 0.00182482],
           [0.00333333, 0.02266667, 0.948     , 0.026     ],
           [0.04815409, 0.00160514, 0.06420546, 0.88603531]])
    >>> lm.move_types[0,:3]
    array([11, 11, 11])
    >>> lm.move_types[0,-3:]
    array([11, 11, 11])

    Now consider only moves with one, or both, of the LISA end points being
    significant

    >>> np.random.seed(10)
    >>> lm_random = LISA_Markov(pci, w, permutations=99)
    >>> lm_random.significant_moves[0, :3]
    array([11, 11, 11])
    >>> lm_random.significant_moves[0,-3:]
    array([59, 43, 27])

    Any value less than 49 indicates at least one of the LISA end points was
    significant. So for example, the first spatial unit experienced a
    transition of type 11 (LL, LL) during the first three and last three
    intervals (according to lm.move_types), however, the last three of these
    transitions involved insignificant LISAS in both the start and ending year
    of each transition.

    Test whether the moves of y are independent of the moves of wy

    >>> "Chi2: %8.3f, p: %5.2f, dof: %d" % lm.chi_2
    'Chi2: 1058.208, p:  0.00, dof: 9'

    Actual transitions of LISAs

    >>> lm.transitions
    array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
           [4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
           [5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
           [3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])

    Expected transitions of LISAs under the null y and wy are moving
    independently of one another

    >>> lm.expected_t
    array([[1.12328098e+03, 1.15377356e+01, 3.47522158e-01, 3.38337644e+01],
           [3.50272664e+00, 5.28473882e+02, 1.59178880e+01, 1.05503814e-01],
           [1.53878082e-01, 2.32163556e+01, 1.46690710e+03, 9.72266513e+00],
           [9.60775143e+00, 9.86856346e-02, 6.23537392e+00, 6.07058189e+02]])

    If the LISA classes are to be defined according to GeoDa, the `geoda_quad`
    option has to be set to true

    >>> lm.q[0:5,0]
    array([3, 2, 3, 1, 4])
    >>> lm = LISA_Markov(pci,w, geoda_quads=True)
    >>> lm.q[0:5,0]
    array([2, 3, 2, 1, 4])

    """

    def __init__(self, y, w, permutations=0,
                 significance_level=0.05, geoda_quads=False):
        y = y.transpose()
        pml = Moran_Local
        gq = geoda_quads
        # one local Moran per time period
        ml = ([pml(yi, w, permutations=permutations, geoda_quads=gq)
               for yi in y])
        q = np.array([mli.q for mli in ml]).transpose()
        classes = np.arange(1, 5)  # no guarantee all 4 quadrants are visited
        Markov.__init__(self, q, classes)
        self.q = q
        self.w = w
        n, k = q.shape
        k -= 1
        # (the original assigned significance_level twice; once is enough)
        self.significance_level = significance_level
        move_types = np.zeros((n, k), int)
        sm = np.zeros((n, k), int)
        if permutations > 0:
            p = np.array([mli.p_z_sim for mli in ml]).transpose()
            self.p_values = p
            pb = p <= significance_level
        else:
            pb = np.zeros_like(y.T)
        for t in range(k):
            origin = q[:, t]
            dest = q[:, t + 1]
            p_origin = pb[:, t]
            p_dest = pb[:, t + 1]
            for r in range(n):
                move_types[r, t] = TT[origin[r], dest[r]]
                key = (origin[r], dest[r], p_origin[r], p_dest[r])
                sm[r, t] = MOVE_TYPES[key]
        if permutations > 0:
            self.significant_moves = sm
        self.move_types = move_types
        # null of own and lag moves being independent
        ybar = y.mean(axis=0)
        r = y / ybar
        ylag = np.array([weights.lag_spatial(w, yt) for yt in y])
        rlag = ylag / ybar
        rc = r < 1.
        rlagc = rlag < 1.
        markov_y = Markov(rc)
        markov_ylag = Markov(rlagc)
        # permutation matrix mapping the (own, lag) joint chain ordering
        # onto the LISA quadrant ordering
        A = np.array([[1, 0, 0, 0],
                      [0, 0, 1, 0],
                      [0, 0, 0, 1],
                      [0, 1, 0, 0]])
        # expected transitions under independence; plain ndarray products
        # replace the deprecated np.matrix/getA() pipeline used previously
        kp = np.dot(np.dot(A, np.kron(markov_y.p, markov_ylag.p)), A.T)
        trans = self.transitions.sum(axis=1)
        t1 = np.dot(np.diag(trans), kp)
        t2 = self.transitions
        self.chi_2 = chi2(t2, t1)
        self.expected_t = t1
        self.permutations = permutations

    def spillover(self, quadrant=1, neighbors_on=False):
        """
        Detect spillover locations for diffusion in LISA Markov.

        Parameters
        ----------
        quadrant : int
            which quadrant in the scatterplot should form the core
            of a cluster.
        neighbors_on : binary
            If false, then only the 1st order neighbors of a core
            location are included in the cluster.
            If true, neighbors of cluster core 1st order neighbors
            are included in the cluster.

        Returns
        -------
        results : dictionary
            two keys - values pairs:

            'components' - array (n, t)
            values are integer ids (starting at 1) indicating which
            component/cluster observation i in period t belonged to.

            'spill_over' - array (n, t-1)
            binary values indicating if the location was a
            spill-over location that became a new member of a
            previously existing cluster.

        Examples
        --------
        >>> import libpysal
        >>> from giddy.markov import LISA_Markov
        >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
        >>> years = list(range(1929, 2010))
        >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
        >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
        >>> np.random.seed(10)
        >>> lm_random = LISA_Markov(pci, w, permutations=99)
        >>> r = lm_random.spillover()
        >>> (r['components'][:, 12] > 0).sum()
        17
        >>> (r['components'][:, 13]>0).sum()
        23
        >>> (r['spill_over'][:,12]>0).sum()
        6

        Including neighbors of core neighbors

        >>> rn = lm_random.spillover(neighbors_on=True)
        >>> (rn['components'][:, 12] > 0).sum()
        26
        >>> (rn["components"][:, 13] > 0).sum()
        34
        >>> (rn["spill_over"][:, 12] > 0).sum()
        8

        """
        n, k = self.q.shape
        if self.permutations:
            spill_over = np.zeros((n, k - 1))
            components = np.zeros((n, k))
            i2id = {}  # handle string keys
            for key in list(self.w.neighbors.keys()):
                idx = self.w.id2i[key]
                i2id[idx] = key
            # significant LISAs in the requested quadrant, per period
            sig_lisas = (self.q == quadrant) \
                * (self.p_values <= self.significance_level)
            sig_ids = [np.nonzero(
                sig_lisas[:, i])[0].tolist() for i in range(k)]
            neighbors = self.w.neighbors
            for t in range(k - 1):
                s1 = sig_ids[t]
                s2 = sig_ids[t + 1]
                g1 = Graph(undirected=True)
                for i in s1:
                    for neighbor in neighbors[i2id[i]]:
                        g1.add_edge(i2id[i], neighbor, 1.0)
                        if neighbors_on:
                            for nn in neighbors[neighbor]:
                                g1.add_edge(neighbor, nn, 1.0)
                components1 = g1.connected_components(op=gt)
                components1 = [list(c.nodes) for c in components1]
                g2 = Graph(undirected=True)
                for i in s2:
                    for neighbor in neighbors[i2id[i]]:
                        g2.add_edge(i2id[i], neighbor, 1.0)
                        if neighbors_on:
                            for nn in neighbors[neighbor]:
                                g2.add_edge(neighbor, nn, 1.0)
                components2 = g2.connected_components(op=gt)
                components2 = [list(c.nodes) for c in components2]
                c2 = []
                c1 = []
                for c in components2:
                    c2.extend(c)
                for c in components1:
                    c1.extend(c)
                new_ids = [j for j in c2 if j not in c1]
                spill_ids = []
                for j in new_ids:
                    # find j's component in period 2
                    cj = [c for c in components2 if j in c][0]
                    # for members of j's component in period 2, check if they
                    # belonged to any components in period 1
                    for i in cj:
                        if i in c1:
                            spill_ids.append(j)
                            break
                for spill_id in spill_ids:
                    # (renamed from ``id`` to avoid shadowing the builtin)
                    spill_idx = self.w.id2i[spill_id]
                    spill_over[spill_idx, t] = 1
                for c, component in enumerate(components1):
                    for i in component:
                        ii = self.w.id2i[i]
                        components[ii, t] = c + 1
            results = {}
            results['components'] = components
            results['spill_over'] = spill_over
            return results
        else:
            return None
def kullback(F):
    """
    Kullback information based test of Markov Homogeneity.

    Parameters
    ----------
    F : array
        (s, r, r), values are transitions (not probabilities) for
        s strata, r initial states, r terminal states.

    Returns
    -------
    Results : dictionary
        (key - value)

        Conditional homogeneity - (float) test statistic for homogeneity
        of transition probabilities across strata.

        Conditional homogeneity pvalue - (float) p-value for test
        statistic.

        Conditional homogeneity dof - (int) degrees of freedom =
        r(s-1)(r-1).

    Notes
    -----
    Based on :cite:`Kullback1962`.
    Example below is taken from Table 9.2 .

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.markov import kullback
    >>> s1 = np.array([
    ...     [ 22, 11, 24, 2, 2, 7],
    ...     [ 5, 23, 15, 3, 42, 6],
    ...     [ 4, 21, 190, 25, 20, 34],
    ...     [0, 2, 14, 56, 14, 28],
    ...     [32, 15, 20, 10, 56, 14],
    ...     [5, 22, 31, 18, 13, 134]
    ... ])
    >>> s2 = np.array([
    ...     [3, 6, 9, 3, 0, 8],
    ...     [1, 9, 3, 12, 27, 5],
    ...     [2, 9, 208, 32, 5, 18],
    ...     [0, 14, 32, 108, 40, 40],
    ...     [22, 14, 9, 26, 224, 14],
    ...     [1, 5, 13, 53, 13, 116]
    ... ])
    >>>
    >>> F = np.array([s1, s2])
    >>> res = kullback(F)
    >>> "%8.3f"%res['Conditional homogeneity']
    ' 160.961'
    >>> "%d"%res['Conditional homogeneity dof']
    '30'
    >>> "%3.1f"%res['Conditional homogeneity pvalue']
    '0.0'

    """
    F = np.asarray(F)  # accept nested lists as well as ndarrays
    # Zero cells are replaced by ones before taking logs; the corresponding
    # x * log(x) terms then contribute 0, which is the correct limit.
    F1 = F == 0
    F1 = F + F1
    FLF = F * np.log(F1)
    T1 = 2 * FLF.sum()
    # marginal over strata: F.jk
    FdJK = F.sum(axis=0)
    FdJK1 = FdJK + (FdJK == 0)
    T2 = 2 * (FdJK * np.log(FdJK1)).sum()
    # marginal over strata and axis 1: F.j.
    FdJd = F.sum(axis=0).sum(axis=1)
    FdJd1 = FdJd + (FdJd == 0)
    T3 = 2 * (FdJd * np.log(FdJd1)).sum()
    # per-stratum marginal over axis 1 (``F[:, :]`` in the original was
    # just ``F``; a dead grand-total term T6 was also removed)
    FIJd = F.sum(axis=1)
    FIJd1 = FIJd + (FIJd == 0)
    T4 = 2 * (FIJd * np.log(FIJd1)).sum()
    s, r, _ = F.shape
    chom = T1 - T4 - T2 + T3
    cdof = r * (s - 1) * (r - 1)
    results = {}
    results['Conditional homogeneity'] = chom
    results['Conditional homogeneity dof'] = cdof
    results['Conditional homogeneity pvalue'] = 1 - stats.chi2.cdf(chom, cdof)
    return results
def prais(pmat):
    """
    Prais conditional mobility measure.

    Parameters
    ----------
    pmat : matrix
        (k, k), Markov probability transition matrix.

    Returns
    -------
    pr : matrix
        (1, k), conditional mobility measures for each of the k classes.

    Notes
    -----
    Prais' conditional mobility measure for a class is defined as:

    .. math::

        pr_i = 1 - p_{i,i}

    Examples
    --------
    >>> import numpy as np
    >>> import libpysal
    >>> from giddy.markov import Markov,prais
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
    >>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
    >>> m = Markov(q5)
    >>> m.transitions
    array([[729.,  71.,   1.,   0.,   0.],
           [ 72., 567.,  80.,   3.,   0.],
           [  0.,  81., 631.,  86.,   2.],
           [  0.,   3.,  86., 573.,  56.],
           [  0.,   0.,   1.,  57., 741.]])
    >>> m.p
    array([[0.91011236, 0.0886392 , 0.00124844, 0.        , 0.        ],
           [0.09972299, 0.78531856, 0.11080332, 0.00415512, 0.        ],
           [0.        , 0.10125   , 0.78875   , 0.1075    , 0.0025    ],
           [0.        , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
           [0.        , 0.        , 0.00125156, 0.07133917, 0.92740926]])
    >>> prais(m.p)
    array([0.08988764, 0.21468144, 0.21125   , 0.20194986, 0.07259074])

    """
    # one minus the staying probability of each class
    pmat = np.array(pmat)
    return 1 - pmat.diagonal()
def homogeneity(transition_matrices, regime_names=None, class_names=None,
                title="Markov Homogeneity Test"):
    """
    Test for homogeneity of Markov transition probabilities across regimes.

    Parameters
    ----------
    transition_matrices : list
        of transition matrices for regimes, all matrices must
        have same size (r, c). r is the number of rows in the
        transition matrix and c is the number of columns in
        the transition matrix.
    regime_names : sequence, optional
        Labels for the regimes (defaults to an empty list).
    class_names : sequence, optional
        Labels for the classes/states of the Markov chain
        (defaults to an empty list).
    title : string
        name of test.

    Returns
    -------
    : implicit
        an instance of Homogeneity_Results.
    """
    # Use None sentinels instead of mutable [] defaults (classic Python
    # pitfall); normalize here so downstream behavior is unchanged.
    if regime_names is None:
        regime_names = []
    if class_names is None:
        class_names = []
    return Homogeneity_Results(transition_matrices, regime_names=regime_names,
                               class_names=class_names, title=title)
class Homogeneity_Results:
    """
    Wrapper class to present homogeneity results.

    Parameters
    ----------
    transition_matrices : list
        of transition matrices for regimes, all matrices must
        have same size (r, c). r is the number of rows in
        the transition matrix and c is the number of columns
        in the transition matrix.
    regime_names : sequence, optional
        Labels for the regimes (defaults to an empty list).
    class_names : sequence, optional
        Labels for the classes/states of the Markov chain
        (defaults to an empty list).
    title : string
        Title of the table.

    Notes
    -----
    Degrees of freedom adjustment follow the approach in :cite:`Bickenbach2003`.

    Examples
    --------
    See Spatial_Markov above.

    """

    def __init__(self, transition_matrices, regime_names=None,
                 class_names=None, title="Markov Homogeneity Test"):
        # Avoid mutable default arguments; None is normalized to a fresh
        # list, preserving the previous behavior for callers using defaults.
        if regime_names is None:
            regime_names = []
        if class_names is None:
            class_names = []
        self._homogeneity(transition_matrices)
        self.regime_names = regime_names
        self.class_names = class_names
        self.title = title

    def _homogeneity(self, transition_matrices):
        # form null transition probability matrix
        M = np.array(transition_matrices)
        m, r, k = M.shape
        self.k = k
        B = np.zeros((r, m))
        T = M.sum(axis=0)
        self.t_total = T.sum()
        n_i = T.sum(axis=1)
        A_i = (T > 0).sum(axis=1)
        A_im = np.zeros((r, m))
        # pooled (null) transition probabilities; zero rows guarded
        p_ij = np.dot(np.diag(1. / (n_i + (n_i == 0) * 1.)), T)
        den = p_ij + 1. * (p_ij == 0)
        b_i = np.zeros_like(A_i)
        p_ijm = np.zeros_like(M)
        # get dimensions
        m, n_rows, n_cols = M.shape
        m = 0
        Q = 0.0
        LR = 0.0
        lr_table = np.zeros_like(M)
        q_table = np.zeros_like(M)
        for nijm in M:
            nim = nijm.sum(axis=1)
            B[:, m] = 1. * (nim > 0)
            b_i = b_i + 1. * (nim > 0)
            # per-regime transition probabilities; zero rows guarded
            p_ijm[m] = np.dot(np.diag(1. / (nim + (nim == 0) * 1.)), nijm)
            num = (p_ijm[m] - p_ij)**2
            ratio = num / den
            qijm = np.dot(np.diag(nim), ratio)
            q_table[m] = qijm
            Q = Q + qijm.sum()
            # only use nonzero pijm in lr test
            mask = (nijm > 0) * (p_ij > 0)
            A_im[:, m] = (nijm > 0).sum(axis=1)
            unmask = 1.0 * (mask == 0)
            ratio = (mask * p_ijm[m] + unmask) / (mask * p_ij + unmask)
            lr = nijm * np.log(ratio)
            LR = LR + lr.sum()
            lr_table[m] = 2 * lr
            m += 1
        # b_i is the number of regimes that have non-zero observations in row i
        # A_i is the number of non-zero elements in row i of the aggregated
        # transition matrix
        self.dof = int(((b_i - 1) * (A_i - 1)).sum())
        self.Q = Q
        self.Q_p_value = 1 - stats.chi2.cdf(self.Q, self.dof)
        self.LR = LR * 2.
        self.LR_p_value = 1 - stats.chi2.cdf(self.LR, self.dof)
        self.A = A_i
        self.A_im = A_im
        self.B = B
        self.b_i = b_i
        self.LR_table = lr_table
        self.Q_table = q_table
        self.m = m
        self.p_h0 = p_ij
        self.p_h1 = p_ijm

    def summary(self, file_name=None, title="Markov Homogeneity Test"):
        """Print the homogeneity test results as a formatted table and,
        if ``file_name`` is given, also write a LaTeX version."""
        regime_names = ["%d" % i for i in range(self.m)]
        if self.regime_names:
            regime_names = self.regime_names
        cols = ["P(%s)" % str(regime) for regime in regime_names]
        if not self.class_names:
            self.class_names = list(range(self.k))
        max_col = max([len(col) for col in cols])
        col_width = max([5, max_col])  # probabilities have 5 chars
        n_tabs = self.k
        width = n_tabs * 4 + (self.k + 1) * col_width
        lead = "-" * width
        head = title.center(width)
        contents = [lead, head, lead]
        # (local renamed from ambiguous ``l``)
        regimes_line = "Number of regimes: %d" % int(self.m)
        k = "Number of classes: %d" % int(self.k)
        r = "Regime names: "
        r += ", ".join(regime_names)
        t = "Number of transitions: %d" % int(self.t_total)
        contents.append(k)
        contents.append(t)
        contents.append(regimes_line)
        contents.append(r)
        contents.append(lead)
        h = "%7s %20s %20s" % ('Test', 'LR', 'Chi-2')
        contents.append(h)
        stat = "%7s %20.3f %20.3f" % ('Stat.', self.LR, self.Q)
        contents.append(stat)
        stat = "%7s %20d %20d" % ('DOF', self.dof, self.dof)
        contents.append(stat)
        stat = "%7s %20.3f %20.3f" % ('p-value', self.LR_p_value,
                                      self.Q_p_value)
        contents.append(stat)
        print(("\n".join(contents)))
        print(lead)
        cols = ["P(%s)" % str(regime) for regime in self.regime_names]
        if not self.class_names:
            self.class_names = list(range(self.k))
        cols.extend(["%s" % str(cname) for cname in self.class_names])
        max_col = max([len(col) for col in cols])
        col_width = max([5, max_col])  # probabilities have 5 chars
        p0 = []
        line0 = ['{s: <{w}}'.format(s="P(H0)", w=col_width)]
        line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname in
                       self.class_names]))
        print((" ".join(line0)))
        p0.append("&".join(line0))
        for i, row in enumerate(self.p_h0):
            line = ["%*s" % (col_width, str(self.class_names[i]))]
            line.extend(["%*.3f" % (col_width, v) for v in row])
            print((" ".join(line)))
            p0.append("&".join(line))
        pmats = [p0]
        print(lead)
        for r, p1 in enumerate(self.p_h1):
            p0 = []
            line0 = ['{s: <{w}}'.format(s="P(%s)" %
                                        regime_names[r], w=col_width)]
            line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname
                           in self.class_names]))
            print((" ".join(line0)))
            p0.append("&".join(line0))
            for i, row in enumerate(p1):
                line = ["%*s" % (col_width, str(self.class_names[i]))]
                line.extend(["%*.3f" % (col_width, v) for v in row])
                print((" ".join(line)))
                p0.append("&".join(line))
            pmats.append(p0)
        print(lead)
        if file_name:
            # emit a LaTeX tabular rendering of the same results
            k = self.k
            ks = str(k + 1)
            with open(file_name, 'w') as f:
                c = []
                fmt = "r" * (k + 1)
                s = "\\begin{tabular}{|%s|}\\hline\n" % fmt
                s += "\\multicolumn{%s}{|c|}{%s}" % (ks, title)
                c.append(s)
                s = "Number of classes: %d" % int(self.k)
                c.append("\\hline\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Number of transitions: %d" % int(self.t_total)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Number of regimes: %d" % int(self.m)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Regime names: "
                s += ", ".join(regime_names)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "\\hline\\multicolumn{2}{|l}{%s}" % ("Test")
                s += "&\\multicolumn{2}{r}{LR}&\\multicolumn{2}{r|}{Q}"
                c.append(s)
                s = "Stat."
                s = "\\multicolumn{2}{|l}{%s}" % (s)
                s += "&\\multicolumn{2}{r}{%.3f}" % self.LR
                s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q
                c.append(s)
                s = "\\multicolumn{2}{|l}{%s}" % ("DOF")
                s += "&\\multicolumn{2}{r}{%d}" % int(self.dof)
                s += "&\\multicolumn{2}{r|}{%d}" % int(self.dof)
                c.append(s)
                s = "\\multicolumn{2}{|l}{%s}" % ("p-value")
                s += "&\\multicolumn{2}{r}{%.3f}" % self.LR_p_value
                s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q_p_value
                c.append(s)
                s1 = "\\\\\n".join(c)
                s1 += "\\\\\n"
                c = []
                for mat in pmats:
                    c.append("\\hline\n")
                    for row in mat:
                        c.append(row + "\\\\\n")
                c.append("\\hline\n")
                c.append("\\end{tabular}")
                s2 = "".join(c)
                f.write(s1 + s2)
class FullRank_Markov:
    """
    Full Rank Markov in which ranks are considered as Markov states rather
    than quantiles or other discretized classes. This is one way to avoid
    issues associated with discretization.

    Parameters
    ----------
    y : array
        (n, t) with t>>n, one row per observation (n total),
        one column recording the value of each observation,
        with as many columns as time periods.

    Attributes
    ----------
    ranks : array
        ranks of the original y array (by columns): higher values
        rank higher, e.g. the largest value in a column ranks 1.
    p : array
        (n, n), transition probability matrix for Full
        Rank Markov.
    steady_state : array
        (n, ), ergodic distribution.
    transitions : array
        (n, n), count of transitions between each rank i and j
    fmpt : array
        (n, n), first mean passage times.
    sojourn_time : array
        (n, ), sojourn times.

    Notes
    -----
    Refer to :cite:`Rey2014a` Equation (11) for details. Ties are resolved by
    assigning distinct ranks, corresponding to the order that the values occur
    in each cross section.

    Examples
    --------
    US nominal per capita income 48 states 81 years 1929-2009

    >>> from giddy.markov import FullRank_Markov
    >>> import libpysal as ps
    >>> import numpy as np
    >>> f = ps.io.open(ps.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]).transpose()
    >>> m = FullRank_Markov(pci)
    >>> m.ranks
    array([[45, 45, 44, ..., 41, 40, 39],
           [24, 25, 25, ..., 36, 38, 41],
           [46, 47, 45, ..., 43, 43, 43],
           ...,
           [34, 34, 34, ..., 47, 46, 42],
           [17, 17, 22, ..., 25, 26, 25],
           [16, 18, 19, ...,  6,  6,  7]])
    >>> m.transitions
    array([[66.,  5.,  5., ...,  0.,  0.,  0.],
           [ 8., 51.,  9., ...,  0.,  0.,  0.],
           [ 2., 13., 44., ...,  0.,  0.,  0.],
           ...,
           [ 0.,  0.,  0., ..., 40., 17.,  0.],
           [ 0.,  0.,  0., ..., 15., 54.,  2.],
           [ 0.,  0.,  0., ...,  2.,  1., 77.]])
    >>> m.p[0, :5]
    array([0.825 , 0.0625, 0.0625, 0.025 , 0.025 ])
    >>> m.fmpt[0, :5]
    array([48.        , 87.96280048, 68.1089084 , 58.83306575, 41.77250827])
    >>> m.sojourn_time[:5]
    array([5.71428571, 2.75862069, 2.22222222, 1.77777778, 1.66666667])

    """

    def __init__(self, y):
        # coerce input to ndarray without copying when already an array
        y = np.asarray(y)
        # resolve ties: All values are given a distinct rank, corresponding
        # to the order that the values occur in each cross section.
        r_asc = np.array([rankdata(col, method='ordinal') for col in y.T]).T
        # ranks by high (1) to low (n)
        self.ranks = r_asc.shape[0] - r_asc + 1
        # treat each rank as a Markov state; transitions between ranks
        frm = Markov(self.ranks)
        self.p = frm.p
        self.transitions = frm.transitions

    @property
    def steady_state(self):
        # ergodic distribution; computed lazily and cached
        if not hasattr(self, '_steady_state'):
            self._steady_state = STEADY_STATE(self.p)
        return self._steady_state

    @property
    def fmpt(self):
        # first mean passage times; computed lazily and cached
        if not hasattr(self, '_fmpt'):
            self._fmpt = fmpt(self.p)
        return self._fmpt

    @property
    def sojourn_time(self):
        # expected time spent in each state before leaving; cached
        if not hasattr(self, '_st'):
            self._st = sojourn_time(self.p)
        return self._st
def sojourn_time(p):
    """
    Calculate sojourn time based on a given transition probability matrix.

    Parameters
    ----------
    p : array
        (k, k), a Markov transition probability matrix.

    Returns
    -------
    : array
        (k, ), sojourn times. Each element is the expected time a Markov
        chain spends in each state before leaving that state.

    Notes
    -----
    Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
    chains. Absorbing states (p_ii == 1) yield an infinite sojourn time.

    Examples
    --------
    >>> from giddy.markov import sojourn_time
    >>> import numpy as np
    >>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
    >>> sojourn_time(p)
    array([2., 1., 2.])

    """
    p = np.asarray(p)
    pii = p.diagonal()
    if not (1 - pii).all():
        print("Sojourn times are infinite for absorbing states!")
    # suppress the divide-by-zero RuntimeWarning raised for absorbing
    # states; the corresponding entries come out as inf, which is the
    # documented behavior
    with np.errstate(divide='ignore'):
        return 1 / (1 - pii)
class GeoRank_Markov:
    """
    Geographic Rank Markov.
    Geographic units are considered as Markov states.

    Parameters
    ----------
    y : array
        (n, t) with t>>n, one row per observation (n total),
        one column recording the value of each observation,
        with as many columns as time periods.

    Attributes
    ----------
    p : array
        (n, n), transition probability matrix for
        geographic rank Markov.
    steady_state : array
        (n, ), ergodic distribution.
    transitions : array
        (n, n), count of rank transitions between each
        geographic unit i and j.
    fmpt : array
        (n, n), first mean passage times.
    sojourn_time : array
        (n, ), sojourn times.

    Notes
    -----
    Refer to :cite:`Rey2014a` Equation (13)-(16) for details. Ties are
    resolved by assigning distinct ranks, corresponding to the order
    that the values occur in each cross section.

    Examples
    --------
    US nominal per capita income 48 states 81 years 1929-2009

    >>> from giddy.markov import GeoRank_Markov
    >>> import libpysal as ps
    >>> import numpy as np
    >>> f = ps.io.open(ps.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]).transpose()
    >>> m = GeoRank_Markov(pci)
    >>> m.transitions
    array([[38.,  0.,  8., ...,  0.,  0.,  0.],
           [ 0., 15.,  0., ...,  0.,  1.,  0.],
           [ 6.,  0., 44., ...,  5.,  0.,  0.],
           ...,
           [ 2.,  0.,  5., ..., 34.,  0.,  0.],
           [ 0.,  0.,  0., ...,  0., 18.,  2.],
           [ 0.,  0.,  0., ...,  0.,  3., 14.]])
    >>> m.p
    array([[0.475 , 0.    , 0.1   , ..., 0.    , 0.    , 0.    ],
           [0.    , 0.1875, 0.    , ..., 0.    , 0.0125, 0.    ],
           [0.075 , 0.    , 0.55  , ..., 0.0625, 0.    , 0.    ],
           ...,
           [0.025 , 0.    , 0.0625, ..., 0.425 , 0.    , 0.    ],
           [0.    , 0.    , 0.    , ..., 0.    , 0.225 , 0.025 ],
           [0.    , 0.    , 0.    , ..., 0.    , 0.0375, 0.175 ]])
    >>> m.fmpt
    array([[ 48.        ,  63.35532038,  92.75274652, ...,  82.47515731,
             71.01114491,  68.65737127],
           [108.25928005,  48.        , 127.99032986, ...,  92.03098299,
             63.36652935,  61.82733039],
           [ 76.96801786,  64.7713783 ,  48.        , ...,  73.84595169,
             72.24682723,  69.77497173],
           ...,
           [ 93.3107474 ,  62.47670463, 105.80634118, ...,  48.        ,
             69.30121319,  67.08838421],
           [113.65278078,  61.1987031 , 133.57991745, ...,  96.0103924 ,
             48.        ,  56.74165107],
           [114.71894813,  63.4019776 , 134.73381719, ...,  97.287895  ,
             61.45565054,  48.        ]])
    >>> m.sojourn_time
    array([ 1.9047619 ,  1.23076923,  2.22222222,  1.73913043,  1.15942029,
            3.80952381,  1.70212766,  1.25      ,  1.31147541,  1.11111111,
            1.73913043,  1.37931034,  1.17647059,  1.21212121,  1.33333333,
            1.37931034,  1.09589041,  2.10526316,  2.        ,  1.45454545,
            1.26984127, 26.66666667,  1.19402985,  1.23076923,  1.09589041,
            1.56862745,  1.26984127,  2.42424242,  1.50943396,  2.        ,
            1.29032258,  1.09589041,  1.6       ,  1.42857143,  1.25      ,
            1.45454545,  1.29032258,  1.6       ,  1.17647059,  1.56862745,
            1.25      ,  1.37931034,  1.45454545,  1.42857143,  1.29032258,
            1.73913043,  1.29032258,  1.21212121])

    """

    def __init__(self, y):
        # coerce input to ndarray (the unused local ``n = y.shape[0]``
        # from the original implementation was removed)
        y = np.asarray(y)
        # resolve ties: All values are given a distinct rank, corresponding
        # to the order that the values occur in each cross section.
        ranks = np.array([rankdata(col, method='ordinal') for col in y.T]).T
        # unit occupying each rank position per period; states are units
        geo_ranks = np.argsort(ranks, axis=0) + 1
        grm = Markov(geo_ranks)
        self.p = grm.p
        self.transitions = grm.transitions

    @property
    def steady_state(self):
        # ergodic distribution; computed lazily and cached
        if not hasattr(self, '_steady_state'):
            self._steady_state = STEADY_STATE(self.p)
        return self._steady_state

    @property
    def fmpt(self):
        # first mean passage times; computed lazily and cached
        if not hasattr(self, '_fmpt'):
            self._fmpt = fmpt(self.p)
        return self._fmpt

    @property
    def sojourn_time(self):
        # expected time spent in each state before leaving; cached
        if not hasattr(self, '_st'):
            self._st = sojourn_time(self.p)
        return self._st
|
pysal/giddy | giddy/markov.py | kullback | python | def kullback(F):
F1 = F == 0
F1 = F + F1
FLF = F * np.log(F1)
T1 = 2 * FLF.sum()
FdJK = F.sum(axis=0)
FdJK1 = FdJK + (FdJK == 0)
FdJKLFdJK = FdJK * np.log(FdJK1)
T2 = 2 * FdJKLFdJK.sum()
FdJd = F.sum(axis=0).sum(axis=1)
FdJd1 = FdJd + (FdJd == 0)
T3 = 2 * (FdJd * np.log(FdJd1)).sum()
FIJd = F[:, :].sum(axis=1)
FIJd1 = FIJd + (FIJd == 0)
T4 = 2 * (FIJd * np.log(FIJd1)).sum()
T6 = F.sum()
T6 = 2 * T6 * np.log(T6)
s, r, r1 = F.shape
chom = T1 - T4 - T2 + T3
cdof = r * (s - 1) * (r - 1)
results = {}
results['Conditional homogeneity'] = chom
results['Conditional homogeneity dof'] = cdof
results['Conditional homogeneity pvalue'] = 1 - stats.chi2.cdf(chom, cdof)
return results | Kullback information based test of Markov Homogeneity.
Parameters
----------
F : array
(s, r, r), values are transitions (not probabilities) for
s strata, r initial states, r terminal states.
Returns
-------
Results : dictionary
(key - value)
Conditional homogeneity - (float) test statistic for homogeneity
of transition probabilities across strata.
Conditional homogeneity pvalue - (float) p-value for test
statistic.
Conditional homogeneity dof - (int) degrees of freedom =
r(s-1)(r-1).
Notes
-----
Based on :cite:`Kullback1962`.
Example below is taken from Table 9.2 .
Examples
--------
>>> import numpy as np
>>> from giddy.markov import kullback
>>> s1 = np.array([
... [ 22, 11, 24, 2, 2, 7],
... [ 5, 23, 15, 3, 42, 6],
... [ 4, 21, 190, 25, 20, 34],
... [0, 2, 14, 56, 14, 28],
... [32, 15, 20, 10, 56, 14],
... [5, 22, 31, 18, 13, 134]
... ])
>>> s2 = np.array([
... [3, 6, 9, 3, 0, 8],
... [1, 9, 3, 12, 27, 5],
... [2, 9, 208, 32, 5, 18],
... [0, 14, 32, 108, 40, 40],
... [22, 14, 9, 26, 224, 14],
... [1, 5, 13, 53, 13, 116]
... ])
>>>
>>> F = np.array([s1, s2])
>>> res = kullback(F)
>>> "%8.3f"%res['Conditional homogeneity']
' 160.961'
>>> "%d"%res['Conditional homogeneity dof']
'30'
>>> "%3.1f"%res['Conditional homogeneity pvalue']
'0.0' | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1336-L1425 | null | """
Markov based methods for spatial dynamics.
"""
__author__ = "Sergio J. Rey <sjsrey@gmail.com>, Wei Kang <weikang9009@gmail.com>"
__all__ = ["Markov", "LISA_Markov", "Spatial_Markov", "kullback",
"prais", "homogeneity", "FullRank_Markov", "sojourn_time",
"GeoRank_Markov"]
import numpy as np
from .ergodic import fmpt
from .ergodic import steady_state as STEADY_STATE
from .components import Graph
from scipy import stats
from scipy.stats import rankdata
from operator import gt
from libpysal import weights
from esda.moran import Moran_Local
import mapclassify as mc
import itertools
# TT predefine LISA transitions
# TT[i,j] is the transition type from i to j
# i = quadrant in period 0
# j = quadrant in period 1
# uses one offset so first row and col of TT are ignored
TT = np.zeros((5, 5), int)  # 5x5 so quadrant labels 1..4 index directly
c = 1  # running transition-type code; types 1..16 assigned row-wise
for i in range(1, 5):
    for j in range(1, 5):
        TT[i, j] = c
        c += 1
# MOVE_TYPES is a dictionary that returns the move type of a LISA transition
# filtered on the significance of the LISA end points
# True indicates significant LISA in a particular period
# e.g. a key of (1, 3, True, False) indicates a significant LISA located in
# quadrant 1 in period 0 moved to quadrant 3 in period 1 but was not
# significant in quadrant 3.
MOVE_TYPES = {}
c = 1
cases = (True, False)
sig_keys = [(i, j) for i in cases for j in cases]
# each significance pattern -- (sig, sig), (sig, insig), (insig, sig),
# (insig, insig) -- occupies its own bank of 16 consecutive codes
for i, sig_key in enumerate(sig_keys):
    c = 1 + i * 16
    # NOTE: the inner loop reuses the name ``i``; benign because the bank
    # offset ``c`` was already fixed above from the outer index
    for i in range(1, 5):
        for j in range(1, 5):
            key = (i, j, sig_key[0], sig_key[1])
            MOVE_TYPES[key] = c
            c += 1
class Markov(object):
    """
    Classic Markov transition matrices.

    Parameters
    ----------
    class_ids    : array
                   (n, t), one row per observation, one column recording the
                   state of each observation, with as many columns as time
                   periods.
    classes      : array
                   (k, 1), all different classes (bins) of the matrix.

    Attributes
    ----------
    p            : array
                   (k, k), transition probability matrix.
    steady_state : array
                   (k, ), ergodic distribution.
    transitions  : array
                   (k, k), count of transitions between each state i and j.

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.markov import Markov
    >>> c = [['b','a','c'],['c','c','a'],['c','b','c']]
    >>> c.extend([['a','a','b'], ['a','b','c']])
    >>> c = np.array(c)
    >>> m = Markov(c)
    >>> m.classes.tolist()
    ['a', 'b', 'c']
    >>> m.p
    array([[0.25      , 0.5       , 0.25      ],
           [0.33333333, 0.        , 0.66666667],
           [0.33333333, 0.33333333, 0.33333333]])
    >>> m.steady_state
    array([0.30769231, 0.28846154, 0.40384615])

    US nominal per capita income 48 states 81 years 1929-2009

    >>> import libpysal
    >>> import mapclassify as mc
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])

    set classes to quintiles for each year

    >>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
    >>> m = Markov(q5)
    >>> m.transitions
    array([[729.,  71.,   1.,   0.,   0.],
           [ 72., 567.,  80.,   3.,   0.],
           [  0.,  81., 631.,  86.,   2.],
           [  0.,   3.,  86., 573.,  56.],
           [  0.,   0.,   1.,  57., 741.]])
    >>> m.p
    array([[0.91011236, 0.0886392 , 0.00124844, 0.        , 0.        ],
           [0.09972299, 0.78531856, 0.11080332, 0.00415512, 0.        ],
           [0.        , 0.10125   , 0.78875   , 0.1075    , 0.0025    ],
           [0.        , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
           [0.        , 0.        , 0.00125156, 0.07133917, 0.92740926]])
    >>> m.steady_state
    array([0.20774716, 0.18725774, 0.20740537, 0.18821787, 0.20937187])

    Relative incomes

    >>> pci = pci.transpose()
    >>> rpci = pci/(pci.mean(axis=0))
    >>> rq = mc.Quantiles(rpci.flatten()).yb.reshape(pci.shape)
    >>> mq = Markov(rq)
    >>> mq.transitions
    array([[707.,  58.,   7.,   1.,   0.],
           [ 50., 629.,  80.,   1.,   1.],
           [  4.,  79., 610.,  73.,   2.],
           [  0.,   7.,  72., 650.,  37.],
           [  0.,   0.,   0.,  48., 724.]])
    >>> mq.steady_state
    array([0.17957376, 0.21631443, 0.21499942, 0.21134662, 0.17776576])

    """
    def __init__(self, class_ids, classes=None):
        # Use the caller-supplied state labels when given; otherwise infer
        # the label set from the observed data.
        if classes is not None:
            self.classes = classes
        else:
            self.classes = np.unique(class_ids)
        n, t = class_ids.shape
        k = len(self.classes)
        js = list(range(t - 1))
        classIds = self.classes.tolist()
        transitions = np.zeros((k, k))
        # Accumulate transition counts between every pair of consecutive
        # time periods, pooled across all n observations.
        for state_0 in js:
            state_1 = state_0 + 1
            state_0 = class_ids[:, state_0]
            state_1 = class_ids[:, state_1]
            initial = np.unique(state_0)
            for i in initial:
                ending = state_1[state_0 == i]
                uending = np.unique(ending)
                row = classIds.index(i)
                for j in uending:
                    col = classIds.index(j)
                    transitions[row, col] += sum(ending == j)
        self.transitions = transitions
        row_sum = transitions.sum(axis=1)
        # Row-normalize the counts into probabilities; the (row_sum == 0)
        # term guards against division by zero for states never departed.
        self.p = np.dot(np.diag(1 / (row_sum + (row_sum == 0))), transitions)

    @property
    def steady_state(self):
        # Ergodic distribution of p, computed lazily and cached.
        if not hasattr(self, '_steady_state'):
            self._steady_state = STEADY_STATE(self.p)
        return self._steady_state
class Spatial_Markov(object):
    """
    Markov transitions conditioned on the value of the spatial lag.

    Parameters
    ----------
    y               : array
                      (n, t), one row per observation, one column per state of
                      each observation, with as many columns as time periods.
    w               : W
                      spatial weights object.
    k               : integer, optional
                      number of classes (quantiles) for input time series y.
                      Default is 4. If discrete=True, k is determined
                      endogenously.
    m               : integer, optional
                      number of classes (quantiles) for the spatial lags of
                      regional time series. Default is 4. If discrete=True,
                      m is determined endogenously.
    permutations    : int, optional
                      number of permutations for use in randomization based
                      inference (the default is 0).
    fixed           : bool, optional
                      If true, discretization are taken over the entire n*t
                      pooled series and cutoffs can be user-defined. If
                      cutoffs and lag_cutoffs are not given, quantiles are
                      used. If false, quantiles are taken each time period
                      over n. Default is True.
    discrete        : bool, optional
                      If true, categorical spatial lags which are most common
                      categories of neighboring observations serve as the
                      conditioning and fixed is ignored; if false, weighted
                      averages of neighboring observations are used. Default is
                      false.
    cutoffs         : array, optional
                      users can specify the discretization cutoffs for
                      continuous time series. Default is None, meaning that
                      quantiles will be used for the discretization.
    lag_cutoffs     : array, optional
                      users can specify the discretization cutoffs for the
                      spatial lags of continuous time series. Default is
                      None, meaning that quantiles will be used for the
                      discretization.
    variable_name   : string
                      name of variable.

    Attributes
    ----------
    class_ids       : array
                      (n, t), discretized series if y is continuous. Otherwise
                      it is identical to y.
    classes         : array
                      (k, 1), all different classes (bins).
    lclass_ids      : array
                      (n, t), spatial lag series.
    lclasses        : array
                      (k, 1), all different classes (bins) for
                      spatial lags.
    p               : array
                      (k, k), transition probability matrix for a-spatial
                      Markov.
    s               : array
                      (k, 1), ergodic distribution for a-spatial Markov.
    transitions     : array
                      (k, k), counts of transitions between each state i and j
                      for a-spatial Markov.
    T               : array
                      (k, k, k), counts of transitions for each conditional
                      Markov. T[0] is the matrix of transitions for
                      observations with lags in the 0th quantile; T[k-1] is the
                      transitions for the observations with lags in the k-1th.
    P               : array
                      (k, k, k), transition probability matrix for spatial
                      Markov first dimension is the conditioned on the lag.
    S               : array
                      (k, k), steady state distributions for spatial Markov.
                      Each row is a conditional steady_state.
    F               : array
                      (k, k, k),first mean passage times.
                      First dimension is conditioned on the lag.
    shtest          : list
                      (k elements), each element of the list is a tuple for a
                      multinomial difference test between the steady state
                      distribution from a conditional distribution versus the
                      overall steady state distribution: first element of the
                      tuple is the chi2 value, second its p-value and the third
                      the degrees of freedom.
    chi2            : list
                      (k elements), each element of the list is a tuple for a
                      chi-squared test of the difference between the
                      conditional transition matrix against the overall
                      transition matrix: first element of the tuple is the chi2
                      value, second its p-value and the third the degrees of
                      freedom.
    x2              : float
                      sum of the chi2 values for each of the conditional tests.
                      Has an asymptotic chi2 distribution with k(k-1)(k-1)
                      degrees of freedom. Under the null that transition
                      probabilities are spatially homogeneous.
                      (see chi2 above)
    x2_dof          : int
                      degrees of freedom for homogeneity test.
    x2_pvalue       : float
                      pvalue for homogeneity test based on analytic.
                      distribution
    x2_rpvalue      : float
                      (if permutations>0)
                      pseudo p-value for x2 based on random spatial
                      permutations of the rows of the original transitions.
    x2_realizations : array
                      (permutations,1), the values of x2 for the random
                      permutations.
    Q               : float
                      Chi-square test of homogeneity across lag classes based
                      on :cite:`Bickenbach2003`.
    Q_p_value       : float
                      p-value for Q.
    LR              : float
                      Likelihood ratio statistic for homogeneity across lag
                      classes based on :cite:`Bickenbach2003`.
    LR_p_value      : float
                      p-value for LR.
    dof_hom         : int
                      degrees of freedom for LR and Q, corrected for 0 cells.

    Notes
    -----
    Based on :cite:`Rey2001`.

    The shtest and chi2 tests should be used with caution as they are based on
    classic theory assuming random transitions. The x2 based test is
    preferable since it simulates the randomness under the null. It is an
    experimental test requiring further analysis.

    Examples
    --------
    >>> import libpysal
    >>> from giddy.markov import Spatial_Markov
    >>> import numpy as np
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
    >>> pci = pci.transpose()
    >>> rpci = pci/(pci.mean(axis=0))
    >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
    >>> w.transform = 'r'

    Now we create a `Spatial_Markov` instance for the continuous relative per
    capita income time series for 48 US lower states 1929-2009. The current
    implementation allows users to classify the continuous incomes in a more
    flexible way.

    (1) Global quintiles to discretize the income data (k=5), and global
    quintiles to discretize the spatial lags of incomes (m=5).

    >>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=5, variable_name='rpci')

    We can examine the cutoffs for the incomes and cutoffs for the spatial lags

    >>> sm.cutoffs
    array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
    >>> sm.lag_cutoffs
    array([0.88973386, 0.95891917, 1.01469758, 1.1183566 ])

    Obviously, they are slightly different.

    We now look at the estimated spatially lag conditioned transition
    probability matrices.

    >>> for p in sm.P:
    ...     print(p)
    [[0.96341463 0.0304878  0.00609756 0.         0.        ]
     [0.06040268 0.83221477 0.10738255 0.         0.        ]
     [0.         0.14       0.74       0.12       0.        ]
     [0.         0.03571429 0.32142857 0.57142857 0.07142857]
     [0.         0.         0.         0.16666667 0.83333333]]
    [[0.79831933 0.16806723 0.03361345 0.         0.        ]
     [0.0754717  0.88207547 0.04245283 0.         0.        ]
     [0.00537634 0.06989247 0.8655914  0.05913978 0.        ]
     [0.         0.         0.06372549 0.90196078 0.03431373]
     [0.         0.         0.         0.19444444 0.80555556]]
    [[0.84693878 0.15306122 0.         0.         0.        ]
     [0.08133971 0.78947368 0.1291866  0.         0.        ]
     [0.00518135 0.0984456  0.79274611 0.0984456  0.00518135]
     [0.         0.         0.09411765 0.87058824 0.03529412]
     [0.         0.         0.         0.10204082 0.89795918]]
    [[0.8852459  0.09836066 0.         0.01639344 0.        ]
     [0.03875969 0.81395349 0.13953488 0.         0.00775194]
     [0.0049505  0.09405941 0.77722772 0.11881188 0.0049505 ]
     [0.         0.02339181 0.12865497 0.75438596 0.09356725]
     [0.         0.         0.         0.09661836 0.90338164]]
    [[0.33333333 0.66666667 0.         0.         0.        ]
     [0.0483871  0.77419355 0.16129032 0.01612903 0.        ]
     [0.01149425 0.16091954 0.74712644 0.08045977 0.        ]
     [0.         0.01036269 0.06217617 0.89637306 0.03108808]
     [0.         0.         0.         0.02352941 0.97647059]]

    The probability of a poor state remaining poor is 0.963 if their
    neighbors are in the 1st quintile and 0.798 if their neighbors are
    in the 2nd quintile. The probability of a rich economy remaining
    rich is 0.976 if their neighbors are in the 5th quintile, but if their
    neighbors are in the 4th quintile this drops to 0.903.

    The global transition probability matrix is estimated:

    >>> print(sm.p)
    [[0.91461837 0.07503234 0.00905563 0.00129366 0.        ]
     [0.06570302 0.82654402 0.10512484 0.00131406 0.00131406]
     [0.00520833 0.10286458 0.79427083 0.09505208 0.00260417]
     [0.         0.00913838 0.09399478 0.84856397 0.04830287]
     [0.         0.         0.         0.06217617 0.93782383]]

    The Q and likelihood ratio statistics are both significant indicating
    the dynamics are not homogeneous across the lag classes:

    >>> "%.3f"%sm.LR
    '170.659'
    >>> "%.3f"%sm.Q
    '200.624'
    >>> "%.3f"%sm.LR_p_value
    '0.000'
    >>> "%.3f"%sm.Q_p_value
    '0.000'
    >>> sm.dof_hom
    60

    The long run distribution for states with poor (rich) neighbors has
    0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in
    the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the
    fourth and 0.029 (0.337) in the fifth quintile.

    >>> sm.S
    array([[0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278],
           [0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356],
           [0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ],
           [0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013],
           [0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]])

    States with incomes in the first quintile with neighbors in the
    first quintile return to the first quartile after 2.298 years, after
    leaving the first quintile. They enter the fourth quintile after
    80.810 years after leaving the first quintile, on average.
    Poor states within neighbors in the fourth quintile return to the
    first quintile, on average, after 12.88 years, and would enter the
    fourth quintile after 28.473 years.

    >>> for f in sm.F:
    ...     print(f)
    ...
    [[  2.29835259  28.95614035  46.14285714  80.80952381 279.42857143]
     [ 33.86549708   3.79459555  22.57142857  57.23809524 255.85714286]
     [ 43.60233918   9.73684211   4.91085714  34.66666667 233.28571429]
     [ 46.62865497  12.76315789   6.25714286  14.61564626 198.61904762]
     [ 52.62865497  18.76315789  12.25714286   6.          34.1031746 ]]
    [[  7.46754205   9.70574606  25.76785714  74.53116883 194.23446197]
     [ 27.76691978   2.94175577  24.97142857  73.73474026 193.4380334 ]
     [ 53.57477715  28.48447637   3.97566318  48.76331169 168.46660482]
     [ 72.03631562  46.94601483  18.46153846   4.28393653 119.70329314]
     [ 77.17917276  52.08887197  23.6043956    5.14285714  24.27564033]]
    [[  8.24751154   6.53333333  18.38765432  40.70864198 112.76732026]
     [ 47.35040872   4.73094099  11.85432099  34.17530864 106.23398693]
     [ 69.42288828  24.76666667   3.794921    22.32098765  94.37966594]
     [ 83.72288828  39.06666667  14.3          3.44668119  76.36702977]
     [ 93.52288828  48.86666667  24.1          9.8          8.79255406]]
    [[ 12.87974382  13.34847151  19.83446328  28.47257282  55.82395142]
     [ 99.46114206   5.06359731  10.54545198  23.05133495  49.68944423]
     [117.76777159  23.03735526   3.94436301  15.0843986   43.57927247]
     [127.89752089  32.4393006   14.56853107   4.44831643  31.63099455]
     [138.24752089  42.7893006   24.91853107  10.35         4.05613474]]
    [[ 56.2815534    1.5         10.57236842  27.02173913 110.54347826]
     [ 82.9223301    5.00892857   9.07236842  25.52173913 109.04347826]
     [ 97.17718447  19.53125      5.26043557  21.42391304 104.94565217]
     [127.1407767   48.74107143  33.29605263   3.91777427  83.52173913]
     [169.6407767   91.24107143  75.79605263  42.5          2.96521739]]

    (2) Global quintiles to discretize the income data (k=5), and global
    quartiles to discretize the spatial lags of incomes (m=4).

    >>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=4, variable_name='rpci')

    We can also examine the cutoffs for the incomes and cutoffs for the spatial
    lags:

    >>> sm.cutoffs
    array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
    >>> sm.lag_cutoffs
    array([0.91440247, 0.98583079, 1.08698351])

    We now look at the estimated spatially lag conditioned transition
    probability matrices.

    >>> for p in sm.P:
    ...     print(p)
    [[0.95708955 0.03544776 0.00746269 0.         0.        ]
     [0.05825243 0.83980583 0.10194175 0.         0.        ]
     [0.         0.1294964  0.76258993 0.10791367 0.        ]
     [0.         0.01538462 0.18461538 0.72307692 0.07692308]
     [0.         0.         0.         0.14285714 0.85714286]]
    [[0.7421875  0.234375   0.0234375  0.         0.        ]
     [0.08550186 0.85130112 0.06319703 0.         0.        ]
     [0.00865801 0.06926407 0.86147186 0.05627706 0.004329  ]
     [0.         0.         0.05363985 0.92337165 0.02298851]
     [0.         0.         0.         0.13432836 0.86567164]]
    [[0.95145631 0.04854369 0.         0.         0.        ]
     [0.06       0.79       0.145      0.         0.005     ]
     [0.00358423 0.10394265 0.7921147  0.09677419 0.00358423]
     [0.         0.01630435 0.13586957 0.75543478 0.0923913 ]
     [0.         0.         0.         0.10204082 0.89795918]]
    [[0.16666667 0.66666667 0.         0.16666667 0.        ]
     [0.03488372 0.80232558 0.15116279 0.01162791 0.        ]
     [0.00840336 0.13445378 0.70588235 0.1512605  0.        ]
     [0.         0.01171875 0.08203125 0.87109375 0.03515625]
     [0.         0.         0.         0.03434343 0.96565657]]

    We now obtain 4 5*5 spatial lag conditioned transition probability
    matrices instead of 5 as in case (1).

    The Q and likelihood ratio statistics are still both significant.

    >>> "%.3f"%sm.LR
    '172.105'
    >>> "%.3f"%sm.Q
    '321.128'
    >>> "%.3f"%sm.LR_p_value
    '0.000'
    >>> "%.3f"%sm.Q_p_value
    '0.000'
    >>> sm.dof_hom
    45

    (3) We can also set the cutoffs for relative incomes and their
    spatial lags manually.
    For example, we want the defining cutoffs to be [0.8, 0.9, 1, 1.2],
    meaning that relative incomes:

    2.1 smaller than 0.8 : class 0
    2.2 between 0.8 and 0.9: class 1
    2.3 between 0.9 and 1.0 : class 2
    2.4 between 1.0 and 1.2: class 3
    2.5 larger than 1.2: class 4

    >>> cc = np.array([0.8, 0.9, 1, 1.2])
    >>> sm = Spatial_Markov(rpci, w, cutoffs=cc, lag_cutoffs=cc, variable_name='rpci')
    >>> sm.cutoffs
    array([0.8, 0.9, 1. , 1.2])
    >>> sm.k
    5
    >>> sm.lag_cutoffs
    array([0.8, 0.9, 1. , 1.2])
    >>> sm.m
    5
    >>> for p in sm.P:
    ...     print(p)
    [[0.96703297 0.03296703 0.         0.         0.        ]
     [0.10638298 0.68085106 0.21276596 0.         0.        ]
     [0.         0.14285714 0.7755102  0.08163265 0.        ]
     [0.         0.         0.5        0.5        0.        ]
     [0.         0.         0.         0.         0.        ]]
    [[0.88636364 0.10606061 0.00757576 0.         0.        ]
     [0.04402516 0.89308176 0.06289308 0.         0.        ]
     [0.         0.05882353 0.8627451  0.07843137 0.        ]
     [0.         0.         0.13846154 0.86153846 0.        ]
     [0.         0.         0.         0.         1.        ]]
    [[0.78082192 0.17808219 0.02739726 0.01369863 0.        ]
     [0.03488372 0.90406977 0.05813953 0.00290698 0.        ]
     [0.         0.05919003 0.84735202 0.09034268 0.00311526]
     [0.         0.         0.05811623 0.92985972 0.01202405]
     [0.         0.         0.         0.14285714 0.85714286]]
    [[0.82692308 0.15384615 0.         0.01923077 0.        ]
     [0.0703125  0.7890625  0.125      0.015625   0.        ]
     [0.00295858 0.06213018 0.82248521 0.10946746 0.00295858]
     [0.         0.00185529 0.07606679 0.88497217 0.03710575]
     [0.         0.         0.         0.07803468 0.92196532]]
    [[0.         0.         0.         0.         0.        ]
     [0.         0.         0.         0.         0.        ]
     [0.         0.06666667 0.9        0.03333333 0.        ]
     [0.         0.         0.05660377 0.90566038 0.03773585]
     [0.         0.         0.         0.03932584 0.96067416]]

    (4) Spatial_Markov also accept discrete time series and calculate
    categorical spatial lags on which several transition probability matrices
    are conditioned.
    Let's still use the US state income time series to demonstrate. We first
    discretize them into categories and then pass them to Spatial_Markov.

    >>> import mapclassify as mc
    >>> y = mc.Quantiles(rpci.flatten(), k=5).yb.reshape(rpci.shape)
    >>> np.random.seed(5)
    >>> sm = Spatial_Markov(y, w, discrete=True, variable_name='discretized rpci')
    >>> sm.k
    5
    >>> sm.m
    5
    >>> for p in sm.P:
    ...     print(p)
    [[0.94787645 0.04440154 0.00772201 0.         0.        ]
     [0.08333333 0.81060606 0.10606061 0.         0.        ]
     [0.         0.12765957 0.79787234 0.07446809 0.        ]
     [0.         0.02777778 0.22222222 0.66666667 0.08333333]
     [0.         0.         0.         0.33333333 0.66666667]]
    [[0.888      0.096      0.016      0.         0.        ]
     [0.06049822 0.84341637 0.09608541 0.         0.        ]
     [0.00666667 0.10666667 0.81333333 0.07333333 0.        ]
     [0.         0.         0.08527132 0.86821705 0.04651163]
     [0.         0.         0.         0.10204082 0.89795918]]
    [[0.65217391 0.32608696 0.02173913 0.         0.        ]
     [0.07446809 0.80851064 0.11170213 0.         0.00531915]
     [0.01071429 0.1        0.76428571 0.11785714 0.00714286]
     [0.         0.00552486 0.09392265 0.86187845 0.03867403]
     [0.         0.         0.         0.13157895 0.86842105]]
    [[0.91935484 0.06451613 0.         0.01612903 0.        ]
     [0.06796117 0.90291262 0.02912621 0.         0.        ]
     [0.         0.05755396 0.87769784 0.0647482  0.        ]
     [0.         0.02150538 0.10752688 0.80107527 0.06989247]
     [0.         0.         0.         0.08064516 0.91935484]]
    [[0.81818182 0.18181818 0.         0.         0.        ]
     [0.01754386 0.70175439 0.26315789 0.01754386 0.        ]
     [0.         0.14285714 0.73333333 0.12380952 0.        ]
     [0.         0.0042735  0.06837607 0.89316239 0.03418803]
     [0.         0.         0.         0.03891051 0.96108949]]

    """
    def __init__(self, y, w, k=4, m=4, permutations=0, fixed=True,
                 discrete=False, cutoffs=None, lag_cutoffs=None,
                 variable_name=None):
        y = np.asarray(y)
        self.fixed = fixed
        self.discrete = discrete
        self.cutoffs = cutoffs
        self.m = m
        self.lag_cutoffs = lag_cutoffs
        self.variable_name = variable_name
        if discrete:
            # Discrete input: the observed categories themselves are the
            # states; k and m are determined endogenously and y is relabeled
            # with integer codes 0..k-1.
            merged = list(itertools.chain.from_iterable(y))
            classes = np.unique(merged)
            self.classes = classes
            self.k = len(classes)
            self.m = self.k
            label_dict = dict(zip(classes, range(self.k)))
            y_int = []
            for yi in y:
                y_int.append(list(map(label_dict.get, yi)))
            self.class_ids = np.array(y_int)
            self.lclass_ids = self.class_ids
        else:
            # Continuous input: discretize via user cutoffs or quantiles.
            self.class_ids, self.cutoffs, self.k = self._maybe_classify(
                y, k=k, cutoffs=self.cutoffs)
            self.classes = np.arange(self.k)
        # A-spatial (global) Markov chain for the pooled series.
        classic = Markov(self.class_ids)
        self.p = classic.p
        self.transitions = classic.transitions
        # Lag-conditioned transition counts (T) and probabilities (P).
        self.T, self.P = self._calc(y, w)
        if permutations:
            # Pseudo p-value for the x2 homogeneity statistic via random
            # spatial permutation of the rows of y.
            nrp = np.random.permutation
            counter = 0
            x2_realizations = np.zeros((permutations, 1))
            for perm in range(permutations):
                T, P = self._calc(nrp(y), w)
                x2 = [chi2(T[i], self.transitions)[0] for i in range(self.k)]
                x2s = sum(x2)
                x2_realizations[perm] = x2s
                if x2s >= self.x2:
                    counter += 1
            self.x2_rpvalue = (counter + 1.0) / (permutations + 1.)
            self.x2_realizations = x2_realizations

    @property
    def s(self):
        # Ergodic distribution of the global chain (lazy, cached).
        if not hasattr(self, '_s'):
            self._s = STEADY_STATE(self.p)
        return self._s

    @property
    def S(self):
        # One ergodic distribution per lag-conditioned chain (lazy, cached).
        if not hasattr(self, '_S'):
            S = np.zeros_like(self.p)
            for i, p in enumerate(self.P):
                S[i] = STEADY_STATE(p)
            self._S = np.asarray(S)
        return self._S

    @property
    def F(self):
        # First mean passage times per lag-conditioned chain (lazy, cached).
        if not hasattr(self, '_F'):
            F = np.zeros_like(self.P)
            for i, p in enumerate(self.P):
                F[i] = fmpt(np.asmatrix(p))
            self._F = np.asarray(F)
        return self._F

    # bickenbach and bode tests
    @property
    def ht(self):
        # Homogeneity test object shared by Q/LR/dof_hom properties.
        if not hasattr(self, '_ht'):
            self._ht = homogeneity(self.T)
        return self._ht

    @property
    def Q(self):
        if not hasattr(self, '_Q'):
            self._Q = self.ht.Q
        return self._Q

    @property
    def Q_p_value(self):
        self._Q_p_value = self.ht.Q_p_value
        return self._Q_p_value

    @property
    def LR(self):
        self._LR = self.ht.LR
        return self._LR

    @property
    def LR_p_value(self):
        self._LR_p_value = self.ht.LR_p_value
        return self._LR_p_value

    @property
    def dof_hom(self):
        self._dof_hom = self.ht.dof
        return self._dof_hom

    # shtests
    @property
    def shtest(self):
        if not hasattr(self, '_shtest'):
            self._shtest = self._mn_test()
        return self._shtest

    @property
    def chi2(self):
        if not hasattr(self, '_chi2'):
            self._chi2 = self._chi2_test()
        return self._chi2

    @property
    def x2(self):
        # Sum of the conditional chi2 statistics.
        if not hasattr(self, '_x2'):
            self._x2 = sum([c[0] for c in self.chi2])
        return self._x2

    @property
    def x2_pvalue(self):
        if not hasattr(self, '_x2_pvalue'):
            self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof)
        return self._x2_pvalue

    @property
    def x2_dof(self):
        if not hasattr(self, '_x2_dof'):
            k = self.k
            self._x2_dof = k * (k - 1) * (k - 1)
        return self._x2_dof

    def _calc(self, y, w):
        '''Helper to estimate spatial lag conditioned Markov transition
        probability matrices based on maximum likelihood techniques.
        '''
        if self.discrete:
            self.lclass_ids = weights.lag_categorical(w, self.class_ids,
                                                      ties="tryself")
        else:
            ly = weights.lag_spatial(w, y)
            self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
                ly, self.m, self.lag_cutoffs)
        self.lclasses = np.arange(self.m)
        # T[l, i, j]: count of i->j transitions for observations whose
        # spatial lag is in class l at the start of the transition.
        T = np.zeros((self.m, self.k, self.k))
        n, t = y.shape
        for t1 in range(t - 1):
            t2 = t1 + 1
            for i in range(n):
                T[self.lclass_ids[i, t1], self.class_ids[i, t1],
                  self.class_ids[i, t2]] += 1
        P = np.zeros_like(T)
        for i, mat in enumerate(T):
            row_sum = mat.sum(axis=1)
            # Guard against division by zero for empty rows.
            row_sum = row_sum + (row_sum == 0)
            p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
            P[i] = p_i
        return T, P

    def _mn_test(self):
        """
        helper to calculate tests of differences between steady state
        distributions from the conditional and overall distributions.
        """
        n0, n1, n2 = self.T.shape
        rn = list(range(n0))
        mat = [self._ssmnp_test(
            self.s, self.S[i], self.T[i].sum()) for i in rn]
        return mat

    def _ssmnp_test(self, p1, p2, nt):
        """
        Steady state multinomial probability difference test.

        Arguments
        ---------
        p1        :  array
                     (k, ), first steady state probability distribution.
        p1        :  array
                     (k, ), second steady state probability distribution.
        nt        :  int
                     number of transitions to base the test on.

        Returns
        -------
        tuple
                     (3 elements)
                     (chi2 value, pvalue, degrees of freedom)

        """
        o = nt * p2
        e = nt * p1
        d = np.multiply((o - e), (o - e))
        d = d / e
        chi2 = d.sum()
        pvalue = 1 - stats.chi2.cdf(chi2, self.k - 1)
        return (chi2, pvalue, self.k - 1)

    def _chi2_test(self):
        """
        helper to calculate tests of differences between the conditional
        transition matrices and the overall transitions matrix.
        """
        n0, n1, n2 = self.T.shape
        rn = list(range(n0))
        mat = [chi2(self.T[i], self.transitions) for i in rn]
        return mat

    def summary(self, file_name=None):
        """
        A summary method to call the Markov homogeneity test to test for
        temporally lagged spatial dependence.

        To learn more about the properties of the tests, refer to
        :cite:`Rey2016a` and :cite:`Kang2018`.
        """
        class_names = ["C%d" % i for i in range(self.k)]
        regime_names = ["LAG%d" % i for i in range(self.k)]
        ht = homogeneity(self.T, class_names=class_names,
                         regime_names=regime_names)
        title = "Spatial Markov Test"
        if self.variable_name:
            title = title + ": " + self.variable_name
        if file_name:
            ht.summary(file_name=file_name, title=title)
        else:
            ht.summary(title=title)

    def _maybe_classify(self, y, k, cutoffs):
        '''Helper method for classifying continuous data.
        '''
        rows, cols = y.shape
        if cutoffs is None:
            if self.fixed:
                # Pooled quantiles over the full n*t series.
                mcyb = mc.Quantiles(y.flatten(), k=k)
                yb = mcyb.yb.reshape(y.shape)
                cutoffs = mcyb.bins
                k = len(cutoffs)
                return yb, cutoffs[:-1], k
            else:
                # Per-period quantiles over the n cross-sectional units.
                yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in
                               np.arange(cols)]).transpose()
                return yb, None, k
        else:
            # User-defined cutoffs; append +inf to capture the open upper bin.
            cutoffs = list(cutoffs) + [np.inf]
            cutoffs = np.array(cutoffs)
            yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape(
                y.shape)
            k = len(cutoffs)
            return yb, cutoffs[:-1], k
def chi2(T1, T2):
    """
    chi-squared test of difference between two transition matrices.

    Parameters
    ----------
    T1    : array
            (k, k), matrix of transitions (counts).
    T2    : array
            (k, k), matrix of transitions (counts) to use to form the
            probabilities under the null.

    Returns
    -------
          : tuple
            (3 elements).
            (chi2 value, pvalue, degrees of freedom).

    Examples
    --------
    >>> import libpysal
    >>> from giddy.markov import Spatial_Markov, chi2
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> years = list(range(1929, 2010))
    >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
    >>> rpci = pci/(pci.mean(axis=0))
    >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
    >>> w.transform='r'
    >>> sm = Spatial_Markov(rpci, w, fixed=True)
    >>> T1 = sm.T[0]
    >>> T1
    array([[562.,  22.,   1.,   0.],
           [ 12., 201.,  22.,   0.],
           [  0.,  17.,  97.,   4.],
           [  0.,   0.,   3.,  19.]])
    >>> T2 = sm.transitions
    >>> T2
    array([[884.,  77.,   4.,   0.],
           [ 68., 794.,  87.,   3.],
           [  1.,  92., 815.,  51.],
           [  1.,   0.,  60., 903.]])
    >>> chi2(T1,T2)
    (23.39728441473295, 0.005363116704861337, 9)

    Notes
    -----
    Second matrix is used to form the probabilities under the null.
    Marginal sums from first matrix are distributed across these probabilities
    under the null. In other words the observed transitions are taken from T1
    while the expected transitions are formed as follows

    .. math::

            E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}

    Degrees of freedom corrected for any rows in either T1 or T2 that have
    zero total transitions.
    """
    rs2 = T2.sum(axis=1)
    rs1 = T1.sum(axis=1)
    rs2nz = rs2 > 0
    rs1nz = rs1 > 0
    # dof counts only rows that actually have transitions in each matrix.
    dof1 = sum(rs1nz)
    dof2 = sum(rs2nz)
    rs2 = rs2 + (rs2 == 0)  # avoid division by zero for empty rows of T2
    dof = (dof1 - 1) * (dof2 - 1)
    # Row-conditional probabilities under the null (from T2), then expected
    # counts: T1's row totals spread according to those probabilities.
    # (Replaces deprecated np.matrix arithmetic with explicit broadcasting;
    # results are numerically identical.)
    p = T2 / rs2[:, None]
    E = rs1[:, None] * p
    num = (T1 - E) ** 2
    E = E + (E == 0)  # zero expected cells contribute nothing to the sum
    stat = (num / E).sum()
    pvalue = 1 - stats.chi2.cdf(stat, dof)
    return stat, pvalue, dof
class LISA_Markov(Markov):
"""
Markov for Local Indicators of Spatial Association
Parameters
----------
y : array
(n, t), n cross-sectional units observed over t time
periods.
w : W
spatial weights object.
permutations : int, optional
number of permutations used to determine LISA
significance (the default is 0).
significance_level : float, optional
significance level (two-sided) for filtering
significant LISA endpoints in a transition (the
default is 0.05).
geoda_quads : bool
If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4.
If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4.
(the default is False).
Attributes
----------
chi_2 : tuple
(3 elements)
(chi square test statistic, p-value, degrees of freedom) for
test that dynamics of y are independent of dynamics of wy.
classes : array
(4, 1)
1=HH, 2=LH, 3=LL, 4=HL (own, lag)
1=HH, 2=LL, 3=LH, 4=HL (own, lag) (if geoda_quads=True)
expected_t : array
(4, 4), expected number of transitions under the null that
dynamics of y are independent of dynamics of wy.
move_types : matrix
(n, t-1), integer values indicating which type of LISA
transition occurred (q1 is quadrant in period 1, q2 is
quadrant in period 2).
.. table:: Move Types
== == =========
q1 q2 move_type
== == =========
1 1 1
1 2 2
1 3 3
1 4 4
2 1 5
2 2 6
2 3 7
2 4 8
3 1 9
3 2 10
3 3 11
3 4 12
4 1 13
4 2 14
4 3 15
4 4 16
== == =========
p : array
(k, k), transition probability matrix.
p_values : matrix
(n, t), LISA p-values for each end point (if permutations >
0).
significant_moves : matrix
(n, t-1), integer values indicating the type and
significance of a LISA transition. st = 1 if
significant in period t, else st=0 (if permutations >
0).
.. Table:: Significant Moves1
=============== ===================
(s1,s2) move_type
=============== ===================
(1,1) [1, 16]
(1,0) [17, 32]
(0,1) [33, 48]
(0,0) [49, 64]
=============== ===================
.. Table:: Significant Moves2
== == == == =========
q1 q2 s1 s2 move_type
== == == == =========
1 1 1 1 1
1 2 1 1 2
1 3 1 1 3
1 4 1 1 4
2 1 1 1 5
2 2 1 1 6
2 3 1 1 7
2 4 1 1 8
3 1 1 1 9
3 2 1 1 10
3 3 1 1 11
3 4 1 1 12
4 1 1 1 13
4 2 1 1 14
4 3 1 1 15
4 4 1 1 16
1 1 1 0 17
1 2 1 0 18
. . . . .
. . . . .
4 3 1 0 31
4 4 1 0 32
1 1 0 1 33
1 2 0 1 34
. . . . .
. . . . .
4 3 0 1 47
4 4 0 1 48
1 1 0 0 49
1 2 0 0 50
. . . . .
. . . . .
4 3 0 0 63
4 4 0 0 64
== == == == =========
steady_state : array
(k, ), ergodic distribution.
transitions : array
(4, 4), count of transitions between each state i and j.
spillover : array
(n, 1) binary array, locations that were not part of a
cluster in period 1 but joined a prexisting cluster in
period 2.
Examples
--------
>>> import libpysal
>>> import numpy as np
>>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> lm = LISA_Markov(pci,w)
>>> lm.classes
array([1, 2, 3, 4])
>>> lm.steady_state
array([0.28561505, 0.14190226, 0.40493672, 0.16754598])
>>> lm.transitions
array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
[4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
[5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
[3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])
>>> lm.p
array([[0.92985458, 0.03763901, 0.00342173, 0.02908469],
[0.07481752, 0.85766423, 0.06569343, 0.00182482],
[0.00333333, 0.02266667, 0.948 , 0.026 ],
[0.04815409, 0.00160514, 0.06420546, 0.88603531]])
>>> lm.move_types[0,:3]
array([11, 11, 11])
>>> lm.move_types[0,-3:]
array([11, 11, 11])
Now consider only moves with one, or both, of the LISA end points being
significant
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> lm_random.significant_moves[0, :3]
array([11, 11, 11])
>>> lm_random.significant_moves[0,-3:]
array([59, 43, 27])
Any value less than 49 indicates at least one of the LISA end points was
significant. So for example, the first spatial unit experienced a
transition of type 11 (LL, LL) during the first three and last tree
intervals (according to lm.move_types), however, the last three of these
transitions involved insignificant LISAS in both the start and ending year
of each transition.
Test whether the moves of y are independent of the moves of wy
>>> "Chi2: %8.3f, p: %5.2f, dof: %d" % lm.chi_2
'Chi2: 1058.208, p: 0.00, dof: 9'
Actual transitions of LISAs
>>> lm.transitions
array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
[4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
[5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
[3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])
Expected transitions of LISAs under the null y and wy are moving
independently of one another
>>> lm.expected_t
array([[1.12328098e+03, 1.15377356e+01, 3.47522158e-01, 3.38337644e+01],
[3.50272664e+00, 5.28473882e+02, 1.59178880e+01, 1.05503814e-01],
[1.53878082e-01, 2.32163556e+01, 1.46690710e+03, 9.72266513e+00],
[9.60775143e+00, 9.86856346e-02, 6.23537392e+00, 6.07058189e+02]])
If the LISA classes are to be defined according to GeoDa, the `geoda_quad`
option has to be set to true
>>> lm.q[0:5,0]
array([3, 2, 3, 1, 4])
>>> lm = LISA_Markov(pci,w, geoda_quads=True)
>>> lm.q[0:5,0]
array([2, 3, 2, 1, 4])
"""
def __init__(self, y, w, permutations=0,
significance_level=0.05, geoda_quads=False):
y = y.transpose()
pml = Moran_Local
gq = geoda_quads
ml = ([pml(yi, w, permutations=permutations, geoda_quads=gq)
for yi in y])
q = np.array([mli.q for mli in ml]).transpose()
classes = np.arange(1, 5) # no guarantee all 4 quadrants are visited
Markov.__init__(self, q, classes)
self.q = q
self.w = w
n, k = q.shape
k -= 1
self.significance_level = significance_level
move_types = np.zeros((n, k), int)
sm = np.zeros((n, k), int)
self.significance_level = significance_level
if permutations > 0:
p = np.array([mli.p_z_sim for mli in ml]).transpose()
self.p_values = p
pb = p <= significance_level
else:
pb = np.zeros_like(y.T)
for t in range(k):
origin = q[:, t]
dest = q[:, t + 1]
p_origin = pb[:, t]
p_dest = pb[:, t + 1]
for r in range(n):
move_types[r, t] = TT[origin[r], dest[r]]
key = (origin[r], dest[r], p_origin[r], p_dest[r])
sm[r, t] = MOVE_TYPES[key]
if permutations > 0:
self.significant_moves = sm
self.move_types = move_types
# null of own and lag moves being independent
ybar = y.mean(axis=0)
r = y / ybar
ylag = np.array([weights.lag_spatial(w, yt) for yt in y])
rlag = ylag / ybar
rc = r < 1.
rlagc = rlag < 1.
markov_y = Markov(rc)
markov_ylag = Markov(rlagc)
A = np.matrix([[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0]])
kp = A * np.kron(markov_y.p, markov_ylag.p) * A.T
trans = self.transitions.sum(axis=1)
t1 = np.diag(trans) * kp
t2 = self.transitions
t1 = t1.getA()
self.chi_2 = chi2(t2, t1)
self.expected_t = t1
self.permutations = permutations
    def spillover(self, quadrant=1, neighbors_on=False):
        """
        Detect spillover locations for diffusion in LISA Markov.

        Parameters
        ----------
        quadrant     : int
                       which quadrant in the scatterplot should form the core
                       of a cluster.
        neighbors_on : binary
                       If false, then only the 1st order neighbors of a core
                       location are included in the cluster.
                       If true, neighbors of cluster core 1st order neighbors
                       are included in the cluster.

        Returns
        -------
        results      : dictionary
                       two keys - values pairs:
                       'components' - array (n, t)
                       values are integer ids (starting at 1) indicating which
                       component/cluster observation i in period t belonged to.
                       'spill_over' - array (n, t-1)
                       binary values indicating if the location was a
                       spill-over location that became a new member of a
                       previously existing cluster.

        Examples
        --------
        >>> import libpysal
        >>> from giddy.markov import LISA_Markov
        >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
        >>> years = list(range(1929, 2010))
        >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
        >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
        >>> np.random.seed(10)
        >>> lm_random = LISA_Markov(pci, w, permutations=99)
        >>> r = lm_random.spillover()
        >>> (r['components'][:, 12] > 0).sum()
        17
        >>> (r['components'][:, 13]>0).sum()
        23
        >>> (r['spill_over'][:,12]>0).sum()
        6

        Including neighbors of core neighbors

        >>> rn = lm_random.spillover(neighbors_on=True)
        >>> (rn['components'][:, 12] > 0).sum()
        26
        >>> (rn["components"][:, 13] > 0).sum()
        34
        >>> (rn["spill_over"][:, 12] > 0).sum()
        8
        """
        n, k = self.q.shape
        # Spillovers are only defined when pseudo p-values are available.
        if self.permutations:
            spill_over = np.zeros((n, k - 1))
            components = np.zeros((n, k))
            i2id = {}  # handle string keys
            for key in list(self.w.neighbors.keys()):
                idx = self.w.id2i[key]
                i2id[idx] = key
            # Significant LISAs falling in the target quadrant, per period.
            sig_lisas = (self.q == quadrant) \
                * (self.p_values <= self.significance_level)
            sig_ids = [np.nonzero(
                sig_lisas[:, i])[0].tolist() for i in range(k)]

            neighbors = self.w.neighbors
            for t in range(k - 1):
                s1 = sig_ids[t]
                s2 = sig_ids[t + 1]
                # Build the cluster graph for period t: cores plus their
                # neighbors (and optionally neighbors-of-neighbors).
                g1 = Graph(undirected=True)
                for i in s1:
                    for neighbor in neighbors[i2id[i]]:
                        g1.add_edge(i2id[i], neighbor, 1.0)
                        if neighbors_on:
                            for nn in neighbors[neighbor]:
                                g1.add_edge(neighbor, nn, 1.0)
                components1 = g1.connected_components(op=gt)
                components1 = [list(c.nodes) for c in components1]
                # Same construction for period t + 1.
                g2 = Graph(undirected=True)
                for i in s2:
                    for neighbor in neighbors[i2id[i]]:
                        g2.add_edge(i2id[i], neighbor, 1.0)
                        if neighbors_on:
                            for nn in neighbors[neighbor]:
                                g2.add_edge(neighbor, nn, 1.0)
                components2 = g2.connected_components(op=gt)
                components2 = [list(c.nodes) for c in components2]
                # Flatten component membership for both periods.
                c2 = []
                c1 = []
                for c in components2:
                    c2.extend(c)
                for c in components1:
                    c1.extend(c)

                # Locations in a period t+1 cluster that were not in any
                # period t cluster are candidate spillovers.
                new_ids = [j for j in c2 if j not in c1]
                spill_ids = []
                for j in new_ids:
                    # find j's component in period 2
                    cj = [c for c in components2 if j in c][0]
                    # for members of j's component in period 2, check if they
                    # belonged to any components in period 1
                    for i in cj:
                        if i in c1:
                            spill_ids.append(j)
                            break
                for spill_id in spill_ids:
                    id = self.w.id2i[spill_id]
                    spill_over[id, t] = 1
                # Record component membership (1-based ids) for period t.
                for c, component in enumerate(components1):
                    for i in component:
                        ii = self.w.id2i[i]
                        components[ii, t] = c + 1
            results = {}
            results['components'] = components
            results['spill_over'] = spill_over

        else:
            results = None
        return results
def prais(pmat):
    """
    Prais conditional mobility measure.

    Parameters
    ----------
    pmat : matrix
           (k, k), Markov probability transition matrix.

    Returns
    -------
    pr   : array
           (k, ), conditional mobility measures for each of the k classes.

    Notes
    -----
    Prais' conditional mobility measure for class i is defined as

    .. math::

            pr_i = 1 - p_{i,i}

    i.e. one minus the probability of remaining in class i.
    """
    # Mobility is the complement of the diagonal (staying) probabilities.
    return 1 - np.diag(np.asarray(pmat))
def homogeneity(transition_matrices, regime_names=[], class_names=[],
                title="Markov Homogeneity Test"):
    """
    Test for homogeneity of Markov transition probabilities across regimes.

    Parameters
    ----------
    transition_matrices : list
                          of transition matrices for regimes, all matrices
                          must have same size (r, c). r is the number of rows
                          in the transition matrix and c is the number of
                          columns in the transition matrix.
    regime_names        : sequence
                          Labels for the regimes.
    class_names         : sequence
                          Labels for the classes/states of the Markov chain.
    title               : string
                          name of test.

    Returns
    -------
                        : implicit
                          an instance of Homogeneity_Results.
    """
    # Thin convenience wrapper; all computation happens in the results class.
    results = Homogeneity_Results(
        transition_matrices,
        regime_names=regime_names,
        class_names=class_names,
        title=title,
    )
    return results
class Homogeneity_Results:
    """
    Wrapper class to present homogeneity results.

    Parameters
    ----------
    transition_matrices : list
                          of transition matrices for regimes, all matrices
                          must have same size (r, c). r is the number of rows
                          in the transition matrix and c is the number of
                          columns in the transition matrix.
    regime_names        : sequence
                          Labels for the regimes.
    class_names         : sequence
                          Labels for the classes/states of the Markov chain.
    title               : string
                          Title of the table.

    Attributes
    -----------

    Notes
    -----
    Degrees of freedom adjustment follow the approach in :cite:`Bickenbach2003`.

    Examples
    --------
    See Spatial_Markov above.

    """

    def __init__(self, transition_matrices, regime_names=[], class_names=[],
                 title="Markov Homogeneity Test"):
        # Note: the list defaults are never mutated, so sharing them across
        # calls is harmless here.
        self._homogeneity(transition_matrices)
        self.regime_names = regime_names
        self.class_names = class_names
        self.title = title

    def _homogeneity(self, transition_matrices):
        # Compute the Q (chi-squared) and LR homogeneity statistics.
        # NOTE(review): np.zeros_like(M) inherits M's dtype, so this assumes
        # the transition matrices are floats (as produced by Markov) —
        # integer-count input would silently truncate p_ijm; verify callers.
        # form null transition probability matrix
        M = np.array(transition_matrices)
        m, r, k = M.shape
        self.k = k
        B = np.zeros((r, m))
        T = M.sum(axis=0)  # pooled transition counts across all regimes
        self.t_total = T.sum()
        n_i = T.sum(axis=1)  # total transitions out of each state (pooled)
        A_i = (T > 0).sum(axis=1)  # non-zero cells per row of pooled matrix
        A_im = np.zeros((r, m))
        # null (pooled) transition probabilities; the (n_i == 0) term guards
        # division by zero for empty rows
        p_ij = np.dot(np.diag(1. / (n_i + (n_i == 0) * 1.)), T)
        den = p_ij + 1. * (p_ij == 0)  # safe denominator for the Q ratio
        b_i = np.zeros_like(A_i)
        p_ijm = np.zeros_like(M)
        # get dimensions
        m, n_rows, n_cols = M.shape
        m = 0  # reused below as the running regime index
        Q = 0.0
        LR = 0.0
        lr_table = np.zeros_like(M)
        q_table = np.zeros_like(M)

        for nijm in M:
            nim = nijm.sum(axis=1)  # row totals for this regime
            B[:, m] = 1. * (nim > 0)
            b_i = b_i + 1. * (nim > 0)
            # regime-specific transition probabilities
            p_ijm[m] = np.dot(np.diag(1. / (nim + (nim == 0) * 1.)), nijm)
            num = (p_ijm[m] - p_ij)**2
            ratio = num / den
            # cell-wise contributions to the Q statistic
            qijm = np.dot(np.diag(nim), ratio)
            q_table[m] = qijm
            Q = Q + qijm.sum()
            # only use nonzero pijm in lr test
            mask = (nijm > 0) * (p_ij > 0)
            A_im[:, m] = (nijm > 0).sum(axis=1)
            unmask = 1.0 * (mask == 0)
            # masked cells contribute log(1) = 0 to the LR sum
            ratio = (mask * p_ijm[m] + unmask) / (mask * p_ij + unmask)
            lr = nijm * np.log(ratio)
            LR = LR + lr.sum()
            lr_table[m] = 2 * lr
            m += 1

        # b_i is the number of regimes that have non-zero observations in row i
        # A_i is the number of non-zero elements in row i of the aggregated
        # transition matrix
        self.dof = int(((b_i - 1) * (A_i - 1)).sum())
        self.Q = Q
        self.Q_p_value = 1 - stats.chi2.cdf(self.Q, self.dof)
        self.LR = LR * 2.  # LR statistic is 2 * sum of the log-ratios
        self.LR_p_value = 1 - stats.chi2.cdf(self.LR, self.dof)
        self.A = A_i
        self.A_im = A_im
        self.B = B
        self.b_i = b_i
        self.LR_table = lr_table
        self.Q_table = q_table
        self.m = m
        self.p_h0 = p_ij
        self.p_h1 = p_ijm

    def summary(self, file_name=None, title="Markov Homogeneity Test"):
        """
        Print a formatted summary of the homogeneity test and, if
        ``file_name`` is given, also write a LaTeX rendering of the table.

        Parameters
        ----------
        file_name : string, optional
                    path of a file to write the LaTeX table to.
        title     : string
                    title used for the printed/LaTeX table.
        """
        regime_names = ["%d" % i for i in range(self.m)]
        if self.regime_names:
            regime_names = self.regime_names

        cols = ["P(%s)" % str(regime) for regime in regime_names]
        if not self.class_names:
            self.class_names = list(range(self.k))

        max_col = max([len(col) for col in cols])
        col_width = max([5, max_col])  # probabilities have 5 chars
        n_tabs = self.k
        width = n_tabs * 4 + (self.k + 1) * col_width
        lead = "-" * width
        head = title.center(width)
        contents = [lead, head, lead]
        l = "Number of regimes: %d" % int(self.m)
        k = "Number of classes: %d" % int(self.k)
        r = "Regime names: "
        r += ", ".join(regime_names)
        t = "Number of transitions: %d" % int(self.t_total)
        contents.append(k)
        contents.append(t)
        contents.append(l)
        contents.append(r)
        contents.append(lead)
        h = "%7s %20s %20s" % ('Test', 'LR', 'Chi-2')
        contents.append(h)
        stat = "%7s %20.3f %20.3f" % ('Stat.', self.LR, self.Q)
        contents.append(stat)
        stat = "%7s %20d %20d" % ('DOF', self.dof, self.dof)
        contents.append(stat)
        stat = "%7s %20.3f %20.3f" % ('p-value', self.LR_p_value,
                                      self.Q_p_value)
        contents.append(stat)
        print(("\n".join(contents)))
        print(lead)

        cols = ["P(%s)" % str(regime) for regime in self.regime_names]
        if not self.class_names:
            self.class_names = list(range(self.k))
        cols.extend(["%s" % str(cname) for cname in self.class_names])

        max_col = max([len(col) for col in cols])
        col_width = max([5, max_col])  # probabilities have 5 chars
        # Print the null (pooled) transition matrix; rows are also collected
        # as '&'-joined strings for the optional LaTeX output below.
        p0 = []
        line0 = ['{s: <{w}}'.format(s="P(H0)", w=col_width)]
        line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname in
                       self.class_names]))
        print((" ".join(line0)))
        p0.append("&".join(line0))
        for i, row in enumerate(self.p_h0):
            line = ["%*s" % (col_width, str(self.class_names[i]))]
            line.extend(["%*.3f" % (col_width, v) for v in row])
            print((" ".join(line)))
            p0.append("&".join(line))
        pmats = [p0]

        print(lead)
        # One transition-probability table per regime.
        for r, p1 in enumerate(self.p_h1):
            p0 = []
            line0 = ['{s: <{w}}'.format(s="P(%s)" %
                                        regime_names[r], w=col_width)]
            line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname
                           in self.class_names]))
            print((" ".join(line0)))
            p0.append("&".join(line0))
            for i, row in enumerate(p1):
                line = ["%*s" % (col_width, str(self.class_names[i]))]
                line.extend(["%*.3f" % (col_width, v) for v in row])
                print((" ".join(line)))
                p0.append("&".join(line))
            pmats.append(p0)
            print(lead)

        if file_name:
            # Assemble a LaTeX tabular version of the same summary.
            k = self.k
            ks = str(k + 1)
            with open(file_name, 'w') as f:
                c = []
                fmt = "r" * (k + 1)
                s = "\\begin{tabular}{|%s|}\\hline\n" % fmt
                s += "\\multicolumn{%s}{|c|}{%s}" % (ks, title)
                c.append(s)
                s = "Number of classes: %d" % int(self.k)
                c.append("\\hline\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Number of transitions: %d" % int(self.t_total)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Number of regimes: %d" % int(self.m)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Regime names: "
                s += ", ".join(regime_names)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "\\hline\\multicolumn{2}{|l}{%s}" % ("Test")
                s += "&\\multicolumn{2}{r}{LR}&\\multicolumn{2}{r|}{Q}"
                c.append(s)
                s = "Stat."
                s = "\\multicolumn{2}{|l}{%s}" % (s)
                s += "&\\multicolumn{2}{r}{%.3f}" % self.LR
                s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q
                c.append(s)
                s = "\\multicolumn{2}{|l}{%s}" % ("DOF")
                s += "&\\multicolumn{2}{r}{%d}" % int(self.dof)
                s += "&\\multicolumn{2}{r|}{%d}" % int(self.dof)
                c.append(s)
                s = "\\multicolumn{2}{|l}{%s}" % ("p-value")
                s += "&\\multicolumn{2}{r}{%.3f}" % self.LR_p_value
                s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q_p_value
                c.append(s)
                s1 = "\\\\\n".join(c)
                s1 += "\\\\\n"
                c = []
                for mat in pmats:
                    c.append("\\hline\n")
                    for row in mat:
                        c.append(row + "\\\\\n")
                c.append("\\hline\n")
                c.append("\\end{tabular}")
                s2 = "".join(c)
                f.write(s1 + s2)
class FullRank_Markov:
    """
    Full Rank Markov in which ranks are considered as Markov states rather
    than quantiles or other discretized classes. This is one way to avoid
    issues associated with discretization.

    Parameters
    ----------
    y            : array
                   (n, t) with t>>n, one row per observation (n total),
                   one column recording the value of each observation,
                   with as many columns as time periods.

    Attributes
    ----------
    ranks        : array
                   ranks of the original y array (by columns): higher values
                   rank higher, e.g. the largest value in a column ranks 1.
    p            : array
                   (n, n), transition probability matrix for Full
                   Rank Markov.
    steady_state : array
                   (n, ), ergodic distribution.
    transitions  : array
                   (n, n), count of transitions between each rank i and j
    fmpt         : array
                   (n, n), first mean passage times.
    sojourn_time : array
                   (n, ), sojourn times.

    Notes
    -----
    Refer to :cite:`Rey2014a` Equation (11) for details. Ties are resolved by
    assigning distinct ranks, corresponding to the order that the values occur
    in each cross section.

    Examples
    --------
    US nominal per capita income 48 states 81 years 1929-2009

    >>> from giddy.markov import FullRank_Markov
    >>> import libpysal as ps
    >>> import numpy as np
    >>> f = ps.io.open(ps.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]).transpose()
    >>> m = FullRank_Markov(pci)
    >>> m.p[0, :5]
    array([0.825 , 0.0625, 0.0625, 0.025 , 0.025 ])
    >>> m.fmpt[0, :5]
    array([48.        , 87.96280048, 68.1089084 , 58.83306575, 41.77250827])
    >>> m.sojourn_time[:5]
    array([5.71428571, 2.75862069, 2.22222222, 1.77777778, 1.66666667])
    """

    def __init__(self, y):
        data = np.asarray(y)
        # Break ties by order of occurrence within each cross section
        # (column), then flip so the largest value receives rank 1.
        ascending = np.array(
            [rankdata(col, method='ordinal') for col in data.T]).T
        self.ranks = data.shape[0] - ascending + 1
        # Treat the n distinct ranks as the states of a classic Markov chain.
        chain = Markov(self.ranks)
        self.p = chain.p
        self.transitions = chain.transitions

    @property
    def steady_state(self):
        # ergodic distribution, computed lazily and memoized
        try:
            return self._steady_state
        except AttributeError:
            self._steady_state = STEADY_STATE(self.p)
            return self._steady_state

    @property
    def fmpt(self):
        # first mean passage times, computed lazily and memoized
        try:
            return self._fmpt
        except AttributeError:
            self._fmpt = fmpt(self.p)
            return self._fmpt

    @property
    def sojourn_time(self):
        # sojourn times, computed lazily and memoized
        try:
            return self._st
        except AttributeError:
            self._st = sojourn_time(self.p)
            return self._st
def sojourn_time(p):
    """
    Calculate sojourn time based on a given transition probability matrix.

    Parameters
    ----------
    p        : array
               (k, k), a Markov transition probability matrix.

    Returns
    -------
             : array
               (k, ), sojourn times. Each element is the expected time a
               Markov chain spends in each state before leaving that state.
               Absorbing states (p[i, i] == 1) yield ``inf``.

    Notes
    -----
    Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
    chains.

    Examples
    --------
    >>> from giddy.markov import sojourn_time
    >>> import numpy as np
    >>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
    >>> sojourn_time(p)
    array([2., 1., 2.])
    """
    p = np.asarray(p)
    pii = p.diagonal()

    if not (1 - pii).all():
        print("Sojourn times are infinite for absorbing states!")
    # Sojourn time in state i is geometric with mean 1 / (1 - p_ii).
    # Silence the divide-by-zero RuntimeWarning so absorbing states return
    # inf cleanly (the explicit message above already alerts the user).
    with np.errstate(divide='ignore'):
        return 1 / (1 - pii)
class GeoRank_Markov:
    """
    Geographic Rank Markov.

    Geographic units are considered as Markov states.

    Parameters
    ----------
    y            : array
                   (n, t) with t>>n, one row per observation (n total),
                   one column recording the value of each observation,
                   with as many columns as time periods.

    Attributes
    ----------
    p            : array
                   (n, n), transition probability matrix for
                   geographic rank Markov.
    steady_state : array
                   (n, ), ergodic distribution.
    transitions  : array
                   (n, n), count of rank transitions between each
                   geographic unit i and j.
    fmpt         : array
                   (n, n), first mean passage times.
    sojourn_time : array
                   (n, ), sojourn times.

    Notes
    -----
    Refer to :cite:`Rey2014a` Equation (13)-(16) for details. Ties are
    resolved by assigning distinct ranks, corresponding to the order
    that the values occur in each cross section.

    Examples
    --------
    US nominal per capita income 48 states 81 years 1929-2009

    >>> from giddy.markov import GeoRank_Markov
    >>> import libpysal as ps
    >>> import numpy as np
    >>> f = ps.io.open(ps.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]).transpose()
    >>> m = GeoRank_Markov(pci)
    >>> m.transitions
    array([[38.,  0.,  8., ...,  0.,  0.,  0.],
           [ 0., 15.,  0., ...,  0.,  1.,  0.],
           [ 6.,  0., 44., ...,  5.,  0.,  0.],
           ...,
           [ 2.,  0.,  5., ..., 34.,  0.,  0.],
           [ 0.,  0.,  0., ...,  0., 18.,  2.],
           [ 0.,  0.,  0., ...,  0.,  3., 14.]])
    """

    def __init__(self, y):
        y = np.asarray(y)
        # resolve ties: All values are given a distinct rank, corresponding
        # to the order that the values occur in each cross section.
        # (The previous unused local ``n = y.shape[0]`` has been removed.)
        ranks = np.array([rankdata(col, method='ordinal') for col in y.T]).T
        # geo_ranks[r, t] is the (1-based) index of the geographic unit
        # holding rank r+1 in period t: the units themselves are the states.
        geo_ranks = np.argsort(ranks, axis=0) + 1
        grm = Markov(geo_ranks)
        self.p = grm.p
        self.transitions = grm.transitions

    @property
    def steady_state(self):
        # ergodic distribution, computed lazily and cached
        if not hasattr(self, '_steady_state'):
            self._steady_state = STEADY_STATE(self.p)
        return self._steady_state

    @property
    def fmpt(self):
        # first mean passage times, computed lazily and cached
        if not hasattr(self, '_fmpt'):
            self._fmpt = fmpt(self.p)
        return self._fmpt

    @property
    def sojourn_time(self):
        # sojourn times, computed lazily and cached
        if not hasattr(self, '_st'):
            self._st = sojourn_time(self.p)
        return self._st
|
pysal/giddy | giddy/markov.py | prais | python | def prais(pmat):
pmat = np.array(pmat)
pr = 1 - np.diag(pmat)
return pr | Prais conditional mobility measure.
Parameters
----------
pmat : matrix
(k, k), Markov probability transition matrix.
Returns
-------
pr : matrix
(1, k), conditional mobility measures for each of the k classes.
Notes
-----
Prais' conditional mobility measure for a class is defined as:
.. math::
pr_i = 1 - p_{i,i}
Examples
--------
>>> import numpy as np
>>> import libpysal
>>> from giddy.markov import Markov,prais
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.transitions
array([[729., 71., 1., 0., 0.],
[ 72., 567., 80., 3., 0.],
[ 0., 81., 631., 86., 2.],
[ 0., 3., 86., 573., 56.],
[ 0., 0., 1., 57., 741.]])
>>> m.p
array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
>>> prais(m.p)
array([0.08988764, 0.21468144, 0.21125 , 0.20194986, 0.07259074]) | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1428-L1477 | null | """
Markov based methods for spatial dynamics.
"""
__author__ = "Sergio J. Rey <sjsrey@gmail.com>, Wei Kang <weikang9009@gmail.com>"
__all__ = ["Markov", "LISA_Markov", "Spatial_Markov", "kullback",
"prais", "homogeneity", "FullRank_Markov", "sojourn_time",
"GeoRank_Markov"]
import numpy as np
from .ergodic import fmpt
from .ergodic import steady_state as STEADY_STATE
from .components import Graph
from scipy import stats
from scipy.stats import rankdata
from operator import gt
from libpysal import weights
from esda.moran import Moran_Local
import mapclassify as mc
import itertools
# TT predefine LISA transitions
# TT[i,j] is the transition type from i to j
# i = quadrant in period 0
# j = quadrant in period 1
# uses one offset so first row and col of TT are ignored
# Transition types run 1..16 over the 4x4 quadrant pairs, in row-major order.
TT = np.zeros((5, 5), int)
c = 1
for i in range(1, 5):
    for j in range(1, 5):
        TT[i, j] = c
        c += 1
# MOVE_TYPES is a dictionary that returns the move type of a LISA transition
# filtered on the significance of the LISA end points
# True indicates significant LISA in a particular period
# e.g. a key of (1, 3, True, False) indicates a significant LISA located in
# quadrant 1 in period 0 moved to quadrant 3 in period 1 but was not
# significant in quadrant 3.
MOVE_TYPES = {}
cases = (True, False)
sig_keys = [(sig0, sig1) for sig0 in cases for sig1 in cases]

# Each of the 4 significance patterns gets its own block of 16 move types:
# (True, True) -> 1..16, (True, False) -> 17..32,
# (False, True) -> 33..48, (False, False) -> 49..64.
# (The outer loop variable was renamed from ``i`` to ``block`` — it was
# previously shadowed by the inner quadrant loop variable.)
for block, sig_key in enumerate(sig_keys):
    c = 1 + block * 16
    for i in range(1, 5):
        for j in range(1, 5):
            MOVE_TYPES[(i, j, sig_key[0], sig_key[1])] = c
            c += 1
class Markov(object):
    """
    Classic Markov transition matrices.

    Parameters
    ----------
    class_ids    : array
                   (n, t), one row per observation, one column recording the
                   state of each observation, with as many columns as time
                   periods.
    classes      : array
                   (k, 1), all different classes (bins) of the matrix.

    Attributes
    ----------
    p            : array
                   (k, k), transition probability matrix.
    steady_state : array
                   (k, ), ergodic distribution.
    transitions  : array
                   (k, k), count of transitions between each state i and j.

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.markov import Markov
    >>> c = [['b','a','c'],['c','c','a'],['c','b','c']]
    >>> c.extend([['a','a','b'], ['a','b','c']])
    >>> c = np.array(c)
    >>> m = Markov(c)
    >>> m.classes.tolist()
    ['a', 'b', 'c']
    >>> m.p
    array([[0.25      , 0.5       , 0.25      ],
           [0.33333333, 0.        , 0.66666667],
           [0.33333333, 0.33333333, 0.33333333]])
    >>> m.steady_state
    array([0.30769231, 0.28846154, 0.40384615])
    """

    def __init__(self, class_ids, classes=None):
        # Use the supplied state labels when given, otherwise infer the
        # sorted set of distinct labels from the data.
        if classes is not None:
            self.classes = classes
        else:
            self.classes = np.unique(class_ids)
        n, t = class_ids.shape
        k = len(self.classes)
        label_index = self.classes.tolist()
        counts = np.zeros((k, k))
        # Tally transitions between every pair of consecutive periods.
        for t0 in range(t - 1):
            start = class_ids[:, t0]
            end = class_ids[:, t0 + 1]
            for s0, s1 in zip(start, end):
                counts[label_index.index(s0), label_index.index(s1)] += 1
        self.transitions = counts
        # Row-normalize the counts; rows with no observed transitions are
        # left as zeros instead of producing NaNs.
        row_sum = counts.sum(axis=1)
        self.p = np.dot(np.diag(1 / (row_sum + (row_sum == 0))), counts)

    @property
    def steady_state(self):
        # ergodic distribution, computed lazily and memoized
        try:
            return self._steady_state
        except AttributeError:
            self._steady_state = STEADY_STATE(self.p)
            return self._steady_state
class Spatial_Markov(object):
"""
Markov transitions conditioned on the value of the spatial lag.
Parameters
----------
y : array
(n, t), one row per observation, one column per state of
each observation, with as many columns as time periods.
w : W
spatial weights object.
k : integer, optional
number of classes (quantiles) for input time series y.
Default is 4. If discrete=True, k is determined
endogenously.
m : integer, optional
number of classes (quantiles) for the spatial lags of
regional time series. Default is 4. If discrete=True,
m is determined endogenously.
permutations : int, optional
number of permutations for use in randomization based
inference (the default is 0).
fixed : bool, optional
If true, discretization are taken over the entire n*t
pooled series and cutoffs can be user-defined. If
cutoffs and lag_cutoffs are not given, quantiles are
used. If false, quantiles are taken each time period
over n. Default is True.
discrete : bool, optional
If true, categorical spatial lags which are most common
categories of neighboring observations serve as the
conditioning and fixed is ignored; if false, weighted
averages of neighboring observations are used. Default is
false.
cutoffs : array, optional
users can specify the discretization cutoffs for
continuous time series. Default is None, meaning that
quantiles will be used for the discretization.
lag_cutoffs : array, optional
users can specify the discretization cutoffs for the
spatial lags of continuous time series. Default is
None, meaning that quantiles will be used for the
discretization.
variable_name : string
name of variable.
Attributes
----------
class_ids : array
(n, t), discretized series if y is continuous. Otherwise
it is identical to y.
classes : array
(k, 1), all different classes (bins).
lclass_ids : array
(n, t), spatial lag series.
lclasses : array
(k, 1), all different classes (bins) for
spatial lags.
p : array
(k, k), transition probability matrix for a-spatial
Markov.
s : array
(k, 1), ergodic distribution for a-spatial Markov.
transitions : array
(k, k), counts of transitions between each state i and j
for a-spatial Markov.
T : array
(k, k, k), counts of transitions for each conditional
Markov. T[0] is the matrix of transitions for
observations with lags in the 0th quantile; T[k-1] is the
transitions for the observations with lags in the k-1th.
P : array
(k, k, k), transition probability matrix for spatial
Markov first dimension is the conditioned on the lag.
S : array
(k, k), steady state distributions for spatial Markov.
Each row is a conditional steady_state.
F : array
(k, k, k),first mean passage times.
First dimension is conditioned on the lag.
shtest : list
(k elements), each element of the list is a tuple for a
multinomial difference test between the steady state
distribution from a conditional distribution versus the
overall steady state distribution: first element of the
tuple is the chi2 value, second its p-value and the third
the degrees of freedom.
chi2 : list
(k elements), each element of the list is a tuple for a
chi-squared test of the difference between the
conditional transition matrix against the overall
transition matrix: first element of the tuple is the chi2
value, second its p-value and the third the degrees of
freedom.
x2 : float
sum of the chi2 values for each of the conditional tests.
Has an asymptotic chi2 distribution with k(k-1)(k-1)
degrees of freedom. Under the null that transition
probabilities are spatially homogeneous.
(see chi2 above)
x2_dof : int
degrees of freedom for homogeneity test.
x2_pvalue : float
pvalue for homogeneity test based on analytic.
distribution
x2_rpvalue : float
(if permutations>0)
pseudo p-value for x2 based on random spatial
permutations of the rows of the original transitions.
x2_realizations : array
(permutations,1), the values of x2 for the random
permutations.
Q : float
Chi-square test of homogeneity across lag classes based
on :cite:`Bickenbach2003`.
Q_p_value : float
p-value for Q.
LR : float
Likelihood ratio statistic for homogeneity across lag
classes based on :cite:`Bickenbach2003`.
LR_p_value : float
p-value for LR.
dof_hom : int
degrees of freedom for LR and Q, corrected for 0 cells.
Notes
-----
Based on :cite:`Rey2001`.
The shtest and chi2 tests should be used with caution as they are based on
classic theory assuming random transitions. The x2 based test is
preferable since it simulates the randomness under the null. It is an
experimental test requiring further analysis.
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov
>>> import numpy as np
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform = 'r'
Now we create a `Spatial_Markov` instance for the continuous relative per
capita income time series for 48 US lower states 1929-2009. The current
implementation allows users to classify the continuous incomes in a more
flexible way.
(1) Global quintiles to discretize the income data (k=5), and global
quintiles to discretize the spatial lags of incomes (m=5).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=5, variable_name='rpci')
We can examine the cutoffs for the incomes and cutoffs for the spatial lags
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.88973386, 0.95891917, 1.01469758, 1.1183566 ])
Obviously, they are slightly different.
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.96341463 0.0304878 0.00609756 0. 0. ]
[0.06040268 0.83221477 0.10738255 0. 0. ]
[0. 0.14 0.74 0.12 0. ]
[0. 0.03571429 0.32142857 0.57142857 0.07142857]
[0. 0. 0. 0.16666667 0.83333333]]
[[0.79831933 0.16806723 0.03361345 0. 0. ]
[0.0754717 0.88207547 0.04245283 0. 0. ]
[0.00537634 0.06989247 0.8655914 0.05913978 0. ]
[0. 0. 0.06372549 0.90196078 0.03431373]
[0. 0. 0. 0.19444444 0.80555556]]
[[0.84693878 0.15306122 0. 0. 0. ]
[0.08133971 0.78947368 0.1291866 0. 0. ]
[0.00518135 0.0984456 0.79274611 0.0984456 0.00518135]
[0. 0. 0.09411765 0.87058824 0.03529412]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.8852459 0.09836066 0. 0.01639344 0. ]
[0.03875969 0.81395349 0.13953488 0. 0.00775194]
[0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ]
[0. 0.02339181 0.12865497 0.75438596 0.09356725]
[0. 0. 0. 0.09661836 0.90338164]]
[[0.33333333 0.66666667 0. 0. 0. ]
[0.0483871 0.77419355 0.16129032 0.01612903 0. ]
[0.01149425 0.16091954 0.74712644 0.08045977 0. ]
[0. 0.01036269 0.06217617 0.89637306 0.03108808]
[0. 0. 0. 0.02352941 0.97647059]]
The probability of a poor state remaining poor is 0.963 if their
neighbors are in the 1st quintile and 0.798 if their neighbors are
in the 2nd quintile. The probability of a rich economy remaining
rich is 0.976 if their neighbors are in the 5th quintile, but if their
neighbors are in the 4th quintile this drops to 0.903.
The global transition probability matrix is estimated:
>>> print(sm.p)
[[0.91461837 0.07503234 0.00905563 0.00129366 0. ]
[0.06570302 0.82654402 0.10512484 0.00131406 0.00131406]
[0.00520833 0.10286458 0.79427083 0.09505208 0.00260417]
[0. 0.00913838 0.09399478 0.84856397 0.04830287]
[0. 0. 0. 0.06217617 0.93782383]]
The Q and likelihood ratio statistics are both significant indicating
the dynamics are not homogeneous across the lag classes:
>>> "%.3f"%sm.LR
'170.659'
>>> "%.3f"%sm.Q
'200.624'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
60
The long run distribution for states with poor (rich) neighbors has
0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in
the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the
fourth and 0.029 (0.337) in the fifth quintile.
>>> sm.S
array([[0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278],
[0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356],
[0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ],
[0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013],
[0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]])
States with incomes in the first quintile with neighbors in the
first quintile return to the first quartile after 2.298 years, after
leaving the first quintile. They enter the fourth quintile after
80.810 years after leaving the first quintile, on average.
Poor states within neighbors in the fourth quintile return to the
first quintile, on average, after 12.88 years, and would enter the
fourth quintile after 28.473 years.
>>> for f in sm.F:
... print(f)
...
[[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143]
[ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286]
[ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429]
[ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762]
[ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]]
[[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197]
[ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ]
[ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482]
[ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314]
[ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]]
[[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026]
[ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693]
[ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594]
[ 83.72288828 39.06666667 14.3 3.44668119 76.36702977]
[ 93.52288828 48.86666667 24.1 9.8 8.79255406]]
[[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142]
[ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423]
[117.76777159 23.03735526 3.94436301 15.0843986 43.57927247]
[127.89752089 32.4393006 14.56853107 4.44831643 31.63099455]
[138.24752089 42.7893006 24.91853107 10.35 4.05613474]]
[[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826]
[ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826]
[ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217]
[127.1407767 48.74107143 33.29605263 3.91777427 83.52173913]
[169.6407767 91.24107143 75.79605263 42.5 2.96521739]]
(2) Global quintiles to discretize the income data (k=5), and global
quartiles to discretize the spatial lags of incomes (m=4).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=4, variable_name='rpci')
We can also examine the cutoffs for the incomes and cutoffs for the spatial
lags:
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.91440247, 0.98583079, 1.08698351])
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.95708955 0.03544776 0.00746269 0. 0. ]
[0.05825243 0.83980583 0.10194175 0. 0. ]
[0. 0.1294964 0.76258993 0.10791367 0. ]
[0. 0.01538462 0.18461538 0.72307692 0.07692308]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.7421875 0.234375 0.0234375 0. 0. ]
[0.08550186 0.85130112 0.06319703 0. 0. ]
[0.00865801 0.06926407 0.86147186 0.05627706 0.004329 ]
[0. 0. 0.05363985 0.92337165 0.02298851]
[0. 0. 0. 0.13432836 0.86567164]]
[[0.95145631 0.04854369 0. 0. 0. ]
[0.06 0.79 0.145 0. 0.005 ]
[0.00358423 0.10394265 0.7921147 0.09677419 0.00358423]
[0. 0.01630435 0.13586957 0.75543478 0.0923913 ]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.16666667 0.66666667 0. 0.16666667 0. ]
[0.03488372 0.80232558 0.15116279 0.01162791 0. ]
[0.00840336 0.13445378 0.70588235 0.1512605 0. ]
[0. 0.01171875 0.08203125 0.87109375 0.03515625]
[0. 0. 0. 0.03434343 0.96565657]]
We now obtain 4 5*5 spatial lag conditioned transition probability
matrices instead of 5 as in case (1).
The Q and likelihood ratio statistics are still both significant.
>>> "%.3f"%sm.LR
'172.105'
>>> "%.3f"%sm.Q
'321.128'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
45
(3) We can also set the cutoffs for relative incomes and their
spatial lags manually.
For example, we want the defining cutoffs to be [0.8, 0.9, 1, 1.2],
meaning that relative incomes:
2.1 smaller than 0.8 : class 0
2.2 between 0.8 and 0.9: class 1
2.3 between 0.9 and 1.0 : class 2
2.4 between 1.0 and 1.2: class 3
2.5 larger than 1.2: class 4
>>> cc = np.array([0.8, 0.9, 1, 1.2])
>>> sm = Spatial_Markov(rpci, w, cutoffs=cc, lag_cutoffs=cc, variable_name='rpci')
>>> sm.cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.k
5
>>> sm.lag_cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.96703297 0.03296703 0. 0. 0. ]
[0.10638298 0.68085106 0.21276596 0. 0. ]
[0. 0.14285714 0.7755102 0.08163265 0. ]
[0. 0. 0.5 0.5 0. ]
[0. 0. 0. 0. 0. ]]
[[0.88636364 0.10606061 0.00757576 0. 0. ]
[0.04402516 0.89308176 0.06289308 0. 0. ]
[0. 0.05882353 0.8627451 0.07843137 0. ]
[0. 0. 0.13846154 0.86153846 0. ]
[0. 0. 0. 0. 1. ]]
[[0.78082192 0.17808219 0.02739726 0.01369863 0. ]
[0.03488372 0.90406977 0.05813953 0.00290698 0. ]
[0. 0.05919003 0.84735202 0.09034268 0.00311526]
[0. 0. 0.05811623 0.92985972 0.01202405]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.82692308 0.15384615 0. 0.01923077 0. ]
[0.0703125 0.7890625 0.125 0.015625 0. ]
[0.00295858 0.06213018 0.82248521 0.10946746 0.00295858]
[0. 0.00185529 0.07606679 0.88497217 0.03710575]
[0. 0. 0. 0.07803468 0.92196532]]
[[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0.06666667 0.9 0.03333333 0. ]
[0. 0. 0.05660377 0.90566038 0.03773585]
[0. 0. 0. 0.03932584 0.96067416]]
(4) Spatial_Markov also accept discrete time series and calculate
categorical spatial lags on which several transition probability matrices
are conditioned.
Let's still use the US state income time series to demonstrate. We first
discretize them into categories and then pass them to Spatial_Markov.
>>> import mapclassify as mc
>>> y = mc.Quantiles(rpci.flatten(), k=5).yb.reshape(rpci.shape)
>>> np.random.seed(5)
>>> sm = Spatial_Markov(y, w, discrete=True, variable_name='discretized rpci')
>>> sm.k
5
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.94787645 0.04440154 0.00772201 0. 0. ]
[0.08333333 0.81060606 0.10606061 0. 0. ]
[0. 0.12765957 0.79787234 0.07446809 0. ]
[0. 0.02777778 0.22222222 0.66666667 0.08333333]
[0. 0. 0. 0.33333333 0.66666667]]
[[0.888 0.096 0.016 0. 0. ]
[0.06049822 0.84341637 0.09608541 0. 0. ]
[0.00666667 0.10666667 0.81333333 0.07333333 0. ]
[0. 0. 0.08527132 0.86821705 0.04651163]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.65217391 0.32608696 0.02173913 0. 0. ]
[0.07446809 0.80851064 0.11170213 0. 0.00531915]
[0.01071429 0.1 0.76428571 0.11785714 0.00714286]
[0. 0.00552486 0.09392265 0.86187845 0.03867403]
[0. 0. 0. 0.13157895 0.86842105]]
[[0.91935484 0.06451613 0. 0.01612903 0. ]
[0.06796117 0.90291262 0.02912621 0. 0. ]
[0. 0.05755396 0.87769784 0.0647482 0. ]
[0. 0.02150538 0.10752688 0.80107527 0.06989247]
[0. 0. 0. 0.08064516 0.91935484]]
[[0.81818182 0.18181818 0. 0. 0. ]
[0.01754386 0.70175439 0.26315789 0.01754386 0. ]
[0. 0.14285714 0.73333333 0.12380952 0. ]
[0. 0.0042735 0.06837607 0.89316239 0.03418803]
[0. 0. 0. 0.03891051 0.96108949]]
"""
def __init__(self, y, w, k=4, m=4, permutations=0, fixed=True,
discrete=False, cutoffs=None, lag_cutoffs=None,
variable_name=None):
y = np.asarray(y)
self.fixed = fixed
self.discrete = discrete
self.cutoffs = cutoffs
self.m = m
self.lag_cutoffs = lag_cutoffs
self.variable_name = variable_name
if discrete:
merged = list(itertools.chain.from_iterable(y))
classes = np.unique(merged)
self.classes = classes
self.k = len(classes)
self.m = self.k
label_dict = dict(zip(classes, range(self.k)))
y_int = []
for yi in y:
y_int.append(list(map(label_dict.get, yi)))
self.class_ids = np.array(y_int)
self.lclass_ids = self.class_ids
else:
self.class_ids, self.cutoffs, self.k = self._maybe_classify(
y, k=k, cutoffs=self.cutoffs)
self.classes = np.arange(self.k)
classic = Markov(self.class_ids)
self.p = classic.p
self.transitions = classic.transitions
self.T, self.P = self._calc(y, w)
if permutations:
nrp = np.random.permutation
counter = 0
x2_realizations = np.zeros((permutations, 1))
for perm in range(permutations):
T, P = self._calc(nrp(y), w)
x2 = [chi2(T[i], self.transitions)[0] for i in range(self.k)]
x2s = sum(x2)
x2_realizations[perm] = x2s
if x2s >= self.x2:
counter += 1
self.x2_rpvalue = (counter + 1.0) / (permutations + 1.)
self.x2_realizations = x2_realizations
@property
def s(self):
if not hasattr(self, '_s'):
self._s = STEADY_STATE(self.p)
return self._s
@property
def S(self):
if not hasattr(self, '_S'):
S = np.zeros_like(self.p)
for i, p in enumerate(self.P):
S[i] = STEADY_STATE(p)
self._S = np.asarray(S)
return self._S
    @property
    def F(self):
        # First mean passage times: one (k, k) matrix per spatial-lag class,
        # computed lazily from the conditional transition matrices in self.P.
        # NOTE(review): np.asmatrix is deprecated (removed in NumPy 2.0) --
        # confirm whether fmpt accepts a plain ndarray before modernizing.
        if not hasattr(self, '_F'):
            F = np.zeros_like(self.P)
            for i, p in enumerate(self.P):
                F[i] = fmpt(np.asmatrix(p))
            self._F = np.asarray(F)
        return self._F
# bickenbach and bode tests
@property
def ht(self):
if not hasattr(self, '_ht'):
self._ht = homogeneity(self.T)
return self._ht
@property
def Q(self):
if not hasattr(self, '_Q'):
self._Q = self.ht.Q
return self._Q
@property
def Q_p_value(self):
self._Q_p_value = self.ht.Q_p_value
return self._Q_p_value
@property
def LR(self):
self._LR = self.ht.LR
return self._LR
@property
def LR_p_value(self):
self._LR_p_value = self.ht.LR_p_value
return self._LR_p_value
@property
def dof_hom(self):
self._dof_hom = self.ht.dof
return self._dof_hom
# shtests
@property
def shtest(self):
if not hasattr(self, '_shtest'):
self._shtest = self._mn_test()
return self._shtest
@property
def chi2(self):
if not hasattr(self, '_chi2'):
self._chi2 = self._chi2_test()
return self._chi2
@property
def x2(self):
if not hasattr(self, '_x2'):
self._x2 = sum([c[0] for c in self.chi2])
return self._x2
@property
def x2_pvalue(self):
if not hasattr(self, '_x2_pvalue'):
self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof)
return self._x2_pvalue
@property
def x2_dof(self):
if not hasattr(self, '_x2_dof'):
k = self.k
self._x2_dof = k * (k - 1) * (k - 1)
return self._x2_dof
def _calc(self, y, w):
'''Helper to estimate spatial lag conditioned Markov transition
probability matrices based on maximum likelihood techniques.
'''
if self.discrete:
self.lclass_ids = weights.lag_categorical(w, self.class_ids,
ties="tryself")
else:
ly = weights.lag_spatial(w, y)
self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
ly, self.m, self.lag_cutoffs)
self.lclasses = np.arange(self.m)
T = np.zeros((self.m, self.k, self.k))
n, t = y.shape
for t1 in range(t - 1):
t2 = t1 + 1
for i in range(n):
T[self.lclass_ids[i, t1], self.class_ids[i, t1],
self.class_ids[i, t2]] += 1
P = np.zeros_like(T)
for i, mat in enumerate(T):
row_sum = mat.sum(axis=1)
row_sum = row_sum + (row_sum == 0)
p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
P[i] = p_i
return T, P
def _mn_test(self):
"""
helper to calculate tests of differences between steady state
distributions from the conditional and overall distributions.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [self._ssmnp_test(
self.s, self.S[i], self.T[i].sum()) for i in rn]
return mat
def _ssmnp_test(self, p1, p2, nt):
"""
Steady state multinomial probability difference test.
Arguments
---------
p1 : array
(k, ), first steady state probability distribution.
p1 : array
(k, ), second steady state probability distribution.
nt : int
number of transitions to base the test on.
Returns
-------
tuple
(3 elements)
(chi2 value, pvalue, degrees of freedom)
"""
o = nt * p2
e = nt * p1
d = np.multiply((o - e), (o - e))
d = d / e
chi2 = d.sum()
pvalue = 1 - stats.chi2.cdf(chi2, self.k - 1)
return (chi2, pvalue, self.k - 1)
def _chi2_test(self):
"""
helper to calculate tests of differences between the conditional
transition matrices and the overall transitions matrix.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [chi2(self.T[i], self.transitions) for i in rn]
return mat
def summary(self, file_name=None):
"""
A summary method to call the Markov homogeneity test to test for
temporally lagged spatial dependence.
To learn more about the properties of the tests, refer to
:cite:`Rey2016a` and :cite:`Kang2018`.
"""
class_names = ["C%d" % i for i in range(self.k)]
regime_names = ["LAG%d" % i for i in range(self.k)]
ht = homogeneity(self.T, class_names=class_names,
regime_names=regime_names)
title = "Spatial Markov Test"
if self.variable_name:
title = title + ": " + self.variable_name
if file_name:
ht.summary(file_name=file_name, title=title)
else:
ht.summary(title=title)
def _maybe_classify(self, y, k, cutoffs):
'''Helper method for classifying continuous data.
'''
rows, cols = y.shape
if cutoffs is None:
if self.fixed:
mcyb = mc.Quantiles(y.flatten(), k=k)
yb = mcyb.yb.reshape(y.shape)
cutoffs = mcyb.bins
k = len(cutoffs)
return yb, cutoffs[:-1], k
else:
yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in
np.arange(cols)]).transpose()
return yb, None, k
else:
cutoffs = list(cutoffs) + [np.inf]
cutoffs = np.array(cutoffs)
yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape(
y.shape)
k = len(cutoffs)
return yb, cutoffs[:-1], k
def chi2(T1, T2):
    r"""
    chi-squared test of difference between two transition matrices.

    Parameters
    ----------
    T1    : array
            (k, k), matrix of transitions (counts).
    T2    : array
            (k, k), matrix of transitions (counts) to use to form the
            probabilities under the null.

    Returns
    -------
          : tuple
            (3 elements).
            (chi2 value, pvalue, degrees of freedom).

    Examples
    --------
    >>> import libpysal
    >>> from giddy.markov import Spatial_Markov, chi2
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> years = list(range(1929, 2010))
    >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
    >>> rpci = pci/(pci.mean(axis=0))
    >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
    >>> w.transform='r'
    >>> sm = Spatial_Markov(rpci, w, fixed=True)
    >>> T1 = sm.T[0]
    >>> T1
    array([[562.,  22.,   1.,   0.],
           [ 12., 201.,  22.,   0.],
           [  0.,  17.,  97.,   4.],
           [  0.,   0.,   3.,  19.]])
    >>> T2 = sm.transitions
    >>> T2
    array([[884.,  77.,   4.,   0.],
           [ 68., 794.,  87.,   3.],
           [  1.,  92., 815.,  51.],
           [  1.,   0.,  60., 903.]])
    >>> chi2(T1,T2)
    (23.39728441473295, 0.005363116704861337, 9)

    Notes
    -----
    Second matrix is used to form the probabilities under the null.
    Marginal sums from first matrix are distributed across these probabilities
    under the null. In other words the observed transitions are taken from T1
    while the expected transitions are formed as follows

    .. math::

            E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}

    Degrees of freedom corrected for any rows in either T1 or T2 that have
    zero total transitions.
    """
    rs2 = T2.sum(axis=1)
    rs1 = T1.sum(axis=1)
    rs2nz = rs2 > 0
    rs1nz = rs1 > 0
    dof1 = sum(rs1nz)
    dof2 = sum(rs2nz)
    rs2 = rs2 + (rs2 == 0)          # avoid dividing by zero for empty rows
    dof = (dof1 - 1) * (dof2 - 1)   # corrected for all-zero rows
    # expected counts: T1's row totals spread over T2's row probabilities;
    # plain ndarray broadcasting replaces the deprecated np.matrix algebra
    # (diag(1/rs2) * T2 and diag(rs1) * p) with identical results
    p = T2 / rs2[:, None]
    E = rs1[:, None] * p
    num = (T1 - E) ** 2
    E = E + (E == 0)                # zero expected cells contribute nothing
    stat = (num / E).sum()
    pvalue = 1 - stats.chi2.cdf(stat, dof)
    return stat, pvalue, dof
class LISA_Markov(Markov):
"""
Markov for Local Indicators of Spatial Association
Parameters
----------
y : array
(n, t), n cross-sectional units observed over t time
periods.
w : W
spatial weights object.
permutations : int, optional
number of permutations used to determine LISA
significance (the default is 0).
significance_level : float, optional
significance level (two-sided) for filtering
significant LISA endpoints in a transition (the
default is 0.05).
geoda_quads : bool
If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4.
If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4.
(the default is False).
Attributes
----------
chi_2 : tuple
(3 elements)
(chi square test statistic, p-value, degrees of freedom) for
test that dynamics of y are independent of dynamics of wy.
classes : array
(4, 1)
1=HH, 2=LH, 3=LL, 4=HL (own, lag)
1=HH, 2=LL, 3=LH, 4=HL (own, lag) (if geoda_quads=True)
expected_t : array
(4, 4), expected number of transitions under the null that
dynamics of y are independent of dynamics of wy.
move_types : matrix
(n, t-1), integer values indicating which type of LISA
transition occurred (q1 is quadrant in period 1, q2 is
quadrant in period 2).
.. table:: Move Types
== == =========
q1 q2 move_type
== == =========
1 1 1
1 2 2
1 3 3
1 4 4
2 1 5
2 2 6
2 3 7
2 4 8
3 1 9
3 2 10
3 3 11
3 4 12
4 1 13
4 2 14
4 3 15
4 4 16
== == =========
p : array
(k, k), transition probability matrix.
p_values : matrix
(n, t), LISA p-values for each end point (if permutations >
0).
significant_moves : matrix
(n, t-1), integer values indicating the type and
significance of a LISA transition. st = 1 if
significant in period t, else st=0 (if permutations >
0).
.. Table:: Significant Moves1
=============== ===================
(s1,s2) move_type
=============== ===================
(1,1) [1, 16]
(1,0) [17, 32]
(0,1) [33, 48]
(0,0) [49, 64]
=============== ===================
.. Table:: Significant Moves2
== == == == =========
q1 q2 s1 s2 move_type
== == == == =========
1 1 1 1 1
1 2 1 1 2
1 3 1 1 3
1 4 1 1 4
2 1 1 1 5
2 2 1 1 6
2 3 1 1 7
2 4 1 1 8
3 1 1 1 9
3 2 1 1 10
3 3 1 1 11
3 4 1 1 12
4 1 1 1 13
4 2 1 1 14
4 3 1 1 15
4 4 1 1 16
1 1 1 0 17
1 2 1 0 18
. . . . .
. . . . .
4 3 1 0 31
4 4 1 0 32
1 1 0 1 33
1 2 0 1 34
. . . . .
. . . . .
4 3 0 1 47
4 4 0 1 48
1 1 0 0 49
1 2 0 0 50
. . . . .
. . . . .
4 3 0 0 63
4 4 0 0 64
== == == == =========
steady_state : array
(k, ), ergodic distribution.
transitions : array
(4, 4), count of transitions between each state i and j.
spillover : array
(n, 1) binary array, locations that were not part of a
cluster in period 1 but joined a prexisting cluster in
period 2.
Examples
--------
>>> import libpysal
>>> import numpy as np
>>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> lm = LISA_Markov(pci,w)
>>> lm.classes
array([1, 2, 3, 4])
>>> lm.steady_state
array([0.28561505, 0.14190226, 0.40493672, 0.16754598])
>>> lm.transitions
array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
[4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
[5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
[3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])
>>> lm.p
array([[0.92985458, 0.03763901, 0.00342173, 0.02908469],
[0.07481752, 0.85766423, 0.06569343, 0.00182482],
[0.00333333, 0.02266667, 0.948 , 0.026 ],
[0.04815409, 0.00160514, 0.06420546, 0.88603531]])
>>> lm.move_types[0,:3]
array([11, 11, 11])
>>> lm.move_types[0,-3:]
array([11, 11, 11])
Now consider only moves with one, or both, of the LISA end points being
significant
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> lm_random.significant_moves[0, :3]
array([11, 11, 11])
>>> lm_random.significant_moves[0,-3:]
array([59, 43, 27])
Any value less than 49 indicates at least one of the LISA end points was
significant. So for example, the first spatial unit experienced a
transition of type 11 (LL, LL) during the first three and last tree
intervals (according to lm.move_types), however, the last three of these
transitions involved insignificant LISAS in both the start and ending year
of each transition.
Test whether the moves of y are independent of the moves of wy
>>> "Chi2: %8.3f, p: %5.2f, dof: %d" % lm.chi_2
'Chi2: 1058.208, p: 0.00, dof: 9'
Actual transitions of LISAs
>>> lm.transitions
array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
[4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
[5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
[3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])
Expected transitions of LISAs under the null y and wy are moving
independently of one another
>>> lm.expected_t
array([[1.12328098e+03, 1.15377356e+01, 3.47522158e-01, 3.38337644e+01],
[3.50272664e+00, 5.28473882e+02, 1.59178880e+01, 1.05503814e-01],
[1.53878082e-01, 2.32163556e+01, 1.46690710e+03, 9.72266513e+00],
[9.60775143e+00, 9.86856346e-02, 6.23537392e+00, 6.07058189e+02]])
If the LISA classes are to be defined according to GeoDa, the `geoda_quad`
option has to be set to true
>>> lm.q[0:5,0]
array([3, 2, 3, 1, 4])
>>> lm = LISA_Markov(pci,w, geoda_quads=True)
>>> lm.q[0:5,0]
array([2, 3, 2, 1, 4])
"""
    def __init__(self, y, w, permutations=0,
                 significance_level=0.05, geoda_quads=False):
        # orient y as (t, n): one cross-section per row for each period
        y = y.transpose()
        pml = Moran_Local
        gq = geoda_quads
        # one Local Moran per time period; permutations > 0 also yields
        # pseudo p-values for every LISA statistic
        ml = ([pml(yi, w, permutations=permutations, geoda_quads=gq)
               for yi in y])
        # quadrant codes, back in (n, t) orientation
        q = np.array([mli.q for mli in ml]).transpose()
        classes = np.arange(1, 5)  # no guarantee all 4 quadrants are visited
        Markov.__init__(self, q, classes)
        self.q = q
        self.w = w
        n, k = q.shape
        k -= 1  # number of transitions is one less than the number of periods
        self.significance_level = significance_level
        move_types = np.zeros((n, k), int)
        sm = np.zeros((n, k), int)
        self.significance_level = significance_level
        if permutations > 0:
            p = np.array([mli.p_z_sim for mli in ml]).transpose()
            self.p_values = p
            pb = p <= significance_level
        else:
            # no inference requested: mark every LISA as insignificant
            pb = np.zeros_like(y.T)
        for t in range(k):
            origin = q[:, t]
            dest = q[:, t + 1]
            p_origin = pb[:, t]
            p_dest = pb[:, t + 1]
            for r in range(n):
                # TT and MOVE_TYPES are module-level lookup tables mapping
                # (quadrant, quadrant[, significance flags]) to move codes
                move_types[r, t] = TT[origin[r], dest[r]]
                key = (origin[r], dest[r], p_origin[r], p_dest[r])
                sm[r, t] = MOVE_TYPES[key]
        if permutations > 0:
            self.significant_moves = sm
        self.move_types = move_types
        # null of own and lag moves being independent
        ybar = y.mean(axis=0)
        r = y / ybar
        ylag = np.array([weights.lag_spatial(w, yt) for yt in y])
        rlag = ylag / ybar
        rc = r < 1.
        rlagc = rlag < 1.
        markov_y = Markov(rc)
        markov_ylag = Markov(rlagc)
        # A permutes the Kronecker product so quadrant ordering matches
        # the (own, lag) class coding used above
        A = np.matrix([[1, 0, 0, 0],
                       [0, 0, 1, 0],
                       [0, 0, 0, 1],
                       [0, 1, 0, 0]])
        # expected joint transitions under independence of own and lag moves
        kp = A * np.kron(markov_y.p, markov_ylag.p) * A.T
        trans = self.transitions.sum(axis=1)
        t1 = np.diag(trans) * kp
        t2 = self.transitions
        t1 = t1.getA()
        self.chi_2 = chi2(t2, t1)
        self.expected_t = t1
        self.permutations = permutations
    def spillover(self, quadrant=1, neighbors_on=False):
        """
        Detect spillover locations for diffusion in LISA Markov.

        Parameters
        ----------
        quadrant     : int
                       which quadrant in the scatterplot should form the core
                       of a cluster.
        neighbors_on : binary
                       If false, then only the 1st order neighbors of a core
                       location are included in the cluster.
                       If true, neighbors of cluster core 1st order neighbors
                       are included in the cluster.

        Returns
        -------
        results      : dictionary
                       two keys - values pairs:
                       'components' - array (n, t)
                       values are integer ids (starting at 1) indicating which
                       component/cluster observation i in period t belonged to.
                       'spill_over' - array (n, t-1)
                       binary values indicating if the location was a
                       spill-over location that became a new member of a
                       previously existing cluster.

        Examples
        --------
        >>> import libpysal
        >>> from giddy.markov import LISA_Markov
        >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
        >>> years = list(range(1929, 2010))
        >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
        >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
        >>> np.random.seed(10)
        >>> lm_random = LISA_Markov(pci, w, permutations=99)
        >>> r = lm_random.spillover()
        >>> (r['components'][:, 12] > 0).sum()
        17
        >>> (r['components'][:, 13]>0).sum()
        23
        >>> (r['spill_over'][:,12]>0).sum()
        6

        Including neighbors of core neighbors

        >>> rn = lm_random.spillover(neighbors_on=True)
        >>> (rn['components'][:, 12] > 0).sum()
        26
        >>> (rn["components"][:, 13] > 0).sum()
        34
        >>> (rn["spill_over"][:, 12] > 0).sum()
        8
        """
        n, k = self.q.shape
        # requires LISA p-values, which only exist when permutations > 0
        if self.permutations:
            spill_over = np.zeros((n, k - 1))
            components = np.zeros((n, k))
            i2id = {}  # handle string keys
            for key in list(self.w.neighbors.keys()):
                idx = self.w.id2i[key]
                i2id[idx] = key
            # significant LISAs in the requested quadrant, per period
            sig_lisas = (self.q == quadrant) \
                * (self.p_values <= self.significance_level)
            sig_ids = [np.nonzero(
                sig_lisas[:, i])[0].tolist() for i in range(k)]
            neighbors = self.w.neighbors
            for t in range(k - 1):
                s1 = sig_ids[t]
                s2 = sig_ids[t + 1]
                # build a contiguity graph of period-t cores and their
                # neighbors (optionally neighbors-of-neighbors)
                g1 = Graph(undirected=True)
                for i in s1:
                    for neighbor in neighbors[i2id[i]]:
                        g1.add_edge(i2id[i], neighbor, 1.0)
                        if neighbors_on:
                            for nn in neighbors[neighbor]:
                                g1.add_edge(neighbor, nn, 1.0)
                components1 = g1.connected_components(op=gt)
                components1 = [list(c.nodes) for c in components1]
                # same construction for period t+1
                g2 = Graph(undirected=True)
                for i in s2:
                    for neighbor in neighbors[i2id[i]]:
                        g2.add_edge(i2id[i], neighbor, 1.0)
                        if neighbors_on:
                            for nn in neighbors[neighbor]:
                                g2.add_edge(neighbor, nn, 1.0)
                components2 = g2.connected_components(op=gt)
                components2 = [list(c.nodes) for c in components2]
                c2 = []
                c1 = []
                for c in components2:
                    c2.extend(c)
                for c in components1:
                    c1.extend(c)
                # locations present in period t+1 clusters but not period t
                new_ids = [j for j in c2 if j not in c1]
                spill_ids = []
                for j in new_ids:
                    # find j's component in period 2
                    cj = [c for c in components2 if j in c][0]
                    # for members of j's component in period 2, check if they
                    # belonged to any components in period 1
                    for i in cj:
                        if i in c1:
                            spill_ids.append(j)
                            break
                for spill_id in spill_ids:
                    id = self.w.id2i[spill_id]
                    spill_over[id, t] = 1
                for c, component in enumerate(components1):
                    for i in component:
                        ii = self.w.id2i[i]
                        components[ii, t] = c + 1
            results = {}
            results['components'] = components
            results['spill_over'] = spill_over
            return results
        else:
            return None
def kullback(F):
    """
    Kullback information based test of Markov Homogeneity.

    Parameters
    ----------
    F : array
        (s, r, r), values are transitions (not probabilities) for
        s strata, r initial states, r terminal states.

    Returns
    -------
    Results : dictionary
        (key - value)
        Conditional homogeneity - (float) test statistic for homogeneity
        of transition probabilities across strata.
        Conditional homogeneity pvalue - (float) p-value for test
        statistic.
        Conditional homogeneity dof - (int) degrees of freedom =
        r(s-1)(r-1).

    Notes
    -----
    Based on :cite:`Kullback1962`.
    Example below is taken from Table 9.2 .

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.markov import kullback
    >>> s1 = np.array([
    ...     [22, 11, 24, 2, 2, 7],
    ...     [5, 23, 15, 3, 42, 6],
    ...     [4, 21, 190, 25, 20, 34],
    ...     [0, 2, 14, 56, 14, 28],
    ...     [32, 15, 20, 10, 56, 14],
    ...     [5, 22, 31, 18, 13, 134]
    ... ])
    >>> s2 = np.array([
    ...     [3, 6, 9, 3, 0, 8],
    ...     [1, 9, 3, 12, 27, 5],
    ...     [2, 9, 208, 32, 5, 18],
    ...     [0, 14, 32, 108, 40, 40],
    ...     [22, 14, 9, 26, 224, 14],
    ...     [1, 5, 13, 53, 13, 116]
    ... ])
    >>>
    >>> F = np.array([s1, s2])
    >>> res = kullback(F)
    >>> "%8.3f"%res['Conditional homogeneity']
    ' 160.961'
    >>> "%d"%res['Conditional homogeneity dof']
    '30'
    >>> "%3.1f"%res['Conditional homogeneity pvalue']
    '0.0'
    """
    # Every term below has the form 2 * sum(n * log(n)).  Zero counts are
    # shifted to 1 inside the log so that n * log(n) contributes 0 for
    # empty cells (avoids log(0)).
    F1 = F + (F == 0)
    # T1: full three-way table, cell-level counts.
    T1 = 2 * (F * np.log(F1)).sum()
    # T2: (initial, terminal) transition counts pooled over strata.
    FdJK = F.sum(axis=0)
    T2 = 2 * (FdJK * np.log(FdJK + (FdJK == 0))).sum()
    # T3: initial-state totals pooled over strata.
    FdJd = FdJK.sum(axis=1)
    T3 = 2 * (FdJd * np.log(FdJd + (FdJd == 0))).sum()
    # T4: per-stratum margins over the middle axis.
    FIJd = F.sum(axis=1)
    T4 = 2 * (FIJd * np.log(FIJd + (FIJd == 0))).sum()
    s, r, _ = F.shape
    # Conditional homogeneity statistic; asymptotically chi-squared with
    # r(s-1)(r-1) degrees of freedom under the null of homogeneity.
    chom = T1 - T4 - T2 + T3
    cdof = r * (s - 1) * (r - 1)
    results = {}
    results['Conditional homogeneity'] = chom
    results['Conditional homogeneity dof'] = cdof
    results['Conditional homogeneity pvalue'] = 1 - stats.chi2.cdf(chom, cdof)
    return results
def homogeneity(transition_matrices, regime_names=None, class_names=None,
                title="Markov Homogeneity Test"):
    """
    Test for homogeneity of Markov transition probabilities across regimes.

    Parameters
    ----------
    transition_matrices : list
                          of transition matrices for regimes, all matrices must
                          have same size (r, c). r is the number of rows in the
                          transition matrix and c is the number of columns in
                          the transition matrix.
    regime_names : sequence, optional
                   Labels for the regimes. Defaults to an empty sequence.
    class_names : sequence, optional
                  Labels for the classes/states of the Markov chain.
                  Defaults to an empty sequence.
    title : string
            name of test.

    Returns
    -------
    : implicit
      an instance of Homogeneity_Results.
    """
    # None sentinels instead of mutable [] defaults; normalize here so the
    # downstream class still receives (possibly empty) sequences it can
    # iterate, preserving the original call contract.
    if regime_names is None:
        regime_names = []
    if class_names is None:
        class_names = []
    return Homogeneity_Results(transition_matrices, regime_names=regime_names,
                               class_names=class_names, title=title)
class Homogeneity_Results:
    """
    Wrapper class to present homogeneity results.

    Parameters
    ----------
    transition_matrices : list
                          of transition matrices for regimes, all matrices must
                          have same size (r, c). r is the number of rows in
                          the transition matrix and c is the number of columns
                          in the transition matrix.
    regime_names : sequence
                   Labels for the regimes.
    class_names : sequence
                  Labels for the classes/states of the Markov chain.
    title : string
            Title of the table.

    Attributes
    -----------

    Notes
    -----
    Degrees of freedom adjustment follow the approach in :cite:`Bickenbach2003`.

    Examples
    --------
    See Spatial_Markov above.
    """

    def __init__(self, transition_matrices, regime_names=[], class_names=[],
                 title="Markov Homogeneity Test"):
        # NOTE(review): the [] defaults are mutable defaults shared across
        # calls; harmless here because they are only stored, never mutated
        # in __init__.
        self._homogeneity(transition_matrices)
        self.regime_names = regime_names
        self.class_names = class_names
        self.title = title

    def _homogeneity(self, transition_matrices):
        """Compute the Q and LR homogeneity statistics.

        Populates self.Q / self.LR (with p-values), the shared degrees of
        freedom self.dof, per-regime contribution tables (Q_table,
        LR_table), and the pooled (p_h0) and per-regime (p_h1) transition
        probability estimates.
        """
        # form null transition probability matrix
        M = np.array(transition_matrices)
        # m regimes, r initial states, k terminal states
        m, r, k = M.shape
        self.k = k
        B = np.zeros((r, m))
        # T: transition counts pooled over all regimes
        T = M.sum(axis=0)
        self.t_total = T.sum()
        # pooled row totals and count of nonzero cells per pooled row
        n_i = T.sum(axis=1)
        A_i = (T > 0).sum(axis=1)
        A_im = np.zeros((r, m))
        # pooled (null) transition probabilities; zero rows are guarded by
        # substituting a unit denominator
        p_ij = np.dot(np.diag(1. / (n_i + (n_i == 0) * 1.)), T)
        # denominator for the Q ratio below; zero cells replaced with 1
        den = p_ij + 1. * (p_ij == 0)
        b_i = np.zeros_like(A_i)
        # NOTE(review): zeros_like(M) inherits M's dtype; if integer count
        # matrices are passed in, p_ijm would truncate probabilities to
        # ints -- confirm callers pass float transition matrices.
        p_ijm = np.zeros_like(M)
        # get dimensions
        m, n_rows, n_cols = M.shape
        # m is reused as the running regime index in the loop below
        m = 0
        Q = 0.0
        LR = 0.0
        lr_table = np.zeros_like(M)
        q_table = np.zeros_like(M)

        for nijm in M:
            # row totals for this regime
            nim = nijm.sum(axis=1)
            B[:, m] = 1. * (nim > 0)
            b_i = b_i + 1. * (nim > 0)
            # per-regime transition probabilities (zero rows guarded)
            p_ijm[m] = np.dot(np.diag(1. / (nim + (nim == 0) * 1.)), nijm)
            num = (p_ijm[m] - p_ij)**2
            ratio = num / den
            # Pearson-type contribution weighted by regime row totals
            qijm = np.dot(np.diag(nim), ratio)
            q_table[m] = qijm
            Q = Q + qijm.sum()
            # only use nonzero pijm in lr test
            mask = (nijm > 0) * (p_ij > 0)
            A_im[:, m] = (nijm > 0).sum(axis=1)
            unmask = 1.0 * (mask == 0)
            # masked-out cells become 1/1 so they contribute log(1) = 0
            ratio = (mask * p_ijm[m] + unmask) / (mask * p_ij + unmask)
            lr = nijm * np.log(ratio)
            LR = LR + lr.sum()
            lr_table[m] = 2 * lr
            m += 1

        # b_i is the number of regimes that have non-zero observations in row i
        # A_i is the number of non-zero elements in row i of the aggregated
        # transition matrix
        self.dof = int(((b_i - 1) * (A_i - 1)).sum())
        self.Q = Q
        self.Q_p_value = 1 - stats.chi2.cdf(self.Q, self.dof)
        # likelihood-ratio statistic is 2 * sum of the log terms
        self.LR = LR * 2.
        self.LR_p_value = 1 - stats.chi2.cdf(self.LR, self.dof)
        self.A = A_i
        self.A_im = A_im
        self.B = B
        self.b_i = b_i
        self.LR_table = lr_table
        self.Q_table = q_table
        self.m = m
        self.p_h0 = p_ij
        self.p_h1 = p_ijm

    def summary(self, file_name=None, title="Markov Homogeneity Test"):
        """Print a text summary table; optionally also write a LaTeX table.

        Parameters
        ----------
        file_name : string, optional
                    If given, a LaTeX rendering of the summary is written
                    to this path.
        title : string
                Title used for the table.
        """
        # default numeric regime labels, overridden if names were supplied
        regime_names = ["%d" % i for i in range(self.m)]
        if self.regime_names:
            regime_names = self.regime_names
        cols = ["P(%s)" % str(regime) for regime in regime_names]
        if not self.class_names:
            self.class_names = list(range(self.k))
        max_col = max([len(col) for col in cols])
        col_width = max([5, max_col])  # probabilities have 5 chars
        n_tabs = self.k
        width = n_tabs * 4 + (self.k + 1) * col_width
        lead = "-" * width
        head = title.center(width)
        contents = [lead, head, lead]
        l = "Number of regimes: %d" % int(self.m)
        k = "Number of classes: %d" % int(self.k)
        r = "Regime names: "
        r += ", ".join(regime_names)
        t = "Number of transitions: %d" % int(self.t_total)
        contents.append(k)
        contents.append(t)
        contents.append(l)
        contents.append(r)
        contents.append(lead)
        h = "%7s %20s %20s" % ('Test', 'LR', 'Chi-2')
        contents.append(h)
        stat = "%7s %20.3f %20.3f" % ('Stat.', self.LR, self.Q)
        contents.append(stat)
        stat = "%7s %20d %20d" % ('DOF', self.dof, self.dof)
        contents.append(stat)
        stat = "%7s %20.3f %20.3f" % ('p-value', self.LR_p_value,
                                      self.Q_p_value)
        contents.append(stat)
        print(("\n".join(contents)))
        print(lead)
        # NOTE(review): this recomputes cols from self.regime_names rather
        # than the defaulted regime_names above; with no regime names
        # supplied this list starts empty -- confirm intended.
        cols = ["P(%s)" % str(regime) for regime in self.regime_names]
        if not self.class_names:
            self.class_names = list(range(self.k))
        cols.extend(["%s" % str(cname) for cname in self.class_names])
        max_col = max([len(col) for col in cols])
        col_width = max([5, max_col])  # probabilities have 5 chars
        p0 = []
        # header row for the pooled (null) probability matrix
        line0 = ['{s: <{w}}'.format(s="P(H0)", w=col_width)]
        line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname in
                       self.class_names]))
        print((" ".join(line0)))
        p0.append("&".join(line0))
        for i, row in enumerate(self.p_h0):
            line = ["%*s" % (col_width, str(self.class_names[i]))]
            line.extend(["%*.3f" % (col_width, v) for v in row])
            print((" ".join(line)))
            p0.append("&".join(line))
        # pmats accumulates '&'-joined rows for the optional LaTeX output
        pmats = [p0]
        print(lead)
        # one probability matrix per regime (alternative hypothesis)
        for r, p1 in enumerate(self.p_h1):
            p0 = []
            line0 = ['{s: <{w}}'.format(s="P(%s)" %
                                        regime_names[r], w=col_width)]
            line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname
                           in self.class_names]))
            print((" ".join(line0)))
            p0.append("&".join(line0))
            for i, row in enumerate(p1):
                line = ["%*s" % (col_width, str(self.class_names[i]))]
                line.extend(["%*.3f" % (col_width, v) for v in row])
                print((" ".join(line)))
                p0.append("&".join(line))
            pmats.append(p0)
        print(lead)
        # optional LaTeX rendering of the same content
        if file_name:
            k = self.k
            ks = str(k + 1)
            with open(file_name, 'w') as f:
                c = []
                fmt = "r" * (k + 1)
                s = "\\begin{tabular}{|%s|}\\hline\n" % fmt
                s += "\\multicolumn{%s}{|c|}{%s}" % (ks, title)
                c.append(s)
                s = "Number of classes: %d" % int(self.k)
                c.append("\\hline\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Number of transitions: %d" % int(self.t_total)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Number of regimes: %d" % int(self.m)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Regime names: "
                s += ", ".join(regime_names)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "\\hline\\multicolumn{2}{|l}{%s}" % ("Test")
                s += "&\\multicolumn{2}{r}{LR}&\\multicolumn{2}{r|}{Q}"
                c.append(s)
                s = "Stat."
                s = "\\multicolumn{2}{|l}{%s}" % (s)
                s += "&\\multicolumn{2}{r}{%.3f}" % self.LR
                s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q
                c.append(s)
                s = "\\multicolumn{2}{|l}{%s}" % ("DOF")
                s += "&\\multicolumn{2}{r}{%d}" % int(self.dof)
                s += "&\\multicolumn{2}{r|}{%d}" % int(self.dof)
                c.append(s)
                s = "\\multicolumn{2}{|l}{%s}" % ("p-value")
                s += "&\\multicolumn{2}{r}{%.3f}" % self.LR_p_value
                s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q_p_value
                c.append(s)
                s1 = "\\\\\n".join(c)
                s1 += "\\\\\n"
                c = []
                for mat in pmats:
                    c.append("\\hline\n")
                    for row in mat:
                        c.append(row + "\\\\\n")
                c.append("\\hline\n")
                c.append("\\end{tabular}")
                s2 = "".join(c)
                f.write(s1 + s2)
class FullRank_Markov:
    """
    Full Rank Markov in which ranks are considered as Markov states rather
    than quantiles or other discretized classes. This is one way to avoid
    issues associated with discretization.

    Parameters
    ----------
    y : array
        (n, t) with t>>n, one row per observation (n total),
        one column recording the value of each observation,
        with as many columns as time periods.

    Attributes
    ----------
    ranks : array
            ranks of the original y array (by columns): higher values
            rank higher, e.g. the largest value in a column ranks 1.
    p : array
        (n, n), transition probability matrix for Full
        Rank Markov.
    steady_state : array
                   (n, ), ergodic distribution.
    transitions : array
                  (n, n), count of transitions between each rank i and j
    fmpt : array
           (n, n), first mean passage times.
    sojourn_time : array
                   (n, ), sojourn times.

    Notes
    -----
    Refer to :cite:`Rey2014a` Equation (11) for details. Ties are resolved by
    assigning distinct ranks, corresponding to the order that the values occur
    in each cross section.

    Examples
    --------
    US nominal per capita income 48 states 81 years 1929-2009

    >>> from giddy.markov import FullRank_Markov
    >>> import libpysal as ps
    >>> import numpy as np
    >>> f = ps.io.open(ps.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]).transpose()
    >>> m = FullRank_Markov(pci)
    >>> m.ranks
    array([[45, 45, 44, ..., 41, 40, 39],
           [24, 25, 25, ..., 36, 38, 41],
           [46, 47, 45, ..., 43, 43, 43],
           ...,
           [34, 34, 34, ..., 47, 46, 42],
           [17, 17, 22, ..., 25, 26, 25],
           [16, 18, 19, ...,  6,  6,  7]])
    >>> m.transitions
    array([[66.,  5.,  5., ...,  0.,  0.,  0.],
           [ 8., 51.,  9., ...,  0.,  0.,  0.],
           [ 2., 13., 44., ...,  0.,  0.,  0.],
           ...,
           [ 0.,  0.,  0., ..., 40., 17.,  0.],
           [ 0.,  0.,  0., ..., 15., 54.,  2.],
           [ 0.,  0.,  0., ...,  2.,  1., 77.]])
    >>> m.p[0, :5]
    array([0.825 , 0.0625, 0.0625, 0.025 , 0.025 ])
    >>> m.fmpt[0, :5]
    array([48.        , 87.96280048, 68.1089084 , 58.83306575, 41.77250827])
    >>> m.sojourn_time[:5]
    array([5.71428571, 2.75862069, 2.22222222, 1.77777778, 1.66666667])

    """

    def __init__(self, y):
        y = np.asarray(y)
        # Ordinal (ascending) ranks computed column by column; ties receive
        # distinct ranks in order of appearance within each cross section.
        ascending = np.array(
            [rankdata(col, method='ordinal') for col in y.T]).T
        # Flip the direction so the largest value ranks 1 and the smallest
        # ranks n.
        self.ranks = ascending.shape[0] - ascending + 1
        chain = Markov(self.ranks)
        self.p = chain.p
        self.transitions = chain.transitions

    @property
    def steady_state(self):
        # lazily computed and cached ergodic distribution
        try:
            return self._steady_state
        except AttributeError:
            self._steady_state = STEADY_STATE(self.p)
            return self._steady_state

    @property
    def fmpt(self):
        # lazily computed and cached first mean passage times
        try:
            return self._fmpt
        except AttributeError:
            self._fmpt = fmpt(self.p)
            return self._fmpt

    @property
    def sojourn_time(self):
        # lazily computed and cached sojourn times
        try:
            return self._st
        except AttributeError:
            self._st = sojourn_time(self.p)
            return self._st
def sojourn_time(p):
    """
    Calculate sojourn time based on a given transition probability matrix.

    Parameters
    ----------
    p : array
        (k, k), a Markov transition probability matrix.

    Returns
    -------
    : array
      (k, ), sojourn times. Each element is the expected time a Markov
      chain spends in each state before leaving that state. Absorbing
      states (diagonal probability of 1) have an infinite sojourn time.

    Notes
    -----
    Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
    chains.

    Examples
    --------
    >>> from giddy.markov import sojourn_time
    >>> import numpy as np
    >>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
    >>> sojourn_time(p)
    array([2., 1., 2.])

    """
    p = np.asarray(p)
    pii = p.diagonal()
    # Any diagonal entry equal to 1 marks an absorbing state, which never
    # leaves and therefore has an infinite sojourn time.
    if not (1 - pii).all():
        print("Sojourn times are infinite for absorbing states!")
    # Suppress the divide-by-zero RuntimeWarning for absorbing states; the
    # resulting inf is the intended value and the user has been warned.
    with np.errstate(divide='ignore'):
        return 1 / (1 - pii)
class GeoRank_Markov:
    """
    Geographic Rank Markov.
    Geographic units are considered as Markov states.

    Parameters
    ----------
    y : array
        (n, t) with t>>n, one row per observation (n total),
        one column recording the value of each observation,
        with as many columns as time periods.

    Attributes
    ----------
    p : array
        (n, n), transition probability matrix for
        geographic rank Markov.
    steady_state : array
                   (n, ), ergodic distribution.
    transitions : array
                  (n, n), count of rank transitions between each
                  geographic unit i and j.
    fmpt : array
           (n, n), first mean passage times.
    sojourn_time : array
                   (n, ), sojourn times.

    Notes
    -----
    Refer to :cite:`Rey2014a` Equation (13)-(16) for details. Ties are
    resolved by assigning distinct ranks, corresponding to the order
    that the values occur in each cross section.

    Examples
    --------
    US nominal per capita income 48 states 81 years 1929-2009

    >>> from giddy.markov import GeoRank_Markov
    >>> import libpysal as ps
    >>> import numpy as np
    >>> f = ps.io.open(ps.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]).transpose()
    >>> m = GeoRank_Markov(pci)
    >>> m.transitions
    array([[38.,  0.,  8., ...,  0.,  0.,  0.],
           [ 0., 15.,  0., ...,  0.,  1.,  0.],
           [ 6.,  0., 44., ...,  5.,  0.,  0.],
           ...,
           [ 2.,  0.,  5., ..., 34.,  0.,  0.],
           [ 0.,  0.,  0., ...,  0., 18.,  2.],
           [ 0.,  0.,  0., ...,  0.,  3., 14.]])
    >>> m.p
    array([[0.475 , 0.    , 0.1   , ..., 0.    , 0.    , 0.    ],
           [0.    , 0.1875, 0.    , ..., 0.    , 0.0125, 0.    ],
           [0.075 , 0.    , 0.55  , ..., 0.0625, 0.    , 0.    ],
           ...,
           [0.025 , 0.    , 0.0625, ..., 0.425 , 0.    , 0.    ],
           [0.    , 0.    , 0.    , ..., 0.    , 0.225 , 0.025 ],
           [0.    , 0.    , 0.    , ..., 0.    , 0.0375, 0.175 ]])
    >>> m.fmpt
    array([[ 48.        ,  63.35532038,  92.75274652, ...,  82.47515731,
             71.01114491,  68.65737127],
           [108.25928005,  48.        , 127.99032986, ...,  92.03098299,
             63.36652935,  61.82733039],
           [ 76.96801786,  64.7713783 ,  48.        , ...,  73.84595169,
             72.24682723,  69.77497173],
           ...,
           [ 93.3107474 ,  62.47670463, 105.80634118, ...,  48.        ,
             69.30121319,  67.08838421],
           [113.65278078,  61.1987031 , 133.57991745, ...,  96.0103924 ,
             48.        ,  56.74165107],
           [114.71894813,  63.4019776 , 134.73381719, ...,  97.287895  ,
             61.45565054,  48.        ]])
    >>> m.sojourn_time
    array([ 1.9047619 ,  1.23076923,  2.22222222,  1.73913043,  1.15942029,
            3.80952381,  1.70212766,  1.25      ,  1.31147541,  1.11111111,
            1.73913043,  1.37931034,  1.17647059,  1.21212121,  1.33333333,
            1.37931034,  1.09589041,  2.10526316,  2.        ,  1.45454545,
            1.26984127, 26.66666667,  1.19402985,  1.23076923,  1.09589041,
            1.56862745,  1.26984127,  2.42424242,  1.50943396,  2.        ,
            1.29032258,  1.09589041,  1.6       ,  1.42857143,  1.25      ,
            1.45454545,  1.29032258,  1.6       ,  1.17647059,  1.56862745,
            1.25      ,  1.37931034,  1.45454545,  1.42857143,  1.29032258,
            1.73913043,  1.29032258,  1.21212121])

    """

    def __init__(self, y):
        y = np.asarray(y)
        # Ordinal ranks per cross section; ties receive distinct ranks in
        # order of appearance, so every unit holds a unique rank each year.
        ordinal = np.array(
            [rankdata(col, method='ordinal') for col in y.T]).T
        # argsort of the ranks yields, for each rank position, the index of
        # the geographic unit occupying it (shifted to a 1 offset).
        geo_ranks = np.argsort(ordinal, axis=0) + 1
        chain = Markov(geo_ranks)
        self.p = chain.p
        self.transitions = chain.transitions

    @property
    def steady_state(self):
        # lazily computed and cached ergodic distribution
        try:
            return self._steady_state
        except AttributeError:
            self._steady_state = STEADY_STATE(self.p)
            return self._steady_state

    @property
    def fmpt(self):
        # lazily computed and cached first mean passage times
        try:
            return self._fmpt
        except AttributeError:
            self._fmpt = fmpt(self.p)
            return self._fmpt

    @property
    def sojourn_time(self):
        # lazily computed and cached sojourn times
        try:
            return self._st
        except AttributeError:
            self._st = sojourn_time(self.p)
            return self._st
|
pysal/giddy | giddy/markov.py | homogeneity | python | def homogeneity(transition_matrices, regime_names=[], class_names=[],
title="Markov Homogeneity Test"):
return Homogeneity_Results(transition_matrices, regime_names=regime_names,
class_names=class_names, title=title) | Test for homogeneity of Markov transition probabilities across regimes.
Parameters
----------
transition_matrices : list
of transition matrices for regimes, all matrices must
have same size (r, c). r is the number of rows in the
transition matrix and c is the number of columns in
the transition matrix.
regime_names : sequence
Labels for the regimes.
class_names : sequence
Labels for the classes/states of the Markov chain.
title : string
name of test.
Returns
-------
: implicit
an instance of Homogeneity_Results. | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1480-L1506 | null | """
Markov based methods for spatial dynamics.
"""
__author__ = "Sergio J. Rey <sjsrey@gmail.com>, Wei Kang <weikang9009@gmail.com>"
__all__ = ["Markov", "LISA_Markov", "Spatial_Markov", "kullback",
"prais", "homogeneity", "FullRank_Markov", "sojourn_time",
"GeoRank_Markov"]
import numpy as np
from .ergodic import fmpt
from .ergodic import steady_state as STEADY_STATE
from .components import Graph
from scipy import stats
from scipy.stats import rankdata
from operator import gt
from libpysal import weights
from esda.moran import Moran_Local
import mapclassify as mc
import itertools
# TT predefine LISA transitions
# TT[i, j] is the transition type from quadrant i (period 0) to
# quadrant j (period 1); row/column 0 are unused (one-offset).
TT = np.zeros((5, 5), int)
c = 1
for quad0 in range(1, 5):
    for quad1 in range(1, 5):
        TT[quad0, quad1] = c
        c += 1

# MOVE_TYPES maps a LISA transition, filtered on end-point significance,
# to a move-type code. Keys are (quadrant_0, quadrant_1, sig_0, sig_1)
# where sig_* is True when the LISA is significant in that period; each
# significance combination occupies its own run of 16 consecutive codes.
MOVE_TYPES = {}
c = 1
cases = (True, False)
sig_keys = [(i, j) for i in cases for j in cases]
for offset, sig_key in enumerate(sig_keys):
    c = 1 + offset * 16
    for quad0 in range(1, 5):
        for quad1 in range(1, 5):
            MOVE_TYPES[(quad0, quad1, sig_key[0], sig_key[1])] = c
            c += 1
class Markov(object):
    """
    Classic Markov transition matrices.

    Parameters
    ----------
    class_ids : array
                (n, t), one row per observation, one column recording the
                state of each observation, with as many columns as time
                periods.
    classes : array
              (k, 1), all different classes (bins) of the matrix.

    Attributes
    ----------
    p : array
        (k, k), transition probability matrix.
    steady_state : array
                   (k, ), ergodic distribution.
    transitions : array
                  (k, k), count of transitions between each state i and j.

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.markov import Markov
    >>> c = [['b','a','c'],['c','c','a'],['c','b','c']]
    >>> c.extend([['a','a','b'], ['a','b','c']])
    >>> c = np.array(c)
    >>> m = Markov(c)
    >>> m.classes.tolist()
    ['a', 'b', 'c']
    >>> m.p
    array([[0.25      , 0.5       , 0.25      ],
           [0.33333333, 0.        , 0.66666667],
           [0.33333333, 0.33333333, 0.33333333]])
    >>> m.steady_state
    array([0.30769231, 0.28846154, 0.40384615])

    US nominal per capita income 48 states 81 years 1929-2009

    >>> import libpysal
    >>> import mapclassify as mc
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])

    set classes to quintiles for each year

    >>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
    >>> m = Markov(q5)
    >>> m.transitions
    array([[729.,  71.,   1.,   0.,   0.],
           [ 72., 567.,  80.,   3.,   0.],
           [  0.,  81., 631.,  86.,   2.],
           [  0.,   3.,  86., 573.,  56.],
           [  0.,   0.,   1.,  57., 741.]])
    >>> m.p
    array([[0.91011236, 0.0886392 , 0.00124844, 0.        , 0.        ],
           [0.09972299, 0.78531856, 0.11080332, 0.00415512, 0.        ],
           [0.        , 0.10125   , 0.78875   , 0.1075    , 0.0025    ],
           [0.        , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
           [0.        , 0.        , 0.00125156, 0.07133917, 0.92740926]])
    >>> m.steady_state
    array([0.20774716, 0.18725774, 0.20740537, 0.18821787, 0.20937187])

    Relative incomes

    >>> pci = pci.transpose()
    >>> rpci = pci/(pci.mean(axis=0))
    >>> rq = mc.Quantiles(rpci.flatten()).yb.reshape(pci.shape)
    >>> mq = Markov(rq)
    >>> mq.transitions
    array([[707.,  58.,   7.,   1.,   0.],
           [ 50., 629.,  80.,   1.,   1.],
           [  4.,  79., 610.,  73.,   2.],
           [  0.,   7.,  72., 650.,  37.],
           [  0.,   0.,   0.,  48., 724.]])
    >>> mq.steady_state
    array([0.17957376, 0.21631443, 0.21499942, 0.21134662, 0.17776576])

    """

    def __init__(self, class_ids, classes=None):
        # classes default to the sorted distinct values found in the data
        self.classes = np.unique(class_ids) if classes is None else classes
        labels = self.classes.tolist()
        n_states = len(labels)
        counts = np.zeros((n_states, n_states))
        # tally moves between every pair of consecutive time periods
        for origin, dest in zip(class_ids.T[:-1], class_ids.T[1:]):
            for start_label, end_label in zip(origin, dest):
                counts[labels.index(start_label),
                       labels.index(end_label)] += 1
        self.transitions = counts
        totals = counts.sum(axis=1)
        # rows with no observed transitions keep a unit denominator so
        # their probabilities remain zero
        self.p = np.dot(np.diag(1 / (totals + (totals == 0))), counts)

    @property
    def steady_state(self):
        # lazily computed and cached ergodic distribution
        try:
            return self._steady_state
        except AttributeError:
            self._steady_state = STEADY_STATE(self.p)
            return self._steady_state
class Spatial_Markov(object):
"""
Markov transitions conditioned on the value of the spatial lag.
Parameters
----------
y : array
(n, t), one row per observation, one column per state of
each observation, with as many columns as time periods.
w : W
spatial weights object.
k : integer, optional
number of classes (quantiles) for input time series y.
Default is 4. If discrete=True, k is determined
endogenously.
m : integer, optional
number of classes (quantiles) for the spatial lags of
regional time series. Default is 4. If discrete=True,
m is determined endogenously.
permutations : int, optional
number of permutations for use in randomization based
inference (the default is 0).
fixed : bool, optional
If true, discretization are taken over the entire n*t
pooled series and cutoffs can be user-defined. If
cutoffs and lag_cutoffs are not given, quantiles are
used. If false, quantiles are taken each time period
over n. Default is True.
discrete : bool, optional
If true, categorical spatial lags which are most common
categories of neighboring observations serve as the
conditioning and fixed is ignored; if false, weighted
averages of neighboring observations are used. Default is
false.
cutoffs : array, optional
users can specify the discretization cutoffs for
continuous time series. Default is None, meaning that
quantiles will be used for the discretization.
lag_cutoffs : array, optional
users can specify the discretization cutoffs for the
spatial lags of continuous time series. Default is
None, meaning that quantiles will be used for the
discretization.
variable_name : string
name of variable.
Attributes
----------
class_ids : array
(n, t), discretized series if y is continuous. Otherwise
it is identical to y.
classes : array
(k, 1), all different classes (bins).
lclass_ids : array
(n, t), spatial lag series.
lclasses : array
(k, 1), all different classes (bins) for
spatial lags.
p : array
(k, k), transition probability matrix for a-spatial
Markov.
s : array
(k, 1), ergodic distribution for a-spatial Markov.
transitions : array
(k, k), counts of transitions between each state i and j
for a-spatial Markov.
T : array
(k, k, k), counts of transitions for each conditional
Markov. T[0] is the matrix of transitions for
observations with lags in the 0th quantile; T[k-1] is the
transitions for the observations with lags in the k-1th.
P : array
(k, k, k), transition probability matrix for spatial
Markov first dimension is the conditioned on the lag.
S : array
(k, k), steady state distributions for spatial Markov.
Each row is a conditional steady_state.
F : array
(k, k, k),first mean passage times.
First dimension is conditioned on the lag.
shtest : list
(k elements), each element of the list is a tuple for a
multinomial difference test between the steady state
distribution from a conditional distribution versus the
overall steady state distribution: first element of the
tuple is the chi2 value, second its p-value and the third
the degrees of freedom.
chi2 : list
(k elements), each element of the list is a tuple for a
chi-squared test of the difference between the
conditional transition matrix against the overall
transition matrix: first element of the tuple is the chi2
value, second its p-value and the third the degrees of
freedom.
x2 : float
sum of the chi2 values for each of the conditional tests.
Has an asymptotic chi2 distribution with k(k-1)(k-1)
degrees of freedom. Under the null that transition
probabilities are spatially homogeneous.
(see chi2 above)
x2_dof : int
degrees of freedom for homogeneity test.
x2_pvalue : float
pvalue for homogeneity test based on analytic.
distribution
x2_rpvalue : float
(if permutations>0)
pseudo p-value for x2 based on random spatial
permutations of the rows of the original transitions.
x2_realizations : array
(permutations,1), the values of x2 for the random
permutations.
Q : float
Chi-square test of homogeneity across lag classes based
on :cite:`Bickenbach2003`.
Q_p_value : float
p-value for Q.
LR : float
Likelihood ratio statistic for homogeneity across lag
classes based on :cite:`Bickenbach2003`.
LR_p_value : float
p-value for LR.
dof_hom : int
degrees of freedom for LR and Q, corrected for 0 cells.
Notes
-----
Based on :cite:`Rey2001`.
The shtest and chi2 tests should be used with caution as they are based on
classic theory assuming random transitions. The x2 based test is
preferable since it simulates the randomness under the null. It is an
experimental test requiring further analysis.
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov
>>> import numpy as np
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform = 'r'
Now we create a `Spatial_Markov` instance for the continuous relative per
capita income time series for 48 US lower states 1929-2009. The current
implementation allows users to classify the continuous incomes in a more
flexible way.
(1) Global quintiles to discretize the income data (k=5), and global
quintiles to discretize the spatial lags of incomes (m=5).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=5, variable_name='rpci')
We can examine the cutoffs for the incomes and cutoffs for the spatial lags
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.88973386, 0.95891917, 1.01469758, 1.1183566 ])
Obviously, they are slightly different.
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.96341463 0.0304878 0.00609756 0. 0. ]
[0.06040268 0.83221477 0.10738255 0. 0. ]
[0. 0.14 0.74 0.12 0. ]
[0. 0.03571429 0.32142857 0.57142857 0.07142857]
[0. 0. 0. 0.16666667 0.83333333]]
[[0.79831933 0.16806723 0.03361345 0. 0. ]
[0.0754717 0.88207547 0.04245283 0. 0. ]
[0.00537634 0.06989247 0.8655914 0.05913978 0. ]
[0. 0. 0.06372549 0.90196078 0.03431373]
[0. 0. 0. 0.19444444 0.80555556]]
[[0.84693878 0.15306122 0. 0. 0. ]
[0.08133971 0.78947368 0.1291866 0. 0. ]
[0.00518135 0.0984456 0.79274611 0.0984456 0.00518135]
[0. 0. 0.09411765 0.87058824 0.03529412]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.8852459 0.09836066 0. 0.01639344 0. ]
[0.03875969 0.81395349 0.13953488 0. 0.00775194]
[0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ]
[0. 0.02339181 0.12865497 0.75438596 0.09356725]
[0. 0. 0. 0.09661836 0.90338164]]
[[0.33333333 0.66666667 0. 0. 0. ]
[0.0483871 0.77419355 0.16129032 0.01612903 0. ]
[0.01149425 0.16091954 0.74712644 0.08045977 0. ]
[0. 0.01036269 0.06217617 0.89637306 0.03108808]
[0. 0. 0. 0.02352941 0.97647059]]
The probability of a poor state remaining poor is 0.963 if their
neighbors are in the 1st quintile and 0.798 if their neighbors are
in the 2nd quintile. The probability of a rich economy remaining
rich is 0.976 if their neighbors are in the 5th quintile, but if their
neighbors are in the 4th quintile this drops to 0.903.
The global transition probability matrix is estimated:
>>> print(sm.p)
[[0.91461837 0.07503234 0.00905563 0.00129366 0. ]
[0.06570302 0.82654402 0.10512484 0.00131406 0.00131406]
[0.00520833 0.10286458 0.79427083 0.09505208 0.00260417]
[0. 0.00913838 0.09399478 0.84856397 0.04830287]
[0. 0. 0. 0.06217617 0.93782383]]
The Q and likelihood ratio statistics are both significant indicating
the dynamics are not homogeneous across the lag classes:
>>> "%.3f"%sm.LR
'170.659'
>>> "%.3f"%sm.Q
'200.624'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
60
The long run distribution for states with poor (rich) neighbors has
0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in
the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the
fourth and 0.029 (0.337) in the fifth quintile.
>>> sm.S
array([[0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278],
[0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356],
[0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ],
[0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013],
[0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]])
States with incomes in the first quintile with neighbors in the
first quintile return to the first quartile after 2.298 years, after
leaving the first quintile. They enter the fourth quintile after
80.810 years after leaving the first quintile, on average.
Poor states within neighbors in the fourth quintile return to the
first quintile, on average, after 12.88 years, and would enter the
fourth quintile after 28.473 years.
>>> for f in sm.F:
... print(f)
...
[[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143]
[ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286]
[ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429]
[ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762]
[ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]]
[[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197]
[ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ]
[ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482]
[ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314]
[ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]]
[[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026]
[ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693]
[ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594]
[ 83.72288828 39.06666667 14.3 3.44668119 76.36702977]
[ 93.52288828 48.86666667 24.1 9.8 8.79255406]]
[[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142]
[ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423]
[117.76777159 23.03735526 3.94436301 15.0843986 43.57927247]
[127.89752089 32.4393006 14.56853107 4.44831643 31.63099455]
[138.24752089 42.7893006 24.91853107 10.35 4.05613474]]
[[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826]
[ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826]
[ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217]
[127.1407767 48.74107143 33.29605263 3.91777427 83.52173913]
[169.6407767 91.24107143 75.79605263 42.5 2.96521739]]
(2) Global quintiles to discretize the income data (k=5), and global
quartiles to discretize the spatial lags of incomes (m=4).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=4, variable_name='rpci')
We can also examine the cutoffs for the incomes and cutoffs for the spatial
lags:
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.91440247, 0.98583079, 1.08698351])
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.95708955 0.03544776 0.00746269 0. 0. ]
[0.05825243 0.83980583 0.10194175 0. 0. ]
[0. 0.1294964 0.76258993 0.10791367 0. ]
[0. 0.01538462 0.18461538 0.72307692 0.07692308]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.7421875 0.234375 0.0234375 0. 0. ]
[0.08550186 0.85130112 0.06319703 0. 0. ]
[0.00865801 0.06926407 0.86147186 0.05627706 0.004329 ]
[0. 0. 0.05363985 0.92337165 0.02298851]
[0. 0. 0. 0.13432836 0.86567164]]
[[0.95145631 0.04854369 0. 0. 0. ]
[0.06 0.79 0.145 0. 0.005 ]
[0.00358423 0.10394265 0.7921147 0.09677419 0.00358423]
[0. 0.01630435 0.13586957 0.75543478 0.0923913 ]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.16666667 0.66666667 0. 0.16666667 0. ]
[0.03488372 0.80232558 0.15116279 0.01162791 0. ]
[0.00840336 0.13445378 0.70588235 0.1512605 0. ]
[0. 0.01171875 0.08203125 0.87109375 0.03515625]
[0. 0. 0. 0.03434343 0.96565657]]
We now obtain 4 5*5 spatial lag conditioned transition probability
matrices instead of 5 as in case (1).
The Q and likelihood ratio statistics are still both significant.
>>> "%.3f"%sm.LR
'172.105'
>>> "%.3f"%sm.Q
'321.128'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
45
(3) We can also set the cutoffs for relative incomes and their
spatial lags manually.
For example, we want the defining cutoffs to be [0.8, 0.9, 1, 1.2],
meaning that relative incomes:
2.1 smaller than 0.8 : class 0
2.2 between 0.8 and 0.9: class 1
2.3 between 0.9 and 1.0 : class 2
2.4 between 1.0 and 1.2: class 3
2.5 larger than 1.2: class 4
>>> cc = np.array([0.8, 0.9, 1, 1.2])
>>> sm = Spatial_Markov(rpci, w, cutoffs=cc, lag_cutoffs=cc, variable_name='rpci')
>>> sm.cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.k
5
>>> sm.lag_cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.96703297 0.03296703 0. 0. 0. ]
[0.10638298 0.68085106 0.21276596 0. 0. ]
[0. 0.14285714 0.7755102 0.08163265 0. ]
[0. 0. 0.5 0.5 0. ]
[0. 0. 0. 0. 0. ]]
[[0.88636364 0.10606061 0.00757576 0. 0. ]
[0.04402516 0.89308176 0.06289308 0. 0. ]
[0. 0.05882353 0.8627451 0.07843137 0. ]
[0. 0. 0.13846154 0.86153846 0. ]
[0. 0. 0. 0. 1. ]]
[[0.78082192 0.17808219 0.02739726 0.01369863 0. ]
[0.03488372 0.90406977 0.05813953 0.00290698 0. ]
[0. 0.05919003 0.84735202 0.09034268 0.00311526]
[0. 0. 0.05811623 0.92985972 0.01202405]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.82692308 0.15384615 0. 0.01923077 0. ]
[0.0703125 0.7890625 0.125 0.015625 0. ]
[0.00295858 0.06213018 0.82248521 0.10946746 0.00295858]
[0. 0.00185529 0.07606679 0.88497217 0.03710575]
[0. 0. 0. 0.07803468 0.92196532]]
[[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0.06666667 0.9 0.03333333 0. ]
[0. 0. 0.05660377 0.90566038 0.03773585]
[0. 0. 0. 0.03932584 0.96067416]]
(4) Spatial_Markov also accept discrete time series and calculate
categorical spatial lags on which several transition probability matrices
are conditioned.
Let's still use the US state income time series to demonstrate. We first
discretize them into categories and then pass them to Spatial_Markov.
>>> import mapclassify as mc
>>> y = mc.Quantiles(rpci.flatten(), k=5).yb.reshape(rpci.shape)
>>> np.random.seed(5)
>>> sm = Spatial_Markov(y, w, discrete=True, variable_name='discretized rpci')
>>> sm.k
5
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.94787645 0.04440154 0.00772201 0. 0. ]
[0.08333333 0.81060606 0.10606061 0. 0. ]
[0. 0.12765957 0.79787234 0.07446809 0. ]
[0. 0.02777778 0.22222222 0.66666667 0.08333333]
[0. 0. 0. 0.33333333 0.66666667]]
[[0.888 0.096 0.016 0. 0. ]
[0.06049822 0.84341637 0.09608541 0. 0. ]
[0.00666667 0.10666667 0.81333333 0.07333333 0. ]
[0. 0. 0.08527132 0.86821705 0.04651163]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.65217391 0.32608696 0.02173913 0. 0. ]
[0.07446809 0.80851064 0.11170213 0. 0.00531915]
[0.01071429 0.1 0.76428571 0.11785714 0.00714286]
[0. 0.00552486 0.09392265 0.86187845 0.03867403]
[0. 0. 0. 0.13157895 0.86842105]]
[[0.91935484 0.06451613 0. 0.01612903 0. ]
[0.06796117 0.90291262 0.02912621 0. 0. ]
[0. 0.05755396 0.87769784 0.0647482 0. ]
[0. 0.02150538 0.10752688 0.80107527 0.06989247]
[0. 0. 0. 0.08064516 0.91935484]]
[[0.81818182 0.18181818 0. 0. 0. ]
[0.01754386 0.70175439 0.26315789 0.01754386 0. ]
[0. 0.14285714 0.73333333 0.12380952 0. ]
[0. 0.0042735 0.06837607 0.89316239 0.03418803]
[0. 0. 0. 0.03891051 0.96108949]]
"""
def __init__(self, y, w, k=4, m=4, permutations=0, fixed=True,
discrete=False, cutoffs=None, lag_cutoffs=None,
variable_name=None):
y = np.asarray(y)
self.fixed = fixed
self.discrete = discrete
self.cutoffs = cutoffs
self.m = m
self.lag_cutoffs = lag_cutoffs
self.variable_name = variable_name
if discrete:
merged = list(itertools.chain.from_iterable(y))
classes = np.unique(merged)
self.classes = classes
self.k = len(classes)
self.m = self.k
label_dict = dict(zip(classes, range(self.k)))
y_int = []
for yi in y:
y_int.append(list(map(label_dict.get, yi)))
self.class_ids = np.array(y_int)
self.lclass_ids = self.class_ids
else:
self.class_ids, self.cutoffs, self.k = self._maybe_classify(
y, k=k, cutoffs=self.cutoffs)
self.classes = np.arange(self.k)
classic = Markov(self.class_ids)
self.p = classic.p
self.transitions = classic.transitions
self.T, self.P = self._calc(y, w)
if permutations:
nrp = np.random.permutation
counter = 0
x2_realizations = np.zeros((permutations, 1))
for perm in range(permutations):
T, P = self._calc(nrp(y), w)
x2 = [chi2(T[i], self.transitions)[0] for i in range(self.k)]
x2s = sum(x2)
x2_realizations[perm] = x2s
if x2s >= self.x2:
counter += 1
self.x2_rpvalue = (counter + 1.0) / (permutations + 1.)
self.x2_realizations = x2_realizations
@property
def s(self):
if not hasattr(self, '_s'):
self._s = STEADY_STATE(self.p)
return self._s
@property
def S(self):
if not hasattr(self, '_S'):
S = np.zeros_like(self.p)
for i, p in enumerate(self.P):
S[i] = STEADY_STATE(p)
self._S = np.asarray(S)
return self._S
@property
def F(self):
if not hasattr(self, '_F'):
F = np.zeros_like(self.P)
for i, p in enumerate(self.P):
F[i] = fmpt(np.asmatrix(p))
self._F = np.asarray(F)
return self._F
# bickenbach and bode tests
@property
def ht(self):
if not hasattr(self, '_ht'):
self._ht = homogeneity(self.T)
return self._ht
@property
def Q(self):
if not hasattr(self, '_Q'):
self._Q = self.ht.Q
return self._Q
@property
def Q_p_value(self):
self._Q_p_value = self.ht.Q_p_value
return self._Q_p_value
@property
def LR(self):
self._LR = self.ht.LR
return self._LR
@property
def LR_p_value(self):
self._LR_p_value = self.ht.LR_p_value
return self._LR_p_value
@property
def dof_hom(self):
self._dof_hom = self.ht.dof
return self._dof_hom
# shtests
@property
def shtest(self):
if not hasattr(self, '_shtest'):
self._shtest = self._mn_test()
return self._shtest
@property
def chi2(self):
if not hasattr(self, '_chi2'):
self._chi2 = self._chi2_test()
return self._chi2
@property
def x2(self):
if not hasattr(self, '_x2'):
self._x2 = sum([c[0] for c in self.chi2])
return self._x2
@property
def x2_pvalue(self):
if not hasattr(self, '_x2_pvalue'):
self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof)
return self._x2_pvalue
@property
def x2_dof(self):
if not hasattr(self, '_x2_dof'):
k = self.k
self._x2_dof = k * (k - 1) * (k - 1)
return self._x2_dof
    def _calc(self, y, w):
        '''Helper to estimate spatial lag conditioned Markov transition
        probability matrices based on maximum likelihood techniques.

        Returns (T, P): T is the (m, k, k) array of transition counts,
        one k x k count matrix per spatial-lag class; P holds the
        corresponding row-normalized probability matrices. Also sets
        self.lclass_ids, self.lclasses and (in the continuous case)
        self.lag_cutoffs and self.m as side effects.
        '''
        # Assign each observation/period to a spatial-lag class.
        if self.discrete:
            # Categorical data: the lag is the modal category among the
            # neighbors ("tryself" breaks ties using the focal unit).
            self.lclass_ids = weights.lag_categorical(w, self.class_ids,
                                                      ties="tryself")
        else:
            # Continuous data: classify the spatially lagged values, reusing
            # (or deriving) the lag cutoffs; may update self.m.
            ly = weights.lag_spatial(w, y)
            self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
                ly, self.m, self.lag_cutoffs)
        self.lclasses = np.arange(self.m)
        # T[l, i, j] counts transitions i -> j made while the unit's lag was
        # in class l at the start (time t1) of the transition.
        T = np.zeros((self.m, self.k, self.k))
        n, t = y.shape
        for t1 in range(t - 1):
            t2 = t1 + 1
            for i in range(n):
                T[self.lclass_ids[i, t1], self.class_ids[i, t1],
                  self.class_ids[i, t2]] += 1
        # Row-normalize each conditional count matrix; all-zero rows are
        # guarded (divide by 1) and stay all-zero in P.
        P = np.zeros_like(T)
        for i, mat in enumerate(T):
            row_sum = mat.sum(axis=1)
            row_sum = row_sum + (row_sum == 0)
            p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
            P[i] = p_i
        return T, P
def _mn_test(self):
"""
helper to calculate tests of differences between steady state
distributions from the conditional and overall distributions.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [self._ssmnp_test(
self.s, self.S[i], self.T[i].sum()) for i in rn]
return mat
    def _ssmnp_test(self, p1, p2, nt):
        """
        Steady state multinomial probability difference test.

        Arguments
        ---------
        p1 : array
             (k, ), first steady state probability distribution
             (treated as the null; expected counts come from it).
        p2 : array
             (k, ), second steady state probability distribution
             (observed counts come from it).
        nt : int
             number of transitions to base the test on.

        Returns
        -------
        tuple
             (3 elements)
             (chi2 value, pvalue, degrees of freedom)
        """
        # Scale both distributions to counts, then form the classic
        # sum((O - E)^2 / E) statistic with k - 1 degrees of freedom.
        o = nt * p2
        e = nt * p1
        d = np.multiply((o - e), (o - e))
        d = d / e
        chi2 = d.sum()
        pvalue = 1 - stats.chi2.cdf(chi2, self.k - 1)
        return (chi2, pvalue, self.k - 1)
def _chi2_test(self):
"""
helper to calculate tests of differences between the conditional
transition matrices and the overall transitions matrix.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [chi2(self.T[i], self.transitions) for i in rn]
return mat
def summary(self, file_name=None):
"""
A summary method to call the Markov homogeneity test to test for
temporally lagged spatial dependence.
To learn more about the properties of the tests, refer to
:cite:`Rey2016a` and :cite:`Kang2018`.
"""
class_names = ["C%d" % i for i in range(self.k)]
regime_names = ["LAG%d" % i for i in range(self.k)]
ht = homogeneity(self.T, class_names=class_names,
regime_names=regime_names)
title = "Spatial Markov Test"
if self.variable_name:
title = title + ": " + self.variable_name
if file_name:
ht.summary(file_name=file_name, title=title)
else:
ht.summary(title=title)
def _maybe_classify(self, y, k, cutoffs):
'''Helper method for classifying continuous data.
'''
rows, cols = y.shape
if cutoffs is None:
if self.fixed:
mcyb = mc.Quantiles(y.flatten(), k=k)
yb = mcyb.yb.reshape(y.shape)
cutoffs = mcyb.bins
k = len(cutoffs)
return yb, cutoffs[:-1], k
else:
yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in
np.arange(cols)]).transpose()
return yb, None, k
else:
cutoffs = list(cutoffs) + [np.inf]
cutoffs = np.array(cutoffs)
yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape(
y.shape)
k = len(cutoffs)
return yb, cutoffs[:-1], k
def chi2(T1, T2):
    r"""
    chi-squared test of difference between two transition matrices.

    Parameters
    ----------
    T1 : array
         (k, k), matrix of transitions (counts).
    T2 : array
         (k, k), matrix of transitions (counts) to use to form the
         probabilities under the null.

    Returns
    -------
       : tuple
         (3 elements).
         (chi2 value, pvalue, degrees of freedom).

    Examples
    --------
    >>> import libpysal
    >>> from giddy.markov import Spatial_Markov, chi2
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> years = list(range(1929, 2010))
    >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
    >>> rpci = pci/(pci.mean(axis=0))
    >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
    >>> w.transform='r'
    >>> sm = Spatial_Markov(rpci, w, fixed=True)
    >>> T1 = sm.T[0]
    >>> T1
    array([[562.,  22.,   1.,   0.],
           [ 12., 201.,  22.,   0.],
           [  0.,  17.,  97.,   4.],
           [  0.,   0.,   3.,  19.]])
    >>> T2 = sm.transitions
    >>> T2
    array([[884.,  77.,   4.,   0.],
           [ 68., 794.,  87.,   3.],
           [  1.,  92., 815.,  51.],
           [  1.,   0.,  60., 903.]])
    >>> chi2(T1,T2)
    (23.39728441473295, 0.005363116704861337, 9)

    Notes
    -----
    Second matrix is used to form the probabilities under the null.
    Marginal sums from first matrix are distributed across these probabilities
    under the null. In other words the observed transitions are taken from T1
    while the expected transitions are formed as follows

    .. math::

            E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}

    Degrees of freedom corrected for any rows in either T1 or T2 that have
    zero total transitions.
    """
    rs2 = T2.sum(axis=1)
    rs1 = T1.sum(axis=1)
    rs2nz = rs2 > 0
    rs1nz = rs1 > 0
    dof1 = sum(rs1nz)
    dof2 = sum(rs2nz)
    # Guard all-zero rows so the division below is safe; such rows end up
    # contributing nothing to the statistic.
    rs2 = rs2 + (rs2 == 0)
    dof = (dof1 - 1) * (dof2 - 1)
    # Row-normalized null probabilities and expected counts. Plain ndarray
    # matmul (@) replaces the deprecated np.matrix arithmetic of the
    # original; the numeric results are identical.
    p = np.diag(1 / rs2) @ np.asarray(T2)
    E = np.diag(rs1) @ p
    num = T1 - E
    num = np.multiply(num, num)
    # Guard zero expected cells (these correspond to zero observed cells).
    E = E + (E == 0)
    chi2 = num / E
    chi2 = chi2.sum()
    pvalue = 1 - stats.chi2.cdf(chi2, dof)
    return chi2, pvalue, dof
class LISA_Markov(Markov):
    """
    Markov for Local Indicators of Spatial Association

    Parameters
    ----------
    y  : array
         (n, t), n cross-sectional units observed over t time
         periods.
    w  : W
         spatial weights object.
    permutations : int, optional
         number of permutations used to determine LISA
         significance (the default is 0).
    significance_level : float, optional
         significance level (two-sided) for filtering
         significant LISA endpoints in a transition (the
         default is 0.05).
    geoda_quads : bool
         If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4.
         If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4.
         (the default is False).

    Attributes
    ----------
    chi_2 : tuple
         (3 elements)
         (chi square test statistic, p-value, degrees of freedom) for
         test that dynamics of y are independent of dynamics of wy.
    classes : array
         (4, 1)
         1=HH, 2=LH, 3=LL, 4=HL (own, lag)
         1=HH, 2=LL, 3=LH, 4=HL (own, lag) (if geoda_quads=True)
    expected_t : array
         (4, 4), expected number of transitions under the null that
         dynamics of y are independent of dynamics of wy.
    move_types : matrix
         (n, t-1), integer values indicating which type of LISA
         transition occurred (q1 is quadrant in period 1, q2 is
         quadrant in period 2).

         .. table:: Move Types

              == ==  =========
              q1 q2  move_type
              == ==  =========
              1  1   1
              1  2   2
              1  3   3
              1  4   4
              2  1   5
              2  2   6
              2  3   7
              2  4   8
              3  1   9
              3  2   10
              3  3   11
              3  4   12
              4  1   13
              4  2   14
              4  3   15
              4  4   16
              == ==  =========

    p : array
        (k, k), transition probability matrix.
    p_values : matrix
        (n, t), LISA p-values for each end point (if permutations >
        0).
    significant_moves : matrix
        (n, t-1), integer values indicating the type and
        significance of a LISA transition. st = 1 if
        significant in period t, else st=0 (if permutations >
        0).

         .. Table:: Significant Moves1

              ===============  ===================
              (s1,s2)          move_type
              ===============  ===================
              (1,1)            [1, 16]
              (1,0)            [17, 32]
              (0,1)            [33, 48]
              (0,0)            [49, 64]
              ===============  ===================

         .. Table:: Significant Moves2

              == ==  ==  ==  =========
              q1 q2  s1  s2  move_type
              == ==  ==  ==  =========
              1  1   1   1   1
              1  2   1   1   2
              1  3   1   1   3
              1  4   1   1   4
              2  1   1   1   5
              2  2   1   1   6
              2  3   1   1   7
              2  4   1   1   8
              3  1   1   1   9
              3  2   1   1   10
              3  3   1   1   11
              3  4   1   1   12
              4  1   1   1   13
              4  2   1   1   14
              4  3   1   1   15
              4  4   1   1   16
              1  1   1   0   17
              1  2   1   0   18
              .  .   .   .   .
              .  .   .   .   .
              4  3   1   0   31
              4  4   1   0   32
              1  1   0   1   33
              1  2   0   1   34
              .  .   .   .   .
              .  .   .   .   .
              4  3   0   1   47
              4  4   0   1   48
              1  1   0   0   49
              1  2   0   0   50
              .  .   .   .   .
              .  .   .   .   .
              4  3   0   0   63
              4  4   0   0   64
              == ==  ==  ==  =========

    steady_state : array
        (k, ), ergodic distribution.
    transitions : array
        (4, 4), count of transitions between each state i and j.
    spillover : array
        (n, 1) binary array, locations that were not part of a
        cluster in period 1 but joined a prexisting cluster in
        period 2.

    Examples
    --------
    >>> import libpysal
    >>> import numpy as np
    >>> from giddy.markov import LISA_Markov
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> years = list(range(1929, 2010))
    >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
    >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
    >>> lm = LISA_Markov(pci,w)
    >>> lm.classes
    array([1, 2, 3, 4])
    >>> lm.steady_state
    array([0.28561505, 0.14190226, 0.40493672, 0.16754598])
    >>> lm.transitions
    array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
           [4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
           [5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
           [3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])
    >>> lm.p
    array([[0.92985458, 0.03763901, 0.00342173, 0.02908469],
           [0.07481752, 0.85766423, 0.06569343, 0.00182482],
           [0.00333333, 0.02266667, 0.948     , 0.026     ],
           [0.04815409, 0.00160514, 0.06420546, 0.88603531]])
    >>> lm.move_types[0,:3]
    array([11, 11, 11])
    >>> lm.move_types[0,-3:]
    array([11, 11, 11])

    Now consider only moves with one, or both, of the LISA end points being
    significant

    >>> np.random.seed(10)
    >>> lm_random = LISA_Markov(pci, w, permutations=99)
    >>> lm_random.significant_moves[0, :3]
    array([11, 11, 11])
    >>> lm_random.significant_moves[0,-3:]
    array([59, 43, 27])

    Any value less than 49 indicates at least one of the LISA end points was
    significant. So for example, the first spatial unit experienced a
    transition of type 11 (LL, LL) during the first three and last tree
    intervals (according to lm.move_types), however, the last three of these
    transitions involved insignificant LISAS in both the start and ending year
    of each transition.

    Test whether the moves of y are independent of the moves of wy

    >>> "Chi2: %8.3f, p: %5.2f, dof: %d" % lm.chi_2
    'Chi2: 1058.208, p:  0.00, dof: 9'

    Actual transitions of LISAs

    >>> lm.transitions
    array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
           [4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
           [5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
           [3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])

    Expected transitions of LISAs under the null y and wy are moving
    independently of one another

    >>> lm.expected_t
    array([[1.12328098e+03, 1.15377356e+01, 3.47522158e-01, 3.38337644e+01],
           [3.50272664e+00, 5.28473882e+02, 1.59178880e+01, 1.05503814e-01],
           [1.53878082e-01, 2.32163556e+01, 1.46690710e+03, 9.72266513e+00],
           [9.60775143e+00, 9.86856346e-02, 6.23537392e+00, 6.07058189e+02]])

    If the LISA classes are to be defined according to GeoDa, the `geoda_quad`
    option has to be set to true

    >>> lm.q[0:5,0]
    array([3, 2, 3, 1, 4])
    >>> lm = LISA_Markov(pci,w, geoda_quads=True)
    >>> lm.q[0:5,0]
    array([2, 3, 2, 1, 4])

    """

    def __init__(self, y, w, permutations=0,
                 significance_level=0.05, geoda_quads=False):
        # Transpose so each yi below is one time period across all n units.
        y = y.transpose()
        pml = Moran_Local
        gq = geoda_quads
        ml = [pml(yi, w, permutations=permutations, geoda_quads=gq)
              for yi in y]
        q = np.array([mli.q for mli in ml]).transpose()
        classes = np.arange(1, 5)  # no guarantee all 4 quadrants are visited
        Markov.__init__(self, q, classes)
        self.q = q
        self.w = w
        n, k = q.shape
        k -= 1
        # (was assigned twice in the original; one assignment suffices)
        self.significance_level = significance_level
        move_types = np.zeros((n, k), int)
        sm = np.zeros((n, k), int)
        if permutations > 0:
            p = np.array([mli.p_z_sim for mli in ml]).transpose()
            self.p_values = p
            pb = p <= significance_level
        else:
            # y was transposed above, so y.T has the (n, t) shape of q;
            # with no permutations all endpoints count as insignificant.
            pb = np.zeros_like(y.T)
        for t in range(k):
            origin = q[:, t]
            dest = q[:, t + 1]
            p_origin = pb[:, t]
            p_dest = pb[:, t + 1]
            for r in range(n):
                move_types[r, t] = TT[origin[r], dest[r]]
                key = (origin[r], dest[r], p_origin[r], p_dest[r])
                sm[r, t] = MOVE_TYPES[key]
        if permutations > 0:
            self.significant_moves = sm
        self.move_types = move_types
        # Null of own and lag moves being independent: build the Kronecker
        # product of the two marginal (above/below mean) chains, reorder it
        # with A to match the quadrant coding, and compare the implied
        # expected transitions with the observed ones.
        ybar = y.mean(axis=0)
        r = y / ybar
        ylag = np.array([weights.lag_spatial(w, yt) for yt in y])
        rlag = ylag / ybar
        rc = r < 1.
        rlagc = rlag < 1.
        markov_y = Markov(rc)
        markov_ylag = Markov(rlagc)
        A = np.matrix([[1, 0, 0, 0],
                       [0, 0, 1, 0],
                       [0, 0, 0, 1],
                       [0, 1, 0, 0]])
        kp = A * np.kron(markov_y.p, markov_ylag.p) * A.T
        trans = self.transitions.sum(axis=1)
        t1 = np.diag(trans) * kp
        t2 = self.transitions
        t1 = t1.getA()
        self.chi_2 = chi2(t2, t1)
        self.expected_t = t1
        self.permutations = permutations

    def spillover(self, quadrant=1, neighbors_on=False):
        """
        Detect spillover locations for diffusion in LISA Markov.

        Parameters
        ----------
        quadrant     : int
                       which quadrant in the scatterplot should form the core
                       of a cluster.
        neighbors_on : binary
                       If false, then only the 1st order neighbors of a core
                       location are included in the cluster.
                       If true, neighbors of cluster core 1st order neighbors
                       are included in the cluster.

        Returns
        -------
        results      : dictionary
                       two keys - values pairs:
                       'components' - array (n, t)
                       values are integer ids (starting at 1) indicating which
                       component/cluster observation i in period t belonged to.
                       'spill_over' - array (n, t-1)
                       binary values indicating if the location was a
                       spill-over location that became a new member of a
                       previously existing cluster.

        Examples
        --------
        >>> import libpysal
        >>> from giddy.markov import LISA_Markov
        >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
        >>> years = list(range(1929, 2010))
        >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
        >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
        >>> np.random.seed(10)
        >>> lm_random = LISA_Markov(pci, w, permutations=99)
        >>> r = lm_random.spillover()
        >>> (r['components'][:, 12] > 0).sum()
        17
        >>> (r['components'][:, 13]>0).sum()
        23
        >>> (r['spill_over'][:,12]>0).sum()
        6

        Including neighbors of core neighbors

        >>> rn = lm_random.spillover(neighbors_on=True)
        >>> (rn['components'][:, 12] > 0).sum()
        26
        >>> (rn["components"][:, 13] > 0).sum()
        34
        >>> (rn["spill_over"][:, 12] > 0).sum()
        8

        """
        n, k = self.q.shape
        if self.permutations:
            spill_over = np.zeros((n, k - 1))
            components = np.zeros((n, k))
            i2id = {}  # handle string keys
            for key in list(self.w.neighbors.keys()):
                idx = self.w.id2i[key]
                i2id[idx] = key
            # Core observations: in the target quadrant AND significant.
            sig_lisas = (self.q == quadrant) \
                * (self.p_values <= self.significance_level)
            sig_ids = [np.nonzero(
                sig_lisas[:, i])[0].tolist() for i in range(k)]
            neighbors = self.w.neighbors
            for t in range(k - 1):
                s1 = sig_ids[t]
                s2 = sig_ids[t + 1]
                # Build the cluster graph for period t (and optionally
                # extend to neighbors of neighbors).
                g1 = Graph(undirected=True)
                for i in s1:
                    for neighbor in neighbors[i2id[i]]:
                        g1.add_edge(i2id[i], neighbor, 1.0)
                        if neighbors_on:
                            for nn in neighbors[neighbor]:
                                g1.add_edge(neighbor, nn, 1.0)
                components1 = g1.connected_components(op=gt)
                components1 = [list(c.nodes) for c in components1]
                # Same for period t + 1.
                g2 = Graph(undirected=True)
                for i in s2:
                    for neighbor in neighbors[i2id[i]]:
                        g2.add_edge(i2id[i], neighbor, 1.0)
                        if neighbors_on:
                            for nn in neighbors[neighbor]:
                                g2.add_edge(neighbor, nn, 1.0)
                components2 = g2.connected_components(op=gt)
                components2 = [list(c.nodes) for c in components2]
                c2 = []
                c1 = []
                for c in components2:
                    c2.extend(c)
                for c in components1:
                    c1.extend(c)
                new_ids = [j for j in c2 if j not in c1]
                spill_ids = []
                for j in new_ids:
                    # find j's component in period 2
                    cj = [c for c in components2 if j in c][0]
                    # for members of j's component in period 2, check if they
                    # belonged to any components in period 1
                    for i in cj:
                        if i in c1:
                            spill_ids.append(j)
                            break
                for spill_id in spill_ids:
                    # (renamed from ``id`` to avoid shadowing the builtin)
                    idx = self.w.id2i[spill_id]
                    spill_over[idx, t] = 1
                for c, component in enumerate(components1):
                    for i in component:
                        ii = self.w.id2i[i]
                        components[ii, t] = c + 1
            results = {}
            results['components'] = components
            results['spill_over'] = spill_over
            return results
        else:
            return None
def kullback(F):
    """
    Kullback information based test of Markov Homogeneity.

    Parameters
    ----------
    F : array
        (s, r, r), values are transitions (not probabilities) for
        s strata, r initial states, r terminal states.

    Returns
    -------
    Results : dictionary
              (key - value)

              Conditional homogeneity - (float) test statistic for homogeneity
              of transition probabilities across strata.

              Conditional homogeneity pvalue - (float) p-value for test
              statistic.

              Conditional homogeneity dof - (int) degrees of freedom =
              r(s-1)(r-1).

    Notes
    -----
    Based on :cite:`Kullback1962`.
    Example below is taken from Table 9.2 .

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.markov import kullback
    >>> s1 = np.array([
    ...          [ 22, 11, 24,  2,  2,  7],
    ...          [ 5, 23, 15,  3, 42,  6],
    ...          [ 4, 21, 190, 25, 20, 34],
    ...          [0, 2, 14, 56, 14, 28],
    ...          [32, 15, 20, 10, 56, 14],
    ...          [5, 22, 31, 18, 13, 134]
    ...      ])
    >>> s2 = np.array([
    ...         [3, 6, 9, 3, 0, 8],
    ...         [1, 9, 3, 12, 27, 5],
    ...         [2, 9, 208, 32, 5, 18],
    ...         [0, 14, 32, 108, 40, 40],
    ...         [22, 14, 9, 26, 224, 14],
    ...         [1, 5, 13, 53, 13, 116]
    ...     ])
    >>>
    >>> F = np.array([s1, s2])
    >>> res = kullback(F)
    >>> "%8.3f"%res['Conditional homogeneity']
    ' 160.961'
    >>> "%d"%res['Conditional homogeneity dof']
    '30'
    >>> "%3.1f"%res['Conditional homogeneity pvalue']
    '0.0'

    """
    # Replace zero counts with 1 inside the log so 0 * log(0) terms
    # contribute 0 rather than NaN; the multiplying count is still 0.
    F1 = F == 0
    F1 = F + F1
    FLF = F * np.log(F1)
    T1 = 2 * FLF.sum()
    # Counts aggregated over strata (J, K margins).
    FdJK = F.sum(axis=0)
    FdJK1 = FdJK + (FdJK == 0)
    FdJKLFdJK = FdJK * np.log(FdJK1)
    T2 = 2 * FdJKLFdJK.sum()
    # Row totals of the aggregated matrix.
    FdJd = F.sum(axis=0).sum(axis=1)
    FdJd1 = FdJd + (FdJd == 0)
    T3 = 2 * (FdJd * np.log(FdJd1)).sum()
    # Per-stratum row totals.
    FIJd = F[:, :].sum(axis=1)
    FIJd1 = FIJd + (FIJd == 0)
    T4 = 2 * (FIJd * np.log(FIJd1)).sum()
    # NOTE: a grand-total term (2 * N * log(N)) was computed here in the
    # original but never used in the statistic; it has been removed.
    s, r, _ = F.shape
    chom = T1 - T4 - T2 + T3
    cdof = r * (s - 1) * (r - 1)
    results = {}
    results['Conditional homogeneity'] = chom
    results['Conditional homogeneity dof'] = cdof
    results['Conditional homogeneity pvalue'] = 1 - stats.chi2.cdf(chom, cdof)
    return results
def prais(pmat):
    """
    Prais conditional mobility measure.

    Parameters
    ----------
    pmat : matrix
           (k, k), Markov probability transition matrix.

    Returns
    -------
    pr   : matrix
           (1, k), conditional mobility measures for each of the k classes.

    Notes
    -----
    Prais' conditional mobility measure for a class is defined as:

    .. math::

            pr_i = 1 - p_{i,i}

    i.e. one minus the probability of remaining in the class.

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.markov import prais
    >>> p = np.array([[0.9, 0.1], [0.2, 0.8]])
    >>> prais(p)
    array([0.1, 0.2])
    """
    # Complement of the diagonal retention probabilities.
    transition_probs = np.array(pmat)
    staying = np.diag(transition_probs)
    return 1 - staying
class Homogeneity_Results:
    """
    Wrapper class to present homogeneity results.

    Parameters
    ----------
    transition_matrices : list
                          of transition matrices for regimes, all matrices must
                          have same size (r, c). r is the number of rows in
                          the transition matrix and c is the number of columns
                          in the transition matrix.
    regime_names        : sequence
                          Labels for the regimes. Defaults to an empty list
                          (regimes are then labeled 0..m-1 in summaries).
    class_names         : sequence
                          Labels for the classes/states of the Markov chain.
                          Defaults to an empty list.
    title               : string
                          Title of the table.

    Notes
    -----
    Degrees of freedom adjustment follow the approach in :cite:`Bickenbach2003`.

    Examples
    --------
    See Spatial_Markov above.

    """

    def __init__(self, transition_matrices, regime_names=None,
                 class_names=None, title="Markov Homogeneity Test"):
        # None sentinels replace the mutable [] default arguments of the
        # original; callers passing nothing still get empty lists stored.
        self._homogeneity(transition_matrices)
        self.regime_names = [] if regime_names is None else regime_names
        self.class_names = [] if class_names is None else class_names
        self.title = title

    def _homogeneity(self, transition_matrices):
        """Compute the Q and LR homogeneity statistics and their tables."""
        # form null transition probability matrix from the pooled counts
        M = np.array(transition_matrices)
        m, r, k = M.shape
        self.k = k
        B = np.zeros((r, m))
        T = M.sum(axis=0)
        self.t_total = T.sum()
        n_i = T.sum(axis=1)
        A_i = (T > 0).sum(axis=1)
        A_im = np.zeros((r, m))
        # pooled transition probabilities; all-zero rows guarded against /0
        p_ij = np.dot(np.diag(1. / (n_i + (n_i == 0) * 1.)), T)
        den = p_ij + 1. * (p_ij == 0)
        b_i = np.zeros_like(A_i)
        p_ijm = np.zeros_like(M)
        Q = 0.0
        LR = 0.0
        lr_table = np.zeros_like(M)
        q_table = np.zeros_like(M)
        # One pass per regime; ``reg`` replaces the original's confusing
        # reuse of ``m`` as both a shape and a loop counter.
        for reg, nijm in enumerate(M):
            nim = nijm.sum(axis=1)
            B[:, reg] = 1. * (nim > 0)
            b_i = b_i + 1. * (nim > 0)
            p_ijm[reg] = np.dot(np.diag(1. / (nim + (nim == 0) * 1.)), nijm)
            num = (p_ijm[reg] - p_ij)**2
            ratio = num / den
            qijm = np.dot(np.diag(nim), ratio)
            q_table[reg] = qijm
            Q = Q + qijm.sum()
            # only use nonzero pijm in lr test
            mask = (nijm > 0) * (p_ij > 0)
            A_im[:, reg] = (nijm > 0).sum(axis=1)
            unmask = 1.0 * (mask == 0)
            ratio = (mask * p_ijm[reg] + unmask) / (mask * p_ij + unmask)
            lr = nijm * np.log(ratio)
            LR = LR + lr.sum()
            lr_table[reg] = 2 * lr
        # b_i is the number of regimes that have non-zero observations in row i
        # A_i is the number of non-zero elements in row i of the aggregated
        # transition matrix
        self.dof = int(((b_i - 1) * (A_i - 1)).sum())
        self.Q = Q
        self.Q_p_value = 1 - stats.chi2.cdf(self.Q, self.dof)
        self.LR = LR * 2.
        self.LR_p_value = 1 - stats.chi2.cdf(self.LR, self.dof)
        self.A = A_i
        self.A_im = A_im
        self.B = B
        self.b_i = b_i
        self.LR_table = lr_table
        self.Q_table = q_table
        self.m = m
        self.p_h0 = p_ij
        self.p_h1 = p_ijm

    def summary(self, file_name=None, title="Markov Homogeneity Test"):
        """Print a formatted report; optionally write a LaTeX table."""
        regime_names = ["%d" % i for i in range(self.m)]
        if self.regime_names:
            regime_names = self.regime_names
        cols = ["P(%s)" % str(regime) for regime in regime_names]
        if not self.class_names:
            self.class_names = list(range(self.k))
        max_col = max([len(col) for col in cols])
        col_width = max([5, max_col])  # probabilities have 5 chars
        n_tabs = self.k
        width = n_tabs * 4 + (self.k + 1) * col_width
        lead = "-" * width
        head = title.center(width)
        contents = [lead, head, lead]
        l = "Number of regimes: %d" % int(self.m)
        k = "Number of classes: %d" % int(self.k)
        r = "Regime names: "
        r += ", ".join(regime_names)
        t = "Number of transitions: %d" % int(self.t_total)
        contents.append(k)
        contents.append(t)
        contents.append(l)
        contents.append(r)
        contents.append(lead)
        h = "%7s %20s %20s" % ('Test', 'LR', 'Chi-2')
        contents.append(h)
        stat = "%7s %20.3f %20.3f" % ('Stat.', self.LR, self.Q)
        contents.append(stat)
        stat = "%7s %20d %20d" % ('DOF', self.dof, self.dof)
        contents.append(stat)
        stat = "%7s %20.3f %20.3f" % ('p-value', self.LR_p_value,
                                      self.Q_p_value)
        contents.append(stat)
        print(("\n".join(contents)))
        print(lead)
        # Null (pooled) transition probabilities.
        cols = ["P(%s)" % str(regime) for regime in self.regime_names]
        if not self.class_names:
            self.class_names = list(range(self.k))
        cols.extend(["%s" % str(cname) for cname in self.class_names])
        max_col = max([len(col) for col in cols])
        col_width = max([5, max_col])  # probabilities have 5 chars
        p0 = []
        line0 = ['{s: <{w}}'.format(s="P(H0)", w=col_width)]
        line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname in
                       self.class_names]))
        print((" ".join(line0)))
        p0.append("&".join(line0))
        for i, row in enumerate(self.p_h0):
            line = ["%*s" % (col_width, str(self.class_names[i]))]
            line.extend(["%*.3f" % (col_width, v) for v in row])
            print((" ".join(line)))
            p0.append("&".join(line))
        pmats = [p0]
        print(lead)
        # One probability table per regime under the alternative.
        for r, p1 in enumerate(self.p_h1):
            p0 = []
            line0 = ['{s: <{w}}'.format(s="P(%s)" %
                                        regime_names[r], w=col_width)]
            line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname
                           in self.class_names]))
            print((" ".join(line0)))
            p0.append("&".join(line0))
            for i, row in enumerate(p1):
                line = ["%*s" % (col_width, str(self.class_names[i]))]
                line.extend(["%*.3f" % (col_width, v) for v in row])
                print((" ".join(line)))
                p0.append("&".join(line))
            pmats.append(p0)
            print(lead)
        if file_name:
            # Emit the same report as a LaTeX tabular environment.
            k = self.k
            ks = str(k + 1)
            with open(file_name, 'w') as f:
                c = []
                fmt = "r" * (k + 1)
                s = "\\begin{tabular}{|%s|}\\hline\n" % fmt
                s += "\\multicolumn{%s}{|c|}{%s}" % (ks, title)
                c.append(s)
                s = "Number of classes: %d" % int(self.k)
                c.append("\\hline\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Number of transitions: %d" % int(self.t_total)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Number of regimes: %d" % int(self.m)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Regime names: "
                s += ", ".join(regime_names)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "\\hline\\multicolumn{2}{|l}{%s}" % ("Test")
                s += "&\\multicolumn{2}{r}{LR}&\\multicolumn{2}{r|}{Q}"
                c.append(s)
                s = "Stat."
                s = "\\multicolumn{2}{|l}{%s}" % (s)
                s += "&\\multicolumn{2}{r}{%.3f}" % self.LR
                s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q
                c.append(s)
                s = "\\multicolumn{2}{|l}{%s}" % ("DOF")
                s += "&\\multicolumn{2}{r}{%d}" % int(self.dof)
                s += "&\\multicolumn{2}{r|}{%d}" % int(self.dof)
                c.append(s)
                s = "\\multicolumn{2}{|l}{%s}" % ("p-value")
                s += "&\\multicolumn{2}{r}{%.3f}" % self.LR_p_value
                s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q_p_value
                c.append(s)
                s1 = "\\\\\n".join(c)
                s1 += "\\\\\n"
                c = []
                for mat in pmats:
                    c.append("\\hline\n")
                    for row in mat:
                        c.append(row + "\\\\\n")
                c.append("\\hline\n")
                c.append("\\end{tabular}")
                s2 = "".join(c)
                f.write(s1 + s2)
class FullRank_Markov:
    """
    Full Rank Markov in which ranks are considered as Markov states rather
    than quantiles or other discretized classes. This is one way to avoid
    issues associated with discretization.

    Parameters
    ----------
    y            : array
                   (n, t) with t>>n, one row per observation (n total),
                   one column recording the value of each observation,
                   with as many columns as time periods.

    Attributes
    ----------
    ranks        : array
                   ranks of the original y array (by columns): higher values
                   rank higher, e.g. the largest value in a column ranks 1.
    p            : array
                   (n, n), transition probability matrix for Full
                   Rank Markov.
    steady_state : array
                   (n, ), ergodic distribution.
    transitions  : array
                   (n, n), count of transitions between each rank i and j
    fmpt         : array
                   (n, n), first mean passage times.
    sojourn_time : array
                   (n, ), sojourn times.

    Notes
    -----
    Refer to :cite:`Rey2014a` Equation (11) for details. Ties are resolved by
    assigning distinct ranks, corresponding to the order that the values occur
    in each cross section.

    Examples
    --------
    US nominal per capita income 48 states 81 years 1929-2009

    >>> from giddy.markov import FullRank_Markov
    >>> import libpysal as ps
    >>> import numpy as np
    >>> f = ps.io.open(ps.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]).transpose()
    >>> m = FullRank_Markov(pci)
    >>> m.ranks
    array([[45, 45, 44, ..., 41, 40, 39],
           [24, 25, 25, ..., 36, 38, 41],
           [46, 47, 45, ..., 43, 43, 43],
           ...,
           [34, 34, 34, ..., 47, 46, 42],
           [17, 17, 22, ..., 25, 26, 25],
           [16, 18, 19, ...,  6,  6,  7]])
    >>> m.transitions
    array([[66.,  5.,  5., ...,  0.,  0.,  0.],
           [ 8., 51.,  9., ...,  0.,  0.,  0.],
           [ 2., 13., 44., ...,  0.,  0.,  0.],
           ...,
           [ 0.,  0.,  0., ..., 40., 17.,  0.],
           [ 0.,  0.,  0., ..., 15., 54.,  2.],
           [ 0.,  0.,  0., ...,  2.,  1., 77.]])
    >>> m.p[0, :5]
    array([0.825 , 0.0625, 0.0625, 0.025 , 0.025 ])
    >>> m.fmpt[0, :5]
    array([48.        , 87.96280048, 68.1089084 , 58.83306575, 41.77250827])
    >>> m.sojourn_time[:5]
    array([5.71428571, 2.75862069, 2.22222222, 1.77777778, 1.66666667])
    """

    def __init__(self, y):
        y = np.asarray(y)
        # Break ties by assigning distinct ranks in the order the values
        # occur within each cross section (column of y).
        ascending = np.array(
            [rankdata(col, method='ordinal') for col in y.T]).T
        # Flip the direction so that rank 1 marks the largest value.
        self.ranks = ascending.shape[0] - ascending + 1
        # Delegate transition counting and ML estimation to the classic chain.
        chain = Markov(self.ranks)
        self.p = chain.p
        self.transitions = chain.transitions

    @property
    def steady_state(self):
        # Ergodic distribution; computed lazily and cached.
        try:
            return self._steady_state
        except AttributeError:
            self._steady_state = STEADY_STATE(self.p)
            return self._steady_state

    @property
    def fmpt(self):
        # First mean passage times; computed lazily and cached.
        try:
            return self._fmpt
        except AttributeError:
            self._fmpt = fmpt(self.p)
            return self._fmpt

    @property
    def sojourn_time(self):
        # Expected time spent in each rank before leaving it; cached.
        try:
            return self._st
        except AttributeError:
            self._st = sojourn_time(self.p)
            return self._st
def sojourn_time(p):
    """
    Calculate sojourn time based on a given transition probability matrix.

    Parameters
    ----------
    p        : array
               (k, k), a Markov transition probability matrix.

    Returns
    -------
             : array
               (k, ), sojourn times. Each element is the expected time a
               Markov chain spends in each state before leaving that state.
               Absorbing states (self-transition probability 1) have an
               infinite sojourn time.

    Notes
    -----
    Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
    chains.

    Examples
    --------
    >>> from giddy.markov import sojourn_time
    >>> import numpy as np
    >>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
    >>> sojourn_time(p)
    array([2., 1., 2.])
    """
    p = np.asarray(p)
    pii = p.diagonal()
    if not (1 - pii).all():
        print("Sojourn times are infinite for absorbing states!")
    # 1/(1-pii) diverges for absorbing states. Assign inf explicitly instead
    # of dividing by zero, which previously emitted a RuntimeWarning.
    st = np.full(pii.shape, np.inf)
    absorbing = pii == 1
    st[~absorbing] = 1 / (1 - pii[~absorbing])
    return st
class GeoRank_Markov:
    """
    Geographic Rank Markov.
    Geographic units are considered as Markov states.

    Parameters
    ----------
    y            : array
                   (n, t) with t>>n, one row per observation (n total),
                   one column recording the value of each observation,
                   with as many columns as time periods.

    Attributes
    ----------
    p            : array
                   (n, n), transition probability matrix for
                   geographic rank Markov.
    steady_state : array
                   (n, ), ergodic distribution.
    transitions  : array
                   (n, n), count of rank transitions between each
                   geographic unit i and j.
    fmpt         : array
                   (n, n), first mean passage times.
    sojourn_time : array
                   (n, ), sojourn times.

    Notes
    -----
    Refer to :cite:`Rey2014a` Equation (13)-(16) for details. Ties are
    resolved by assigning distinct ranks, corresponding to the order
    that the values occur in each cross section.

    Examples
    --------
    US nominal per capita income 48 states 81 years 1929-2009

    >>> from giddy.markov import GeoRank_Markov
    >>> import libpysal as ps
    >>> import numpy as np
    >>> f = ps.io.open(ps.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]).transpose()
    >>> m = GeoRank_Markov(pci)
    >>> m.transitions
    array([[38.,  0.,  8., ...,  0.,  0.,  0.],
           [ 0., 15.,  0., ...,  0.,  1.,  0.],
           [ 6.,  0., 44., ...,  5.,  0.,  0.],
           ...,
           [ 2.,  0.,  5., ..., 34.,  0.,  0.],
           [ 0.,  0.,  0., ...,  0., 18.,  2.],
           [ 0.,  0.,  0., ...,  0.,  3., 14.]])
    >>> m.p
    array([[0.475 , 0.    , 0.1   , ..., 0.    , 0.    , 0.    ],
           [0.    , 0.1875, 0.    , ..., 0.    , 0.0125, 0.    ],
           [0.075 , 0.    , 0.55  , ..., 0.0625, 0.    , 0.    ],
           ...,
           [0.025 , 0.    , 0.0625, ..., 0.425 , 0.    , 0.    ],
           [0.    , 0.    , 0.    , ..., 0.    , 0.225 , 0.025 ],
           [0.    , 0.    , 0.    , ..., 0.    , 0.0375, 0.175 ]])
    >>> m.fmpt
    array([[ 48.        ,  63.35532038,  92.75274652, ...,  82.47515731,
             71.01114491,  68.65737127],
           [108.25928005,  48.        , 127.99032986, ...,  92.03098299,
             63.36652935,  61.82733039],
           [ 76.96801786,  64.7713783 ,  48.        , ...,  73.84595169,
             72.24682723,  69.77497173],
           ...,
           [ 93.3107474 ,  62.47670463, 105.80634118, ...,  48.        ,
             69.30121319,  67.08838421],
           [113.65278078,  61.1987031 , 133.57991745, ...,  96.0103924 ,
             48.        ,  56.74165107],
           [114.71894813,  63.4019776 , 134.73381719, ...,  97.287895  ,
             61.45565054,  48.        ]])
    >>> m.sojourn_time
    array([ 1.9047619 ,  1.23076923,  2.22222222,  1.73913043,  1.15942029,
            3.80952381,  1.70212766,  1.25      ,  1.31147541,  1.11111111,
            1.73913043,  1.37931034,  1.17647059,  1.21212121,  1.33333333,
            1.37931034,  1.09589041,  2.10526316,  2.        ,  1.45454545,
            1.26984127, 26.66666667,  1.19402985,  1.23076923,  1.09589041,
            1.56862745,  1.26984127,  2.42424242,  1.50943396,  2.        ,
            1.29032258,  1.09589041,  1.6       ,  1.42857143,  1.25      ,
            1.45454545,  1.29032258,  1.6       ,  1.17647059,  1.56862745,
            1.25      ,  1.37931034,  1.45454545,  1.42857143,  1.29032258,
            1.73913043,  1.29032258,  1.21212121])
    """

    def __init__(self, y):
        y = np.asarray(y)
        # Break ties by assigning distinct ranks in the order the values
        # occur within each cross section (column of y).
        within_ranks = np.array(
            [rankdata(col, method='ordinal') for col in y.T]).T
        # Invert the ranking: for each rank position, record which
        # geographic unit (1-based index) occupies it in that period.
        geo_ranks = np.argsort(within_ranks, axis=0) + 1
        # Delegate transition counting and ML estimation to the classic chain.
        chain = Markov(geo_ranks)
        self.p = chain.p
        self.transitions = chain.transitions

    @property
    def steady_state(self):
        # Ergodic distribution; computed lazily and cached.
        try:
            return self._steady_state
        except AttributeError:
            self._steady_state = STEADY_STATE(self.p)
            return self._steady_state

    @property
    def fmpt(self):
        # First mean passage times; computed lazily and cached.
        try:
            return self._fmpt
        except AttributeError:
            self._fmpt = fmpt(self.p)
            return self._fmpt

    @property
    def sojourn_time(self):
        # Expected time spent in each state before leaving it; cached.
        try:
            return self._st
        except AttributeError:
            self._st = sojourn_time(self.p)
            return self._st
|
pysal/giddy | giddy/markov.py | sojourn_time | python | def sojourn_time(p):
p = np.asarray(p)
pii = p.diagonal()
if not (1 - pii).all():
print("Sojourn times are infinite for absorbing states!")
return 1 / (1 - pii) | Calculate sojourn time based on a given transition probability matrix.
Parameters
----------
p : array
(k, k), a Markov transition probability matrix.
Returns
-------
: array
(k, ), sojourn times. Each element is the expected time a Markov
chain spends in each states before leaving that state.
Notes
-----
Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
chains.
Examples
--------
>>> from giddy.markov import sojourn_time
>>> import numpy as np
>>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
>>> sojourn_time(p)
array([2., 1., 2.]) | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1830-L1864 | null | """
Markov based methods for spatial dynamics.
"""
__author__ = "Sergio J. Rey <sjsrey@gmail.com>, Wei Kang <weikang9009@gmail.com>"
__all__ = ["Markov", "LISA_Markov", "Spatial_Markov", "kullback",
"prais", "homogeneity", "FullRank_Markov", "sojourn_time",
"GeoRank_Markov"]
import numpy as np
from .ergodic import fmpt
from .ergodic import steady_state as STEADY_STATE
from .components import Graph
from scipy import stats
from scipy.stats import rankdata
from operator import gt
from libpysal import weights
from esda.moran import Moran_Local
import mapclassify as mc
import itertools
# TT predefines the LISA transition types.
# TT[i, j] is the transition type from quadrant i in period 0 to quadrant j
# in period 1. Quadrants are one-based, so row/column 0 of TT are unused.
TT = np.zeros((5, 5), int)
c = 1
for i, j in itertools.product(range(1, 5), repeat=2):
    TT[i, j] = c
    c += 1

# MOVE_TYPES maps a LISA transition, filtered on end-point significance, to a
# move-type code. A key (q0, q1, sig0, sig1) means: located in quadrant q0 in
# period 0 and quadrant q1 in period 1, with sig0/sig1 flagging whether the
# LISA statistic was significant in the respective period. Each of the four
# significance combinations occupies its own block of 16 codes.
MOVE_TYPES = {}
c = 1
cases = (True, False)
sig_keys = [(i, j) for i in cases for j in cases]
for offset, (sig0, sig1) in enumerate(sig_keys):
    c = 1 + offset * 16
    for i, j in itertools.product(range(1, 5), repeat=2):
        MOVE_TYPES[(i, j, sig0, sig1)] = c
        c += 1
class Markov(object):
    """
    Classic Markov transition matrices.

    Parameters
    ----------
    class_ids    : array
                   (n, t), one row per observation, one column recording the
                   state of each observation, with as many columns as time
                   periods.
    classes      : array
                   (k, 1), all different classes (bins) of the matrix.

    Attributes
    ----------
    p            : array
                   (k, k), transition probability matrix.
    steady_state : array
                   (k, ), ergodic distribution.
    transitions  : array
                   (k, k), count of transitions between each state i and j.

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.markov import Markov
    >>> c = [['b','a','c'],['c','c','a'],['c','b','c']]
    >>> c.extend([['a','a','b'], ['a','b','c']])
    >>> c = np.array(c)
    >>> m = Markov(c)
    >>> m.classes.tolist()
    ['a', 'b', 'c']
    >>> m.p
    array([[0.25      , 0.5       , 0.25      ],
           [0.33333333, 0.        , 0.66666667],
           [0.33333333, 0.33333333, 0.33333333]])
    >>> m.steady_state
    array([0.30769231, 0.28846154, 0.40384615])

    US nominal per capita income 48 states 81 years 1929-2009

    >>> import libpysal
    >>> import mapclassify as mc
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])

    set classes to quintiles for each year

    >>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
    >>> m = Markov(q5)
    >>> m.transitions
    array([[729.,  71.,   1.,   0.,   0.],
           [ 72., 567.,  80.,   3.,   0.],
           [  0.,  81., 631.,  86.,   2.],
           [  0.,   3.,  86., 573.,  56.],
           [  0.,   0.,   1.,  57., 741.]])
    >>> m.p
    array([[0.91011236, 0.0886392 , 0.00124844, 0.        , 0.        ],
           [0.09972299, 0.78531856, 0.11080332, 0.00415512, 0.        ],
           [0.        , 0.10125   , 0.78875   , 0.1075    , 0.0025    ],
           [0.        , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
           [0.        , 0.        , 0.00125156, 0.07133917, 0.92740926]])
    >>> m.steady_state
    array([0.20774716, 0.18725774, 0.20740537, 0.18821787, 0.20937187])

    Relative incomes

    >>> pci = pci.transpose()
    >>> rpci = pci/(pci.mean(axis=0))
    >>> rq = mc.Quantiles(rpci.flatten()).yb.reshape(pci.shape)
    >>> mq = Markov(rq)
    >>> mq.transitions
    array([[707.,  58.,   7.,   1.,   0.],
           [ 50., 629.,  80.,   1.,   1.],
           [  4.,  79., 610.,  73.,   2.],
           [  0.,   7.,  72., 650.,  37.],
           [  0.,   0.,   0.,  48., 724.]])
    >>> mq.steady_state
    array([0.17957376, 0.21631443, 0.21499942, 0.21134662, 0.17776576])
    """

    def __init__(self, class_ids, classes=None):
        if classes is not None:
            self.classes = classes
        else:
            self.classes = np.unique(class_ids)
        n, t = class_ids.shape
        k = len(self.classes)
        # Map each class label to its matrix row/column index (first
        # occurrence wins, mirroring list.index semantics).
        label_to_idx = {}
        for idx, label in enumerate(self.classes.tolist()):
            label_to_idx.setdefault(label, idx)
        # Count transitions between every pair of consecutive periods.
        transitions = np.zeros((k, k))
        for t0 in range(t - 1):
            for start, end in zip(class_ids[:, t0], class_ids[:, t0 + 1]):
                transitions[label_to_idx[start], label_to_idx[end]] += 1
        self.transitions = transitions
        # Row-normalize into probabilities; rows with no observed
        # transitions keep a denominator of 1 and stay all-zero.
        row_sum = transitions.sum(axis=1)
        recip = 1 / (row_sum + (row_sum == 0))
        self.p = transitions * recip[:, None]

    @property
    def steady_state(self):
        # Ergodic distribution; computed lazily and cached.
        if not hasattr(self, '_steady_state'):
            self._steady_state = STEADY_STATE(self.p)
        return self._steady_state
class Spatial_Markov(object):
"""
Markov transitions conditioned on the value of the spatial lag.
Parameters
----------
y : array
(n, t), one row per observation, one column per state of
each observation, with as many columns as time periods.
w : W
spatial weights object.
k : integer, optional
number of classes (quantiles) for input time series y.
Default is 4. If discrete=True, k is determined
endogenously.
m : integer, optional
number of classes (quantiles) for the spatial lags of
regional time series. Default is 4. If discrete=True,
m is determined endogenously.
permutations : int, optional
number of permutations for use in randomization based
inference (the default is 0).
fixed : bool, optional
If true, discretization are taken over the entire n*t
pooled series and cutoffs can be user-defined. If
cutoffs and lag_cutoffs are not given, quantiles are
used. If false, quantiles are taken each time period
over n. Default is True.
discrete : bool, optional
If true, categorical spatial lags which are most common
categories of neighboring observations serve as the
conditioning and fixed is ignored; if false, weighted
averages of neighboring observations are used. Default is
false.
cutoffs : array, optional
users can specify the discretization cutoffs for
continuous time series. Default is None, meaning that
quantiles will be used for the discretization.
lag_cutoffs : array, optional
users can specify the discretization cutoffs for the
spatial lags of continuous time series. Default is
None, meaning that quantiles will be used for the
discretization.
variable_name : string
name of variable.
Attributes
----------
class_ids : array
(n, t), discretized series if y is continuous. Otherwise
it is identical to y.
classes : array
(k, 1), all different classes (bins).
lclass_ids : array
(n, t), spatial lag series.
lclasses : array
(k, 1), all different classes (bins) for
spatial lags.
p : array
(k, k), transition probability matrix for a-spatial
Markov.
s : array
(k, 1), ergodic distribution for a-spatial Markov.
transitions : array
(k, k), counts of transitions between each state i and j
for a-spatial Markov.
T : array
(k, k, k), counts of transitions for each conditional
Markov. T[0] is the matrix of transitions for
observations with lags in the 0th quantile; T[k-1] is the
transitions for the observations with lags in the k-1th.
P : array
(k, k, k), transition probability matrix for spatial
Markov first dimension is the conditioned on the lag.
S : array
(k, k), steady state distributions for spatial Markov.
Each row is a conditional steady_state.
F : array
(k, k, k),first mean passage times.
First dimension is conditioned on the lag.
shtest : list
(k elements), each element of the list is a tuple for a
multinomial difference test between the steady state
distribution from a conditional distribution versus the
overall steady state distribution: first element of the
tuple is the chi2 value, second its p-value and the third
the degrees of freedom.
chi2 : list
(k elements), each element of the list is a tuple for a
chi-squared test of the difference between the
conditional transition matrix against the overall
transition matrix: first element of the tuple is the chi2
value, second its p-value and the third the degrees of
freedom.
x2 : float
sum of the chi2 values for each of the conditional tests.
Has an asymptotic chi2 distribution with k(k-1)(k-1)
degrees of freedom. Under the null that transition
probabilities are spatially homogeneous.
(see chi2 above)
x2_dof : int
degrees of freedom for homogeneity test.
x2_pvalue : float
pvalue for homogeneity test based on analytic.
distribution
x2_rpvalue : float
(if permutations>0)
pseudo p-value for x2 based on random spatial
permutations of the rows of the original transitions.
x2_realizations : array
(permutations,1), the values of x2 for the random
permutations.
Q : float
Chi-square test of homogeneity across lag classes based
on :cite:`Bickenbach2003`.
Q_p_value : float
p-value for Q.
LR : float
Likelihood ratio statistic for homogeneity across lag
classes based on :cite:`Bickenbach2003`.
LR_p_value : float
p-value for LR.
dof_hom : int
degrees of freedom for LR and Q, corrected for 0 cells.
Notes
-----
Based on :cite:`Rey2001`.
The shtest and chi2 tests should be used with caution as they are based on
classic theory assuming random transitions. The x2 based test is
preferable since it simulates the randomness under the null. It is an
experimental test requiring further analysis.
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov
>>> import numpy as np
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform = 'r'
Now we create a `Spatial_Markov` instance for the continuous relative per
capita income time series for 48 US lower states 1929-2009. The current
implementation allows users to classify the continuous incomes in a more
flexible way.
(1) Global quintiles to discretize the income data (k=5), and global
quintiles to discretize the spatial lags of incomes (m=5).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=5, variable_name='rpci')
We can examine the cutoffs for the incomes and cutoffs for the spatial lags
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.88973386, 0.95891917, 1.01469758, 1.1183566 ])
Obviously, they are slightly different.
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.96341463 0.0304878 0.00609756 0. 0. ]
[0.06040268 0.83221477 0.10738255 0. 0. ]
[0. 0.14 0.74 0.12 0. ]
[0. 0.03571429 0.32142857 0.57142857 0.07142857]
[0. 0. 0. 0.16666667 0.83333333]]
[[0.79831933 0.16806723 0.03361345 0. 0. ]
[0.0754717 0.88207547 0.04245283 0. 0. ]
[0.00537634 0.06989247 0.8655914 0.05913978 0. ]
[0. 0. 0.06372549 0.90196078 0.03431373]
[0. 0. 0. 0.19444444 0.80555556]]
[[0.84693878 0.15306122 0. 0. 0. ]
[0.08133971 0.78947368 0.1291866 0. 0. ]
[0.00518135 0.0984456 0.79274611 0.0984456 0.00518135]
[0. 0. 0.09411765 0.87058824 0.03529412]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.8852459 0.09836066 0. 0.01639344 0. ]
[0.03875969 0.81395349 0.13953488 0. 0.00775194]
[0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ]
[0. 0.02339181 0.12865497 0.75438596 0.09356725]
[0. 0. 0. 0.09661836 0.90338164]]
[[0.33333333 0.66666667 0. 0. 0. ]
[0.0483871 0.77419355 0.16129032 0.01612903 0. ]
[0.01149425 0.16091954 0.74712644 0.08045977 0. ]
[0. 0.01036269 0.06217617 0.89637306 0.03108808]
[0. 0. 0. 0.02352941 0.97647059]]
The probability of a poor state remaining poor is 0.963 if their
neighbors are in the 1st quintile and 0.798 if their neighbors are
in the 2nd quintile. The probability of a rich economy remaining
rich is 0.976 if their neighbors are in the 5th quintile, but if their
neighbors are in the 4th quintile this drops to 0.903.
The global transition probability matrix is estimated:
>>> print(sm.p)
[[0.91461837 0.07503234 0.00905563 0.00129366 0. ]
[0.06570302 0.82654402 0.10512484 0.00131406 0.00131406]
[0.00520833 0.10286458 0.79427083 0.09505208 0.00260417]
[0. 0.00913838 0.09399478 0.84856397 0.04830287]
[0. 0. 0. 0.06217617 0.93782383]]
The Q and likelihood ratio statistics are both significant indicating
the dynamics are not homogeneous across the lag classes:
>>> "%.3f"%sm.LR
'170.659'
>>> "%.3f"%sm.Q
'200.624'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
60
The long run distribution for states with poor (rich) neighbors has
0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in
the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the
fourth and 0.029 (0.337) in the fifth quintile.
>>> sm.S
array([[0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278],
[0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356],
[0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ],
[0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013],
[0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]])
States with incomes in the first quintile with neighbors in the
first quintile return to the first quartile after 2.298 years, after
leaving the first quintile. They enter the fourth quintile after
80.810 years after leaving the first quintile, on average.
Poor states within neighbors in the fourth quintile return to the
first quintile, on average, after 12.88 years, and would enter the
fourth quintile after 28.473 years.
>>> for f in sm.F:
... print(f)
...
[[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143]
[ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286]
[ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429]
[ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762]
[ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]]
[[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197]
[ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ]
[ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482]
[ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314]
[ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]]
[[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026]
[ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693]
[ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594]
[ 83.72288828 39.06666667 14.3 3.44668119 76.36702977]
[ 93.52288828 48.86666667 24.1 9.8 8.79255406]]
[[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142]
[ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423]
[117.76777159 23.03735526 3.94436301 15.0843986 43.57927247]
[127.89752089 32.4393006 14.56853107 4.44831643 31.63099455]
[138.24752089 42.7893006 24.91853107 10.35 4.05613474]]
[[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826]
[ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826]
[ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217]
[127.1407767 48.74107143 33.29605263 3.91777427 83.52173913]
[169.6407767 91.24107143 75.79605263 42.5 2.96521739]]
(2) Global quintiles to discretize the income data (k=5), and global
quartiles to discretize the spatial lags of incomes (m=4).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=4, variable_name='rpci')
We can also examine the cutoffs for the incomes and cutoffs for the spatial
lags:
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.91440247, 0.98583079, 1.08698351])
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.95708955 0.03544776 0.00746269 0. 0. ]
[0.05825243 0.83980583 0.10194175 0. 0. ]
[0. 0.1294964 0.76258993 0.10791367 0. ]
[0. 0.01538462 0.18461538 0.72307692 0.07692308]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.7421875 0.234375 0.0234375 0. 0. ]
[0.08550186 0.85130112 0.06319703 0. 0. ]
[0.00865801 0.06926407 0.86147186 0.05627706 0.004329 ]
[0. 0. 0.05363985 0.92337165 0.02298851]
[0. 0. 0. 0.13432836 0.86567164]]
[[0.95145631 0.04854369 0. 0. 0. ]
[0.06 0.79 0.145 0. 0.005 ]
[0.00358423 0.10394265 0.7921147 0.09677419 0.00358423]
[0. 0.01630435 0.13586957 0.75543478 0.0923913 ]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.16666667 0.66666667 0. 0.16666667 0. ]
[0.03488372 0.80232558 0.15116279 0.01162791 0. ]
[0.00840336 0.13445378 0.70588235 0.1512605 0. ]
[0. 0.01171875 0.08203125 0.87109375 0.03515625]
[0. 0. 0. 0.03434343 0.96565657]]
We now obtain 4 5*5 spatial lag conditioned transition probability
matrices instead of 5 as in case (1).
The Q and likelihood ratio statistics are still both significant.
>>> "%.3f"%sm.LR
'172.105'
>>> "%.3f"%sm.Q
'321.128'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
45
(3) We can also set the cutoffs for relative incomes and their
spatial lags manually.
For example, we want the defining cutoffs to be [0.8, 0.9, 1, 1.2],
meaning that relative incomes:
2.1 smaller than 0.8 : class 0
2.2 between 0.8 and 0.9: class 1
2.3 between 0.9 and 1.0 : class 2
2.4 between 1.0 and 1.2: class 3
2.5 larger than 1.2: class 4
>>> cc = np.array([0.8, 0.9, 1, 1.2])
>>> sm = Spatial_Markov(rpci, w, cutoffs=cc, lag_cutoffs=cc, variable_name='rpci')
>>> sm.cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.k
5
>>> sm.lag_cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.96703297 0.03296703 0. 0. 0. ]
[0.10638298 0.68085106 0.21276596 0. 0. ]
[0. 0.14285714 0.7755102 0.08163265 0. ]
[0. 0. 0.5 0.5 0. ]
[0. 0. 0. 0. 0. ]]
[[0.88636364 0.10606061 0.00757576 0. 0. ]
[0.04402516 0.89308176 0.06289308 0. 0. ]
[0. 0.05882353 0.8627451 0.07843137 0. ]
[0. 0. 0.13846154 0.86153846 0. ]
[0. 0. 0. 0. 1. ]]
[[0.78082192 0.17808219 0.02739726 0.01369863 0. ]
[0.03488372 0.90406977 0.05813953 0.00290698 0. ]
[0. 0.05919003 0.84735202 0.09034268 0.00311526]
[0. 0. 0.05811623 0.92985972 0.01202405]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.82692308 0.15384615 0. 0.01923077 0. ]
[0.0703125 0.7890625 0.125 0.015625 0. ]
[0.00295858 0.06213018 0.82248521 0.10946746 0.00295858]
[0. 0.00185529 0.07606679 0.88497217 0.03710575]
[0. 0. 0. 0.07803468 0.92196532]]
[[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0.06666667 0.9 0.03333333 0. ]
[0. 0. 0.05660377 0.90566038 0.03773585]
[0. 0. 0. 0.03932584 0.96067416]]
(4) Spatial_Markov also accept discrete time series and calculate
categorical spatial lags on which several transition probability matrices
are conditioned.
Let's still use the US state income time series to demonstrate. We first
discretize them into categories and then pass them to Spatial_Markov.
>>> import mapclassify as mc
>>> y = mc.Quantiles(rpci.flatten(), k=5).yb.reshape(rpci.shape)
>>> np.random.seed(5)
>>> sm = Spatial_Markov(y, w, discrete=True, variable_name='discretized rpci')
>>> sm.k
5
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.94787645 0.04440154 0.00772201 0. 0. ]
[0.08333333 0.81060606 0.10606061 0. 0. ]
[0. 0.12765957 0.79787234 0.07446809 0. ]
[0. 0.02777778 0.22222222 0.66666667 0.08333333]
[0. 0. 0. 0.33333333 0.66666667]]
[[0.888 0.096 0.016 0. 0. ]
[0.06049822 0.84341637 0.09608541 0. 0. ]
[0.00666667 0.10666667 0.81333333 0.07333333 0. ]
[0. 0. 0.08527132 0.86821705 0.04651163]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.65217391 0.32608696 0.02173913 0. 0. ]
[0.07446809 0.80851064 0.11170213 0. 0.00531915]
[0.01071429 0.1 0.76428571 0.11785714 0.00714286]
[0. 0.00552486 0.09392265 0.86187845 0.03867403]
[0. 0. 0. 0.13157895 0.86842105]]
[[0.91935484 0.06451613 0. 0.01612903 0. ]
[0.06796117 0.90291262 0.02912621 0. 0. ]
[0. 0.05755396 0.87769784 0.0647482 0. ]
[0. 0.02150538 0.10752688 0.80107527 0.06989247]
[0. 0. 0. 0.08064516 0.91935484]]
[[0.81818182 0.18181818 0. 0. 0. ]
[0.01754386 0.70175439 0.26315789 0.01754386 0. ]
[0. 0.14285714 0.73333333 0.12380952 0. ]
[0. 0.0042735 0.06837607 0.89316239 0.03418803]
[0. 0. 0. 0.03891051 0.96108949]]
"""
def __init__(self, y, w, k=4, m=4, permutations=0, fixed=True,
discrete=False, cutoffs=None, lag_cutoffs=None,
variable_name=None):
y = np.asarray(y)
self.fixed = fixed
self.discrete = discrete
self.cutoffs = cutoffs
self.m = m
self.lag_cutoffs = lag_cutoffs
self.variable_name = variable_name
if discrete:
merged = list(itertools.chain.from_iterable(y))
classes = np.unique(merged)
self.classes = classes
self.k = len(classes)
self.m = self.k
label_dict = dict(zip(classes, range(self.k)))
y_int = []
for yi in y:
y_int.append(list(map(label_dict.get, yi)))
self.class_ids = np.array(y_int)
self.lclass_ids = self.class_ids
else:
self.class_ids, self.cutoffs, self.k = self._maybe_classify(
y, k=k, cutoffs=self.cutoffs)
self.classes = np.arange(self.k)
classic = Markov(self.class_ids)
self.p = classic.p
self.transitions = classic.transitions
self.T, self.P = self._calc(y, w)
if permutations:
nrp = np.random.permutation
counter = 0
x2_realizations = np.zeros((permutations, 1))
for perm in range(permutations):
T, P = self._calc(nrp(y), w)
x2 = [chi2(T[i], self.transitions)[0] for i in range(self.k)]
x2s = sum(x2)
x2_realizations[perm] = x2s
if x2s >= self.x2:
counter += 1
self.x2_rpvalue = (counter + 1.0) / (permutations + 1.)
self.x2_realizations = x2_realizations
@property
def s(self):
if not hasattr(self, '_s'):
self._s = STEADY_STATE(self.p)
return self._s
@property
def S(self):
if not hasattr(self, '_S'):
S = np.zeros_like(self.p)
for i, p in enumerate(self.P):
S[i] = STEADY_STATE(p)
self._S = np.asarray(S)
return self._S
@property
def F(self):
if not hasattr(self, '_F'):
F = np.zeros_like(self.P)
for i, p in enumerate(self.P):
F[i] = fmpt(np.asmatrix(p))
self._F = np.asarray(F)
return self._F
    # Homogeneity tests across lag classes (Bickenbach & Bode).
    @property
    def ht(self):
        # Full homogeneity test result over the conditional count matrices;
        # computed lazily and cached.
        if not hasattr(self, '_ht'):
            self._ht = homogeneity(self.T)
        return self._ht
    @property
    def Q(self):
        # Chi-square homogeneity statistic; cached.
        if not hasattr(self, '_Q'):
            self._Q = self.ht.Q
        return self._Q
    @property
    def Q_p_value(self):
        # p-value for Q; re-read from the (cached) ht result on each access.
        self._Q_p_value = self.ht.Q_p_value
        return self._Q_p_value
    @property
    def LR(self):
        # Likelihood-ratio homogeneity statistic.
        self._LR = self.ht.LR
        return self._LR
    @property
    def LR_p_value(self):
        # p-value for LR.
        self._LR_p_value = self.ht.LR_p_value
        return self._LR_p_value
    @property
    def dof_hom(self):
        # Degrees of freedom for LR and Q, corrected for zero cells.
        self._dof_hom = self.ht.dof
        return self._dof_hom
    # Classic-theory tests (see class Notes: use with caution).
    @property
    def shtest(self):
        # Multinomial tests of each conditional steady state against the
        # global steady state; list of (chi2, p-value, dof) tuples, cached.
        if not hasattr(self, '_shtest'):
            self._shtest = self._mn_test()
        return self._shtest
    @property
    def chi2(self):
        # Chi-squared tests of each conditional transition matrix against
        # the global transition matrix; list of (chi2, p-value, dof), cached.
        if not hasattr(self, '_chi2'):
            self._chi2 = self._chi2_test()
        return self._chi2
    @property
    def x2(self):
        # Sum of the conditional chi2 statistics (overall homogeneity test).
        if not hasattr(self, '_x2'):
            self._x2 = sum([c[0] for c in self.chi2])
        return self._x2
    @property
    def x2_pvalue(self):
        # Analytic p-value for x2 under a chi2(x2_dof) null distribution.
        if not hasattr(self, '_x2_pvalue'):
            self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof)
        return self._x2_pvalue
    @property
    def x2_dof(self):
        # k(k-1)(k-1) degrees of freedom for the x2 homogeneity test.
        if not hasattr(self, '_x2_dof'):
            k = self.k
            self._x2_dof = k * (k - 1) * (k - 1)
        return self._x2_dof
    def _calc(self, y, w):
        '''Helper to estimate spatial lag conditioned Markov transition
        probability matrices based on maximum likelihood techniques.

        Returns (T, P): T is an (m, k, k) array of transition counts, one
        k-by-k matrix per lag class; P holds the corresponding row-normalized
        transition probabilities. Also sets self.lclass_ids, self.lclasses
        and, for continuous input, self.lag_cutoffs and self.m.
        '''
        if self.discrete:
            # Categorical spatial lag of the discrete states
            # (ties="tryself" passed through to libpysal).
            self.lclass_ids = weights.lag_categorical(w, self.class_ids,
                                                      ties="tryself")
        else:
            # Continuous case: spatial lag of y, then discretized into m
            # lag classes.
            ly = weights.lag_spatial(w, y)
            self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
                ly, self.m, self.lag_cutoffs)
        self.lclasses = np.arange(self.m)
        # Count transitions conditioned on the lag class at the start period.
        T = np.zeros((self.m, self.k, self.k))
        n, t = y.shape
        for t1 in range(t - 1):
            t2 = t1 + 1
            for i in range(n):
                T[self.lclass_ids[i, t1], self.class_ids[i, t1],
                  self.class_ids[i, t2]] += 1
        # Row-normalize each conditional count matrix; all-zero rows keep a
        # denominator of 1 so they remain zero instead of producing NaNs.
        P = np.zeros_like(T)
        for i, mat in enumerate(T):
            row_sum = mat.sum(axis=1)
            row_sum = row_sum + (row_sum == 0)
            p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
            P[i] = p_i
        return T, P
def _mn_test(self):
"""
helper to calculate tests of differences between steady state
distributions from the conditional and overall distributions.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [self._ssmnp_test(
self.s, self.S[i], self.T[i].sum()) for i in rn]
return mat
def _ssmnp_test(self, p1, p2, nt):
"""
Steady state multinomial probability difference test.
Arguments
---------
p1 : array
(k, ), first steady state probability distribution.
p1 : array
(k, ), second steady state probability distribution.
nt : int
number of transitions to base the test on.
Returns
-------
tuple
(3 elements)
(chi2 value, pvalue, degrees of freedom)
"""
o = nt * p2
e = nt * p1
d = np.multiply((o - e), (o - e))
d = d / e
chi2 = d.sum()
pvalue = 1 - stats.chi2.cdf(chi2, self.k - 1)
return (chi2, pvalue, self.k - 1)
def _chi2_test(self):
"""
helper to calculate tests of differences between the conditional
transition matrices and the overall transitions matrix.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [chi2(self.T[i], self.transitions) for i in rn]
return mat
def summary(self, file_name=None):
"""
A summary method to call the Markov homogeneity test to test for
temporally lagged spatial dependence.
To learn more about the properties of the tests, refer to
:cite:`Rey2016a` and :cite:`Kang2018`.
"""
class_names = ["C%d" % i for i in range(self.k)]
regime_names = ["LAG%d" % i for i in range(self.k)]
ht = homogeneity(self.T, class_names=class_names,
regime_names=regime_names)
title = "Spatial Markov Test"
if self.variable_name:
title = title + ": " + self.variable_name
if file_name:
ht.summary(file_name=file_name, title=title)
else:
ht.summary(title=title)
def _maybe_classify(self, y, k, cutoffs):
'''Helper method for classifying continuous data.
'''
rows, cols = y.shape
if cutoffs is None:
if self.fixed:
mcyb = mc.Quantiles(y.flatten(), k=k)
yb = mcyb.yb.reshape(y.shape)
cutoffs = mcyb.bins
k = len(cutoffs)
return yb, cutoffs[:-1], k
else:
yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in
np.arange(cols)]).transpose()
return yb, None, k
else:
cutoffs = list(cutoffs) + [np.inf]
cutoffs = np.array(cutoffs)
yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape(
y.shape)
k = len(cutoffs)
return yb, cutoffs[:-1], k
def chi2(T1, T2):
    r"""
    chi-squared test of difference between two transition matrices.

    Parameters
    ----------
    T1 : array
         (k, k), matrix of transitions (counts).
    T2 : array
         (k, k), matrix of transitions (counts) to use to form the
         probabilities under the null.

    Returns
    -------
    : tuple
      (3 elements).
      (chi2 value, pvalue, degrees of freedom).

    Examples
    --------
    >>> import libpysal
    >>> from giddy.markov import Spatial_Markov, chi2
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> years = list(range(1929, 2010))
    >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
    >>> rpci = pci/(pci.mean(axis=0))
    >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
    >>> w.transform='r'
    >>> sm = Spatial_Markov(rpci, w, fixed=True)
    >>> T1 = sm.T[0]
    >>> T1
    array([[562.,  22.,   1.,   0.],
           [ 12., 201.,  22.,   0.],
           [  0.,  17.,  97.,   4.],
           [  0.,   0.,   3.,  19.]])
    >>> T2 = sm.transitions
    >>> T2
    array([[884.,  77.,   4.,   0.],
           [ 68., 794.,  87.,   3.],
           [  1.,  92., 815.,  51.],
           [  1.,   0.,  60., 903.]])
    >>> chi2(T1,T2)
    (23.39728441473295, 0.005363116704861337, 9)

    Notes
    -----
    Second matrix is used to form the probabilities under the null.
    Marginal sums from first matrix are distributed across these probabilities
    under the null. In other words the observed transitions are taken from T1
    while the expected transitions are formed as follows

    .. math::

            E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}

    Degrees of freedom corrected for any rows in either T1 or T2 that have
    zero total transitions.
    """
    rs2 = T2.sum(axis=1)
    rs1 = T1.sum(axis=1)
    # degrees of freedom only count rows with observed transitions
    dof1 = int((rs1 > 0).sum())
    dof2 = int((rs2 > 0).sum())
    dof = (dof1 - 1) * (dof2 - 1)
    # bump empty rows of T2 to 1 before normalizing (their probs stay 0);
    # ndarray broadcasting replaces the deprecated np.matrix products
    rs2 = rs2 + (rs2 == 0)
    p = T2 / rs2[:, np.newaxis]        # null transition probabilities
    E = rs1[:, np.newaxis] * p         # expected transitions under the null
    num = (T1 - E) ** 2
    E = E + (E == 0)                   # guard division by zero
    chi2 = (num / E).sum()
    pvalue = 1 - stats.chi2.cdf(chi2, dof)
    return chi2, pvalue, dof
class LISA_Markov(Markov):
"""
Markov for Local Indicators of Spatial Association
Parameters
----------
y : array
(n, t), n cross-sectional units observed over t time
periods.
w : W
spatial weights object.
permutations : int, optional
number of permutations used to determine LISA
significance (the default is 0).
significance_level : float, optional
significance level (two-sided) for filtering
significant LISA endpoints in a transition (the
default is 0.05).
geoda_quads : bool
If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4.
If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4.
(the default is False).
Attributes
----------
chi_2 : tuple
(3 elements)
(chi square test statistic, p-value, degrees of freedom) for
test that dynamics of y are independent of dynamics of wy.
classes : array
(4, 1)
1=HH, 2=LH, 3=LL, 4=HL (own, lag)
1=HH, 2=LL, 3=LH, 4=HL (own, lag) (if geoda_quads=True)
expected_t : array
(4, 4), expected number of transitions under the null that
dynamics of y are independent of dynamics of wy.
move_types : matrix
(n, t-1), integer values indicating which type of LISA
transition occurred (q1 is quadrant in period 1, q2 is
quadrant in period 2).
.. table:: Move Types
== == =========
q1 q2 move_type
== == =========
1 1 1
1 2 2
1 3 3
1 4 4
2 1 5
2 2 6
2 3 7
2 4 8
3 1 9
3 2 10
3 3 11
3 4 12
4 1 13
4 2 14
4 3 15
4 4 16
== == =========
p : array
(k, k), transition probability matrix.
p_values : matrix
(n, t), LISA p-values for each end point (if permutations >
0).
significant_moves : matrix
(n, t-1), integer values indicating the type and
significance of a LISA transition. st = 1 if
significant in period t, else st=0 (if permutations >
0).
.. Table:: Significant Moves1
=============== ===================
(s1,s2) move_type
=============== ===================
(1,1) [1, 16]
(1,0) [17, 32]
(0,1) [33, 48]
(0,0) [49, 64]
=============== ===================
.. Table:: Significant Moves2
== == == == =========
q1 q2 s1 s2 move_type
== == == == =========
1 1 1 1 1
1 2 1 1 2
1 3 1 1 3
1 4 1 1 4
2 1 1 1 5
2 2 1 1 6
2 3 1 1 7
2 4 1 1 8
3 1 1 1 9
3 2 1 1 10
3 3 1 1 11
3 4 1 1 12
4 1 1 1 13
4 2 1 1 14
4 3 1 1 15
4 4 1 1 16
1 1 1 0 17
1 2 1 0 18
. . . . .
. . . . .
4 3 1 0 31
4 4 1 0 32
1 1 0 1 33
1 2 0 1 34
. . . . .
. . . . .
4 3 0 1 47
4 4 0 1 48
1 1 0 0 49
1 2 0 0 50
. . . . .
. . . . .
4 3 0 0 63
4 4 0 0 64
== == == == =========
steady_state : array
(k, ), ergodic distribution.
transitions : array
(4, 4), count of transitions between each state i and j.
spillover : array
(n, 1) binary array, locations that were not part of a
                          cluster in period 1 but joined a preexisting cluster in
period 2.
Examples
--------
>>> import libpysal
>>> import numpy as np
>>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> lm = LISA_Markov(pci,w)
>>> lm.classes
array([1, 2, 3, 4])
>>> lm.steady_state
array([0.28561505, 0.14190226, 0.40493672, 0.16754598])
>>> lm.transitions
array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
[4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
[5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
[3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])
>>> lm.p
array([[0.92985458, 0.03763901, 0.00342173, 0.02908469],
[0.07481752, 0.85766423, 0.06569343, 0.00182482],
[0.00333333, 0.02266667, 0.948 , 0.026 ],
[0.04815409, 0.00160514, 0.06420546, 0.88603531]])
>>> lm.move_types[0,:3]
array([11, 11, 11])
>>> lm.move_types[0,-3:]
array([11, 11, 11])
Now consider only moves with one, or both, of the LISA end points being
significant
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> lm_random.significant_moves[0, :3]
array([11, 11, 11])
>>> lm_random.significant_moves[0,-3:]
array([59, 43, 27])
Any value less than 49 indicates at least one of the LISA end points was
significant. So for example, the first spatial unit experienced a
    transition of type 11 (LL, LL) during the first three and last three
intervals (according to lm.move_types), however, the last three of these
transitions involved insignificant LISAS in both the start and ending year
of each transition.
Test whether the moves of y are independent of the moves of wy
>>> "Chi2: %8.3f, p: %5.2f, dof: %d" % lm.chi_2
'Chi2: 1058.208, p: 0.00, dof: 9'
Actual transitions of LISAs
>>> lm.transitions
array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
[4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
[5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
[3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])
Expected transitions of LISAs under the null y and wy are moving
independently of one another
>>> lm.expected_t
array([[1.12328098e+03, 1.15377356e+01, 3.47522158e-01, 3.38337644e+01],
[3.50272664e+00, 5.28473882e+02, 1.59178880e+01, 1.05503814e-01],
[1.53878082e-01, 2.32163556e+01, 1.46690710e+03, 9.72266513e+00],
[9.60775143e+00, 9.86856346e-02, 6.23537392e+00, 6.07058189e+02]])
If the LISA classes are to be defined according to GeoDa, the `geoda_quad`
option has to be set to true
>>> lm.q[0:5,0]
array([3, 2, 3, 1, 4])
>>> lm = LISA_Markov(pci,w, geoda_quads=True)
>>> lm.q[0:5,0]
array([2, 3, 2, 1, 4])
"""
def __init__(self, y, w, permutations=0,
significance_level=0.05, geoda_quads=False):
y = y.transpose()
pml = Moran_Local
gq = geoda_quads
ml = ([pml(yi, w, permutations=permutations, geoda_quads=gq)
for yi in y])
q = np.array([mli.q for mli in ml]).transpose()
classes = np.arange(1, 5) # no guarantee all 4 quadrants are visited
Markov.__init__(self, q, classes)
self.q = q
self.w = w
n, k = q.shape
k -= 1
self.significance_level = significance_level
move_types = np.zeros((n, k), int)
sm = np.zeros((n, k), int)
self.significance_level = significance_level
if permutations > 0:
p = np.array([mli.p_z_sim for mli in ml]).transpose()
self.p_values = p
pb = p <= significance_level
else:
pb = np.zeros_like(y.T)
for t in range(k):
origin = q[:, t]
dest = q[:, t + 1]
p_origin = pb[:, t]
p_dest = pb[:, t + 1]
for r in range(n):
move_types[r, t] = TT[origin[r], dest[r]]
key = (origin[r], dest[r], p_origin[r], p_dest[r])
sm[r, t] = MOVE_TYPES[key]
if permutations > 0:
self.significant_moves = sm
self.move_types = move_types
# null of own and lag moves being independent
ybar = y.mean(axis=0)
r = y / ybar
ylag = np.array([weights.lag_spatial(w, yt) for yt in y])
rlag = ylag / ybar
rc = r < 1.
rlagc = rlag < 1.
markov_y = Markov(rc)
markov_ylag = Markov(rlagc)
A = np.matrix([[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0]])
kp = A * np.kron(markov_y.p, markov_ylag.p) * A.T
trans = self.transitions.sum(axis=1)
t1 = np.diag(trans) * kp
t2 = self.transitions
t1 = t1.getA()
self.chi_2 = chi2(t2, t1)
self.expected_t = t1
self.permutations = permutations
    def spillover(self, quadrant=1, neighbors_on=False):
        """
        Detect spillover locations for diffusion in LISA Markov.

        Parameters
        ----------
        quadrant     : int
                       which quadrant in the scatterplot should form the core
                       of a cluster.
        neighbors_on : binary
                       If false, then only the 1st order neighbors of a core
                       location are included in the cluster.
                       If true, neighbors of cluster core 1st order neighbors
                       are included in the cluster.

        Returns
        -------
        results      : dictionary
                       two keys - values pairs:
                       'components' - array (n, t)
                       values are integer ids (starting at 1) indicating which
                       component/cluster observation i in period t belonged to.
                       'spillover' - array (n, t-1)
                       binary values indicating if the location was a
                       spill-over location that became a new member of a
                       previously existing cluster.

        Examples
        --------
        >>> import libpysal
        >>> from giddy.markov import LISA_Markov
        >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
        >>> years = list(range(1929, 2010))
        >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
        >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
        >>> np.random.seed(10)
        >>> lm_random = LISA_Markov(pci, w, permutations=99)
        >>> r = lm_random.spillover()
        >>> (r['components'][:, 12] > 0).sum()
        17
        >>> (r['components'][:, 13]>0).sum()
        23
        >>> (r['spill_over'][:,12]>0).sum()
        6

        Including neighbors of core neighbors

        >>> rn = lm_random.spillover(neighbors_on=True)
        >>> (rn['components'][:, 12] > 0).sum()
        26
        >>> (rn["components"][:, 13] > 0).sum()
        34
        >>> (rn["spill_over"][:, 12] > 0).sum()
        8

        """
        n, k = self.q.shape
        # spillovers only defined when permutation-based p-values exist
        if self.permutations:
            spill_over = np.zeros((n, k - 1))
            components = np.zeros((n, k))
            i2id = {}  # handle string keys
            for key in list(self.w.neighbors.keys()):
                idx = self.w.id2i[key]
                i2id[idx] = key
            # significant LISAs of the requested quadrant, per period
            sig_lisas = (self.q == quadrant) \
                * (self.p_values <= self.significance_level)
            sig_ids = [np.nonzero(
                sig_lisas[:, i])[0].tolist() for i in range(k)]
            neighbors = self.w.neighbors
            for t in range(k - 1):
                s1 = sig_ids[t]
                s2 = sig_ids[t + 1]
                # build the cluster graph for period t around each core
                g1 = Graph(undirected=True)
                for i in s1:
                    for neighbor in neighbors[i2id[i]]:
                        g1.add_edge(i2id[i], neighbor, 1.0)
                        if neighbors_on:
                            for nn in neighbors[neighbor]:
                                g1.add_edge(neighbor, nn, 1.0)
                components1 = g1.connected_components(op=gt)
                components1 = [list(c.nodes) for c in components1]
                # same construction for period t+1
                g2 = Graph(undirected=True)
                for i in s2:
                    for neighbor in neighbors[i2id[i]]:
                        g2.add_edge(i2id[i], neighbor, 1.0)
                        if neighbors_on:
                            for nn in neighbors[neighbor]:
                                g2.add_edge(neighbor, nn, 1.0)
                components2 = g2.connected_components(op=gt)
                components2 = [list(c.nodes) for c in components2]
                c2 = []
                c1 = []
                for c in components2:
                    c2.extend(c)
                for c in components1:
                    c1.extend(c)
                # locations in a period-2 cluster that were in none in period 1
                new_ids = [j for j in c2 if j not in c1]
                spill_ids = []
                for j in new_ids:
                    # find j's component in period 2
                    cj = [c for c in components2 if j in c][0]
                    # for members of j's component in period 2, check if they
                    # belonged to any components in period 1
                    for i in cj:
                        if i in c1:
                            spill_ids.append(j)
                            break
                for spill_id in spill_ids:
                    id = self.w.id2i[spill_id]
                    spill_over[id, t] = 1
                # label period-t cluster membership with 1-based component ids
                for c, component in enumerate(components1):
                    for i in component:
                        ii = self.w.id2i[i]
                        components[ii, t] = c + 1
            results = {}
            results['components'] = components
            results['spill_over'] = spill_over
            return results
        else:
            return None
def kullback(F):
    """
    Kullback information based test of Markov Homogeneity.

    Parameters
    ----------
    F : array
        (s, r, r), values are transitions (not probabilities) for
        s strata, r initial states, r terminal states.

    Returns
    -------
    Results : dictionary
              (key - value)

              Conditional homogeneity - (float) test statistic for homogeneity
              of transition probabilities across strata.

              Conditional homogeneity pvalue - (float) p-value for test
              statistic.

              Conditional homogeneity dof - (int) degrees of freedom =
              r(s-1)(r-1).

    Notes
    -----
    Based on :cite:`Kullback1962`.
    Example below is taken from Table 9.2 .

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.markov import kullback
    >>> s1 = np.array([
    ...         [ 22, 11, 24,  2,  2,  7],
    ...         [ 5, 23, 15,  3, 42,  6],
    ...         [ 4, 21, 190, 25, 20, 34],
    ...         [0, 2, 14, 56, 14, 28],
    ...         [32, 15, 20, 10, 56, 14],
    ...         [5, 22, 31, 18, 13, 134]
    ...     ])
    >>> s2 = np.array([
    ...     [3, 6, 9, 3, 0, 8],
    ...     [1, 9, 3, 12, 27, 5],
    ...     [2, 9, 208, 32, 5, 18],
    ...     [0, 14, 32, 108, 40, 40],
    ...     [22, 14, 9, 26, 224, 14],
    ...     [1, 5, 13, 53, 13, 116]
    ...     ])
    >>>
    >>> F = np.array([s1, s2])
    >>> res = kullback(F)
    >>> "%8.3f"%res['Conditional homogeneity']
    ' 160.961'
    >>> "%d"%res['Conditional homogeneity dof']
    '30'
    >>> "%3.1f"%res['Conditional homogeneity pvalue']
    '0.0'
    """
    # Each term is 2 * sum(count * log(count)) over a marginal of F; zero
    # counts are bumped to 1 inside the log only, since x*log(x) -> 0.
    F1 = F + (F == 0)
    T1 = 2 * (F * np.log(F1)).sum()                 # full (s, r, r) table

    FdJK = F.sum(axis=0)                            # aggregated over strata
    T2 = 2 * (FdJK * np.log(FdJK + (FdJK == 0))).sum()

    FdJd = F.sum(axis=0).sum(axis=1)                # marginals of aggregate
    T3 = 2 * (FdJd * np.log(FdJd + (FdJd == 0))).sum()

    FIJd = F.sum(axis=1)                            # per-stratum marginals
    T4 = 2 * (FIJd * np.log(FIJd + (FIJd == 0))).sum()

    # NOTE: the original also computed 2*N*log(N) (grand total) but never
    # used it in the statistic; that dead computation has been removed.
    s, r, r1 = F.shape
    chom = T1 - T4 - T2 + T3
    cdof = r * (s - 1) * (r - 1)
    results = {}
    results['Conditional homogeneity'] = chom
    results['Conditional homogeneity dof'] = cdof
    results['Conditional homogeneity pvalue'] = 1 - stats.chi2.cdf(chom, cdof)
    return results
def prais(pmat):
    """
    Prais conditional mobility measure.

    Parameters
    ----------
    pmat : array
           (k, k), Markov probability transition matrix.

    Returns
    -------
    pr : array
         (k, ), conditional mobility measures for each of the k classes.
         (The original docstring advertised a (1, k) matrix; the function
         has always returned a 1-D array.)

    Notes
    -----
    Prais' conditional mobility measure for a class is defined as:

    .. math::

            pr_i = 1 - p_{i,i}

    Examples
    --------
    >>> import numpy as np
    >>> import libpysal
    >>> from giddy.markov import Markov,prais
    >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
    >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
    >>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
    >>> m = Markov(q5)
    >>> m.transitions
    array([[729.,  71.,   1.,   0.,   0.],
           [ 72., 567.,  80.,   3.,   0.],
           [  0.,  81., 631.,  86.,   2.],
           [  0.,   3.,  86., 573.,  56.],
           [  0.,   0.,   1.,  57., 741.]])
    >>> m.p
    array([[0.91011236, 0.0886392 , 0.00124844, 0.        , 0.        ],
           [0.09972299, 0.78531856, 0.11080332, 0.00415512, 0.        ],
           [0.        , 0.10125   , 0.78875   , 0.1075    , 0.0025    ],
           [0.        , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
           [0.        , 0.        , 0.00125156, 0.07133917, 0.92740926]])
    >>> prais(m.p)
    array([0.08988764, 0.21468144, 0.21125   , 0.20194986, 0.07259074])
    """
    # probability of leaving class i is one minus the diagonal entry;
    # asarray avoids an unnecessary copy when pmat is already an ndarray
    pmat = np.asarray(pmat)
    return 1 - np.diag(pmat)
def homogeneity(transition_matrices, regime_names=[], class_names=[],
                title="Markov Homogeneity Test"):
    """
    Test for homogeneity of Markov transition probabilities across regimes.

    Parameters
    ----------
    transition_matrices : list
                          of transition matrices for regimes, all matrices
                          must have same size (r, c). r is the number of rows
                          in the transition matrix and c is the number of
                          columns in the transition matrix.
    regime_names        : sequence
                          Labels for the regimes.
    class_names         : sequence
                          Labels for the classes/states of the Markov chain.
    title               : string
                          name of test.

    Returns
    -------
    : implicit
      an instance of Homogeneity_Results.
    """
    # thin convenience wrapper around the results class
    return Homogeneity_Results(
        transition_matrices,
        regime_names=regime_names,
        class_names=class_names,
        title=title,
    )
class Homogeneity_Results:
"""
Wrapper class to present homogeneity results.
Parameters
----------
transition_matrices : list
of transition matrices for regimes, all matrices must
have same size (r, c). r is the number of rows in
the transition matrix and c is the number of columns
in the transition matrix.
regime_names : sequence
Labels for the regimes.
class_names : sequence
Labels for the classes/states of the Markov chain.
title : string
Title of the table.
Attributes
-----------
Notes
-----
Degrees of freedom adjustment follow the approach in :cite:`Bickenbach2003`.
Examples
--------
See Spatial_Markov above.
"""
def __init__(self, transition_matrices, regime_names=[], class_names=[],
title="Markov Homogeneity Test"):
self._homogeneity(transition_matrices)
self.regime_names = regime_names
self.class_names = class_names
self.title = title
    def _homogeneity(self, transition_matrices):
        """Compute the Q (Pearson-type) and LR homogeneity statistics and
        store all intermediate tables as attributes."""
        # form null transition probability matrix
        M = np.array(transition_matrices)
        m, r, k = M.shape
        self.k = k
        B = np.zeros((r, m))
        T = M.sum(axis=0)  # pooled transition counts across regimes
        self.t_total = T.sum()
        n_i = T.sum(axis=1)
        A_i = (T > 0).sum(axis=1)
        A_im = np.zeros((r, m))
        # null probabilities; zero row totals are bumped to 1 to avoid 0/0
        p_ij = np.dot(np.diag(1. / (n_i + (n_i == 0) * 1.)), T)
        den = p_ij + 1. * (p_ij == 0)
        b_i = np.zeros_like(A_i)
        p_ijm = np.zeros_like(M)
        # get dimensions
        m, n_rows, n_cols = M.shape
        # NOTE: m is deliberately reused as the regime loop counter below
        m = 0
        Q = 0.0
        LR = 0.0
        lr_table = np.zeros_like(M)
        q_table = np.zeros_like(M)

        for nijm in M:
            nim = nijm.sum(axis=1)
            B[:, m] = 1. * (nim > 0)
            b_i = b_i + 1. * (nim > 0)
            # regime-specific transition probabilities (zero rows guarded)
            p_ijm[m] = np.dot(np.diag(1. / (nim + (nim == 0) * 1.)), nijm)
            num = (p_ijm[m] - p_ij)**2
            ratio = num / den
            qijm = np.dot(np.diag(nim), ratio)
            q_table[m] = qijm
            Q = Q + qijm.sum()
            # only use nonzero pijm in lr test
            mask = (nijm > 0) * (p_ij > 0)
            A_im[:, m] = (nijm > 0).sum(axis=1)
            unmask = 1.0 * (mask == 0)
            ratio = (mask * p_ijm[m] + unmask) / (mask * p_ij + unmask)
            lr = nijm * np.log(ratio)
            LR = LR + lr.sum()
            lr_table[m] = 2 * lr
            m += 1

        # b_i is the number of regimes that have non-zero observations in row i
        # A_i is the number of non-zero elements in row i of the aggregated
        # transition matrix
        self.dof = int(((b_i - 1) * (A_i - 1)).sum())
        self.Q = Q
        self.Q_p_value = 1 - stats.chi2.cdf(self.Q, self.dof)
        self.LR = LR * 2.
        self.LR_p_value = 1 - stats.chi2.cdf(self.LR, self.dof)
        self.A = A_i
        self.A_im = A_im
        self.B = B
        self.b_i = b_i
        self.LR_table = lr_table
        self.Q_table = q_table
        self.m = m
        self.p_h0 = p_ij
        self.p_h1 = p_ijm
    def summary(self, file_name=None, title="Markov Homogeneity Test"):
        """Print a formatted report of the homogeneity test and, when
        ``file_name`` is given, also write a LaTeX table to that file."""
        regime_names = ["%d" % i for i in range(self.m)]
        if self.regime_names:
            regime_names = self.regime_names
        cols = ["P(%s)" % str(regime) for regime in regime_names]
        if not self.class_names:
            self.class_names = list(range(self.k))

        max_col = max([len(col) for col in cols])
        col_width = max([5, max_col])  # probabilities have 5 chars
        n_tabs = self.k
        width = n_tabs * 4 + (self.k + 1) * col_width
        lead = "-" * width
        head = title.center(width)
        contents = [lead, head, lead]
        l = "Number of regimes: %d" % int(self.m)
        k = "Number of classes: %d" % int(self.k)
        r = "Regime names: "
        r += ", ".join(regime_names)
        t = "Number of transitions: %d" % int(self.t_total)
        contents.append(k)
        contents.append(t)
        contents.append(l)
        contents.append(r)
        contents.append(lead)
        h = "%7s %20s %20s" % ('Test', 'LR', 'Chi-2')
        contents.append(h)
        stat = "%7s %20.3f %20.3f" % ('Stat.', self.LR, self.Q)
        contents.append(stat)
        stat = "%7s %20d %20d" % ('DOF', self.dof, self.dof)
        contents.append(stat)
        stat = "%7s %20.3f %20.3f" % ('p-value', self.LR_p_value,
                                      self.Q_p_value)
        contents.append(stat)
        print(("\n".join(contents)))
        print(lead)

        # print the null (pooled) probability matrix, collecting each row in
        # p0 for potential LaTeX output below
        cols = ["P(%s)" % str(regime) for regime in self.regime_names]
        if not self.class_names:
            self.class_names = list(range(self.k))
        cols.extend(["%s" % str(cname) for cname in self.class_names])

        max_col = max([len(col) for col in cols])
        col_width = max([5, max_col])  # probabilities have 5 chars
        p0 = []
        line0 = ['{s: <{w}}'.format(s="P(H0)", w=col_width)]
        line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname in
                       self.class_names]))
        print((" ".join(line0)))
        p0.append("&".join(line0))
        for i, row in enumerate(self.p_h0):
            line = ["%*s" % (col_width, str(self.class_names[i]))]
            line.extend(["%*.3f" % (col_width, v) for v in row])
            print((" ".join(line)))
            p0.append("&".join(line))
        pmats = [p0]

        print(lead)
        # one probability matrix per regime under the alternative
        for r, p1 in enumerate(self.p_h1):
            p0 = []
            line0 = ['{s: <{w}}'.format(s="P(%s)" %
                                        regime_names[r], w=col_width)]
            line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname
                           in self.class_names]))
            print((" ".join(line0)))
            p0.append("&".join(line0))
            for i, row in enumerate(p1):
                line = ["%*s" % (col_width, str(self.class_names[i]))]
                line.extend(["%*.3f" % (col_width, v) for v in row])
                print((" ".join(line)))
                p0.append("&".join(line))
            pmats.append(p0)
            print(lead)

        # optional LaTeX table output
        if file_name:
            k = self.k
            ks = str(k + 1)
            with open(file_name, 'w') as f:
                c = []
                fmt = "r" * (k + 1)
                s = "\\begin{tabular}{|%s|}\\hline\n" % fmt
                s += "\\multicolumn{%s}{|c|}{%s}" % (ks, title)
                c.append(s)
                s = "Number of classes: %d" % int(self.k)
                c.append("\\hline\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Number of transitions: %d" % int(self.t_total)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Number of regimes: %d" % int(self.m)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "Regime names: "
                s += ", ".join(regime_names)
                c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s))
                s = "\\hline\\multicolumn{2}{|l}{%s}" % ("Test")
                s += "&\\multicolumn{2}{r}{LR}&\\multicolumn{2}{r|}{Q}"
                c.append(s)
                s = "Stat."
                s = "\\multicolumn{2}{|l}{%s}" % (s)
                s += "&\\multicolumn{2}{r}{%.3f}" % self.LR
                s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q
                c.append(s)
                s = "\\multicolumn{2}{|l}{%s}" % ("DOF")
                s += "&\\multicolumn{2}{r}{%d}" % int(self.dof)
                s += "&\\multicolumn{2}{r|}{%d}" % int(self.dof)
                c.append(s)
                s = "\\multicolumn{2}{|l}{%s}" % ("p-value")
                s += "&\\multicolumn{2}{r}{%.3f}" % self.LR_p_value
                s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q_p_value
                c.append(s)
                s1 = "\\\\\n".join(c)
                s1 += "\\\\\n"
                c = []
                for mat in pmats:
                    c.append("\\hline\n")
                    for row in mat:
                        c.append(row + "\\\\\n")
                c.append("\\hline\n")
                c.append("\\end{tabular}")
                s2 = "".join(c)
                f.write(s1 + s2)
class FullRank_Markov:
"""
Full Rank Markov in which ranks are considered as Markov states rather
than quantiles or other discretized classes. This is one way to avoid
issues associated with discretization.
Parameters
----------
y : array
(n, t) with t>>n, one row per observation (n total),
one column recording the value of each observation,
with as many columns as time periods.
Attributes
----------
ranks : array
ranks of the original y array (by columns): higher values
rank higher, e.g. the largest value in a column ranks 1.
p : array
(n, n), transition probability matrix for Full
Rank Markov.
steady_state : array
(n, ), ergodic distribution.
transitions : array
(n, n), count of transitions between each rank i and j
fmpt : array
(n, n), first mean passage times.
sojourn_time : array
(n, ), sojourn times.
Notes
-----
Refer to :cite:`Rey2014a` Equation (11) for details. Ties are resolved by
assigning distinct ranks, corresponding to the order that the values occur
in each cross section.
Examples
--------
US nominal per capita income 48 states 81 years 1929-2009
>>> from giddy.markov import FullRank_Markov
>>> import libpysal as ps
>>> import numpy as np
>>> f = ps.io.open(ps.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]).transpose()
>>> m = FullRank_Markov(pci)
>>> m.ranks
array([[45, 45, 44, ..., 41, 40, 39],
[24, 25, 25, ..., 36, 38, 41],
[46, 47, 45, ..., 43, 43, 43],
...,
[34, 34, 34, ..., 47, 46, 42],
[17, 17, 22, ..., 25, 26, 25],
[16, 18, 19, ..., 6, 6, 7]])
>>> m.transitions
array([[66., 5., 5., ..., 0., 0., 0.],
[ 8., 51., 9., ..., 0., 0., 0.],
[ 2., 13., 44., ..., 0., 0., 0.],
...,
[ 0., 0., 0., ..., 40., 17., 0.],
[ 0., 0., 0., ..., 15., 54., 2.],
[ 0., 0., 0., ..., 2., 1., 77.]])
>>> m.p[0, :5]
array([0.825 , 0.0625, 0.0625, 0.025 , 0.025 ])
>>> m.fmpt[0, :5]
array([48. , 87.96280048, 68.1089084 , 58.83306575, 41.77250827])
>>> m.sojourn_time[:5]
array([5.71428571, 2.75862069, 2.22222222, 1.77777778, 1.66666667])
"""
def __init__(self, y):
y = np.asarray(y)
# resolve ties: All values are given a distinct rank, corresponding
# to the order that the values occur in each cross section.
r_asc = np.array([rankdata(col, method='ordinal') for col in y.T]).T
# ranks by high (1) to low (n)
self.ranks = r_asc.shape[0] - r_asc + 1
frm = Markov(self.ranks)
self.p = frm.p
self.transitions = frm.transitions
@property
def steady_state(self):
if not hasattr(self, '_steady_state'):
self._steady_state = STEADY_STATE(self.p)
return self._steady_state
@property
def fmpt(self):
if not hasattr(self, '_fmpt'):
self._fmpt = fmpt(self.p)
return self._fmpt
@property
def sojourn_time(self):
if not hasattr(self, '_st'):
self._st = sojourn_time(self.p)
return self._st
class GeoRank_Markov:
"""
Geographic Rank Markov.
Geographic units are considered as Markov states.
Parameters
----------
y : array
(n, t) with t>>n, one row per observation (n total),
one column recording the value of each observation,
with as many columns as time periods.
Attributes
----------
p : array
(n, n), transition probability matrix for
geographic rank Markov.
steady_state : array
(n, ), ergodic distribution.
transitions : array
(n, n), count of rank transitions between each
geographic unit i and j.
fmpt : array
(n, n), first mean passage times.
sojourn_time : array
(n, ), sojourn times.
Notes
-----
Refer to :cite:`Rey2014a` Equation (13)-(16) for details. Ties are
resolved by assigning distinct ranks, corresponding to the order
that the values occur in each cross section.
Examples
--------
US nominal per capita income 48 states 81 years 1929-2009
>>> from giddy.markov import GeoRank_Markov
>>> import libpysal as ps
>>> import numpy as np
>>> f = ps.io.open(ps.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]).transpose()
>>> m = GeoRank_Markov(pci)
>>> m.transitions
array([[38., 0., 8., ..., 0., 0., 0.],
[ 0., 15., 0., ..., 0., 1., 0.],
[ 6., 0., 44., ..., 5., 0., 0.],
...,
[ 2., 0., 5., ..., 34., 0., 0.],
[ 0., 0., 0., ..., 0., 18., 2.],
[ 0., 0., 0., ..., 0., 3., 14.]])
>>> m.p
array([[0.475 , 0. , 0.1 , ..., 0. , 0. , 0. ],
[0. , 0.1875, 0. , ..., 0. , 0.0125, 0. ],
[0.075 , 0. , 0.55 , ..., 0.0625, 0. , 0. ],
...,
[0.025 , 0. , 0.0625, ..., 0.425 , 0. , 0. ],
[0. , 0. , 0. , ..., 0. , 0.225 , 0.025 ],
[0. , 0. , 0. , ..., 0. , 0.0375, 0.175 ]])
>>> m.fmpt
array([[ 48. , 63.35532038, 92.75274652, ..., 82.47515731,
71.01114491, 68.65737127],
[108.25928005, 48. , 127.99032986, ..., 92.03098299,
63.36652935, 61.82733039],
[ 76.96801786, 64.7713783 , 48. , ..., 73.84595169,
72.24682723, 69.77497173],
...,
[ 93.3107474 , 62.47670463, 105.80634118, ..., 48. ,
69.30121319, 67.08838421],
[113.65278078, 61.1987031 , 133.57991745, ..., 96.0103924 ,
48. , 56.74165107],
[114.71894813, 63.4019776 , 134.73381719, ..., 97.287895 ,
61.45565054, 48. ]])
>>> m.sojourn_time
array([ 1.9047619 , 1.23076923, 2.22222222, 1.73913043, 1.15942029,
3.80952381, 1.70212766, 1.25 , 1.31147541, 1.11111111,
1.73913043, 1.37931034, 1.17647059, 1.21212121, 1.33333333,
1.37931034, 1.09589041, 2.10526316, 2. , 1.45454545,
1.26984127, 26.66666667, 1.19402985, 1.23076923, 1.09589041,
1.56862745, 1.26984127, 2.42424242, 1.50943396, 2. ,
1.29032258, 1.09589041, 1.6 , 1.42857143, 1.25 ,
1.45454545, 1.29032258, 1.6 , 1.17647059, 1.56862745,
1.25 , 1.37931034, 1.45454545, 1.42857143, 1.29032258,
1.73913043, 1.29032258, 1.21212121])
"""
def __init__(self, y):
y = np.asarray(y)
n = y.shape[0]
# resolve ties: All values are given a distinct rank, corresponding
# to the order that the values occur in each cross section.
ranks = np.array([rankdata(col, method='ordinal') for col in y.T]).T
geo_ranks = np.argsort(ranks, axis=0) + 1
grm = Markov(geo_ranks)
self.p = grm.p
self.transitions = grm.transitions
@property
def steady_state(self):
if not hasattr(self, '_steady_state'):
self._steady_state = STEADY_STATE(self.p)
return self._steady_state
@property
def fmpt(self):
if not hasattr(self, '_fmpt'):
self._fmpt = fmpt(self.p)
return self._fmpt
@property
def sojourn_time(self):
if not hasattr(self, '_st'):
self._st = sojourn_time(self.p)
return self._st
|
pysal/giddy | giddy/markov.py | Spatial_Markov._calc | python | def _calc(self, y, w):
'''Helper to estimate spatial lag conditioned Markov transition
probability matrices based on maximum likelihood techniques.
'''
if self.discrete:
self.lclass_ids = weights.lag_categorical(w, self.class_ids,
ties="tryself")
else:
ly = weights.lag_spatial(w, y)
self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
ly, self.m, self.lag_cutoffs)
self.lclasses = np.arange(self.m)
T = np.zeros((self.m, self.k, self.k))
n, t = y.shape
for t1 in range(t - 1):
t2 = t1 + 1
for i in range(n):
T[self.lclass_ids[i, t1], self.class_ids[i, t1],
self.class_ids[i, t2]] += 1
P = np.zeros_like(T)
for i, mat in enumerate(T):
row_sum = mat.sum(axis=1)
row_sum = row_sum + (row_sum == 0)
p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
P[i] = p_i
return T, P | Helper to estimate spatial lag conditioned Markov transition
probability matrices based on maximum likelihood techniques. | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L731-L759 | [
"def _maybe_classify(self, y, k, cutoffs):\n '''Helper method for classifying continuous data.\n\n '''\n\n rows, cols = y.shape\n if cutoffs is None:\n if self.fixed:\n mcyb = mc.Quantiles(y.flatten(), k=k)\n yb = mcyb.yb.reshape(y.shape)\n cutoffs = mcyb.bins\n k = len(cutoffs)\n return yb, cutoffs[:-1], k\n else:\n yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in\n np.arange(cols)]).transpose()\n return yb, None, k\n else:\n cutoffs = list(cutoffs) + [np.inf]\n cutoffs = np.array(cutoffs)\n yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape(\n y.shape)\n k = len(cutoffs)\n return yb, cutoffs[:-1], k\n"
] | class Spatial_Markov(object):
"""
Markov transitions conditioned on the value of the spatial lag.
Parameters
----------
y : array
(n, t), one row per observation, one column per state of
each observation, with as many columns as time periods.
w : W
spatial weights object.
k : integer, optional
number of classes (quantiles) for input time series y.
Default is 4. If discrete=True, k is determined
endogenously.
m : integer, optional
number of classes (quantiles) for the spatial lags of
regional time series. Default is 4. If discrete=True,
m is determined endogenously.
permutations : int, optional
number of permutations for use in randomization based
inference (the default is 0).
fixed : bool, optional
If true, discretization are taken over the entire n*t
pooled series and cutoffs can be user-defined. If
cutoffs and lag_cutoffs are not given, quantiles are
used. If false, quantiles are taken each time period
over n. Default is True.
discrete : bool, optional
If true, categorical spatial lags which are most common
categories of neighboring observations serve as the
conditioning and fixed is ignored; if false, weighted
averages of neighboring observations are used. Default is
false.
cutoffs : array, optional
users can specify the discretization cutoffs for
continuous time series. Default is None, meaning that
quantiles will be used for the discretization.
lag_cutoffs : array, optional
users can specify the discretization cutoffs for the
spatial lags of continuous time series. Default is
None, meaning that quantiles will be used for the
discretization.
variable_name : string
name of variable.
Attributes
----------
class_ids : array
(n, t), discretized series if y is continuous. Otherwise
it is identical to y.
classes : array
(k, 1), all different classes (bins).
lclass_ids : array
(n, t), spatial lag series.
lclasses : array
(k, 1), all different classes (bins) for
spatial lags.
p : array
(k, k), transition probability matrix for a-spatial
Markov.
s : array
(k, 1), ergodic distribution for a-spatial Markov.
transitions : array
(k, k), counts of transitions between each state i and j
for a-spatial Markov.
T : array
(k, k, k), counts of transitions for each conditional
Markov. T[0] is the matrix of transitions for
observations with lags in the 0th quantile; T[k-1] is the
transitions for the observations with lags in the k-1th.
P : array
(k, k, k), transition probability matrix for spatial
Markov first dimension is the conditioned on the lag.
S : array
(k, k), steady state distributions for spatial Markov.
Each row is a conditional steady_state.
F : array
(k, k, k),first mean passage times.
First dimension is conditioned on the lag.
shtest : list
(k elements), each element of the list is a tuple for a
multinomial difference test between the steady state
distribution from a conditional distribution versus the
overall steady state distribution: first element of the
tuple is the chi2 value, second its p-value and the third
the degrees of freedom.
chi2 : list
(k elements), each element of the list is a tuple for a
chi-squared test of the difference between the
conditional transition matrix against the overall
transition matrix: first element of the tuple is the chi2
value, second its p-value and the third the degrees of
freedom.
x2 : float
sum of the chi2 values for each of the conditional tests.
Has an asymptotic chi2 distribution with k(k-1)(k-1)
degrees of freedom. Under the null that transition
probabilities are spatially homogeneous.
(see chi2 above)
x2_dof : int
degrees of freedom for homogeneity test.
x2_pvalue : float
pvalue for homogeneity test based on analytic.
distribution
x2_rpvalue : float
(if permutations>0)
pseudo p-value for x2 based on random spatial
permutations of the rows of the original transitions.
x2_realizations : array
(permutations,1), the values of x2 for the random
permutations.
Q : float
Chi-square test of homogeneity across lag classes based
on :cite:`Bickenbach2003`.
Q_p_value : float
p-value for Q.
LR : float
Likelihood ratio statistic for homogeneity across lag
classes based on :cite:`Bickenbach2003`.
LR_p_value : float
p-value for LR.
dof_hom : int
degrees of freedom for LR and Q, corrected for 0 cells.
Notes
-----
Based on :cite:`Rey2001`.
The shtest and chi2 tests should be used with caution as they are based on
classic theory assuming random transitions. The x2 based test is
preferable since it simulates the randomness under the null. It is an
experimental test requiring further analysis.
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov
>>> import numpy as np
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform = 'r'
Now we create a `Spatial_Markov` instance for the continuous relative per
capita income time series for 48 US lower states 1929-2009. The current
implementation allows users to classify the continuous incomes in a more
flexible way.
(1) Global quintiles to discretize the income data (k=5), and global
quintiles to discretize the spatial lags of incomes (m=5).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=5, variable_name='rpci')
We can examine the cutoffs for the incomes and cutoffs for the spatial lags
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.88973386, 0.95891917, 1.01469758, 1.1183566 ])
Obviously, they are slightly different.
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.96341463 0.0304878 0.00609756 0. 0. ]
[0.06040268 0.83221477 0.10738255 0. 0. ]
[0. 0.14 0.74 0.12 0. ]
[0. 0.03571429 0.32142857 0.57142857 0.07142857]
[0. 0. 0. 0.16666667 0.83333333]]
[[0.79831933 0.16806723 0.03361345 0. 0. ]
[0.0754717 0.88207547 0.04245283 0. 0. ]
[0.00537634 0.06989247 0.8655914 0.05913978 0. ]
[0. 0. 0.06372549 0.90196078 0.03431373]
[0. 0. 0. 0.19444444 0.80555556]]
[[0.84693878 0.15306122 0. 0. 0. ]
[0.08133971 0.78947368 0.1291866 0. 0. ]
[0.00518135 0.0984456 0.79274611 0.0984456 0.00518135]
[0. 0. 0.09411765 0.87058824 0.03529412]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.8852459 0.09836066 0. 0.01639344 0. ]
[0.03875969 0.81395349 0.13953488 0. 0.00775194]
[0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ]
[0. 0.02339181 0.12865497 0.75438596 0.09356725]
[0. 0. 0. 0.09661836 0.90338164]]
[[0.33333333 0.66666667 0. 0. 0. ]
[0.0483871 0.77419355 0.16129032 0.01612903 0. ]
[0.01149425 0.16091954 0.74712644 0.08045977 0. ]
[0. 0.01036269 0.06217617 0.89637306 0.03108808]
[0. 0. 0. 0.02352941 0.97647059]]
The probability of a poor state remaining poor is 0.963 if their
neighbors are in the 1st quintile and 0.798 if their neighbors are
in the 2nd quintile. The probability of a rich economy remaining
rich is 0.976 if their neighbors are in the 5th quintile, but if their
neighbors are in the 4th quintile this drops to 0.903.
The global transition probability matrix is estimated:
>>> print(sm.p)
[[0.91461837 0.07503234 0.00905563 0.00129366 0. ]
[0.06570302 0.82654402 0.10512484 0.00131406 0.00131406]
[0.00520833 0.10286458 0.79427083 0.09505208 0.00260417]
[0. 0.00913838 0.09399478 0.84856397 0.04830287]
[0. 0. 0. 0.06217617 0.93782383]]
The Q and likelihood ratio statistics are both significant indicating
the dynamics are not homogeneous across the lag classes:
>>> "%.3f"%sm.LR
'170.659'
>>> "%.3f"%sm.Q
'200.624'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
60
The long run distribution for states with poor (rich) neighbors has
0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in
the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the
fourth and 0.029 (0.337) in the fifth quintile.
>>> sm.S
array([[0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278],
[0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356],
[0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ],
[0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013],
[0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]])
    States with incomes in the first quintile with neighbors in the
    first quintile return to the first quintile after 2.298 years, after
    leaving the first quintile. They enter the fourth quintile after
    80.810 years after leaving the first quintile, on average.
    Poor states with neighbors in the fourth quintile return to the
    first quintile, on average, after 12.88 years, and would enter the
    fourth quintile after 28.473 years.
>>> for f in sm.F:
... print(f)
...
[[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143]
[ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286]
[ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429]
[ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762]
[ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]]
[[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197]
[ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ]
[ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482]
[ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314]
[ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]]
[[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026]
[ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693]
[ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594]
[ 83.72288828 39.06666667 14.3 3.44668119 76.36702977]
[ 93.52288828 48.86666667 24.1 9.8 8.79255406]]
[[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142]
[ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423]
[117.76777159 23.03735526 3.94436301 15.0843986 43.57927247]
[127.89752089 32.4393006 14.56853107 4.44831643 31.63099455]
[138.24752089 42.7893006 24.91853107 10.35 4.05613474]]
[[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826]
[ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826]
[ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217]
[127.1407767 48.74107143 33.29605263 3.91777427 83.52173913]
[169.6407767 91.24107143 75.79605263 42.5 2.96521739]]
(2) Global quintiles to discretize the income data (k=5), and global
quartiles to discretize the spatial lags of incomes (m=4).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=4, variable_name='rpci')
We can also examine the cutoffs for the incomes and cutoffs for the spatial
lags:
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.91440247, 0.98583079, 1.08698351])
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.95708955 0.03544776 0.00746269 0. 0. ]
[0.05825243 0.83980583 0.10194175 0. 0. ]
[0. 0.1294964 0.76258993 0.10791367 0. ]
[0. 0.01538462 0.18461538 0.72307692 0.07692308]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.7421875 0.234375 0.0234375 0. 0. ]
[0.08550186 0.85130112 0.06319703 0. 0. ]
[0.00865801 0.06926407 0.86147186 0.05627706 0.004329 ]
[0. 0. 0.05363985 0.92337165 0.02298851]
[0. 0. 0. 0.13432836 0.86567164]]
[[0.95145631 0.04854369 0. 0. 0. ]
[0.06 0.79 0.145 0. 0.005 ]
[0.00358423 0.10394265 0.7921147 0.09677419 0.00358423]
[0. 0.01630435 0.13586957 0.75543478 0.0923913 ]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.16666667 0.66666667 0. 0.16666667 0. ]
[0.03488372 0.80232558 0.15116279 0.01162791 0. ]
[0.00840336 0.13445378 0.70588235 0.1512605 0. ]
[0. 0.01171875 0.08203125 0.87109375 0.03515625]
[0. 0. 0. 0.03434343 0.96565657]]
We now obtain 4 5*5 spatial lag conditioned transition probability
matrices instead of 5 as in case (1).
The Q and likelihood ratio statistics are still both significant.
>>> "%.3f"%sm.LR
'172.105'
>>> "%.3f"%sm.Q
'321.128'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
45
(3) We can also set the cutoffs for relative incomes and their
spatial lags manually.
For example, we want the defining cutoffs to be [0.8, 0.9, 1, 1.2],
meaning that relative incomes:
2.1 smaller than 0.8 : class 0
2.2 between 0.8 and 0.9: class 1
2.3 between 0.9 and 1.0 : class 2
2.4 between 1.0 and 1.2: class 3
2.5 larger than 1.2: class 4
>>> cc = np.array([0.8, 0.9, 1, 1.2])
>>> sm = Spatial_Markov(rpci, w, cutoffs=cc, lag_cutoffs=cc, variable_name='rpci')
>>> sm.cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.k
5
>>> sm.lag_cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.96703297 0.03296703 0. 0. 0. ]
[0.10638298 0.68085106 0.21276596 0. 0. ]
[0. 0.14285714 0.7755102 0.08163265 0. ]
[0. 0. 0.5 0.5 0. ]
[0. 0. 0. 0. 0. ]]
[[0.88636364 0.10606061 0.00757576 0. 0. ]
[0.04402516 0.89308176 0.06289308 0. 0. ]
[0. 0.05882353 0.8627451 0.07843137 0. ]
[0. 0. 0.13846154 0.86153846 0. ]
[0. 0. 0. 0. 1. ]]
[[0.78082192 0.17808219 0.02739726 0.01369863 0. ]
[0.03488372 0.90406977 0.05813953 0.00290698 0. ]
[0. 0.05919003 0.84735202 0.09034268 0.00311526]
[0. 0. 0.05811623 0.92985972 0.01202405]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.82692308 0.15384615 0. 0.01923077 0. ]
[0.0703125 0.7890625 0.125 0.015625 0. ]
[0.00295858 0.06213018 0.82248521 0.10946746 0.00295858]
[0. 0.00185529 0.07606679 0.88497217 0.03710575]
[0. 0. 0. 0.07803468 0.92196532]]
[[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0.06666667 0.9 0.03333333 0. ]
[0. 0. 0.05660377 0.90566038 0.03773585]
[0. 0. 0. 0.03932584 0.96067416]]
(4) Spatial_Markov also accept discrete time series and calculate
categorical spatial lags on which several transition probability matrices
are conditioned.
Let's still use the US state income time series to demonstrate. We first
discretize them into categories and then pass them to Spatial_Markov.
>>> import mapclassify as mc
>>> y = mc.Quantiles(rpci.flatten(), k=5).yb.reshape(rpci.shape)
>>> np.random.seed(5)
>>> sm = Spatial_Markov(y, w, discrete=True, variable_name='discretized rpci')
>>> sm.k
5
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.94787645 0.04440154 0.00772201 0. 0. ]
[0.08333333 0.81060606 0.10606061 0. 0. ]
[0. 0.12765957 0.79787234 0.07446809 0. ]
[0. 0.02777778 0.22222222 0.66666667 0.08333333]
[0. 0. 0. 0.33333333 0.66666667]]
[[0.888 0.096 0.016 0. 0. ]
[0.06049822 0.84341637 0.09608541 0. 0. ]
[0.00666667 0.10666667 0.81333333 0.07333333 0. ]
[0. 0. 0.08527132 0.86821705 0.04651163]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.65217391 0.32608696 0.02173913 0. 0. ]
[0.07446809 0.80851064 0.11170213 0. 0.00531915]
[0.01071429 0.1 0.76428571 0.11785714 0.00714286]
[0. 0.00552486 0.09392265 0.86187845 0.03867403]
[0. 0. 0. 0.13157895 0.86842105]]
[[0.91935484 0.06451613 0. 0.01612903 0. ]
[0.06796117 0.90291262 0.02912621 0. 0. ]
[0. 0.05755396 0.87769784 0.0647482 0. ]
[0. 0.02150538 0.10752688 0.80107527 0.06989247]
[0. 0. 0. 0.08064516 0.91935484]]
[[0.81818182 0.18181818 0. 0. 0. ]
[0.01754386 0.70175439 0.26315789 0.01754386 0. ]
[0. 0.14285714 0.73333333 0.12380952 0. ]
[0. 0.0042735 0.06837607 0.89316239 0.03418803]
[0. 0. 0. 0.03891051 0.96108949]]
"""
    def __init__(self, y, w, k=4, m=4, permutations=0, fixed=True,
                 discrete=False, cutoffs=None, lag_cutoffs=None,
                 variable_name=None):
        # See the class docstring for full parameter documentation.
        y = np.asarray(y)
        self.fixed = fixed
        self.discrete = discrete
        self.cutoffs = cutoffs
        self.m = m
        self.lag_cutoffs = lag_cutoffs
        self.variable_name = variable_name
        if discrete:
            # y is already categorical: collect the distinct labels across
            # all observations/periods and recode them as 0..k-1 integers.
            merged = list(itertools.chain.from_iterable(y))
            classes = np.unique(merged)
            self.classes = classes
            self.k = len(classes)
            # In the discrete case the number of lag classes equals the
            # number of y classes (k and m are determined endogenously).
            self.m = self.k
            label_dict = dict(zip(classes, range(self.k)))
            y_int = []
            for yi in y:
                y_int.append(list(map(label_dict.get, yi)))
            self.class_ids = np.array(y_int)
            # Placeholder; _calc recomputes self.lclass_ids as the
            # categorical spatial lag of class_ids.
            self.lclass_ids = self.class_ids
        else:
            # Continuous y: discretize (quantiles or user cutoffs).
            self.class_ids, self.cutoffs, self.k = self._maybe_classify(
                y, k=k, cutoffs=self.cutoffs)
            self.classes = np.arange(self.k)
        # Pooled (a-spatial) Markov chain over the discretized series.
        classic = Markov(self.class_ids)
        self.p = classic.p
        self.transitions = classic.transitions
        # Lag-conditioned transition counts T (m, k, k) and probabilities P.
        self.T, self.P = self._calc(y, w)
        if permutations:
            # Permutation inference: randomly reassign rows (observations)
            # of y and recompute the homogeneity statistic each time.
            nrp = np.random.permutation
            counter = 0
            x2_realizations = np.zeros((permutations, 1))
            for perm in range(permutations):
                T, P = self._calc(nrp(y), w)
                # chi2 here is the module-level test function, not the
                # class's chi2 property.
                x2 = [chi2(T[i], self.transitions)[0] for i in range(self.k)]
                x2s = sum(x2)
                x2_realizations[perm] = x2s
                # Compare against the observed statistic (self.x2 property).
                if x2s >= self.x2:
                    counter += 1
            # Pseudo p-value with the conventional +1 correction.
            self.x2_rpvalue = (counter + 1.0) / (permutations + 1.)
            self.x2_realizations = x2_realizations
@property
def s(self):
if not hasattr(self, '_s'):
self._s = STEADY_STATE(self.p)
return self._s
@property
def S(self):
if not hasattr(self, '_S'):
S = np.zeros_like(self.p)
for i, p in enumerate(self.P):
S[i] = STEADY_STATE(p)
self._S = np.asarray(S)
return self._S
@property
def F(self):
if not hasattr(self, '_F'):
F = np.zeros_like(self.P)
for i, p in enumerate(self.P):
F[i] = fmpt(np.asmatrix(p))
self._F = np.asarray(F)
return self._F
# bickenbach and bode tests
@property
def ht(self):
if not hasattr(self, '_ht'):
self._ht = homogeneity(self.T)
return self._ht
@property
def Q(self):
if not hasattr(self, '_Q'):
self._Q = self.ht.Q
return self._Q
@property
def Q_p_value(self):
self._Q_p_value = self.ht.Q_p_value
return self._Q_p_value
@property
def LR(self):
self._LR = self.ht.LR
return self._LR
@property
def LR_p_value(self):
self._LR_p_value = self.ht.LR_p_value
return self._LR_p_value
@property
def dof_hom(self):
self._dof_hom = self.ht.dof
return self._dof_hom
# shtests
@property
def shtest(self):
if not hasattr(self, '_shtest'):
self._shtest = self._mn_test()
return self._shtest
@property
def chi2(self):
if not hasattr(self, '_chi2'):
self._chi2 = self._chi2_test()
return self._chi2
@property
def x2(self):
if not hasattr(self, '_x2'):
self._x2 = sum([c[0] for c in self.chi2])
return self._x2
@property
def x2_pvalue(self):
if not hasattr(self, '_x2_pvalue'):
self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof)
return self._x2_pvalue
@property
def x2_dof(self):
if not hasattr(self, '_x2_dof'):
k = self.k
self._x2_dof = k * (k - 1) * (k - 1)
return self._x2_dof
def _mn_test(self):
"""
helper to calculate tests of differences between steady state
distributions from the conditional and overall distributions.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [self._ssmnp_test(
self.s, self.S[i], self.T[i].sum()) for i in rn]
return mat
    def _ssmnp_test(self, p1, p2, nt):
        """
        Steady state multinomial probability difference test.

        Arguments
        ---------
        p1 : array
             (k, ), first steady state probability distribution
             (the null / expected distribution).
        p2 : array
             (k, ), second steady state probability distribution
             (the observed distribution being tested).
        nt : int
             number of transitions to base the test on.

        Returns
        -------
        tuple
             (3 elements)
             (chi2 value, pvalue, degrees of freedom)
        """
        # Scale both distributions to expected/observed counts.
        o = nt * p2
        e = nt * p1
        # Pearson chi-square statistic: sum((o - e)^2 / e).
        d = np.multiply((o - e), (o - e))
        d = d / e
        chi2 = d.sum()
        # k - 1 degrees of freedom for a k-category multinomial.
        pvalue = 1 - stats.chi2.cdf(chi2, self.k - 1)
        return (chi2, pvalue, self.k - 1)
def _chi2_test(self):
"""
helper to calculate tests of differences between the conditional
transition matrices and the overall transitions matrix.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [chi2(self.T[i], self.transitions) for i in rn]
return mat
def summary(self, file_name=None):
"""
A summary method to call the Markov homogeneity test to test for
temporally lagged spatial dependence.
To learn more about the properties of the tests, refer to
:cite:`Rey2016a` and :cite:`Kang2018`.
"""
class_names = ["C%d" % i for i in range(self.k)]
regime_names = ["LAG%d" % i for i in range(self.k)]
ht = homogeneity(self.T, class_names=class_names,
regime_names=regime_names)
title = "Spatial Markov Test"
if self.variable_name:
title = title + ": " + self.variable_name
if file_name:
ht.summary(file_name=file_name, title=title)
else:
ht.summary(title=title)
def _maybe_classify(self, y, k, cutoffs):
    '''Helper method for classifying continuous data.

    Parameters
    ----------
    y : array
        (n, t), continuous values to discretize.
    k : int
        number of classes when quantile classification is used.
    cutoffs : array or None
        user-defined upper bounds for the classes; if None, quantiles
        are used (pooled over the whole n*t series when ``self.fixed``
        is True, otherwise per time period).

    Returns
    -------
    yb : array
        (n, t), integer class ids.
    cutoffs : array or None
        upper cutoffs actually used (top bin excluded), or None when
        classification was done per time period.
    k : int
        number of classes actually used.
    '''
    cols = y.shape[1]
    if cutoffs is None:
        if self.fixed:
            # pooled quantiles over the entire n*t series
            mcyb = mc.Quantiles(y.flatten(), k=k)
            yb = mcyb.yb.reshape(y.shape)
            cutoffs = mcyb.bins
            k = len(cutoffs)
            return yb, cutoffs[:-1], k
        else:
            # quantiles taken separately for each time period
            yb = np.array([mc.Quantiles(y[:, i], k=k).yb
                           for i in np.arange(cols)]).transpose()
            return yb, None, k
    else:
        # user-defined cutoffs; open the top bin with +inf
        # (original re-wrapped the already-converted array a second time)
        cutoffs = np.array(list(cutoffs) + [np.inf])
        yb = mc.User_Defined(y.flatten(), cutoffs).yb.reshape(y.shape)
        k = len(cutoffs)
        return yb, cutoffs[:-1], k
|
pysal/giddy | giddy/markov.py | Spatial_Markov.summary | python | def summary(self, file_name=None):
class_names = ["C%d" % i for i in range(self.k)]
regime_names = ["LAG%d" % i for i in range(self.k)]
ht = homogeneity(self.T, class_names=class_names,
regime_names=regime_names)
title = "Spatial Markov Test"
if self.variable_name:
title = title + ": " + self.variable_name
if file_name:
ht.summary(file_name=file_name, title=title)
else:
ht.summary(title=title) | A summary method to call the Markov homogeneity test to test for
temporally lagged spatial dependence.
To learn more about the properties of the tests, refer to
:cite:`Rey2016a` and :cite:`Kang2018`. | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L811-L830 | [
"def homogeneity(transition_matrices, regime_names=[], class_names=[],\n title=\"Markov Homogeneity Test\"):\n \"\"\"\n Test for homogeneity of Markov transition probabilities across regimes.\n\n Parameters\n ----------\n transition_matrices : list\n of transition matrices for regimes, all matrices must\n have same size (r, c). r is the number of rows in the\n transition matrix and c is the number of columns in\n the transition matrix.\n regime_names : sequence\n Labels for the regimes.\n class_names : sequence\n Labels for the classes/states of the Markov chain.\n title : string\n name of test.\n\n Returns\n -------\n : implicit\n an instance of Homogeneity_Results.\n \"\"\"\n\n return Homogeneity_Results(transition_matrices, regime_names=regime_names,\n class_names=class_names, title=title)\n",
"def summary(self, file_name=None, title=\"Markov Homogeneity Test\"):\n regime_names = [\"%d\" % i for i in range(self.m)]\n if self.regime_names:\n regime_names = self.regime_names\n cols = [\"P(%s)\" % str(regime) for regime in regime_names]\n if not self.class_names:\n self.class_names = list(range(self.k))\n\n max_col = max([len(col) for col in cols])\n col_width = max([5, max_col]) # probabilities have 5 chars\n n_tabs = self.k\n width = n_tabs * 4 + (self.k + 1) * col_width\n lead = \"-\" * width\n head = title.center(width)\n contents = [lead, head, lead]\n l = \"Number of regimes: %d\" % int(self.m)\n k = \"Number of classes: %d\" % int(self.k)\n r = \"Regime names: \"\n r += \", \".join(regime_names)\n t = \"Number of transitions: %d\" % int(self.t_total)\n contents.append(k)\n contents.append(t)\n contents.append(l)\n contents.append(r)\n contents.append(lead)\n h = \"%7s %20s %20s\" % ('Test', 'LR', 'Chi-2')\n contents.append(h)\n stat = \"%7s %20.3f %20.3f\" % ('Stat.', self.LR, self.Q)\n contents.append(stat)\n stat = \"%7s %20d %20d\" % ('DOF', self.dof, self.dof)\n contents.append(stat)\n stat = \"%7s %20.3f %20.3f\" % ('p-value', self.LR_p_value,\n self.Q_p_value)\n contents.append(stat)\n print((\"\\n\".join(contents)))\n print(lead)\n\n cols = [\"P(%s)\" % str(regime) for regime in self.regime_names]\n if not self.class_names:\n self.class_names = list(range(self.k))\n cols.extend([\"%s\" % str(cname) for cname in self.class_names])\n\n max_col = max([len(col) for col in cols])\n col_width = max([5, max_col]) # probabilities have 5 chars\n p0 = []\n line0 = ['{s: <{w}}'.format(s=\"P(H0)\", w=col_width)]\n line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname in\n self.class_names]))\n print((\" \".join(line0)))\n p0.append(\"&\".join(line0))\n for i, row in enumerate(self.p_h0):\n line = [\"%*s\" % (col_width, str(self.class_names[i]))]\n line.extend([\"%*.3f\" % (col_width, v) for v in row])\n print((\" \".join(line)))\n 
p0.append(\"&\".join(line))\n pmats = [p0]\n\n print(lead)\n for r, p1 in enumerate(self.p_h1):\n p0 = []\n line0 = ['{s: <{w}}'.format(s=\"P(%s)\" %\n regime_names[r], w=col_width)]\n line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname\n in self.class_names]))\n print((\" \".join(line0)))\n p0.append(\"&\".join(line0))\n for i, row in enumerate(p1):\n line = [\"%*s\" % (col_width, str(self.class_names[i]))]\n line.extend([\"%*.3f\" % (col_width, v) for v in row])\n print((\" \".join(line)))\n p0.append(\"&\".join(line))\n pmats.append(p0)\n print(lead)\n\n if file_name:\n k = self.k\n ks = str(k + 1)\n with open(file_name, 'w') as f:\n c = []\n fmt = \"r\" * (k + 1)\n s = \"\\\\begin{tabular}{|%s|}\\\\hline\\n\" % fmt\n s += \"\\\\multicolumn{%s}{|c|}{%s}\" % (ks, title)\n c.append(s)\n s = \"Number of classes: %d\" % int(self.k)\n c.append(\"\\\\hline\\\\multicolumn{%s}{|l|}{%s}\" % (ks, s))\n s = \"Number of transitions: %d\" % int(self.t_total)\n c.append(\"\\\\multicolumn{%s}{|l|}{%s}\" % (ks, s))\n s = \"Number of regimes: %d\" % int(self.m)\n c.append(\"\\\\multicolumn{%s}{|l|}{%s}\" % (ks, s))\n s = \"Regime names: \"\n s += \", \".join(regime_names)\n c.append(\"\\\\multicolumn{%s}{|l|}{%s}\" % (ks, s))\n s = \"\\\\hline\\\\multicolumn{2}{|l}{%s}\" % (\"Test\")\n s += \"&\\\\multicolumn{2}{r}{LR}&\\\\multicolumn{2}{r|}{Q}\"\n c.append(s)\n s = \"Stat.\"\n s = \"\\\\multicolumn{2}{|l}{%s}\" % (s)\n s += \"&\\\\multicolumn{2}{r}{%.3f}\" % self.LR\n s += \"&\\\\multicolumn{2}{r|}{%.3f}\" % self.Q\n c.append(s)\n s = \"\\\\multicolumn{2}{|l}{%s}\" % (\"DOF\")\n s += \"&\\\\multicolumn{2}{r}{%d}\" % int(self.dof)\n s += \"&\\\\multicolumn{2}{r|}{%d}\" % int(self.dof)\n c.append(s)\n s = \"\\\\multicolumn{2}{|l}{%s}\" % (\"p-value\")\n s += \"&\\\\multicolumn{2}{r}{%.3f}\" % self.LR_p_value\n s += \"&\\\\multicolumn{2}{r|}{%.3f}\" % self.Q_p_value\n c.append(s)\n s1 = \"\\\\\\\\\\n\".join(c)\n s1 += \"\\\\\\\\\\n\"\n c = []\n for mat in pmats:\n 
c.append(\"\\\\hline\\n\")\n for row in mat:\n c.append(row + \"\\\\\\\\\\n\")\n c.append(\"\\\\hline\\n\")\n c.append(\"\\\\end{tabular}\")\n s2 = \"\".join(c)\n f.write(s1 + s2)\n"
] | class Spatial_Markov(object):
"""
Markov transitions conditioned on the value of the spatial lag.
Parameters
----------
y : array
(n, t), one row per observation, one column per state of
each observation, with as many columns as time periods.
w : W
spatial weights object.
k : integer, optional
number of classes (quantiles) for input time series y.
Default is 4. If discrete=True, k is determined
endogenously.
m : integer, optional
number of classes (quantiles) for the spatial lags of
regional time series. Default is 4. If discrete=True,
m is determined endogenously.
permutations : int, optional
number of permutations for use in randomization based
inference (the default is 0).
fixed : bool, optional
If true, discretization are taken over the entire n*t
pooled series and cutoffs can be user-defined. If
cutoffs and lag_cutoffs are not given, quantiles are
used. If false, quantiles are taken each time period
over n. Default is True.
discrete : bool, optional
If true, categorical spatial lags which are most common
categories of neighboring observations serve as the
conditioning and fixed is ignored; if false, weighted
averages of neighboring observations are used. Default is
false.
cutoffs : array, optional
users can specify the discretization cutoffs for
continuous time series. Default is None, meaning that
quantiles will be used for the discretization.
lag_cutoffs : array, optional
users can specify the discretization cutoffs for the
spatial lags of continuous time series. Default is
None, meaning that quantiles will be used for the
discretization.
variable_name : string
name of variable.
Attributes
----------
class_ids : array
(n, t), discretized series if y is continuous. Otherwise
it is identical to y.
classes : array
(k, 1), all different classes (bins).
lclass_ids : array
(n, t), spatial lag series.
lclasses : array
(k, 1), all different classes (bins) for
spatial lags.
p : array
(k, k), transition probability matrix for a-spatial
Markov.
s : array
(k, 1), ergodic distribution for a-spatial Markov.
transitions : array
(k, k), counts of transitions between each state i and j
for a-spatial Markov.
T : array
(k, k, k), counts of transitions for each conditional
Markov. T[0] is the matrix of transitions for
observations with lags in the 0th quantile; T[k-1] is the
transitions for the observations with lags in the k-1th.
P : array
(k, k, k), transition probability matrix for spatial
Markov first dimension is the conditioned on the lag.
S : array
(k, k), steady state distributions for spatial Markov.
Each row is a conditional steady_state.
F : array
(k, k, k),first mean passage times.
First dimension is conditioned on the lag.
shtest : list
(k elements), each element of the list is a tuple for a
multinomial difference test between the steady state
distribution from a conditional distribution versus the
overall steady state distribution: first element of the
tuple is the chi2 value, second its p-value and the third
the degrees of freedom.
chi2 : list
(k elements), each element of the list is a tuple for a
chi-squared test of the difference between the
conditional transition matrix against the overall
transition matrix: first element of the tuple is the chi2
value, second its p-value and the third the degrees of
freedom.
x2 : float
sum of the chi2 values for each of the conditional tests.
Has an asymptotic chi2 distribution with k(k-1)(k-1)
degrees of freedom. Under the null that transition
probabilities are spatially homogeneous.
(see chi2 above)
x2_dof : int
degrees of freedom for homogeneity test.
x2_pvalue : float
pvalue for homogeneity test based on analytic.
distribution
x2_rpvalue : float
(if permutations>0)
pseudo p-value for x2 based on random spatial
permutations of the rows of the original transitions.
x2_realizations : array
(permutations,1), the values of x2 for the random
permutations.
Q : float
Chi-square test of homogeneity across lag classes based
on :cite:`Bickenbach2003`.
Q_p_value : float
p-value for Q.
LR : float
Likelihood ratio statistic for homogeneity across lag
classes based on :cite:`Bickenbach2003`.
LR_p_value : float
p-value for LR.
dof_hom : int
degrees of freedom for LR and Q, corrected for 0 cells.
Notes
-----
Based on :cite:`Rey2001`.
The shtest and chi2 tests should be used with caution as they are based on
classic theory assuming random transitions. The x2 based test is
preferable since it simulates the randomness under the null. It is an
experimental test requiring further analysis.
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov
>>> import numpy as np
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform = 'r'
Now we create a `Spatial_Markov` instance for the continuous relative per
capita income time series for 48 US lower states 1929-2009. The current
implementation allows users to classify the continuous incomes in a more
flexible way.
(1) Global quintiles to discretize the income data (k=5), and global
quintiles to discretize the spatial lags of incomes (m=5).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=5, variable_name='rpci')
We can examine the cutoffs for the incomes and cutoffs for the spatial lags
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.88973386, 0.95891917, 1.01469758, 1.1183566 ])
Obviously, they are slightly different.
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.96341463 0.0304878 0.00609756 0. 0. ]
[0.06040268 0.83221477 0.10738255 0. 0. ]
[0. 0.14 0.74 0.12 0. ]
[0. 0.03571429 0.32142857 0.57142857 0.07142857]
[0. 0. 0. 0.16666667 0.83333333]]
[[0.79831933 0.16806723 0.03361345 0. 0. ]
[0.0754717 0.88207547 0.04245283 0. 0. ]
[0.00537634 0.06989247 0.8655914 0.05913978 0. ]
[0. 0. 0.06372549 0.90196078 0.03431373]
[0. 0. 0. 0.19444444 0.80555556]]
[[0.84693878 0.15306122 0. 0. 0. ]
[0.08133971 0.78947368 0.1291866 0. 0. ]
[0.00518135 0.0984456 0.79274611 0.0984456 0.00518135]
[0. 0. 0.09411765 0.87058824 0.03529412]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.8852459 0.09836066 0. 0.01639344 0. ]
[0.03875969 0.81395349 0.13953488 0. 0.00775194]
[0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ]
[0. 0.02339181 0.12865497 0.75438596 0.09356725]
[0. 0. 0. 0.09661836 0.90338164]]
[[0.33333333 0.66666667 0. 0. 0. ]
[0.0483871 0.77419355 0.16129032 0.01612903 0. ]
[0.01149425 0.16091954 0.74712644 0.08045977 0. ]
[0. 0.01036269 0.06217617 0.89637306 0.03108808]
[0. 0. 0. 0.02352941 0.97647059]]
The probability of a poor state remaining poor is 0.963 if their
neighbors are in the 1st quintile and 0.798 if their neighbors are
in the 2nd quintile. The probability of a rich economy remaining
rich is 0.976 if their neighbors are in the 5th quintile, but if their
neighbors are in the 4th quintile this drops to 0.903.
The global transition probability matrix is estimated:
>>> print(sm.p)
[[0.91461837 0.07503234 0.00905563 0.00129366 0. ]
[0.06570302 0.82654402 0.10512484 0.00131406 0.00131406]
[0.00520833 0.10286458 0.79427083 0.09505208 0.00260417]
[0. 0.00913838 0.09399478 0.84856397 0.04830287]
[0. 0. 0. 0.06217617 0.93782383]]
The Q and likelihood ratio statistics are both significant indicating
the dynamics are not homogeneous across the lag classes:
>>> "%.3f"%sm.LR
'170.659'
>>> "%.3f"%sm.Q
'200.624'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
60
The long run distribution for states with poor (rich) neighbors has
0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in
the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the
fourth and 0.029 (0.337) in the fifth quintile.
>>> sm.S
array([[0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278],
[0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356],
[0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ],
[0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013],
[0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]])
States with incomes in the first quintile with neighbors in the
first quintile return to the first quintile after 2.298 years, after
leaving the first quintile. They enter the fourth quintile after
80.810 years after leaving the first quintile, on average.
Poor states with neighbors in the fourth quintile return to the
first quintile, on average, after 12.88 years, and would enter the
fourth quintile after 28.473 years.
>>> for f in sm.F:
... print(f)
...
[[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143]
[ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286]
[ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429]
[ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762]
[ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]]
[[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197]
[ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ]
[ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482]
[ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314]
[ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]]
[[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026]
[ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693]
[ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594]
[ 83.72288828 39.06666667 14.3 3.44668119 76.36702977]
[ 93.52288828 48.86666667 24.1 9.8 8.79255406]]
[[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142]
[ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423]
[117.76777159 23.03735526 3.94436301 15.0843986 43.57927247]
[127.89752089 32.4393006 14.56853107 4.44831643 31.63099455]
[138.24752089 42.7893006 24.91853107 10.35 4.05613474]]
[[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826]
[ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826]
[ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217]
[127.1407767 48.74107143 33.29605263 3.91777427 83.52173913]
[169.6407767 91.24107143 75.79605263 42.5 2.96521739]]
(2) Global quintiles to discretize the income data (k=5), and global
quartiles to discretize the spatial lags of incomes (m=4).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=4, variable_name='rpci')
We can also examine the cutoffs for the incomes and cutoffs for the spatial
lags:
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.91440247, 0.98583079, 1.08698351])
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.95708955 0.03544776 0.00746269 0. 0. ]
[0.05825243 0.83980583 0.10194175 0. 0. ]
[0. 0.1294964 0.76258993 0.10791367 0. ]
[0. 0.01538462 0.18461538 0.72307692 0.07692308]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.7421875 0.234375 0.0234375 0. 0. ]
[0.08550186 0.85130112 0.06319703 0. 0. ]
[0.00865801 0.06926407 0.86147186 0.05627706 0.004329 ]
[0. 0. 0.05363985 0.92337165 0.02298851]
[0. 0. 0. 0.13432836 0.86567164]]
[[0.95145631 0.04854369 0. 0. 0. ]
[0.06 0.79 0.145 0. 0.005 ]
[0.00358423 0.10394265 0.7921147 0.09677419 0.00358423]
[0. 0.01630435 0.13586957 0.75543478 0.0923913 ]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.16666667 0.66666667 0. 0.16666667 0. ]
[0.03488372 0.80232558 0.15116279 0.01162791 0. ]
[0.00840336 0.13445378 0.70588235 0.1512605 0. ]
[0. 0.01171875 0.08203125 0.87109375 0.03515625]
[0. 0. 0. 0.03434343 0.96565657]]
We now obtain 4 5*5 spatial lag conditioned transition probability
matrices instead of 5 as in case (1).
The Q and likelihood ratio statistics are still both significant.
>>> "%.3f"%sm.LR
'172.105'
>>> "%.3f"%sm.Q
'321.128'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
45
(3) We can also set the cutoffs for relative incomes and their
spatial lags manually.
For example, we want the defining cutoffs to be [0.8, 0.9, 1, 1.2],
meaning that relative incomes:
2.1 smaller than 0.8 : class 0
2.2 between 0.8 and 0.9: class 1
2.3 between 0.9 and 1.0 : class 2
2.4 between 1.0 and 1.2: class 3
2.5 larger than 1.2: class 4
>>> cc = np.array([0.8, 0.9, 1, 1.2])
>>> sm = Spatial_Markov(rpci, w, cutoffs=cc, lag_cutoffs=cc, variable_name='rpci')
>>> sm.cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.k
5
>>> sm.lag_cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.96703297 0.03296703 0. 0. 0. ]
[0.10638298 0.68085106 0.21276596 0. 0. ]
[0. 0.14285714 0.7755102 0.08163265 0. ]
[0. 0. 0.5 0.5 0. ]
[0. 0. 0. 0. 0. ]]
[[0.88636364 0.10606061 0.00757576 0. 0. ]
[0.04402516 0.89308176 0.06289308 0. 0. ]
[0. 0.05882353 0.8627451 0.07843137 0. ]
[0. 0. 0.13846154 0.86153846 0. ]
[0. 0. 0. 0. 1. ]]
[[0.78082192 0.17808219 0.02739726 0.01369863 0. ]
[0.03488372 0.90406977 0.05813953 0.00290698 0. ]
[0. 0.05919003 0.84735202 0.09034268 0.00311526]
[0. 0. 0.05811623 0.92985972 0.01202405]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.82692308 0.15384615 0. 0.01923077 0. ]
[0.0703125 0.7890625 0.125 0.015625 0. ]
[0.00295858 0.06213018 0.82248521 0.10946746 0.00295858]
[0. 0.00185529 0.07606679 0.88497217 0.03710575]
[0. 0. 0. 0.07803468 0.92196532]]
[[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0.06666667 0.9 0.03333333 0. ]
[0. 0. 0.05660377 0.90566038 0.03773585]
[0. 0. 0. 0.03932584 0.96067416]]
(4) Spatial_Markov also accept discrete time series and calculate
categorical spatial lags on which several transition probability matrices
are conditioned.
Let's still use the US state income time series to demonstrate. We first
discretize them into categories and then pass them to Spatial_Markov.
>>> import mapclassify as mc
>>> y = mc.Quantiles(rpci.flatten(), k=5).yb.reshape(rpci.shape)
>>> np.random.seed(5)
>>> sm = Spatial_Markov(y, w, discrete=True, variable_name='discretized rpci')
>>> sm.k
5
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.94787645 0.04440154 0.00772201 0. 0. ]
[0.08333333 0.81060606 0.10606061 0. 0. ]
[0. 0.12765957 0.79787234 0.07446809 0. ]
[0. 0.02777778 0.22222222 0.66666667 0.08333333]
[0. 0. 0. 0.33333333 0.66666667]]
[[0.888 0.096 0.016 0. 0. ]
[0.06049822 0.84341637 0.09608541 0. 0. ]
[0.00666667 0.10666667 0.81333333 0.07333333 0. ]
[0. 0. 0.08527132 0.86821705 0.04651163]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.65217391 0.32608696 0.02173913 0. 0. ]
[0.07446809 0.80851064 0.11170213 0. 0.00531915]
[0.01071429 0.1 0.76428571 0.11785714 0.00714286]
[0. 0.00552486 0.09392265 0.86187845 0.03867403]
[0. 0. 0. 0.13157895 0.86842105]]
[[0.91935484 0.06451613 0. 0.01612903 0. ]
[0.06796117 0.90291262 0.02912621 0. 0. ]
[0. 0.05755396 0.87769784 0.0647482 0. ]
[0. 0.02150538 0.10752688 0.80107527 0.06989247]
[0. 0. 0. 0.08064516 0.91935484]]
[[0.81818182 0.18181818 0. 0. 0. ]
[0.01754386 0.70175439 0.26315789 0.01754386 0. ]
[0. 0.14285714 0.73333333 0.12380952 0. ]
[0. 0.0042735 0.06837607 0.89316239 0.03418803]
[0. 0. 0. 0.03891051 0.96108949]]
"""
def __init__(self, y, w, k=4, m=4, permutations=0, fixed=True,
             discrete=False, cutoffs=None, lag_cutoffs=None,
             variable_name=None):
    """Build the spatially conditioned Markov model.

    Discretizes ``y`` (unless ``discrete``), fits the a-spatial Markov
    chain, estimates the lag-conditioned transition matrices, and
    optionally runs a permutation test on the homogeneity statistic.
    See the class docstring for parameter details.
    """
    y = np.asarray(y)
    self.fixed = fixed
    self.discrete = discrete
    self.cutoffs = cutoffs
    self.m = m
    self.lag_cutoffs = lag_cutoffs
    self.variable_name = variable_name
    if discrete:
        # y already holds categories: enumerate the distinct labels
        # across all rows and recode them to consecutive ints 0..k-1
        merged = list(itertools.chain.from_iterable(y))
        classes = np.unique(merged)
        self.classes = classes
        self.k = len(classes)
        # lag classes coincide with value classes in the discrete case
        self.m = self.k
        label_dict = dict(zip(classes, range(self.k)))
        y_int = []
        for yi in y:
            y_int.append(list(map(label_dict.get, yi)))
        self.class_ids = np.array(y_int)
        self.lclass_ids = self.class_ids
    else:
        # continuous input: discretize via quantiles or user cutoffs
        self.class_ids, self.cutoffs, self.k = self._maybe_classify(
            y, k=k, cutoffs=self.cutoffs)
        self.classes = np.arange(self.k)
    # a-spatial (global) Markov chain over the discretized series
    classic = Markov(self.class_ids)
    self.p = classic.p
    self.transitions = classic.transitions
    # lag-conditioned transition counts T and probabilities P
    self.T, self.P = self._calc(y, w)
    if permutations:
        # pseudo p-value for x2 based on random spatial permutations
        # of the rows of y, recomputing conditional transitions each time
        nrp = np.random.permutation
        counter = 0
        x2_realizations = np.zeros((permutations, 1))
        for perm in range(permutations):
            T, P = self._calc(nrp(y), w)
            x2 = [chi2(T[i], self.transitions)[0] for i in range(self.k)]
            x2s = sum(x2)
            x2_realizations[perm] = x2s
            if x2s >= self.x2:
                counter += 1
        self.x2_rpvalue = (counter + 1.0) / (permutations + 1.)
        self.x2_realizations = x2_realizations
@property
def s(self):
if not hasattr(self, '_s'):
self._s = STEADY_STATE(self.p)
return self._s
@property
def S(self):
if not hasattr(self, '_S'):
S = np.zeros_like(self.p)
for i, p in enumerate(self.P):
S[i] = STEADY_STATE(p)
self._S = np.asarray(S)
return self._S
@property
def F(self):
if not hasattr(self, '_F'):
F = np.zeros_like(self.P)
for i, p in enumerate(self.P):
F[i] = fmpt(np.asmatrix(p))
self._F = np.asarray(F)
return self._F
# bickenbach and bode tests
@property
def ht(self):
if not hasattr(self, '_ht'):
self._ht = homogeneity(self.T)
return self._ht
@property
def Q(self):
if not hasattr(self, '_Q'):
self._Q = self.ht.Q
return self._Q
@property
def Q_p_value(self):
self._Q_p_value = self.ht.Q_p_value
return self._Q_p_value
@property
def LR(self):
self._LR = self.ht.LR
return self._LR
@property
def LR_p_value(self):
self._LR_p_value = self.ht.LR_p_value
return self._LR_p_value
@property
def dof_hom(self):
self._dof_hom = self.ht.dof
return self._dof_hom
# shtests
@property
def shtest(self):
if not hasattr(self, '_shtest'):
self._shtest = self._mn_test()
return self._shtest
@property
def chi2(self):
if not hasattr(self, '_chi2'):
self._chi2 = self._chi2_test()
return self._chi2
@property
def x2(self):
if not hasattr(self, '_x2'):
self._x2 = sum([c[0] for c in self.chi2])
return self._x2
@property
def x2_pvalue(self):
if not hasattr(self, '_x2_pvalue'):
self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof)
return self._x2_pvalue
@property
def x2_dof(self):
if not hasattr(self, '_x2_dof'):
k = self.k
self._x2_dof = k * (k - 1) * (k - 1)
return self._x2_dof
def _calc(self, y, w):
    '''Helper to estimate spatial lag conditioned Markov transition
    probability matrices based on maximum likelihood techniques.

    Parameters
    ----------
    y : array
        (n, t), time series (continuous, or class ids when
        ``self.discrete`` is True).
    w : W
        spatial weights object used to form the lags.

    Returns
    -------
    T : array
        (m, k, k), transition counts conditioned on the lag class.
    P : array
        (m, k, k), row-standardized transition probabilities.
    '''
    if self.discrete:
        self.lclass_ids = weights.lag_categorical(w, self.class_ids,
                                                  ties="tryself")
    else:
        ly = weights.lag_spatial(w, y)
        self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
            ly, self.m, self.lag_cutoffs)
    self.lclasses = np.arange(self.m)
    T = np.zeros((self.m, self.k, self.k))
    n, t = y.shape
    for t1 in range(t - 1):
        t2 = t1 + 1
        for i in range(n):
            T[self.lclass_ids[i, t1], self.class_ids[i, t1],
              self.class_ids[i, t2]] += 1
    # Row-standardize each conditional count matrix in one vectorized
    # step; rows that sum to zero are guarded so they divide by one and
    # remain all-zero. (Replaces the deprecated np.matrix/np.diag form
    # with an equivalent array broadcast.)
    row_sum = T.sum(axis=2)
    row_sum = row_sum + (row_sum == 0)
    P = T / row_sum[:, :, np.newaxis]
    return T, P
def _mn_test(self):
"""
helper to calculate tests of differences between steady state
distributions from the conditional and overall distributions.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [self._ssmnp_test(
self.s, self.S[i], self.T[i].sum()) for i in rn]
return mat
def _ssmnp_test(self, p1, p2, nt):
"""
Steady state multinomial probability difference test.
Arguments
---------
p1 : array
(k, ), first steady state probability distribution.
p2 : array
(k, ), second steady state probability distribution.
nt : int
number of transitions to base the test on.
Returns
-------
tuple
(3 elements)
(chi2 value, pvalue, degrees of freedom)
"""
o = nt * p2
e = nt * p1
d = np.multiply((o - e), (o - e))
d = d / e
chi2 = d.sum()
pvalue = 1 - stats.chi2.cdf(chi2, self.k - 1)
return (chi2, pvalue, self.k - 1)
def _chi2_test(self):
"""
helper to calculate tests of differences between the conditional
transition matrices and the overall transitions matrix.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [chi2(self.T[i], self.transitions) for i in rn]
return mat
def _maybe_classify(self, y, k, cutoffs):
    '''Helper method for classifying continuous data.

    Parameters
    ----------
    y : array
        (n, t), continuous values to discretize.
    k : int
        requested number of classes (ignored when cutoffs are given).
    cutoffs : array or None
        user-supplied upper class bounds; quantiles are used when None.

    Returns
    -------
    tuple
        (yb, cutoffs, k): class ids with the same shape as y, the
        cutoffs actually used (None for per-period quantiles), and the
        resulting number of classes.
    '''
    rows, cols = y.shape
    if cutoffs is None:
        if self.fixed:
            # Pooled quantiles over the entire n*t panel.
            mcyb = mc.Quantiles(y.flatten(), k=k)
            yb = mcyb.yb.reshape(y.shape)
            cutoffs = mcyb.bins
            k = len(cutoffs)
            return yb, cutoffs[:-1], k
        else:
            # Quantiles computed separately within each time period.
            yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in
                           np.arange(cols)]).transpose()
            return yb, None, k
    else:
        # User-defined cutoffs; append inf so the top class is open-ended.
        cutoffs = list(cutoffs) + [np.inf]
        cutoffs = np.array(cutoffs)
        yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape(
            y.shape)
        k = len(cutoffs)
        return yb, cutoffs[:-1], k
|
pysal/giddy | giddy/markov.py | Spatial_Markov._maybe_classify | python | def _maybe_classify(self, y, k, cutoffs):
'''Helper method for classifying continuous data.
'''
rows, cols = y.shape
if cutoffs is None:
if self.fixed:
mcyb = mc.Quantiles(y.flatten(), k=k)
yb = mcyb.yb.reshape(y.shape)
cutoffs = mcyb.bins
k = len(cutoffs)
return yb, cutoffs[:-1], k
else:
yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in
np.arange(cols)]).transpose()
return yb, None, k
else:
cutoffs = list(cutoffs) + [np.inf]
cutoffs = np.array(cutoffs)
yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape(
y.shape)
k = len(cutoffs)
return yb, cutoffs[:-1], k | Helper method for classifying continuous data. | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L832-L855 | null | class Spatial_Markov(object):
"""
Markov transitions conditioned on the value of the spatial lag.
Parameters
----------
y : array
(n, t), one row per observation, one column per state of
each observation, with as many columns as time periods.
w : W
spatial weights object.
k : integer, optional
number of classes (quantiles) for input time series y.
Default is 4. If discrete=True, k is determined
endogenously.
m : integer, optional
number of classes (quantiles) for the spatial lags of
regional time series. Default is 4. If discrete=True,
m is determined endogenously.
permutations : int, optional
number of permutations for use in randomization based
inference (the default is 0).
fixed : bool, optional
If true, discretization are taken over the entire n*t
pooled series and cutoffs can be user-defined. If
cutoffs and lag_cutoffs are not given, quantiles are
used. If false, quantiles are taken each time period
over n. Default is True.
discrete : bool, optional
If true, categorical spatial lags which are most common
categories of neighboring observations serve as the
conditioning and fixed is ignored; if false, weighted
averages of neighboring observations are used. Default is
false.
cutoffs : array, optional
users can specify the discretization cutoffs for
continuous time series. Default is None, meaning that
quantiles will be used for the discretization.
lag_cutoffs : array, optional
users can specify the discretization cutoffs for the
spatial lags of continuous time series. Default is
None, meaning that quantiles will be used for the
discretization.
variable_name : string
name of variable.
Attributes
----------
class_ids : array
(n, t), discretized series if y is continuous. Otherwise
it is identical to y.
classes : array
(k, 1), all different classes (bins).
lclass_ids : array
(n, t), spatial lag series.
lclasses : array
(k, 1), all different classes (bins) for
spatial lags.
p : array
(k, k), transition probability matrix for a-spatial
Markov.
s : array
(k, 1), ergodic distribution for a-spatial Markov.
transitions : array
(k, k), counts of transitions between each state i and j
for a-spatial Markov.
T : array
(k, k, k), counts of transitions for each conditional
Markov. T[0] is the matrix of transitions for
observations with lags in the 0th quantile; T[k-1] is the
transitions for the observations with lags in the k-1th.
P : array
(k, k, k), transition probability matrix for spatial
Markov first dimension is the conditioned on the lag.
S : array
(k, k), steady state distributions for spatial Markov.
Each row is a conditional steady_state.
F : array
(k, k, k),first mean passage times.
First dimension is conditioned on the lag.
shtest : list
(k elements), each element of the list is a tuple for a
multinomial difference test between the steady state
distribution from a conditional distribution versus the
overall steady state distribution: first element of the
tuple is the chi2 value, second its p-value and the third
the degrees of freedom.
chi2 : list
(k elements), each element of the list is a tuple for a
chi-squared test of the difference between the
conditional transition matrix against the overall
transition matrix: first element of the tuple is the chi2
value, second its p-value and the third the degrees of
freedom.
x2 : float
sum of the chi2 values for each of the conditional tests.
Has an asymptotic chi2 distribution with k(k-1)(k-1)
degrees of freedom. Under the null that transition
probabilities are spatially homogeneous.
(see chi2 above)
x2_dof : int
degrees of freedom for homogeneity test.
x2_pvalue : float
pvalue for homogeneity test based on analytic.
distribution
x2_rpvalue : float
(if permutations>0)
pseudo p-value for x2 based on random spatial
permutations of the rows of the original transitions.
x2_realizations : array
(permutations,1), the values of x2 for the random
permutations.
Q : float
Chi-square test of homogeneity across lag classes based
on :cite:`Bickenbach2003`.
Q_p_value : float
p-value for Q.
LR : float
Likelihood ratio statistic for homogeneity across lag
classes based on :cite:`Bickenbach2003`.
LR_p_value : float
p-value for LR.
dof_hom : int
degrees of freedom for LR and Q, corrected for 0 cells.
Notes
-----
Based on :cite:`Rey2001`.
The shtest and chi2 tests should be used with caution as they are based on
classic theory assuming random transitions. The x2 based test is
preferable since it simulates the randomness under the null. It is an
experimental test requiring further analysis.
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov
>>> import numpy as np
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform = 'r'
Now we create a `Spatial_Markov` instance for the continuous relative per
capita income time series for 48 US lower states 1929-2009. The current
implementation allows users to classify the continuous incomes in a more
flexible way.
(1) Global quintiles to discretize the income data (k=5), and global
quintiles to discretize the spatial lags of incomes (m=5).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=5, variable_name='rpci')
We can examine the cutoffs for the incomes and cutoffs for the spatial lags
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.88973386, 0.95891917, 1.01469758, 1.1183566 ])
Obviously, they are slightly different.
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.96341463 0.0304878 0.00609756 0. 0. ]
[0.06040268 0.83221477 0.10738255 0. 0. ]
[0. 0.14 0.74 0.12 0. ]
[0. 0.03571429 0.32142857 0.57142857 0.07142857]
[0. 0. 0. 0.16666667 0.83333333]]
[[0.79831933 0.16806723 0.03361345 0. 0. ]
[0.0754717 0.88207547 0.04245283 0. 0. ]
[0.00537634 0.06989247 0.8655914 0.05913978 0. ]
[0. 0. 0.06372549 0.90196078 0.03431373]
[0. 0. 0. 0.19444444 0.80555556]]
[[0.84693878 0.15306122 0. 0. 0. ]
[0.08133971 0.78947368 0.1291866 0. 0. ]
[0.00518135 0.0984456 0.79274611 0.0984456 0.00518135]
[0. 0. 0.09411765 0.87058824 0.03529412]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.8852459 0.09836066 0. 0.01639344 0. ]
[0.03875969 0.81395349 0.13953488 0. 0.00775194]
[0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ]
[0. 0.02339181 0.12865497 0.75438596 0.09356725]
[0. 0. 0. 0.09661836 0.90338164]]
[[0.33333333 0.66666667 0. 0. 0. ]
[0.0483871 0.77419355 0.16129032 0.01612903 0. ]
[0.01149425 0.16091954 0.74712644 0.08045977 0. ]
[0. 0.01036269 0.06217617 0.89637306 0.03108808]
[0. 0. 0. 0.02352941 0.97647059]]
The probability of a poor state remaining poor is 0.963 if their
neighbors are in the 1st quintile and 0.798 if their neighbors are
in the 2nd quintile. The probability of a rich economy remaining
rich is 0.976 if their neighbors are in the 5th quintile, but if their
neighbors are in the 4th quintile this drops to 0.903.
The global transition probability matrix is estimated:
>>> print(sm.p)
[[0.91461837 0.07503234 0.00905563 0.00129366 0. ]
[0.06570302 0.82654402 0.10512484 0.00131406 0.00131406]
[0.00520833 0.10286458 0.79427083 0.09505208 0.00260417]
[0. 0.00913838 0.09399478 0.84856397 0.04830287]
[0. 0. 0. 0.06217617 0.93782383]]
The Q and likelihood ratio statistics are both significant indicating
the dynamics are not homogeneous across the lag classes:
>>> "%.3f"%sm.LR
'170.659'
>>> "%.3f"%sm.Q
'200.624'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
60
The long run distribution for states with poor (rich) neighbors has
0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in
the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the
fourth and 0.029 (0.337) in the fifth quintile.
>>> sm.S
array([[0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278],
[0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356],
[0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ],
[0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013],
[0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]])
States with incomes in the first quintile with neighbors in the
first quintile return to the first quartile after 2.298 years, after
leaving the first quintile. They enter the fourth quintile after
80.810 years after leaving the first quintile, on average.
    Poor states with neighbors in the fourth quintile return to the
first quintile, on average, after 12.88 years, and would enter the
fourth quintile after 28.473 years.
>>> for f in sm.F:
... print(f)
...
[[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143]
[ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286]
[ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429]
[ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762]
[ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]]
[[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197]
[ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ]
[ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482]
[ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314]
[ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]]
[[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026]
[ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693]
[ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594]
[ 83.72288828 39.06666667 14.3 3.44668119 76.36702977]
[ 93.52288828 48.86666667 24.1 9.8 8.79255406]]
[[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142]
[ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423]
[117.76777159 23.03735526 3.94436301 15.0843986 43.57927247]
[127.89752089 32.4393006 14.56853107 4.44831643 31.63099455]
[138.24752089 42.7893006 24.91853107 10.35 4.05613474]]
[[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826]
[ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826]
[ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217]
[127.1407767 48.74107143 33.29605263 3.91777427 83.52173913]
[169.6407767 91.24107143 75.79605263 42.5 2.96521739]]
(2) Global quintiles to discretize the income data (k=5), and global
quartiles to discretize the spatial lags of incomes (m=4).
>>> sm = Spatial_Markov(rpci, w, fixed=True, k=5, m=4, variable_name='rpci')
We can also examine the cutoffs for the incomes and cutoffs for the spatial
lags:
>>> sm.cutoffs
array([0.83999133, 0.94707545, 1.03242697, 1.14911154])
>>> sm.lag_cutoffs
array([0.91440247, 0.98583079, 1.08698351])
We now look at the estimated spatially lag conditioned transition
probability matrices.
>>> for p in sm.P:
... print(p)
[[0.95708955 0.03544776 0.00746269 0. 0. ]
[0.05825243 0.83980583 0.10194175 0. 0. ]
[0. 0.1294964 0.76258993 0.10791367 0. ]
[0. 0.01538462 0.18461538 0.72307692 0.07692308]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.7421875 0.234375 0.0234375 0. 0. ]
[0.08550186 0.85130112 0.06319703 0. 0. ]
[0.00865801 0.06926407 0.86147186 0.05627706 0.004329 ]
[0. 0. 0.05363985 0.92337165 0.02298851]
[0. 0. 0. 0.13432836 0.86567164]]
[[0.95145631 0.04854369 0. 0. 0. ]
[0.06 0.79 0.145 0. 0.005 ]
[0.00358423 0.10394265 0.7921147 0.09677419 0.00358423]
[0. 0.01630435 0.13586957 0.75543478 0.0923913 ]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.16666667 0.66666667 0. 0.16666667 0. ]
[0.03488372 0.80232558 0.15116279 0.01162791 0. ]
[0.00840336 0.13445378 0.70588235 0.1512605 0. ]
[0. 0.01171875 0.08203125 0.87109375 0.03515625]
[0. 0. 0. 0.03434343 0.96565657]]
We now obtain 4 5*5 spatial lag conditioned transition probability
matrices instead of 5 as in case (1).
The Q and likelihood ratio statistics are still both significant.
>>> "%.3f"%sm.LR
'172.105'
>>> "%.3f"%sm.Q
'321.128'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
45
(3) We can also set the cutoffs for relative incomes and their
spatial lags manually.
For example, we want the defining cutoffs to be [0.8, 0.9, 1, 1.2],
meaning that relative incomes:
2.1 smaller than 0.8 : class 0
2.2 between 0.8 and 0.9: class 1
2.3 between 0.9 and 1.0 : class 2
2.4 between 1.0 and 1.2: class 3
2.5 larger than 1.2: class 4
>>> cc = np.array([0.8, 0.9, 1, 1.2])
>>> sm = Spatial_Markov(rpci, w, cutoffs=cc, lag_cutoffs=cc, variable_name='rpci')
>>> sm.cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.k
5
>>> sm.lag_cutoffs
array([0.8, 0.9, 1. , 1.2])
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.96703297 0.03296703 0. 0. 0. ]
[0.10638298 0.68085106 0.21276596 0. 0. ]
[0. 0.14285714 0.7755102 0.08163265 0. ]
[0. 0. 0.5 0.5 0. ]
[0. 0. 0. 0. 0. ]]
[[0.88636364 0.10606061 0.00757576 0. 0. ]
[0.04402516 0.89308176 0.06289308 0. 0. ]
[0. 0.05882353 0.8627451 0.07843137 0. ]
[0. 0. 0.13846154 0.86153846 0. ]
[0. 0. 0. 0. 1. ]]
[[0.78082192 0.17808219 0.02739726 0.01369863 0. ]
[0.03488372 0.90406977 0.05813953 0.00290698 0. ]
[0. 0.05919003 0.84735202 0.09034268 0.00311526]
[0. 0. 0.05811623 0.92985972 0.01202405]
[0. 0. 0. 0.14285714 0.85714286]]
[[0.82692308 0.15384615 0. 0.01923077 0. ]
[0.0703125 0.7890625 0.125 0.015625 0. ]
[0.00295858 0.06213018 0.82248521 0.10946746 0.00295858]
[0. 0.00185529 0.07606679 0.88497217 0.03710575]
[0. 0. 0. 0.07803468 0.92196532]]
[[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0.06666667 0.9 0.03333333 0. ]
[0. 0. 0.05660377 0.90566038 0.03773585]
[0. 0. 0. 0.03932584 0.96067416]]
(4) Spatial_Markov also accept discrete time series and calculate
categorical spatial lags on which several transition probability matrices
are conditioned.
Let's still use the US state income time series to demonstrate. We first
discretize them into categories and then pass them to Spatial_Markov.
>>> import mapclassify as mc
>>> y = mc.Quantiles(rpci.flatten(), k=5).yb.reshape(rpci.shape)
>>> np.random.seed(5)
>>> sm = Spatial_Markov(y, w, discrete=True, variable_name='discretized rpci')
>>> sm.k
5
>>> sm.m
5
>>> for p in sm.P:
... print(p)
[[0.94787645 0.04440154 0.00772201 0. 0. ]
[0.08333333 0.81060606 0.10606061 0. 0. ]
[0. 0.12765957 0.79787234 0.07446809 0. ]
[0. 0.02777778 0.22222222 0.66666667 0.08333333]
[0. 0. 0. 0.33333333 0.66666667]]
[[0.888 0.096 0.016 0. 0. ]
[0.06049822 0.84341637 0.09608541 0. 0. ]
[0.00666667 0.10666667 0.81333333 0.07333333 0. ]
[0. 0. 0.08527132 0.86821705 0.04651163]
[0. 0. 0. 0.10204082 0.89795918]]
[[0.65217391 0.32608696 0.02173913 0. 0. ]
[0.07446809 0.80851064 0.11170213 0. 0.00531915]
[0.01071429 0.1 0.76428571 0.11785714 0.00714286]
[0. 0.00552486 0.09392265 0.86187845 0.03867403]
[0. 0. 0. 0.13157895 0.86842105]]
[[0.91935484 0.06451613 0. 0.01612903 0. ]
[0.06796117 0.90291262 0.02912621 0. 0. ]
[0. 0.05755396 0.87769784 0.0647482 0. ]
[0. 0.02150538 0.10752688 0.80107527 0.06989247]
[0. 0. 0. 0.08064516 0.91935484]]
[[0.81818182 0.18181818 0. 0. 0. ]
[0.01754386 0.70175439 0.26315789 0.01754386 0. ]
[0. 0.14285714 0.73333333 0.12380952 0. ]
[0. 0.0042735 0.06837607 0.89316239 0.03418803]
[0. 0. 0. 0.03891051 0.96108949]]
"""
def __init__(self, y, w, k=4, m=4, permutations=0, fixed=True,
             discrete=False, cutoffs=None, lag_cutoffs=None,
             variable_name=None):
    # See the class docstring for full parameter semantics.
    y = np.asarray(y)
    self.fixed = fixed
    self.discrete = discrete
    self.cutoffs = cutoffs
    self.m = m
    self.lag_cutoffs = lag_cutoffs
    self.variable_name = variable_name
    if discrete:
        # Categorical input: classes are the observed unique labels,
        # remapped to consecutive integers 0..k-1; lags share the
        # same label space so m is forced equal to k.
        merged = list(itertools.chain.from_iterable(y))
        classes = np.unique(merged)
        self.classes = classes
        self.k = len(classes)
        self.m = self.k
        label_dict = dict(zip(classes, range(self.k)))
        y_int = []
        for yi in y:
            y_int.append(list(map(label_dict.get, yi)))
        self.class_ids = np.array(y_int)
        self.lclass_ids = self.class_ids
    else:
        # Continuous input: discretize via quantiles or user cutoffs.
        self.class_ids, self.cutoffs, self.k = self._maybe_classify(
            y, k=k, cutoffs=self.cutoffs)
        self.classes = np.arange(self.k)
    # A-spatial (pooled) Markov chain over the same class ids.
    classic = Markov(self.class_ids)
    self.p = classic.p
    self.transitions = classic.transitions
    # Lag-conditioned transition counts and probabilities.
    self.T, self.P = self._calc(y, w)
    if permutations:
        # Permutation inference: shuffle rows (units) of y, recompute
        # the x2 homogeneity statistic under the null of no spatial
        # dependence, and count exceedances.
        nrp = np.random.permutation
        counter = 0
        x2_realizations = np.zeros((permutations, 1))
        for perm in range(permutations):
            T, P = self._calc(nrp(y), w)
            x2 = [chi2(T[i], self.transitions)[0] for i in range(self.k)]
            x2s = sum(x2)
            x2_realizations[perm] = x2s
            if x2s >= self.x2:
                counter += 1
        self.x2_rpvalue = (counter + 1.0) / (permutations + 1.)
        self.x2_realizations = x2_realizations
@property
def s(self):
    """Ergodic (steady state) distribution of the pooled chain, cached."""
    try:
        return self._s
    except AttributeError:
        self._s = STEADY_STATE(self.p)
        return self._s
@property
def S(self):
    """Conditional steady state distributions, one row per lag class (cached)."""
    if not hasattr(self, '_S'):
        out = np.zeros_like(self.p)
        for idx, prob in enumerate(self.P):
            out[idx] = STEADY_STATE(prob)
        self._S = np.asarray(out)
    return self._S
@property
def F(self):
    """First mean passage time matrices, one per lag class (cached)."""
    if not hasattr(self, '_F'):
        F = np.zeros_like(self.P)
        for i, p in enumerate(self.P):
            # NOTE(review): np.asmatrix is deprecated in modern NumPy;
            # fmpt presumably expects matrix input here -- confirm
            # before migrating to plain ndarrays.
            F[i] = fmpt(np.asmatrix(p))
        self._F = np.asarray(F)
    return self._F
# Bickenbach and Bode homogeneity tests
@property
def ht(self):
    """Cached homogeneity test result over the conditional counts T."""
    try:
        return self._ht
    except AttributeError:
        self._ht = homogeneity(self.T)
        return self._ht
@property
def Q(self):
    """Chi-square homogeneity statistic across lag classes, cached."""
    try:
        return self._Q
    except AttributeError:
        self._Q = self.ht.Q
        return self._Q
@property
def Q_p_value(self):
    """p-value for the Q homogeneity statistic.

    Cached after first access, matching the memoization pattern of the
    sibling properties (Q, ht, x2, shtest); the original recomputed and
    re-assigned on every access.
    """
    if not hasattr(self, '_Q_p_value'):
        self._Q_p_value = self.ht.Q_p_value
    return self._Q_p_value
@property
def LR(self):
    """Likelihood ratio homogeneity statistic.

    Cached after first access, consistent with the Q/ht/x2 properties;
    the original recomputed and re-assigned on every access.
    """
    if not hasattr(self, '_LR'):
        self._LR = self.ht.LR
    return self._LR
@property
def LR_p_value(self):
    """p-value for the LR homogeneity statistic.

    Cached after first access, consistent with the Q/ht/x2 properties;
    the original recomputed and re-assigned on every access.
    """
    if not hasattr(self, '_LR_p_value'):
        self._LR_p_value = self.ht.LR_p_value
    return self._LR_p_value
@property
def dof_hom(self):
    """Degrees of freedom for LR and Q, corrected for zero cells.

    Cached after first access, consistent with the Q/ht/x2 properties;
    the original recomputed and re-assigned on every access.
    """
    if not hasattr(self, '_dof_hom'):
        self._dof_hom = self.ht.dof
    return self._dof_hom
# steady-state multinomial tests
@property
def shtest(self):
    """Cached list of steady-state multinomial difference test results."""
    try:
        return self._shtest
    except AttributeError:
        self._shtest = self._mn_test()
        return self._shtest
@property
def chi2(self):
    """Cached per-lag-class chi-squared tests against the pooled matrix."""
    try:
        return self._chi2
    except AttributeError:
        self._chi2 = self._chi2_test()
        return self._chi2
@property
def x2(self):
    """Sum of the conditional chi-squared statistics, cached."""
    if not hasattr(self, '_x2'):
        self._x2 = sum(c[0] for c in self.chi2)
    return self._x2
@property
def x2_pvalue(self):
    """Analytic p-value for x2 under a chi2(x2_dof) null, cached."""
    try:
        return self._x2_pvalue
    except AttributeError:
        self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof)
        return self._x2_pvalue
@property
def x2_dof(self):
    """Degrees of freedom for the x2 homogeneity test: k(k-1)^2, cached."""
    if not hasattr(self, '_x2_dof'):
        nk = self.k
        self._x2_dof = nk * (nk - 1) ** 2
    return self._x2_dof
def _calc(self, y, w):
    '''Helper to estimate spatial lag conditioned Markov transition
    probability matrices based on maximum likelihood techniques.

    Parameters
    ----------
    y : array
        (n, t) time series, one row per observation.
    w : W
        spatial weights object used to build the (categorical) lags.

    Returns
    -------
    T : array
        (m, k, k), transition counts; one k-by-k matrix per lag class.
    P : array
        (m, k, k), row-standardized transition probabilities.
    '''
    if self.discrete:
        # Categorical lag: the most common neighboring class;
        # "tryself" breaks ties using the unit's own class.
        self.lclass_ids = weights.lag_categorical(w, self.class_ids,
                                                  ties="tryself")
    else:
        # Continuous series: spatially lag first, then discretize the lags.
        ly = weights.lag_spatial(w, y)
        self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
            ly, self.m, self.lag_cutoffs)
        self.lclasses = np.arange(self.m)
    # T[l, i, j]: count of i -> j transitions observed while the unit's
    # spatial lag was in class l at the start of the transition.
    T = np.zeros((self.m, self.k, self.k))
    n, t = y.shape
    for t1 in range(t - 1):
        t2 = t1 + 1
        for i in range(n):
            T[self.lclass_ids[i, t1], self.class_ids[i, t1],
              self.class_ids[i, t2]] += 1
    P = np.zeros_like(T)
    for i, mat in enumerate(T):
        row_sum = mat.sum(axis=1)
        # Replace zero row sums with 1 to avoid division by zero for
        # states never visited under this lag class.
        row_sum = row_sum + (row_sum == 0)
        p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
        P[i] = p_i
    return T, P
def _mn_test(self):
"""
helper to calculate tests of differences between steady state
distributions from the conditional and overall distributions.
"""
n0, n1, n2 = self.T.shape
rn = list(range(n0))
mat = [self._ssmnp_test(
self.s, self.S[i], self.T[i].sum()) for i in rn]
return mat
def _ssmnp_test(self, p1, p2, nt):
    """
    Steady state multinomial probability difference test.

    Arguments
    ---------
    p1 : array
         (k, ), first steady state probability distribution.
    p2 : array
         (k, ), second steady state probability distribution.
    nt : int
         number of transitions to base the test on.

    Returns
    -------
    tuple
          (3 elements)
          (chi2 value, pvalue, degrees of freedom)
    """
    # Observed counts implied by p2, expected counts implied by p1.
    o = nt * p2
    e = nt * p1
    # Pearson chi-squared statistic: sum((o - e)^2 / e).
    d = np.multiply((o - e), (o - e))
    d = d / e
    chi2 = d.sum()
    pvalue = 1 - stats.chi2.cdf(chi2, self.k - 1)
    return (chi2, pvalue, self.k - 1)
def _chi2_test(self):
    """
    helper to calculate tests of differences between the conditional
    transition matrices and the overall transitions matrix.

    Returns
    -------
    list
        one (chi2 value, p-value, dof) tuple per lag class, comparing
        each conditional count matrix T[i] to the pooled transitions.
    """
    n0, n1, n2 = self.T.shape
    rn = list(range(n0))
    # chi2 here is the module-level test function, not scipy.stats.chi2.
    mat = [chi2(self.T[i], self.transitions) for i in rn]
    return mat
def summary(self, file_name=None):
    """
    A summary method to call the Markov homogeneity test to test for
    temporally lagged spatial dependence.

    To learn more about the properties of the tests, refer to
    :cite:`Rey2016a` and :cite:`Kang2018`.

    Parameters
    ----------
    file_name : string, optional
        If given, the summary table is written to this file instead of
        being printed.
    """
    class_names = ["C%d" % i for i in range(self.k)]
    # Regimes are the *lag* classes: T has shape (m, k, k), so there are
    # self.m regimes, not self.k -- the two differ whenever m != k.
    regime_names = ["LAG%d" % i for i in range(self.m)]
    ht = homogeneity(self.T, class_names=class_names,
                     regime_names=regime_names)
    title = "Spatial Markov Test"
    if self.variable_name:
        title = title + ": " + self.variable_name
    if file_name:
        ht.summary(file_name=file_name, title=title)
    else:
        ht.summary(title=title)
|
pysal/giddy | giddy/markov.py | LISA_Markov.spillover | python | def spillover(self, quadrant=1, neighbors_on=False):
n, k = self.q.shape
if self.permutations:
spill_over = np.zeros((n, k - 1))
components = np.zeros((n, k))
i2id = {} # handle string keys
for key in list(self.w.neighbors.keys()):
idx = self.w.id2i[key]
i2id[idx] = key
sig_lisas = (self.q == quadrant) \
* (self.p_values <= self.significance_level)
sig_ids = [np.nonzero(
sig_lisas[:, i])[0].tolist() for i in range(k)]
neighbors = self.w.neighbors
for t in range(k - 1):
s1 = sig_ids[t]
s2 = sig_ids[t + 1]
g1 = Graph(undirected=True)
for i in s1:
for neighbor in neighbors[i2id[i]]:
g1.add_edge(i2id[i], neighbor, 1.0)
if neighbors_on:
for nn in neighbors[neighbor]:
g1.add_edge(neighbor, nn, 1.0)
components1 = g1.connected_components(op=gt)
components1 = [list(c.nodes) for c in components1]
g2 = Graph(undirected=True)
for i in s2:
for neighbor in neighbors[i2id[i]]:
g2.add_edge(i2id[i], neighbor, 1.0)
if neighbors_on:
for nn in neighbors[neighbor]:
g2.add_edge(neighbor, nn, 1.0)
components2 = g2.connected_components(op=gt)
components2 = [list(c.nodes) for c in components2]
c2 = []
c1 = []
for c in components2:
c2.extend(c)
for c in components1:
c1.extend(c)
new_ids = [j for j in c2 if j not in c1]
spill_ids = []
for j in new_ids:
# find j's component in period 2
cj = [c for c in components2 if j in c][0]
# for members of j's component in period 2, check if they
# belonged to any components in period 1
for i in cj:
if i in c1:
spill_ids.append(j)
break
for spill_id in spill_ids:
id = self.w.id2i[spill_id]
spill_over[id, t] = 1
for c, component in enumerate(components1):
for i in component:
ii = self.w.id2i[i]
components[ii, t] = c + 1
results = {}
results['components'] = components
results['spill_over'] = spill_over
return results
else:
return None | Detect spillover locations for diffusion in LISA Markov.
Parameters
----------
quadrant : int
which quadrant in the scatterplot should form the core
of a cluster.
neighbors_on : binary
If false, then only the 1st order neighbors of a core
location are included in the cluster.
If true, neighbors of cluster core 1st order neighbors
are included in the cluster.
Returns
-------
results : dictionary
two keys - values pairs:
'components' - array (n, t)
values are integer ids (starting at 1) indicating which
component/cluster observation i in period t belonged to.
'spillover' - array (n, t-1)
binary values indicating if the location was a
spill-over location that became a new member of a
previously existing cluster.
Examples
--------
>>> import libpysal
>>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> r = lm_random.spillover()
>>> (r['components'][:, 12] > 0).sum()
17
>>> (r['components'][:, 13]>0).sum()
23
>>> (r['spill_over'][:,12]>0).sum()
6
Including neighbors of core neighbors
>>> rn = lm_random.spillover(neighbors_on=True)
>>> (rn['components'][:, 12] > 0).sum()
26
>>> (rn["components"][:, 13] > 0).sum()
34
>>> (rn["spill_over"][:, 12] > 0).sum()
8 | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1212-L1333 | [
"def add_edge(self, n1, n2, w):\n self.nodes.add(n1)\n self.nodes.add(n2)\n self.edges.setdefault(n1, {}).update({n2: w})\n if self.undirected:\n self.edges.setdefault(n2, {}).update({n1: w})\n",
"def connected_components(self, threshold=0.9, op=lt):\n if not self.undirected:\n warn = \"Warning, connected _components not \"\n warn += \"defined for a directed graph\"\n print(warn)\n return None\n else:\n nodes = set(self.nodes)\n components, visited = [], set()\n while len(nodes) > 0:\n connected, visited = self.dfs(\n nodes.pop(), visited, threshold, op)\n connected = set(connected)\n for node in connected:\n if node in nodes:\n nodes.remove(node)\n subgraph = Graph()\n subgraph.nodes = connected\n subgraph.no_link = self.no_link\n for s in subgraph.nodes:\n for k, v in list(self.edges.get(s, {}).items()):\n if k in subgraph.nodes:\n subgraph.edges.setdefault(s, {}).update({k: v})\n if s in self.cluster_lookup:\n subgraph.cluster_lookup[s] = self.cluster_lookup[s]\n components.append(subgraph)\n return components\n"
] | class LISA_Markov(Markov):
"""
Markov for Local Indicators of Spatial Association
Parameters
----------
y : array
(n, t), n cross-sectional units observed over t time
periods.
w : W
spatial weights object.
permutations : int, optional
number of permutations used to determine LISA
significance (the default is 0).
significance_level : float, optional
significance level (two-sided) for filtering
significant LISA endpoints in a transition (the
default is 0.05).
geoda_quads : bool
If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4.
If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4.
(the default is False).
Attributes
----------
chi_2 : tuple
(3 elements)
(chi square test statistic, p-value, degrees of freedom) for
test that dynamics of y are independent of dynamics of wy.
classes : array
(4, 1)
1=HH, 2=LH, 3=LL, 4=HL (own, lag)
1=HH, 2=LL, 3=LH, 4=HL (own, lag) (if geoda_quads=True)
expected_t : array
(4, 4), expected number of transitions under the null that
dynamics of y are independent of dynamics of wy.
move_types : matrix
(n, t-1), integer values indicating which type of LISA
transition occurred (q1 is quadrant in period 1, q2 is
quadrant in period 2).
.. table:: Move Types
== == =========
q1 q2 move_type
== == =========
1 1 1
1 2 2
1 3 3
1 4 4
2 1 5
2 2 6
2 3 7
2 4 8
3 1 9
3 2 10
3 3 11
3 4 12
4 1 13
4 2 14
4 3 15
4 4 16
== == =========
p : array
(k, k), transition probability matrix.
p_values : matrix
(n, t), LISA p-values for each end point (if permutations >
0).
significant_moves : matrix
(n, t-1), integer values indicating the type and
significance of a LISA transition. st = 1 if
significant in period t, else st=0 (if permutations >
0).
.. Table:: Significant Moves1
=============== ===================
(s1,s2) move_type
=============== ===================
(1,1) [1, 16]
(1,0) [17, 32]
(0,1) [33, 48]
(0,0) [49, 64]
=============== ===================
.. Table:: Significant Moves2
== == == == =========
q1 q2 s1 s2 move_type
== == == == =========
1 1 1 1 1
1 2 1 1 2
1 3 1 1 3
1 4 1 1 4
2 1 1 1 5
2 2 1 1 6
2 3 1 1 7
2 4 1 1 8
3 1 1 1 9
3 2 1 1 10
3 3 1 1 11
3 4 1 1 12
4 1 1 1 13
4 2 1 1 14
4 3 1 1 15
4 4 1 1 16
1 1 1 0 17
1 2 1 0 18
. . . . .
. . . . .
4 3 1 0 31
4 4 1 0 32
1 1 0 1 33
1 2 0 1 34
. . . . .
. . . . .
4 3 0 1 47
4 4 0 1 48
1 1 0 0 49
1 2 0 0 50
. . . . .
. . . . .
4 3 0 0 63
4 4 0 0 64
== == == == =========
steady_state : array
(k, ), ergodic distribution.
transitions : array
(4, 4), count of transitions between each state i and j.
spillover : array
(n, 1) binary array, locations that were not part of a
cluster in period 1 but joined a prexisting cluster in
period 2.
Examples
--------
>>> import libpysal
>>> import numpy as np
>>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> lm = LISA_Markov(pci,w)
>>> lm.classes
array([1, 2, 3, 4])
>>> lm.steady_state
array([0.28561505, 0.14190226, 0.40493672, 0.16754598])
>>> lm.transitions
array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
[4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
[5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
[3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])
>>> lm.p
array([[0.92985458, 0.03763901, 0.00342173, 0.02908469],
[0.07481752, 0.85766423, 0.06569343, 0.00182482],
[0.00333333, 0.02266667, 0.948 , 0.026 ],
[0.04815409, 0.00160514, 0.06420546, 0.88603531]])
>>> lm.move_types[0,:3]
array([11, 11, 11])
>>> lm.move_types[0,-3:]
array([11, 11, 11])
Now consider only moves with one, or both, of the LISA end points being
significant
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> lm_random.significant_moves[0, :3]
array([11, 11, 11])
>>> lm_random.significant_moves[0,-3:]
array([59, 43, 27])
Any value less than 49 indicates at least one of the LISA end points was
significant. So for example, the first spatial unit experienced a
    transition of type 11 (LL, LL) during the first three and last three
    intervals (according to lm.move_types); however, the last three of these
    transitions involved insignificant LISAs in both the start and ending year
of each transition.
Test whether the moves of y are independent of the moves of wy
>>> "Chi2: %8.3f, p: %5.2f, dof: %d" % lm.chi_2
'Chi2: 1058.208, p: 0.00, dof: 9'
Actual transitions of LISAs
>>> lm.transitions
array([[1.087e+03, 4.400e+01, 4.000e+00, 3.400e+01],
[4.100e+01, 4.700e+02, 3.600e+01, 1.000e+00],
[5.000e+00, 3.400e+01, 1.422e+03, 3.900e+01],
[3.000e+01, 1.000e+00, 4.000e+01, 5.520e+02]])
Expected transitions of LISAs under the null y and wy are moving
independently of one another
>>> lm.expected_t
array([[1.12328098e+03, 1.15377356e+01, 3.47522158e-01, 3.38337644e+01],
[3.50272664e+00, 5.28473882e+02, 1.59178880e+01, 1.05503814e-01],
[1.53878082e-01, 2.32163556e+01, 1.46690710e+03, 9.72266513e+00],
[9.60775143e+00, 9.86856346e-02, 6.23537392e+00, 6.07058189e+02]])
If the LISA classes are to be defined according to GeoDa, the `geoda_quad`
option has to be set to true
>>> lm.q[0:5,0]
array([3, 2, 3, 1, 4])
>>> lm = LISA_Markov(pci,w, geoda_quads=True)
>>> lm.q[0:5,0]
array([2, 3, 2, 1, 4])
"""
def __init__(self, y, w, permutations=0,
significance_level=0.05, geoda_quads=False):
y = y.transpose()
pml = Moran_Local
gq = geoda_quads
ml = ([pml(yi, w, permutations=permutations, geoda_quads=gq)
for yi in y])
q = np.array([mli.q for mli in ml]).transpose()
classes = np.arange(1, 5) # no guarantee all 4 quadrants are visited
Markov.__init__(self, q, classes)
self.q = q
self.w = w
n, k = q.shape
k -= 1
self.significance_level = significance_level
move_types = np.zeros((n, k), int)
sm = np.zeros((n, k), int)
self.significance_level = significance_level
if permutations > 0:
p = np.array([mli.p_z_sim for mli in ml]).transpose()
self.p_values = p
pb = p <= significance_level
else:
pb = np.zeros_like(y.T)
for t in range(k):
origin = q[:, t]
dest = q[:, t + 1]
p_origin = pb[:, t]
p_dest = pb[:, t + 1]
for r in range(n):
move_types[r, t] = TT[origin[r], dest[r]]
key = (origin[r], dest[r], p_origin[r], p_dest[r])
sm[r, t] = MOVE_TYPES[key]
if permutations > 0:
self.significant_moves = sm
self.move_types = move_types
# null of own and lag moves being independent
ybar = y.mean(axis=0)
r = y / ybar
ylag = np.array([weights.lag_spatial(w, yt) for yt in y])
rlag = ylag / ybar
rc = r < 1.
rlagc = rlag < 1.
markov_y = Markov(rc)
markov_ylag = Markov(rlagc)
A = np.matrix([[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0]])
kp = A * np.kron(markov_y.p, markov_ylag.p) * A.T
trans = self.transitions.sum(axis=1)
t1 = np.diag(trans) * kp
t2 = self.transitions
t1 = t1.getA()
self.chi_2 = chi2(t2, t1)
self.expected_t = t1
self.permutations = permutations
|
Kentzo/Power | power/common.py | PowerManagementBase.add_observer | python | def add_observer(self, observer):
if not isinstance(observer, PowerManagementObserver):
raise TypeError("observer MUST conform to power.PowerManagementObserver")
self._weak_observers.append(weakref.ref(observer)) | Adds weak ref to an observer.
@param observer: Instance of class registered with PowerManagementObserver
@raise TypeError: If observer is not registered with PowerManagementObserver abstract class | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/common.py#L115-L124 | null | class PowerManagementBase(object):
"""
Base class for platform dependent PowerManagement functions.
@ivar _weak_observers: List of weak reference to added observers
@note: Platform's implementation may provide additional parameters for initialization
"""
__metaclass__ = ABCMeta
def __init__(self):
super(PowerManagementBase, self).__init__()
self._weak_observers = []
@abstractmethod
def get_providing_power_source_type(self):
"""
Returns type of the providing power source.
@return: Possible values:
- POWER_TYPE_AC
- POWER_TYPE_BATTERY
- POWER_TYPE_UPS
@rtype: int
"""
pass
@abstractmethod
def get_low_battery_warning_level(self):
"""
Returns the system battery warning level.
@return: Possible values:
- LOW_BATTERY_WARNING_NONE
- LOW_BATTERY_WARNING_EARLY
- LOW_BATTERY_WARNING_FINAL
@rtype: int
"""
pass
@abstractmethod
def get_time_remaining_estimate(self):
"""
Returns the estimated minutes remaining until all power sources (battery and/or UPS) are empty.
@return: Special values:
- TIME_REMAINING_UNKNOWN
- TIME_REMAINING_UNLIMITED
@rtype: float
"""
pass
@abstractmethod
@abstractmethod
def remove_observer(self, observer):
"""
Removes an observer.
@param observer: Previously added observer
"""
self._weak_observers.remove(weakref.ref(observer))
def remove_all_observers(self):
"""
Removes all registered observers.
"""
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
self.remove_observer(observer)
|
Kentzo/Power | power/common.py | PowerManagementBase.remove_all_observers | python | def remove_all_observers(self):
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
self.remove_observer(observer) | Removes all registered observers. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/common.py#L135-L142 | [
"def remove_observer(self, observer):\n \"\"\"\n Removes an observer.\n\n @param observer: Previously added observer\n \"\"\"\n self._weak_observers.remove(weakref.ref(observer))\n"
] | class PowerManagementBase(object):
"""
Base class for platform dependent PowerManagement functions.
@ivar _weak_observers: List of weak reference to added observers
@note: Platform's implementation may provide additional parameters for initialization
"""
__metaclass__ = ABCMeta
def __init__(self):
super(PowerManagementBase, self).__init__()
self._weak_observers = []
@abstractmethod
def get_providing_power_source_type(self):
"""
Returns type of the providing power source.
@return: Possible values:
- POWER_TYPE_AC
- POWER_TYPE_BATTERY
- POWER_TYPE_UPS
@rtype: int
"""
pass
@abstractmethod
def get_low_battery_warning_level(self):
"""
Returns the system battery warning level.
@return: Possible values:
- LOW_BATTERY_WARNING_NONE
- LOW_BATTERY_WARNING_EARLY
- LOW_BATTERY_WARNING_FINAL
@rtype: int
"""
pass
@abstractmethod
def get_time_remaining_estimate(self):
"""
Returns the estimated minutes remaining until all power sources (battery and/or UPS) are empty.
@return: Special values:
- TIME_REMAINING_UNKNOWN
- TIME_REMAINING_UNLIMITED
@rtype: float
"""
pass
@abstractmethod
def add_observer(self, observer):
"""
Adds weak ref to an observer.
@param observer: Instance of class registered with PowerManagementObserver
@raise TypeError: If observer is not registered with PowerManagementObserver abstract class
"""
if not isinstance(observer, PowerManagementObserver):
raise TypeError("observer MUST conform to power.PowerManagementObserver")
self._weak_observers.append(weakref.ref(observer))
@abstractmethod
def remove_observer(self, observer):
"""
Removes an observer.
@param observer: Previously added observer
"""
self._weak_observers.remove(weakref.ref(observer))
|
Kentzo/Power | power/darwin.py | PowerSourcesNotificationsObserver.startThread | python | def startThread(self):
if self._thread is not None:
return
self._thread = NSThread.alloc().initWithTarget_selector_object_(self, 'runPowerNotificationsThread', None)
self._thread.start() | Spawns new NSThread to handle notifications. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/darwin.py#L177-L182 | null | class PowerSourcesNotificationsObserver(NSObject):
"""
Manages NSThread instance which is used to run NSRunLoop with only source - IOPSNotificationCreateRunLoopSource.
Thread is automatically spawned when first observer is added and stopped when last observer is removed.
Does not keep strong references to observers.
@note: Method names break PEP8 convention to conform PyObjC naming conventions
"""
def init(self):
self = super(PowerSourcesNotificationsObserver, self).init()
if self is not None:
self._weak_observers = []
self._thread = None
self._lock = objc.object_lock(self)
return self
def stopThread(self):
"""Stops spawned NSThread."""
if self._thread is not None:
self.performSelector_onThread_withObject_waitUntilDone_('stopPowerNotificationsThread', self._thread, None, objc.YES)
self._thread = None
def runPowerNotificationsThread(self):
"""Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop."""
pool = NSAutoreleasePool.alloc().init()
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_source_notification(context):
with self._lock:
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_source_notification, None)
CFRunLoopAddSource(NSRunLoop.currentRunLoop().getCFRunLoop(), self._source, kCFRunLoopDefaultMode)
while not NSThread.currentThread().isCancelled():
NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantFuture())
del pool
def stopPowerNotificationsThread(self):
"""Removes the only source from NSRunLoop and cancels thread."""
assert NSThread.currentThread() == self._thread
CFRunLoopSourceInvalidate(self._source)
self._source = None
NSThread.currentThread().cancel()
def addObserver(self, observer):
"""
Adds weak ref to an observer.
@param observer: Instance of class that implements on_power_source_notification()
"""
with self._lock:
self._weak_observers.append(weakref.ref(observer))
if len(self._weak_observers) == 1:
self.startThread()
def removeObserver(self, observer):
"""
Removes an observer.
@param observer: Previously added observer
"""
with self._lock:
self._weak_observers.remove(weakref.ref(observer))
if len(self._weak_observers) == 0:
self.stopThread()
|
Kentzo/Power | power/darwin.py | PowerSourcesNotificationsObserver.stopThread | python | def stopThread(self):
if self._thread is not None:
self.performSelector_onThread_withObject_waitUntilDone_('stopPowerNotificationsThread', self._thread, None, objc.YES)
self._thread = None | Stops spawned NSThread. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/darwin.py#L184-L188 | null | class PowerSourcesNotificationsObserver(NSObject):
"""
Manages NSThread instance which is used to run NSRunLoop with only source - IOPSNotificationCreateRunLoopSource.
Thread is automatically spawned when first observer is added and stopped when last observer is removed.
Does not keep strong references to observers.
@note: Method names break PEP8 convention to conform PyObjC naming conventions
"""
def init(self):
self = super(PowerSourcesNotificationsObserver, self).init()
if self is not None:
self._weak_observers = []
self._thread = None
self._lock = objc.object_lock(self)
return self
def startThread(self):
"""Spawns new NSThread to handle notifications."""
if self._thread is not None:
return
self._thread = NSThread.alloc().initWithTarget_selector_object_(self, 'runPowerNotificationsThread', None)
self._thread.start()
def runPowerNotificationsThread(self):
"""Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop."""
pool = NSAutoreleasePool.alloc().init()
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_source_notification(context):
with self._lock:
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_source_notification, None)
CFRunLoopAddSource(NSRunLoop.currentRunLoop().getCFRunLoop(), self._source, kCFRunLoopDefaultMode)
while not NSThread.currentThread().isCancelled():
NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantFuture())
del pool
def stopPowerNotificationsThread(self):
"""Removes the only source from NSRunLoop and cancels thread."""
assert NSThread.currentThread() == self._thread
CFRunLoopSourceInvalidate(self._source)
self._source = None
NSThread.currentThread().cancel()
def addObserver(self, observer):
"""
Adds weak ref to an observer.
@param observer: Instance of class that implements on_power_source_notification()
"""
with self._lock:
self._weak_observers.append(weakref.ref(observer))
if len(self._weak_observers) == 1:
self.startThread()
def removeObserver(self, observer):
"""
Removes an observer.
@param observer: Previously added observer
"""
with self._lock:
self._weak_observers.remove(weakref.ref(observer))
if len(self._weak_observers) == 0:
self.stopThread()
|
Kentzo/Power | power/darwin.py | PowerSourcesNotificationsObserver.runPowerNotificationsThread | python | def runPowerNotificationsThread(self):
pool = NSAutoreleasePool.alloc().init()
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_source_notification(context):
with self._lock:
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_source_notification, None)
CFRunLoopAddSource(NSRunLoop.currentRunLoop().getCFRunLoop(), self._source, kCFRunLoopDefaultMode)
while not NSThread.currentThread().isCancelled():
NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantFuture())
del pool | Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/darwin.py#L190-L206 | null | class PowerSourcesNotificationsObserver(NSObject):
"""
Manages NSThread instance which is used to run NSRunLoop with only source - IOPSNotificationCreateRunLoopSource.
Thread is automatically spawned when first observer is added and stopped when last observer is removed.
Does not keep strong references to observers.
@note: Method names break PEP8 convention to conform PyObjC naming conventions
"""
def init(self):
self = super(PowerSourcesNotificationsObserver, self).init()
if self is not None:
self._weak_observers = []
self._thread = None
self._lock = objc.object_lock(self)
return self
def startThread(self):
"""Spawns new NSThread to handle notifications."""
if self._thread is not None:
return
self._thread = NSThread.alloc().initWithTarget_selector_object_(self, 'runPowerNotificationsThread', None)
self._thread.start()
def stopThread(self):
"""Stops spawned NSThread."""
if self._thread is not None:
self.performSelector_onThread_withObject_waitUntilDone_('stopPowerNotificationsThread', self._thread, None, objc.YES)
self._thread = None
def stopPowerNotificationsThread(self):
"""Removes the only source from NSRunLoop and cancels thread."""
assert NSThread.currentThread() == self._thread
CFRunLoopSourceInvalidate(self._source)
self._source = None
NSThread.currentThread().cancel()
def addObserver(self, observer):
"""
Adds weak ref to an observer.
@param observer: Instance of class that implements on_power_source_notification()
"""
with self._lock:
self._weak_observers.append(weakref.ref(observer))
if len(self._weak_observers) == 1:
self.startThread()
def removeObserver(self, observer):
"""
Removes an observer.
@param observer: Previously added observer
"""
with self._lock:
self._weak_observers.remove(weakref.ref(observer))
if len(self._weak_observers) == 0:
self.stopThread()
|
Kentzo/Power | power/darwin.py | PowerSourcesNotificationsObserver.stopPowerNotificationsThread | python | def stopPowerNotificationsThread(self):
assert NSThread.currentThread() == self._thread
CFRunLoopSourceInvalidate(self._source)
self._source = None
NSThread.currentThread().cancel() | Removes the only source from NSRunLoop and cancels thread. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/darwin.py#L209-L215 | null | class PowerSourcesNotificationsObserver(NSObject):
"""
Manages NSThread instance which is used to run NSRunLoop with only source - IOPSNotificationCreateRunLoopSource.
Thread is automatically spawned when first observer is added and stopped when last observer is removed.
Does not keep strong references to observers.
@note: Method names break PEP8 convention to conform PyObjC naming conventions
"""
def init(self):
self = super(PowerSourcesNotificationsObserver, self).init()
if self is not None:
self._weak_observers = []
self._thread = None
self._lock = objc.object_lock(self)
return self
def startThread(self):
"""Spawns new NSThread to handle notifications."""
if self._thread is not None:
return
self._thread = NSThread.alloc().initWithTarget_selector_object_(self, 'runPowerNotificationsThread', None)
self._thread.start()
def stopThread(self):
"""Stops spawned NSThread."""
if self._thread is not None:
self.performSelector_onThread_withObject_waitUntilDone_('stopPowerNotificationsThread', self._thread, None, objc.YES)
self._thread = None
def runPowerNotificationsThread(self):
"""Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop."""
pool = NSAutoreleasePool.alloc().init()
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_source_notification(context):
with self._lock:
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_source_notification, None)
CFRunLoopAddSource(NSRunLoop.currentRunLoop().getCFRunLoop(), self._source, kCFRunLoopDefaultMode)
while not NSThread.currentThread().isCancelled():
NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantFuture())
del pool
def addObserver(self, observer):
"""
Adds weak ref to an observer.
@param observer: Instance of class that implements on_power_source_notification()
"""
with self._lock:
self._weak_observers.append(weakref.ref(observer))
if len(self._weak_observers) == 1:
self.startThread()
def removeObserver(self, observer):
"""
Removes an observer.
@param observer: Previously added observer
"""
with self._lock:
self._weak_observers.remove(weakref.ref(observer))
if len(self._weak_observers) == 0:
self.stopThread()
|
Kentzo/Power | power/darwin.py | PowerSourcesNotificationsObserver.addObserver | python | def addObserver(self, observer):
with self._lock:
self._weak_observers.append(weakref.ref(observer))
if len(self._weak_observers) == 1:
self.startThread() | Adds weak ref to an observer.
@param observer: Instance of class that implements on_power_source_notification() | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/darwin.py#L217-L226 | null | class PowerSourcesNotificationsObserver(NSObject):
"""
Manages NSThread instance which is used to run NSRunLoop with only source - IOPSNotificationCreateRunLoopSource.
Thread is automatically spawned when first observer is added and stopped when last observer is removed.
Does not keep strong references to observers.
@note: Method names break PEP8 convention to conform PyObjC naming conventions
"""
def init(self):
self = super(PowerSourcesNotificationsObserver, self).init()
if self is not None:
self._weak_observers = []
self._thread = None
self._lock = objc.object_lock(self)
return self
def startThread(self):
"""Spawns new NSThread to handle notifications."""
if self._thread is not None:
return
self._thread = NSThread.alloc().initWithTarget_selector_object_(self, 'runPowerNotificationsThread', None)
self._thread.start()
def stopThread(self):
"""Stops spawned NSThread."""
if self._thread is not None:
self.performSelector_onThread_withObject_waitUntilDone_('stopPowerNotificationsThread', self._thread, None, objc.YES)
self._thread = None
def runPowerNotificationsThread(self):
"""Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop."""
pool = NSAutoreleasePool.alloc().init()
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_source_notification(context):
with self._lock:
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_source_notification, None)
CFRunLoopAddSource(NSRunLoop.currentRunLoop().getCFRunLoop(), self._source, kCFRunLoopDefaultMode)
while not NSThread.currentThread().isCancelled():
NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantFuture())
del pool
def stopPowerNotificationsThread(self):
"""Removes the only source from NSRunLoop and cancels thread."""
assert NSThread.currentThread() == self._thread
CFRunLoopSourceInvalidate(self._source)
self._source = None
NSThread.currentThread().cancel()
def removeObserver(self, observer):
"""
Removes an observer.
@param observer: Previously added observer
"""
with self._lock:
self._weak_observers.remove(weakref.ref(observer))
if len(self._weak_observers) == 0:
self.stopThread()
|
Kentzo/Power | power/darwin.py | PowerSourcesNotificationsObserver.removeObserver | python | def removeObserver(self, observer):
with self._lock:
self._weak_observers.remove(weakref.ref(observer))
if len(self._weak_observers) == 0:
self.stopThread() | Removes an observer.
@param observer: Previously added observer | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/darwin.py#L228-L237 | null | class PowerSourcesNotificationsObserver(NSObject):
"""
Manages NSThread instance which is used to run NSRunLoop with only source - IOPSNotificationCreateRunLoopSource.
Thread is automatically spawned when first observer is added and stopped when last observer is removed.
Does not keep strong references to observers.
@note: Method names break PEP8 convention to conform PyObjC naming conventions
"""
def init(self):
self = super(PowerSourcesNotificationsObserver, self).init()
if self is not None:
self._weak_observers = []
self._thread = None
self._lock = objc.object_lock(self)
return self
def startThread(self):
"""Spawns new NSThread to handle notifications."""
if self._thread is not None:
return
self._thread = NSThread.alloc().initWithTarget_selector_object_(self, 'runPowerNotificationsThread', None)
self._thread.start()
def stopThread(self):
"""Stops spawned NSThread."""
if self._thread is not None:
self.performSelector_onThread_withObject_waitUntilDone_('stopPowerNotificationsThread', self._thread, None, objc.YES)
self._thread = None
def runPowerNotificationsThread(self):
"""Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop."""
pool = NSAutoreleasePool.alloc().init()
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_source_notification(context):
with self._lock:
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_source_notification, None)
CFRunLoopAddSource(NSRunLoop.currentRunLoop().getCFRunLoop(), self._source, kCFRunLoopDefaultMode)
while not NSThread.currentThread().isCancelled():
NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantFuture())
del pool
def stopPowerNotificationsThread(self):
"""Removes the only source from NSRunLoop and cancels thread."""
assert NSThread.currentThread() == self._thread
CFRunLoopSourceInvalidate(self._source)
self._source = None
NSThread.currentThread().cancel()
def addObserver(self, observer):
"""
Adds weak ref to an observer.
@param observer: Instance of class that implements on_power_source_notification()
"""
with self._lock:
self._weak_observers.append(weakref.ref(observer))
if len(self._weak_observers) == 1:
self.startThread()
|
Kentzo/Power | power/darwin.py | PowerManagement.on_power_source_notification | python | def on_power_source_notification(self):
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_sources_change(self)
observer.on_time_remaining_change(self) | Called in response to IOPSNotificationCreateRunLoopSource() event. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/darwin.py#L250-L258 | null | class PowerManagement(common.PowerManagementBase):
notifications_observer = PowerSourcesNotificationsObserver.alloc().init()
def __init__(self, cf_run_loop=None):
"""
@param cf_run_loop: If provided, all notifications are posted within this loop
"""
super(PowerManagement, self).__init__()
self._cf_run_loop = cf_run_loop
def get_providing_power_source_type(self):
"""
Uses IOPSCopyPowerSourcesInfo and IOPSGetProvidingPowerSourceType to get providing power source type.
"""
blob = IOPSCopyPowerSourcesInfo()
type = IOPSGetProvidingPowerSourceType(blob)
return POWER_TYPE_MAP[type]
def get_low_battery_warning_level(self):
"""
Uses IOPSGetBatteryWarningLevel to get battery warning level.
"""
warning_level = IOPSGetBatteryWarningLevel()
return WARNING_LEVEL_MAP[warning_level]
def get_time_remaining_estimate(self):
"""
In Mac OS X 10.7+
Uses IOPSGetTimeRemainingEstimate to get time remaining estimate.
In Mac OS X 10.6
IOPSGetTimeRemainingEstimate is not available.
If providing power source type is AC, returns TIME_REMAINING_UNLIMITED.
Otherwise looks through all power sources returned by IOPSGetProvidingPowerSourceType
and returns total estimate.
"""
if IOPSGetTimeRemainingEstimate is not None: # Mac OS X 10.7+
estimate = float(IOPSGetTimeRemainingEstimate())
if estimate == -1.0:
return common.TIME_REMAINING_UNKNOWN
elif estimate == -2.0:
return common.TIME_REMAINING_UNLIMITED
else:
return estimate / 60.0
else: # Mac OS X 10.6
warnings.warn("IOPSGetTimeRemainingEstimate is not preset", RuntimeWarning)
blob = IOPSCopyPowerSourcesInfo()
type = IOPSGetProvidingPowerSourceType(blob)
if type == common.POWER_TYPE_AC:
return common.TIME_REMAINING_UNLIMITED
else:
estimate = 0.0
for source in IOPSCopyPowerSourcesList(blob):
description = IOPSGetPowerSourceDescription(blob, source)
if kIOPSIsPresentKey in description and description[kIOPSIsPresentKey] and kIOPSTimeToEmptyKey in description and description[kIOPSTimeToEmptyKey] > 0.0:
estimate += float(description[kIOPSTimeToEmptyKey])
if estimate > 0.0:
return float(estimate)
else:
return common.TIME_REMAINING_UNKNOWN
def add_observer(self, observer):
"""
Spawns thread or adds IOPSNotificationCreateRunLoopSource directly to provided cf_run_loop
@see: __init__
"""
super(PowerManagement, self).add_observer(observer)
if len(self._weak_observers) == 1:
if not self._cf_run_loop:
PowerManagement.notifications_observer.addObserver(self)
else:
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_sources_change(context):
self.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_sources_change, None)
CFRunLoopAddSource(self._cf_run_loop, self._source, kCFRunLoopDefaultMode)
def remove_observer(self, observer):
"""
Stops thread and invalidates source.
"""
super(PowerManagement, self).remove_observer(observer)
if len(self._weak_observers) == 0:
if not self._cf_run_loop:
PowerManagement.notifications_observer.removeObserver(self)
else:
CFRunLoopSourceInvalidate(self._source)
self._source = None
|
Kentzo/Power | power/darwin.py | PowerManagement.get_time_remaining_estimate | python | def get_time_remaining_estimate(self):
if IOPSGetTimeRemainingEstimate is not None: # Mac OS X 10.7+
estimate = float(IOPSGetTimeRemainingEstimate())
if estimate == -1.0:
return common.TIME_REMAINING_UNKNOWN
elif estimate == -2.0:
return common.TIME_REMAINING_UNLIMITED
else:
return estimate / 60.0
else: # Mac OS X 10.6
warnings.warn("IOPSGetTimeRemainingEstimate is not preset", RuntimeWarning)
blob = IOPSCopyPowerSourcesInfo()
type = IOPSGetProvidingPowerSourceType(blob)
if type == common.POWER_TYPE_AC:
return common.TIME_REMAINING_UNLIMITED
else:
estimate = 0.0
for source in IOPSCopyPowerSourcesList(blob):
description = IOPSGetPowerSourceDescription(blob, source)
if kIOPSIsPresentKey in description and description[kIOPSIsPresentKey] and kIOPSTimeToEmptyKey in description and description[kIOPSTimeToEmptyKey] > 0.0:
estimate += float(description[kIOPSTimeToEmptyKey])
if estimate > 0.0:
return float(estimate)
else:
return common.TIME_REMAINING_UNKNOWN | In Mac OS X 10.7+
Uses IOPSGetTimeRemainingEstimate to get time remaining estimate.
In Mac OS X 10.6
IOPSGetTimeRemainingEstimate is not available.
If providing power source type is AC, returns TIME_REMAINING_UNLIMITED.
Otherwise looks through all power sources returned by IOPSGetProvidingPowerSourceType
and returns total estimate. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/darwin.py#L276-L310 | null | class PowerManagement(common.PowerManagementBase):
notifications_observer = PowerSourcesNotificationsObserver.alloc().init()
def __init__(self, cf_run_loop=None):
"""
@param cf_run_loop: If provided, all notifications are posted within this loop
"""
super(PowerManagement, self).__init__()
self._cf_run_loop = cf_run_loop
def on_power_source_notification(self):
"""
Called in response to IOPSNotificationCreateRunLoopSource() event.
"""
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_sources_change(self)
observer.on_time_remaining_change(self)
def get_providing_power_source_type(self):
"""
Uses IOPSCopyPowerSourcesInfo and IOPSGetProvidingPowerSourceType to get providing power source type.
"""
blob = IOPSCopyPowerSourcesInfo()
type = IOPSGetProvidingPowerSourceType(blob)
return POWER_TYPE_MAP[type]
def get_low_battery_warning_level(self):
"""
Uses IOPSGetBatteryWarningLevel to get battery warning level.
"""
warning_level = IOPSGetBatteryWarningLevel()
return WARNING_LEVEL_MAP[warning_level]
def add_observer(self, observer):
"""
Spawns thread or adds IOPSNotificationCreateRunLoopSource directly to provided cf_run_loop
@see: __init__
"""
super(PowerManagement, self).add_observer(observer)
if len(self._weak_observers) == 1:
if not self._cf_run_loop:
PowerManagement.notifications_observer.addObserver(self)
else:
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_sources_change(context):
self.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_sources_change, None)
CFRunLoopAddSource(self._cf_run_loop, self._source, kCFRunLoopDefaultMode)
def remove_observer(self, observer):
"""
Stops thread and invalidates source.
"""
super(PowerManagement, self).remove_observer(observer)
if len(self._weak_observers) == 0:
if not self._cf_run_loop:
PowerManagement.notifications_observer.removeObserver(self)
else:
CFRunLoopSourceInvalidate(self._source)
self._source = None
|
Kentzo/Power | power/darwin.py | PowerManagement.add_observer | python | def add_observer(self, observer):
super(PowerManagement, self).add_observer(observer)
if len(self._weak_observers) == 1:
if not self._cf_run_loop:
PowerManagement.notifications_observer.addObserver(self)
else:
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_sources_change(context):
self.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_sources_change, None)
CFRunLoopAddSource(self._cf_run_loop, self._source, kCFRunLoopDefaultMode) | Spawns thread or adds IOPSNotificationCreateRunLoopSource directly to provided cf_run_loop
@see: __init__ | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/darwin.py#L312-L327 | null | class PowerManagement(common.PowerManagementBase):
notifications_observer = PowerSourcesNotificationsObserver.alloc().init()
def __init__(self, cf_run_loop=None):
"""
@param cf_run_loop: If provided, all notifications are posted within this loop
"""
super(PowerManagement, self).__init__()
self._cf_run_loop = cf_run_loop
def on_power_source_notification(self):
"""
Called in response to IOPSNotificationCreateRunLoopSource() event.
"""
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_sources_change(self)
observer.on_time_remaining_change(self)
def get_providing_power_source_type(self):
"""
Uses IOPSCopyPowerSourcesInfo and IOPSGetProvidingPowerSourceType to get providing power source type.
"""
blob = IOPSCopyPowerSourcesInfo()
type = IOPSGetProvidingPowerSourceType(blob)
return POWER_TYPE_MAP[type]
def get_low_battery_warning_level(self):
"""
Uses IOPSGetBatteryWarningLevel to get battery warning level.
"""
warning_level = IOPSGetBatteryWarningLevel()
return WARNING_LEVEL_MAP[warning_level]
def get_time_remaining_estimate(self):
"""
In Mac OS X 10.7+
Uses IOPSGetTimeRemainingEstimate to get time remaining estimate.
In Mac OS X 10.6
IOPSGetTimeRemainingEstimate is not available.
If providing power source type is AC, returns TIME_REMAINING_UNLIMITED.
Otherwise looks through all power sources returned by IOPSGetProvidingPowerSourceType
and returns total estimate.
"""
if IOPSGetTimeRemainingEstimate is not None: # Mac OS X 10.7+
estimate = float(IOPSGetTimeRemainingEstimate())
if estimate == -1.0:
return common.TIME_REMAINING_UNKNOWN
elif estimate == -2.0:
return common.TIME_REMAINING_UNLIMITED
else:
return estimate / 60.0
else: # Mac OS X 10.6
warnings.warn("IOPSGetTimeRemainingEstimate is not preset", RuntimeWarning)
blob = IOPSCopyPowerSourcesInfo()
type = IOPSGetProvidingPowerSourceType(blob)
if type == common.POWER_TYPE_AC:
return common.TIME_REMAINING_UNLIMITED
else:
estimate = 0.0
for source in IOPSCopyPowerSourcesList(blob):
description = IOPSGetPowerSourceDescription(blob, source)
if kIOPSIsPresentKey in description and description[kIOPSIsPresentKey] and kIOPSTimeToEmptyKey in description and description[kIOPSTimeToEmptyKey] > 0.0:
estimate += float(description[kIOPSTimeToEmptyKey])
if estimate > 0.0:
return float(estimate)
else:
return common.TIME_REMAINING_UNKNOWN
def remove_observer(self, observer):
"""
Stops thread and invalidates source.
"""
super(PowerManagement, self).remove_observer(observer)
if len(self._weak_observers) == 0:
if not self._cf_run_loop:
PowerManagement.notifications_observer.removeObserver(self)
else:
CFRunLoopSourceInvalidate(self._source)
self._source = None
|
Kentzo/Power | power/darwin.py | PowerManagement.remove_observer | python | def remove_observer(self, observer):
super(PowerManagement, self).remove_observer(observer)
if len(self._weak_observers) == 0:
if not self._cf_run_loop:
PowerManagement.notifications_observer.removeObserver(self)
else:
CFRunLoopSourceInvalidate(self._source)
self._source = None | Stops thread and invalidates source. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/darwin.py#L329-L339 | null | class PowerManagement(common.PowerManagementBase):
notifications_observer = PowerSourcesNotificationsObserver.alloc().init()
def __init__(self, cf_run_loop=None):
"""
@param cf_run_loop: If provided, all notifications are posted within this loop
"""
super(PowerManagement, self).__init__()
self._cf_run_loop = cf_run_loop
def on_power_source_notification(self):
"""
Called in response to IOPSNotificationCreateRunLoopSource() event.
"""
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_sources_change(self)
observer.on_time_remaining_change(self)
def get_providing_power_source_type(self):
"""
Uses IOPSCopyPowerSourcesInfo and IOPSGetProvidingPowerSourceType to get providing power source type.
"""
blob = IOPSCopyPowerSourcesInfo()
type = IOPSGetProvidingPowerSourceType(blob)
return POWER_TYPE_MAP[type]
def get_low_battery_warning_level(self):
"""
Uses IOPSGetBatteryWarningLevel to get battery warning level.
"""
warning_level = IOPSGetBatteryWarningLevel()
return WARNING_LEVEL_MAP[warning_level]
def get_time_remaining_estimate(self):
"""
In Mac OS X 10.7+
Uses IOPSGetTimeRemainingEstimate to get time remaining estimate.
In Mac OS X 10.6
IOPSGetTimeRemainingEstimate is not available.
If providing power source type is AC, returns TIME_REMAINING_UNLIMITED.
Otherwise looks through all power sources returned by IOPSGetProvidingPowerSourceType
and returns total estimate.
"""
if IOPSGetTimeRemainingEstimate is not None: # Mac OS X 10.7+
estimate = float(IOPSGetTimeRemainingEstimate())
if estimate == -1.0:
return common.TIME_REMAINING_UNKNOWN
elif estimate == -2.0:
return common.TIME_REMAINING_UNLIMITED
else:
return estimate / 60.0
else: # Mac OS X 10.6
warnings.warn("IOPSGetTimeRemainingEstimate is not preset", RuntimeWarning)
blob = IOPSCopyPowerSourcesInfo()
type = IOPSGetProvidingPowerSourceType(blob)
if type == common.POWER_TYPE_AC:
return common.TIME_REMAINING_UNLIMITED
else:
estimate = 0.0
for source in IOPSCopyPowerSourcesList(blob):
description = IOPSGetPowerSourceDescription(blob, source)
if kIOPSIsPresentKey in description and description[kIOPSIsPresentKey] and kIOPSTimeToEmptyKey in description and description[kIOPSTimeToEmptyKey] > 0.0:
estimate += float(description[kIOPSTimeToEmptyKey])
if estimate > 0.0:
return float(estimate)
else:
return common.TIME_REMAINING_UNKNOWN
def add_observer(self, observer):
"""
Spawns thread or adds IOPSNotificationCreateRunLoopSource directly to provided cf_run_loop
@see: __init__
"""
super(PowerManagement, self).add_observer(observer)
if len(self._weak_observers) == 1:
if not self._cf_run_loop:
PowerManagement.notifications_observer.addObserver(self)
else:
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_sources_change(context):
self.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_sources_change, None)
CFRunLoopAddSource(self._cf_run_loop, self._source, kCFRunLoopDefaultMode)
|
Kentzo/Power | power/win32.py | PowerManagement.get_providing_power_source_type | python | def get_providing_power_source_type(self):
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
return POWER_TYPE_MAP[power_status.ACLineStatus] | Returns GetSystemPowerStatus().ACLineStatus
@raise: WindowsError if any underlying error occures. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/win32.py#L45-L54 | null | class PowerManagement(common.PowerManagementBase):
def get_low_battery_warning_level(self):
"""
Returns warning according to GetSystemPowerStatus().BatteryLifeTime/BatteryLifePercent
@raise WindowsError if any underlying error occures.
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.LOW_BATTERY_WARNING_NONE
else:
if power_status.BatteryLifeTime != -1 and power_status.BatteryLifeTime <= 600:
return common.LOW_BATTERY_WARNING_FINAL
elif power_status.BatteryLifePercent <= 22:
return common.LOW_BATTERY_WARNING_EARLY
else:
return common.LOW_BATTERY_WARNING_NONE
def get_time_remaining_estimate(self):
"""
Returns time remaining estimate according to GetSystemPowerStatus().BatteryLifeTime
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.TIME_REMAINING_UNLIMITED
elif power_status.BatteryLifeTime == -1:
return common.TIME_REMAINING_UNKNOWN
else:
return float(power_status.BatteryLifeTime) / 60.0
def add_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
def remove_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
|
Kentzo/Power | power/win32.py | PowerManagement.get_low_battery_warning_level | python | def get_low_battery_warning_level(self):
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.LOW_BATTERY_WARNING_NONE
else:
if power_status.BatteryLifeTime != -1 and power_status.BatteryLifeTime <= 600:
return common.LOW_BATTERY_WARNING_FINAL
elif power_status.BatteryLifePercent <= 22:
return common.LOW_BATTERY_WARNING_EARLY
else:
return common.LOW_BATTERY_WARNING_NONE | Returns warning according to GetSystemPowerStatus().BatteryLifeTime/BatteryLifePercent
@raise WindowsError if any underlying error occures. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/win32.py#L56-L74 | null | class PowerManagement(common.PowerManagementBase):
def get_providing_power_source_type(self):
"""
Returns GetSystemPowerStatus().ACLineStatus
@raise: WindowsError if any underlying error occures.
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
return POWER_TYPE_MAP[power_status.ACLineStatus]
def get_time_remaining_estimate(self):
"""
Returns time remaining estimate according to GetSystemPowerStatus().BatteryLifeTime
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.TIME_REMAINING_UNLIMITED
elif power_status.BatteryLifeTime == -1:
return common.TIME_REMAINING_UNKNOWN
else:
return float(power_status.BatteryLifeTime) / 60.0
def add_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
def remove_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
|
Kentzo/Power | power/win32.py | PowerManagement.get_time_remaining_estimate | python | def get_time_remaining_estimate(self):
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.TIME_REMAINING_UNLIMITED
elif power_status.BatteryLifeTime == -1:
return common.TIME_REMAINING_UNKNOWN
else:
return float(power_status.BatteryLifeTime) / 60.0 | Returns time remaining estimate according to GetSystemPowerStatus().BatteryLifeTime | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/win32.py#L76-L89 | null | class PowerManagement(common.PowerManagementBase):
def get_providing_power_source_type(self):
"""
Returns GetSystemPowerStatus().ACLineStatus
@raise: WindowsError if any underlying error occures.
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
return POWER_TYPE_MAP[power_status.ACLineStatus]
def get_low_battery_warning_level(self):
"""
Returns warning according to GetSystemPowerStatus().BatteryLifeTime/BatteryLifePercent
@raise WindowsError if any underlying error occures.
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.LOW_BATTERY_WARNING_NONE
else:
if power_status.BatteryLifeTime != -1 and power_status.BatteryLifeTime <= 600:
return common.LOW_BATTERY_WARNING_FINAL
elif power_status.BatteryLifePercent <= 22:
return common.LOW_BATTERY_WARNING_EARLY
else:
return common.LOW_BATTERY_WARNING_NONE
def add_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
def remove_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
|
Kentzo/Power | power/freebsd.py | PowerManagement.power_source_type | python | def power_source_type():
try:
supply=int(subprocess.check_output(["sysctl","-n","hw.acpi.acline"]))
except:
return common.POWER_TYPE_AC
if supply == 1:
return common.POWER_TYPE_AC
elif supply == 0:
return common.POWER_TYPE_BATTERY
else:
raise RuntimeError("Unknown power source type!") | FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).
Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.
@return: One of common.POWER_TYPE_*
@raise: Runtime error if type of power source is not supported | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/freebsd.py#L13-L30 | null | class PowerManagement(common.PowerManagementBase):
@staticmethod
def power_source_type():
"""
FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).
Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.
@return: One of common.POWER_TYPE_*
@raise: Runtime error if type of power source is not supported
"""
try:
supply=int(subprocess.check_output(["sysctl","-n","hw.acpi.acline"]))
except:
return common.POWER_TYPE_AC
if supply == 1:
return common.POWER_TYPE_AC
elif supply == 0:
return common.POWER_TYPE_BATTERY
else:
raise RuntimeError("Unknown power source type!")
@staticmethod
def is_ac_online():
"""
@return: True if ac is online. Otherwise False
"""
try:
supply=int(subprocess.check_output(["sysctl","-n","hw.acpi.acline"]))
except:
return True
return supply == 1
@staticmethod
def is_battery_present():
"""
TODO
@return: True if battery is present. Otherwise False
"""
return False
@staticmethod
def is_battery_discharging():
"""
TODO
@return: True if ac is online. Otherwise False
"""
return False
@staticmethod
def get_battery_state():
"""
TODO
@return: Tuple (energy_full, energy_now, power_now)
"""
energy_now = float(100.0)
power_now = float(100.0)
energy_full = float(100.0)
return energy_full, energy_now, power_now
def get_providing_power_source_type(self):
"""
Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC.
If there is a discharging battery, returns POWER_TYPE_BATTERY.
Since the order of supplies is arbitrary, whatever found first is returned.
"""
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online():
return common.POWER_TYPE_AC
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
return common.POWER_TYPE_BATTERY
else:
warnings.warn("UPS is not supported.")
return common.POWER_TYPE_AC
def get_low_battery_warning_level(self):
"""
Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC returns LOW_BATTERY_WARNING_NONE.
Otherwise determines total percentage and time remaining across all attached batteries.
"""
all_energy_full = []
all_energy_now = []
all_power_now = []
try:
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online():
return common.LOW_BATTERY_WARNING_NONE
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
energy_full, energy_now, power_now = self.get_battery_state()
all_energy_full.append(energy_full)
all_energy_now.append(energy_now)
all_power_now.append(power_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read system power information!", category=RuntimeWarning)
try:
total_percentage = sum(all_energy_full) / sum(all_energy_now)
total_time = sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
if total_time <= 10.0:
return common.LOW_BATTERY_WARNING_FINAL
elif total_percentage <= 22.0:
return common.LOW_BATTERY_WARNING_EARLY
else:
return common.LOW_BATTERY_WARNING_NONE
except ZeroDivisionError as e:
warnings.warn("Unable to calculate low battery level: {0}".format(e), category=RuntimeWarning)
return common.LOW_BATTERY_WARNING_NONE
def get_time_remaining_estimate(self):
"""
Looks through all power sources and returns total time remaining estimate
or TIME_REMAINING_UNLIMITED if ac power supply is online.
"""
all_energy_now = []
all_power_now = []
try:
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.TIME_REMAINING_UNLIMITED
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
energy_full, energy_now, power_now = self.get_battery_state()
all_energy_now.append(energy_now)
all_power_now.append(power_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read system power information!", category=RuntimeWarning)
if len(all_energy_now) > 0:
try:
return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
except ZeroDivisionError as e:
warnings.warn("Unable to calculate time remaining estimate: {0}".format(e), category=RuntimeWarning)
return common.TIME_REMAINING_UNKNOWN
else:
return common.TIME_REMAINING_UNKNOWN
def add_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
def remove_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
|
Kentzo/Power | power/freebsd.py | PowerManagement.get_battery_state | python | def get_battery_state():
energy_now = float(100.0)
power_now = float(100.0)
energy_full = float(100.0)
return energy_full, energy_now, power_now | TODO
@return: Tuple (energy_full, energy_now, power_now) | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/freebsd.py#L64-L72 | null | class PowerManagement(common.PowerManagementBase):
@staticmethod
def power_source_type():
"""
FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).
Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.
@return: One of common.POWER_TYPE_*
@raise: Runtime error if type of power source is not supported
"""
try:
supply=int(subprocess.check_output(["sysctl","-n","hw.acpi.acline"]))
except:
return common.POWER_TYPE_AC
if supply == 1:
return common.POWER_TYPE_AC
elif supply == 0:
return common.POWER_TYPE_BATTERY
else:
raise RuntimeError("Unknown power source type!")
@staticmethod
def is_ac_online():
"""
@return: True if ac is online. Otherwise False
"""
try:
supply=int(subprocess.check_output(["sysctl","-n","hw.acpi.acline"]))
except:
return True
return supply == 1
@staticmethod
def is_battery_present():
"""
TODO
@return: True if battery is present. Otherwise False
"""
return False
@staticmethod
def is_battery_discharging():
"""
TODO
@return: True if ac is online. Otherwise False
"""
return False
@staticmethod
def get_providing_power_source_type(self):
"""
Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC.
If there is a discharging battery, returns POWER_TYPE_BATTERY.
Since the order of supplies is arbitrary, whatever found first is returned.
"""
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online():
return common.POWER_TYPE_AC
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
return common.POWER_TYPE_BATTERY
else:
warnings.warn("UPS is not supported.")
return common.POWER_TYPE_AC
def get_low_battery_warning_level(self):
"""
Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC returns LOW_BATTERY_WARNING_NONE.
Otherwise determines total percentage and time remaining across all attached batteries.
"""
all_energy_full = []
all_energy_now = []
all_power_now = []
try:
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online():
return common.LOW_BATTERY_WARNING_NONE
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
energy_full, energy_now, power_now = self.get_battery_state()
all_energy_full.append(energy_full)
all_energy_now.append(energy_now)
all_power_now.append(power_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read system power information!", category=RuntimeWarning)
try:
total_percentage = sum(all_energy_full) / sum(all_energy_now)
total_time = sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
if total_time <= 10.0:
return common.LOW_BATTERY_WARNING_FINAL
elif total_percentage <= 22.0:
return common.LOW_BATTERY_WARNING_EARLY
else:
return common.LOW_BATTERY_WARNING_NONE
except ZeroDivisionError as e:
warnings.warn("Unable to calculate low battery level: {0}".format(e), category=RuntimeWarning)
return common.LOW_BATTERY_WARNING_NONE
def get_time_remaining_estimate(self):
"""
Looks through all power sources and returns total time remaining estimate
or TIME_REMAINING_UNLIMITED if ac power supply is online.
"""
all_energy_now = []
all_power_now = []
try:
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.TIME_REMAINING_UNLIMITED
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
energy_full, energy_now, power_now = self.get_battery_state()
all_energy_now.append(energy_now)
all_power_now.append(power_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read system power information!", category=RuntimeWarning)
if len(all_energy_now) > 0:
try:
return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
except ZeroDivisionError as e:
warnings.warn("Unable to calculate time remaining estimate: {0}".format(e), category=RuntimeWarning)
return common.TIME_REMAINING_UNKNOWN
else:
return common.TIME_REMAINING_UNKNOWN
def add_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
def remove_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
|
Kentzo/Power | power/freebsd.py | PowerManagement.get_providing_power_source_type | python | def get_providing_power_source_type(self):
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online():
return common.POWER_TYPE_AC
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
return common.POWER_TYPE_BATTERY
else:
warnings.warn("UPS is not supported.")
return common.POWER_TYPE_AC | Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC.
If there is a discharging battery, returns POWER_TYPE_BATTERY.
Since the order of supplies is arbitrary, whatever found first is returned. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/freebsd.py#L75-L91 | [
"def power_source_type():\n \"\"\"\n FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).\n Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.\n @return: One of common.POWER_TYPE_*\n @raise: Runtime error if type of power source is not supported\n \"\"\"\n try:\n supply=int(subprocess.check_output([\"sysctl\",\"-n\",\"hw.acpi.acline\"]))\n except:\n return common.POWER_TYPE_AC\n\n if supply == 1:\n return common.POWER_TYPE_AC\n elif supply == 0:\n return common.POWER_TYPE_BATTERY\n else:\n raise RuntimeError(\"Unknown power source type!\")\n",
"def is_ac_online():\n \"\"\"\n @return: True if ac is online. Otherwise False\n \"\"\"\n try:\n supply=int(subprocess.check_output([\"sysctl\",\"-n\",\"hw.acpi.acline\"]))\n except:\n return True\n return supply == 1\n"
] | class PowerManagement(common.PowerManagementBase):
@staticmethod
def power_source_type():
"""
FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).
Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.
@return: One of common.POWER_TYPE_*
@raise: Runtime error if type of power source is not supported
"""
try:
supply=int(subprocess.check_output(["sysctl","-n","hw.acpi.acline"]))
except:
return common.POWER_TYPE_AC
if supply == 1:
return common.POWER_TYPE_AC
elif supply == 0:
return common.POWER_TYPE_BATTERY
else:
raise RuntimeError("Unknown power source type!")
@staticmethod
def is_ac_online():
"""
@return: True if ac is online. Otherwise False
"""
try:
supply=int(subprocess.check_output(["sysctl","-n","hw.acpi.acline"]))
except:
return True
return supply == 1
@staticmethod
def is_battery_present():
"""
TODO
@return: True if battery is present. Otherwise False
"""
return False
@staticmethod
def is_battery_discharging():
"""
TODO
@return: True if ac is online. Otherwise False
"""
return False
@staticmethod
def get_battery_state():
"""
TODO
@return: Tuple (energy_full, energy_now, power_now)
"""
energy_now = float(100.0)
power_now = float(100.0)
energy_full = float(100.0)
return energy_full, energy_now, power_now
def get_low_battery_warning_level(self):
"""
Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC returns LOW_BATTERY_WARNING_NONE.
Otherwise determines total percentage and time remaining across all attached batteries.
"""
all_energy_full = []
all_energy_now = []
all_power_now = []
try:
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online():
return common.LOW_BATTERY_WARNING_NONE
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
energy_full, energy_now, power_now = self.get_battery_state()
all_energy_full.append(energy_full)
all_energy_now.append(energy_now)
all_power_now.append(power_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read system power information!", category=RuntimeWarning)
try:
total_percentage = sum(all_energy_full) / sum(all_energy_now)
total_time = sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
if total_time <= 10.0:
return common.LOW_BATTERY_WARNING_FINAL
elif total_percentage <= 22.0:
return common.LOW_BATTERY_WARNING_EARLY
else:
return common.LOW_BATTERY_WARNING_NONE
except ZeroDivisionError as e:
warnings.warn("Unable to calculate low battery level: {0}".format(e), category=RuntimeWarning)
return common.LOW_BATTERY_WARNING_NONE
def get_time_remaining_estimate(self):
"""
Looks through all power sources and returns total time remaining estimate
or TIME_REMAINING_UNLIMITED if ac power supply is online.
"""
all_energy_now = []
all_power_now = []
try:
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.TIME_REMAINING_UNLIMITED
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
energy_full, energy_now, power_now = self.get_battery_state()
all_energy_now.append(energy_now)
all_power_now.append(power_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read system power information!", category=RuntimeWarning)
if len(all_energy_now) > 0:
try:
return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
except ZeroDivisionError as e:
warnings.warn("Unable to calculate time remaining estimate: {0}".format(e), category=RuntimeWarning)
return common.TIME_REMAINING_UNKNOWN
else:
return common.TIME_REMAINING_UNKNOWN
def add_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
def remove_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
|
Kentzo/Power | power/freebsd.py | PowerManagement.get_low_battery_warning_level | python | def get_low_battery_warning_level(self):
all_energy_full = []
all_energy_now = []
all_power_now = []
try:
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online():
return common.LOW_BATTERY_WARNING_NONE
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
energy_full, energy_now, power_now = self.get_battery_state()
all_energy_full.append(energy_full)
all_energy_now.append(energy_now)
all_power_now.append(power_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read system power information!", category=RuntimeWarning)
try:
total_percentage = sum(all_energy_full) / sum(all_energy_now)
total_time = sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
if total_time <= 10.0:
return common.LOW_BATTERY_WARNING_FINAL
elif total_percentage <= 22.0:
return common.LOW_BATTERY_WARNING_EARLY
else:
return common.LOW_BATTERY_WARNING_NONE
except ZeroDivisionError as e:
warnings.warn("Unable to calculate low battery level: {0}".format(e), category=RuntimeWarning)
return common.LOW_BATTERY_WARNING_NONE | Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC returns LOW_BATTERY_WARNING_NONE.
Otherwise determines total percentage and time remaining across all attached batteries. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/freebsd.py#L94-L130 | [
"def power_source_type():\n \"\"\"\n FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).\n Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.\n @return: One of common.POWER_TYPE_*\n @raise: Runtime error if type of power source is not supported\n \"\"\"\n try:\n supply=int(subprocess.check_output([\"sysctl\",\"-n\",\"hw.acpi.acline\"]))\n except:\n return common.POWER_TYPE_AC\n\n if supply == 1:\n return common.POWER_TYPE_AC\n elif supply == 0:\n return common.POWER_TYPE_BATTERY\n else:\n raise RuntimeError(\"Unknown power source type!\")\n",
"def is_ac_online():\n \"\"\"\n @return: True if ac is online. Otherwise False\n \"\"\"\n try:\n supply=int(subprocess.check_output([\"sysctl\",\"-n\",\"hw.acpi.acline\"]))\n except:\n return True\n return supply == 1\n",
"def is_battery_present():\n \"\"\"\n TODO\n @return: True if battery is present. Otherwise False\n \"\"\"\n return False\n"
] | class PowerManagement(common.PowerManagementBase):
@staticmethod
def power_source_type():
"""
FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).
Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.
@return: One of common.POWER_TYPE_*
@raise: Runtime error if type of power source is not supported
"""
try:
supply=int(subprocess.check_output(["sysctl","-n","hw.acpi.acline"]))
except:
return common.POWER_TYPE_AC
if supply == 1:
return common.POWER_TYPE_AC
elif supply == 0:
return common.POWER_TYPE_BATTERY
else:
raise RuntimeError("Unknown power source type!")
@staticmethod
def is_ac_online():
"""
@return: True if ac is online. Otherwise False
"""
try:
supply=int(subprocess.check_output(["sysctl","-n","hw.acpi.acline"]))
except:
return True
return supply == 1
@staticmethod
def is_battery_present():
"""
TODO
@return: True if battery is present. Otherwise False
"""
return False
@staticmethod
def is_battery_discharging():
"""
TODO
@return: True if ac is online. Otherwise False
"""
return False
@staticmethod
def get_battery_state():
"""
TODO
@return: Tuple (energy_full, energy_now, power_now)
"""
energy_now = float(100.0)
power_now = float(100.0)
energy_full = float(100.0)
return energy_full, energy_now, power_now
def get_providing_power_source_type(self):
"""
Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC.
If there is a discharging battery, returns POWER_TYPE_BATTERY.
Since the order of supplies is arbitrary, whatever found first is returned.
"""
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online():
return common.POWER_TYPE_AC
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
return common.POWER_TYPE_BATTERY
else:
warnings.warn("UPS is not supported.")
return common.POWER_TYPE_AC
def get_time_remaining_estimate(self):
"""
Looks through all power sources and returns total time remaining estimate
or TIME_REMAINING_UNLIMITED if ac power supply is online.
"""
all_energy_now = []
all_power_now = []
try:
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.TIME_REMAINING_UNLIMITED
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
energy_full, energy_now, power_now = self.get_battery_state()
all_energy_now.append(energy_now)
all_power_now.append(power_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read system power information!", category=RuntimeWarning)
if len(all_energy_now) > 0:
try:
return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
except ZeroDivisionError as e:
warnings.warn("Unable to calculate time remaining estimate: {0}".format(e), category=RuntimeWarning)
return common.TIME_REMAINING_UNKNOWN
else:
return common.TIME_REMAINING_UNKNOWN
def add_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
def remove_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
|
Kentzo/Power | power/freebsd.py | PowerManagement.get_time_remaining_estimate | python | def get_time_remaining_estimate(self):
all_energy_now = []
all_power_now = []
try:
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.TIME_REMAINING_UNLIMITED
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
energy_full, energy_now, power_now = self.get_battery_state()
all_energy_now.append(energy_now)
all_power_now.append(power_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read system power information!", category=RuntimeWarning)
if len(all_energy_now) > 0:
try:
return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
except ZeroDivisionError as e:
warnings.warn("Unable to calculate time remaining estimate: {0}".format(e), category=RuntimeWarning)
return common.TIME_REMAINING_UNKNOWN
else:
return common.TIME_REMAINING_UNKNOWN | Looks through all power sources and returns total time remaining estimate
or TIME_REMAINING_UNLIMITED if ac power supply is online. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/freebsd.py#L133-L162 | [
"def power_source_type():\n \"\"\"\n FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).\n Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.\n @return: One of common.POWER_TYPE_*\n @raise: Runtime error if type of power source is not supported\n \"\"\"\n try:\n supply=int(subprocess.check_output([\"sysctl\",\"-n\",\"hw.acpi.acline\"]))\n except:\n return common.POWER_TYPE_AC\n\n if supply == 1:\n return common.POWER_TYPE_AC\n elif supply == 0:\n return common.POWER_TYPE_BATTERY\n else:\n raise RuntimeError(\"Unknown power source type!\")\n",
"def is_ac_online():\n \"\"\"\n @return: True if ac is online. Otherwise False\n \"\"\"\n try:\n supply=int(subprocess.check_output([\"sysctl\",\"-n\",\"hw.acpi.acline\"]))\n except:\n return True\n return supply == 1\n",
"def is_battery_present():\n \"\"\"\n TODO\n @return: True if battery is present. Otherwise False\n \"\"\"\n return False\n"
] | class PowerManagement(common.PowerManagementBase):
@staticmethod
def power_source_type():
    """
    FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).
    Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.
    @return: One of common.POWER_TYPE_*
    @raise: Runtime error if type of power source is not supported
    """
    try:
        acline = int(subprocess.check_output(["sysctl", "-n", "hw.acpi.acline"]))
    except:
        # The oid may be missing (e.g. desktops); assume mains power.
        return common.POWER_TYPE_AC
    if acline == 1:
        return common.POWER_TYPE_AC
    if acline == 0:
        return common.POWER_TYPE_BATTERY
    raise RuntimeError("Unknown power source type!")
@staticmethod
def is_ac_online():
    """
    @return: True if ac is online. Otherwise False
    """
    try:
        acline = int(subprocess.check_output(["sysctl", "-n", "hw.acpi.acline"]))
    except:
        # Missing/unreadable oid: treat as "on AC", matching power_source_type().
        return True
    return acline == 1
@staticmethod
def is_battery_present():
"""
TODO
@return: True if battery is present. Otherwise False
"""
return False
@staticmethod
def is_battery_discharging():
"""
TODO
@return: True if ac is online. Otherwise False
"""
return False
@staticmethod
def get_battery_state():
"""
TODO
@return: Tuple (energy_full, energy_now, power_now)
"""
energy_now = float(100.0)
power_now = float(100.0)
energy_full = float(100.0)
return energy_full, energy_now, power_now
def get_providing_power_source_type(self):
    """
    Looks through all power supplies in POWER_SUPPLY_PATH.
    If there is an AC adapter online returns POWER_TYPE_AC.
    If there is a discharging battery, returns POWER_TYPE_BATTERY.
    Since the order of supplies is arbitrary, whatever found first is returned.
    """
    source = self.power_source_type()
    if source == common.POWER_TYPE_AC:
        if self.is_ac_online():
            return common.POWER_TYPE_AC
    elif source == common.POWER_TYPE_BATTERY:
        if self.is_battery_present() and self.is_battery_discharging():
            return common.POWER_TYPE_BATTERY
    else:
        warnings.warn("UPS is not supported.")
    # Default: report AC when nothing more specific was detected.
    return common.POWER_TYPE_AC
def get_low_battery_warning_level(self):
    """
    Determine how urgently the user should be warned about battery level.

    Returns LOW_BATTERY_WARNING_NONE when on AC power; otherwise derives a
    warning level from the aggregated battery-state readings.
    """
    all_energy_full = []
    all_energy_now = []
    all_power_now = []
    try:
        type = self.power_source_type()
        if type == common.POWER_TYPE_AC:
            if self.is_ac_online():
                return common.LOW_BATTERY_WARNING_NONE
        elif type == common.POWER_TYPE_BATTERY:
            if self.is_battery_present() and self.is_battery_discharging():
                energy_full, energy_now, power_now = self.get_battery_state()
                all_energy_full.append(energy_full)
                all_energy_now.append(energy_now)
                all_power_now.append(power_now)
        else:
            # Neither AC nor battery: treat as an (unsupported) UPS.
            warnings.warn("UPS is not supported.")
    except (RuntimeError, IOError) as e:
        warnings.warn("Unable to read system power information!", category=RuntimeWarning)
    try:
        # NOTE(review): this is full/now, not now/full — at 50% charge it
        # yields 2.0, so the `<= 22.0` threshold below looks suspicious;
        # confirm the intended percentage formula.
        total_percentage = sum(all_energy_full) / sum(all_energy_now)
        # Minutes remaining, summed over all discharging batteries.
        total_time = sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
        if total_time <= 10.0:
            return common.LOW_BATTERY_WARNING_FINAL
        elif total_percentage <= 22.0:
            return common.LOW_BATTERY_WARNING_EARLY
        else:
            return common.LOW_BATTERY_WARNING_NONE
    except ZeroDivisionError as e:
        # Empty lists (no discharging battery found) sum to 0 and land here.
        warnings.warn("Unable to calculate low battery level: {0}".format(e), category=RuntimeWarning)
        return common.LOW_BATTERY_WARNING_NONE
def add_observer(self, observer):
    """No-op: observation of power-source events is unsupported here."""
    # Warn the caller instead of failing silently.
    warnings.warn("Current system does not support observing.")
def remove_observer(self, observer):
    """No-op: observation of power-source events is unsupported here."""
    # Warn the caller instead of failing silently.
    warnings.warn("Current system does not support observing.")
|
Kentzo/Power | power/linux.py | PowerManagement.get_providing_power_source_type | python | def get_providing_power_source_type(self):
for supply in os.listdir(POWER_SUPPLY_PATH):
supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
try:
type = self.power_source_type(supply_path)
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.POWER_TYPE_AC
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
return common.POWER_TYPE_BATTERY
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)
return common.POWER_TYPE_AC | Looks through all power supplies in POWER_SUPPLY_PATH.
If there is an AC adapter online returns POWER_TYPE_AC.
If there is a discharging battery, returns POWER_TYPE_BATTERY.
Since the order of supplies is arbitrary, whatever found first is returned. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/linux.py#L88-L110 | [
"def power_source_type(supply_path):\n \"\"\"\n @param supply_path: Path to power supply\n @return: One of common.POWER_TYPE_*\n @raise: Runtime error if type of power source is not supported\n \"\"\"\n with open(os.path.join(supply_path, 'type'), 'r') as type_file:\n type = type_file.readline().strip()\n if type == 'Mains':\n return common.POWER_TYPE_AC\n elif type == 'UPS':\n return common.POWER_TYPE_UPS\n elif type == 'Battery':\n return common.POWER_TYPE_BATTERY\n else:\n raise RuntimeError(\"Type of {path} ({type}) is not supported\".format(path=supply_path, type=type))\n",
"def is_ac_online(supply_path):\n \"\"\"\n @param supply_path: Path to power supply\n @return: True if ac is online. Otherwise False\n \"\"\"\n with open(os.path.join(supply_path, 'online'), 'r') as online_file:\n return online_file.readline().strip() == '1'\n",
"def is_battery_present(supply_path):\n \"\"\"\n @param supply_path: Path to power supply\n @return: True if battery is present. Otherwise False\n \"\"\"\n with open(os.path.join(supply_path, 'present'), 'r') as present_file:\n return present_file.readline().strip() == '1'\n"
] | class PowerManagement(common.PowerManagementBase):
@staticmethod
def power_source_type(supply_path):
"""
@param supply_path: Path to power supply
@return: One of common.POWER_TYPE_*
@raise: Runtime error if type of power source is not supported
"""
with open(os.path.join(supply_path, 'type'), 'r') as type_file:
type = type_file.readline().strip()
if type == 'Mains':
return common.POWER_TYPE_AC
elif type == 'UPS':
return common.POWER_TYPE_UPS
elif type == 'Battery':
return common.POWER_TYPE_BATTERY
else:
raise RuntimeError("Type of {path} ({type}) is not supported".format(path=supply_path, type=type))
@staticmethod
def is_ac_online(supply_path):
"""
@param supply_path: Path to power supply
@return: True if ac is online. Otherwise False
"""
with open(os.path.join(supply_path, 'online'), 'r') as online_file:
return online_file.readline().strip() == '1'
@staticmethod
def is_battery_present(supply_path):
"""
@param supply_path: Path to power supply
@return: True if battery is present. Otherwise False
"""
with open(os.path.join(supply_path, 'present'), 'r') as present_file:
return present_file.readline().strip() == '1'
@staticmethod
def is_battery_discharging(supply_path):
"""
@param supply_path: Path to power supply
@return: True if ac is online. Otherwise False
"""
with open(os.path.join(supply_path, 'status'), 'r') as status_file:
return status_file.readline().strip() == 'Discharging'
@staticmethod
def get_battery_state(supply_path):
"""
@param supply_path: Path to power supply
@return: Tuple (energy_full, energy_now, power_now)
"""
try:
energy_now_file = open(os.path.join(supply_path, 'energy_now'), 'r')
except IOError:
energy_now_file = open(os.path.join(supply_path, 'charge_now'), 'r')
try:
energy_full_file = open(os.path.join(supply_path, 'energy_full'), 'r')
except IOError:
energy_full_file = open(os.path.join(supply_path, 'charge_full'), 'r')
with energy_now_file:
with open(os.path.join(supply_path, 'power_now'), 'r') as power_now_file:
with energy_full_file:
energy_now = float(energy_now_file.readline().strip())
power_now = float(power_now_file.readline().strip())
energy_full = float(energy_full_file.readline().strip())
return energy_full, energy_now, power_now
def get_low_battery_warning_level(self):
    """
    Looks through all power supplies in POWER_SUPPLY_PATH.
    If an AC adapter is online, returns LOW_BATTERY_WARNING_NONE.
    Otherwise determines total percentage and time remaining across all
    attached batteries and maps them to a LOW_BATTERY_WARNING_* level.
    """
    all_energy_full = []
    all_energy_now = []
    all_power_now = []
    for supply in os.listdir(POWER_SUPPLY_PATH):
        supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
        try:
            type = self.power_source_type(supply_path)
            if type == common.POWER_TYPE_AC:
                if self.is_ac_online(supply_path):
                    return common.LOW_BATTERY_WARNING_NONE
            elif type == common.POWER_TYPE_BATTERY:
                if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
                    energy_full, energy_now, power_now = self.get_battery_state(supply_path)
                    all_energy_full.append(energy_full)
                    all_energy_now.append(energy_now)
                    all_power_now.append(power_now)
            else:
                warnings.warn("UPS is not supported.")
        except (RuntimeError, IOError) as e:
            # Skip supplies whose sysfs files are missing or unreadable.
            warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)
    try:
        # NOTE(review): full/now, not now/full — at 50% charge this yields
        # 2.0, making the `<= 22.0` comparison below suspicious; confirm the
        # intended percentage formula.
        total_percentage = sum(all_energy_full) / sum(all_energy_now)
        # Minutes remaining, summed across all discharging batteries.
        total_time = sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
        if total_time <= 10.0:
            return common.LOW_BATTERY_WARNING_FINAL
        elif total_percentage <= 22.0:
            return common.LOW_BATTERY_WARNING_EARLY
        else:
            return common.LOW_BATTERY_WARNING_NONE
    except ZeroDivisionError as e:
        # No batteries collected -> sum() is 0 and the division lands here.
        warnings.warn("Unable to calculate low battery level: {0}".format(e), category=RuntimeWarning)
        return common.LOW_BATTERY_WARNING_NONE
def get_time_remaining_estimate(self):
    """
    Looks through all power sources and returns total time remaining estimate
    (in minutes) or TIME_REMAINING_UNLIMITED if ac power supply is online.
    """
    all_energy_now = []
    all_energy_not_discharging = []
    all_power_now = []
    for supply in os.listdir(POWER_SUPPLY_PATH):
        supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
        try:
            type = self.power_source_type(supply_path)
            if type == common.POWER_TYPE_AC:
                if self.is_ac_online(supply_path):
                    return common.TIME_REMAINING_UNLIMITED
            elif type == common.POWER_TYPE_BATTERY:
                if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
                    energy_full, energy_now, power_now = self.get_battery_state(supply_path)
                    all_energy_now.append(energy_now)
                    all_power_now.append(power_now)
                elif self.is_battery_present(supply_path) and not self.is_battery_discharging(supply_path):
                    # Idle batteries contribute stored energy but no draw figure.
                    energy_now = self.get_battery_state(supply_path)[1]
                    all_energy_not_discharging.append(energy_now)
            else:
                warnings.warn("UPS is not supported.")
        except (RuntimeError, IOError) as e:
            warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)
    if len(all_energy_now) > 0:
        try:
            # Minutes from discharging batteries, plus an estimate for the
            # idle ones using the average draw of the discharging batteries.
            return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])\
                + sum(all_energy_not_discharging) / (sum(all_power_now) / len(all_power_now)) * 60.0
        except ZeroDivisionError as e:
            warnings.warn("Unable to calculate time remaining estimate: {0}".format(e), category=RuntimeWarning)
            return common.TIME_REMAINING_UNKNOWN
    else:
        return common.TIME_REMAINING_UNKNOWN
def add_observer(self, observer):
    """No-op: observation of power-source events is unsupported here."""
    # Warn the caller instead of failing silently.
    warnings.warn("Current system does not support observing.")
def remove_observer(self, observer):
    """No-op: observation of power-source events is unsupported here."""
    # Warn the caller instead of failing silently.
    warnings.warn("Current system does not support observing.")
|
Kentzo/Power | power/linux.py | PowerManagement.get_time_remaining_estimate | python | def get_time_remaining_estimate(self):
all_energy_now = []
all_energy_not_discharging = []
all_power_now = []
for supply in os.listdir(POWER_SUPPLY_PATH):
supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
try:
type = self.power_source_type(supply_path)
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.TIME_REMAINING_UNLIMITED
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
energy_full, energy_now, power_now = self.get_battery_state(supply_path)
all_energy_now.append(energy_now)
all_power_now.append(power_now)
elif self.is_battery_present(supply_path) and not self.is_battery_discharging(supply_path):
energy_now = self.get_battery_state(supply_path)[1]
all_energy_not_discharging.append(energy_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)
if len(all_energy_now) > 0:
try:
return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])\
+ sum(all_energy_not_discharging) / (sum(all_power_now) / len(all_power_now)) * 60.0
except ZeroDivisionError as e:
warnings.warn("Unable to calculate time remaining estimate: {0}".format(e), category=RuntimeWarning)
return common.TIME_REMAINING_UNKNOWN
else:
return common.TIME_REMAINING_UNKNOWN | Looks through all power sources and returns total time remaining estimate
or TIME_REMAINING_UNLIMITED if ac power supply is online. | train | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/linux.py#L152-L188 | [
"def power_source_type(supply_path):\n \"\"\"\n @param supply_path: Path to power supply\n @return: One of common.POWER_TYPE_*\n @raise: Runtime error if type of power source is not supported\n \"\"\"\n with open(os.path.join(supply_path, 'type'), 'r') as type_file:\n type = type_file.readline().strip()\n if type == 'Mains':\n return common.POWER_TYPE_AC\n elif type == 'UPS':\n return common.POWER_TYPE_UPS\n elif type == 'Battery':\n return common.POWER_TYPE_BATTERY\n else:\n raise RuntimeError(\"Type of {path} ({type}) is not supported\".format(path=supply_path, type=type))\n",
"def is_ac_online(supply_path):\n \"\"\"\n @param supply_path: Path to power supply\n @return: True if ac is online. Otherwise False\n \"\"\"\n with open(os.path.join(supply_path, 'online'), 'r') as online_file:\n return online_file.readline().strip() == '1'\n",
"def is_battery_present(supply_path):\n \"\"\"\n @param supply_path: Path to power supply\n @return: True if battery is present. Otherwise False\n \"\"\"\n with open(os.path.join(supply_path, 'present'), 'r') as present_file:\n return present_file.readline().strip() == '1'\n"
] | class PowerManagement(common.PowerManagementBase):
@staticmethod
def power_source_type(supply_path):
"""
@param supply_path: Path to power supply
@return: One of common.POWER_TYPE_*
@raise: Runtime error if type of power source is not supported
"""
with open(os.path.join(supply_path, 'type'), 'r') as type_file:
type = type_file.readline().strip()
if type == 'Mains':
return common.POWER_TYPE_AC
elif type == 'UPS':
return common.POWER_TYPE_UPS
elif type == 'Battery':
return common.POWER_TYPE_BATTERY
else:
raise RuntimeError("Type of {path} ({type}) is not supported".format(path=supply_path, type=type))
@staticmethod
def is_ac_online(supply_path):
"""
@param supply_path: Path to power supply
@return: True if ac is online. Otherwise False
"""
with open(os.path.join(supply_path, 'online'), 'r') as online_file:
return online_file.readline().strip() == '1'
@staticmethod
def is_battery_present(supply_path):
"""
@param supply_path: Path to power supply
@return: True if battery is present. Otherwise False
"""
with open(os.path.join(supply_path, 'present'), 'r') as present_file:
return present_file.readline().strip() == '1'
@staticmethod
def is_battery_discharging(supply_path):
"""
@param supply_path: Path to power supply
@return: True if ac is online. Otherwise False
"""
with open(os.path.join(supply_path, 'status'), 'r') as status_file:
return status_file.readline().strip() == 'Discharging'
@staticmethod
def get_battery_state(supply_path):
"""
@param supply_path: Path to power supply
@return: Tuple (energy_full, energy_now, power_now)
"""
try:
energy_now_file = open(os.path.join(supply_path, 'energy_now'), 'r')
except IOError:
energy_now_file = open(os.path.join(supply_path, 'charge_now'), 'r')
try:
energy_full_file = open(os.path.join(supply_path, 'energy_full'), 'r')
except IOError:
energy_full_file = open(os.path.join(supply_path, 'charge_full'), 'r')
with energy_now_file:
with open(os.path.join(supply_path, 'power_now'), 'r') as power_now_file:
with energy_full_file:
energy_now = float(energy_now_file.readline().strip())
power_now = float(power_now_file.readline().strip())
energy_full = float(energy_full_file.readline().strip())
return energy_full, energy_now, power_now
def get_providing_power_source_type(self):
    """
    Looks through all power supplies in POWER_SUPPLY_PATH.
    If there is an AC adapter online returns POWER_TYPE_AC.
    If there is a discharging battery, returns POWER_TYPE_BATTERY.
    Since the order of supplies is arbitrary, whatever found first is returned.
    """
    for entry in os.listdir(POWER_SUPPLY_PATH):
        entry_path = os.path.join(POWER_SUPPLY_PATH, entry)
        try:
            source = self.power_source_type(entry_path)
            if source == common.POWER_TYPE_AC:
                if self.is_ac_online(entry_path):
                    return common.POWER_TYPE_AC
            elif source == common.POWER_TYPE_BATTERY:
                if self.is_battery_present(entry_path) and self.is_battery_discharging(entry_path):
                    return common.POWER_TYPE_BATTERY
            else:
                warnings.warn("UPS is not supported.")
        except (RuntimeError, IOError) as e:
            warnings.warn("Unable to read properties of {0}: {1}".format(entry_path, e), category=RuntimeWarning)
    # No online AC and no discharging battery found: assume mains power.
    return common.POWER_TYPE_AC
def get_low_battery_warning_level(self):
    """
    Looks through all power supplies in POWER_SUPPLY_PATH.
    If an AC adapter is online, returns LOW_BATTERY_WARNING_NONE.
    Otherwise determines total percentage and time remaining across all
    attached batteries and maps them to a LOW_BATTERY_WARNING_* level.
    """
    all_energy_full = []
    all_energy_now = []
    all_power_now = []
    for supply in os.listdir(POWER_SUPPLY_PATH):
        supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
        try:
            type = self.power_source_type(supply_path)
            if type == common.POWER_TYPE_AC:
                if self.is_ac_online(supply_path):
                    return common.LOW_BATTERY_WARNING_NONE
            elif type == common.POWER_TYPE_BATTERY:
                if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
                    energy_full, energy_now, power_now = self.get_battery_state(supply_path)
                    all_energy_full.append(energy_full)
                    all_energy_now.append(energy_now)
                    all_power_now.append(power_now)
            else:
                warnings.warn("UPS is not supported.")
        except (RuntimeError, IOError) as e:
            # Skip supplies whose sysfs files are missing or unreadable.
            warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)
    try:
        # NOTE(review): full/now, not now/full — at 50% charge this yields
        # 2.0, making the `<= 22.0` comparison below suspicious; confirm the
        # intended percentage formula.
        total_percentage = sum(all_energy_full) / sum(all_energy_now)
        # Minutes remaining, summed across all discharging batteries.
        total_time = sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
        if total_time <= 10.0:
            return common.LOW_BATTERY_WARNING_FINAL
        elif total_percentage <= 22.0:
            return common.LOW_BATTERY_WARNING_EARLY
        else:
            return common.LOW_BATTERY_WARNING_NONE
    except ZeroDivisionError as e:
        # No batteries collected -> sum() is 0 and the division lands here.
        warnings.warn("Unable to calculate low battery level: {0}".format(e), category=RuntimeWarning)
        return common.LOW_BATTERY_WARNING_NONE
def add_observer(self, observer):
    """No-op: observation of power-source events is unsupported here."""
    # Warn the caller instead of failing silently.
    warnings.warn("Current system does not support observing.")
def remove_observer(self, observer):
    """No-op: observation of power-source events is unsupported here."""
    # Warn the caller instead of failing silently.
    warnings.warn("Current system does not support observing.")
|
Arubacloud/pyArubaCloud | ArubaCloud/base/vm.py | VMList.find | python | def find(self, name):
if name.__class__ is 'base.Server.Pro' or name.__class__ is 'base.Server.Smart':
# print('DEBUG: matched VM object %s' % name.__class__)
pattern = name.vm_name
else:
# print('DEBUG: matched Str Object %s' % name.__class__)
pattern = name
# 14/06/2013: since this method is called within a thread and I wont to pass the return objects with queue or
# call back, I will allocate a list inside the Interface class object itself, which contain all of the vm found
# 02/11/2015: this must be changed ASAP! it's a mess this way... what was I thinking??
self.last_search_result = [vm for vm in self if pattern in vm.vm_name]
return self.last_search_result | Return a list of subset of VM that match the pattern name
@param name (str): the vm name of the virtual machine
@param name (Obj): the vm object that represent the virtual
machine (can be Pro or Smart)
@return (list): the subset containing the search result. | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/base/vm.py#L6-L24 | null | class VMList(list):
def __init__(self, *args, **kwargs):
    """Initialize the VM list and its cached search result."""
    # NOTE(review): **kwargs is accepted but silently discarded, and only
    # *args is forwarded to list.__init__ — confirm whether kwargs is needed.
    super(VMList, self).__init__(*args)
    # Cache of the most recent search result.
    self.last_search_result = []
def show(self):
    """Print every VM held in the list, one per line."""
    for machine in self:
        print(machine)
def find_ip(self, ip):
    """
    Find the VM bound to the given IP address.

    @param ip: either an Ip object (matched via its ip_addr attribute)
        or a plain string holding the address.
    @return: the last VM in the list whose address equals *ip*,
        or None when no VM matches.
    """
    # BUG FIX: the original compared classes against string literals with
    # identity ("ip.__class__ is 'base.Ip.Ip'"), which is always False, and
    # relied on interpreter-specific string interning for the 'Smart' check.
    pattern = ip.ip_addr if hasattr(ip, 'ip_addr') else ip
    found = None
    for vm in self:
        if vm.__class__.__name__ == 'Smart':
            # Smart servers store the address directly as a string.
            if pattern == vm.ip_addr:
                found = vm
        else:
            # Pro servers wrap the address in an Ip object.
            if pattern == vm.ip_addr.ip_addr:
                found = vm
    return found
|
Arubacloud/pyArubaCloud | ArubaCloud/base/vm.py | VM.reinitialize | python | def reinitialize(self, admin_password=None, debug=False, ConfigureIPv6=False, OSTemplateID=None):
data = dict(
AdministratorPassword=admin_password,
ServerId=self.sid,
ConfigureIPv6=ConfigureIPv6
)
if OSTemplateID is not None:
data.update(OSTemplateID=OSTemplateID)
assert data['AdministratorPassword'] is not None, 'Error reinitializing VM: no admin password specified.'
assert data['ServerId'] is not None, 'Error reinitializing VM: no Server Id specified.'
json_scheme = self.interface.gen_def_json_scheme('SetEnqueueReinitializeServer', method_fields=data)
json_obj = self.interface.call_method_post('SetEnqueueReinitializeServer', json_scheme=json_scheme, debug=debug)
return True if json_obj['Success'] is 'True' else False | Reinitialize a VM.
:param admin_password: Administrator password.
:param debug: Flag to enable debug output.
:param ConfigureIPv6: Flag to enable IPv6 on the VM.
:param OSTemplateID: TemplateID to reinitialize the VM with.
:return: True in case of success, otherwise False
:type admin_password: str
:type debug: bool
:type ConfigureIPv6: bool
:type OSTemplateID: int | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/base/vm.py#L82-L106 | null | class VM(object):
# NOTE(review): class-level defaults shared by all VM instances until the
# per-instance values are assigned (e.g. by CloudInterface.get_servers).
vm_name = None        # display name of the server
cpu_qty = None        # number of virtual CPUs
ram_qty = None        # amount of RAM
status = None         # last known server status code
sid = None            # server id used in WS calls
datacenter_id = None  # datacenter the VM lives in
auth = None           # Auth credentials for WS calls
admin_password = None # administrator password, when known
wcf_baseurl = None    # WS endpoint base URL
template_id = None    # template the VM was created from
hd_total_size = None  # total disk size across all HDs
hd_qty = None         # number of attached HDs
def __init__(self, interface):
    """Bind this VM to the API *interface* used for all subsequent WS calls."""
    super(VM, self).__init__()
    # CloudInterface (or compatible) providing gen_def_json_scheme/call_method_post.
    self.interface = interface
def poweroff(self, debug=False):
    """
    Enqueue a power-off request for this server via the WS API.

    :param debug: forwarded to the API call for verbose output.
    :return: True when the API reports success, otherwise False.
    """
    data = dict(
        ServerId=self.sid
    )
    json_scheme = self.interface.gen_def_json_scheme('SetEnqueueServerPowerOff', data)
    json_obj = self.interface.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme, debug=debug)
    # NOTE(review): `is True` only matches the bool singleton; if the API can
    # return the string 'True' (as sibling methods assume), this is always
    # False for that case — confirm the actual response type.
    return True if json_obj['Success'] is True else False
def poweron(self, debug=False):
    """
    Enqueue a power-on request for this server via the WS API.

    :param debug: forwarded to the API call for verbose output.
    :return: True when the API reports success, otherwise False.
    """
    data = dict(
        ServerId=self.sid
    )
    json_scheme = self.interface.gen_def_json_scheme('SetEnqueueServerStart', data)
    json_obj = self.interface.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme, debug=debug)
    # BUG FIX: `json_obj['Success'] is 'True'` compared identity against a
    # string literal and was effectively always False; accept either the
    # boolean True or the string 'True'.
    return json_obj['Success'] in (True, 'True')
def edit_cpu(self, cpu_qty, debug=False):
    """Change the CPU count — not implemented yet."""
    # BUG FIX: the original raised NotImplemented (a constant, not an
    # exception), which itself raises TypeError; NotImplementedError is the
    # correct exception type. Same fix applied to the stubs below.
    raise NotImplementedError()

def edit_ram(self, ram_qty, debug=False):
    """Change the RAM amount — not implemented yet."""
    raise NotImplementedError()

def add_virtual_disk(self, *args, **kwargs):
    """Attach a virtual disk — not implemented yet."""
    raise NotImplementedError()

def remove_virtual_disk(self, *args, **kwargs):
    """Detach a virtual disk — not implemented yet."""
    raise NotImplementedError()

def edit_virtual_disk_size(self, *args, **kwargs):
    """Resize a virtual disk — not implemented yet."""
    raise NotImplementedError()
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.login | python | def login(self, username, password, load=True):
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers() | Set the authentication data in the object, and if load is True
(default is True) it also retrieve the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: define if pre cache the objects.
@return: None | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L54-L67 | [
"def get_servers(self):\n \"\"\"\n Create the list of Server object inside the Datacenter objects.\n Build an internal list of VM Objects (pro or smart) as iterator.\n :return: bool\n \"\"\"\n json_scheme = self.gen_def_json_scheme('GetServers')\n json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)\n self.json_servers = json_obj\n # if this method is called I assume that i must re-read the data\n # so i reinitialize the vmlist\n self.vmlist = VMList()\n # getting all instanced IP in case the list is empty\n if len(self.iplist) <= 0:\n self.get_ip()\n for elem in dict(json_obj)[\"Value\"]:\n if elem['HypervisorType'] is 4:\n s = Smart(interface=self, sid=elem['ServerId'])\n else:\n s = Pro(interface=self, sid=elem['ServerId'])\n s.vm_name = elem['Name']\n s.cpu_qty = elem['CPUQuantity']\n s.ram_qty = elem['RAMQuantity']\n s.status = elem['ServerStatus']\n s.datacenter_id = elem['DatacenterId']\n s.wcf_baseurl = self.wcf_baseurl\n s.auth = self.auth\n s.hd_qty = elem['HDQuantity']\n s.hd_total_size = elem['HDTotalSize']\n if elem['HypervisorType'] is 4:\n ssd = self.get_server_detail(elem['ServerId'])\n try:\n s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])\n except TypeError:\n s.ip_addr = 'Not retrieved.'\n else:\n s.ip_addr = []\n for ip in self.iplist:\n if ip.serverid == s.sid:\n s.ip_addr.append(ip)\n self.vmlist.append(s)\n return True if json_obj['Success'] is True else False\n",
"def get_ip(self):\n \"\"\"\n Retrieve a complete list of bought ip address related only to PRO Servers.\n It create an internal object (Iplist) representing all of the ips object\n iterated form the WS.\n @param: None\n @return: None\n \"\"\"\n json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')\n json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)\n self.iplist = IpList()\n for ip in json_obj['Value']:\n r = Ip()\n r.ip_addr = ip['Value']\n r.resid = ip['ResourceId']\n r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None\n self.iplist.append(r)\n"
] | class CloudInterface(JsonInterface):
# NOTE(review): these are class-level mutable defaults shared by every
# CloudInterface instance — confirm that this sharing is intentional.
templates = []           # Template objects filled in by get_hypervisors()
vmlist = VMList()        # cached VM objects built by get_servers()
iplist = IpList()        # cached purchased IPs built by get_ip()
json_templates = None    # raw GetHypervisors response
json_servers = None      # raw GetServers response
ip_resource = None
# Numeric hypervisor type -> short name used throughout the API.
hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}
def __init__(self, dc, debug_level=logging.INFO):
    """
    :param dc: datacenter number, interpolated into the WS endpoint URL.
    :param debug_level: logging level for the internal ArubaLog.
    """
    super(CloudInterface, self).__init__()
    # NOTE(review): assert-based validation disappears under `python -O`;
    # consider raising TypeError explicitly instead.
    assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
    self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
    self.logger = ArubaLog(level=debug_level, log_to_file=False)
    self.logger.name = self.__class__
    # Auth is attached later via login().
    self.auth = None
def poweroff_server(self, server=None, server_id=None):
    """
    Poweroff a VM. It is possible to pass the VM object or simply the ID
    of the VM that we want to turn off.

    Args:
        server: VM Object that represent the VM to power off,
        server_id: Int or Str representing the ID of the VM to power off.
    Returns:
        True if the API reports success, otherwise False.
    """
    # BUG FIX: guard against server being None as well — the original
    # dereferenced server.sid before the "no server" check, raising
    # AttributeError instead of the intended exception.
    sid = server_id if server_id is not None else getattr(server, 'sid', None)
    if sid is None:
        raise Exception('No Server Specified.')
    json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
    json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
    # BUG FIX: `is 'True'` compared identity against a string literal and was
    # effectively always False; accept the boolean True or the string 'True'.
    return json_obj['Success'] in (True, 'True')
def poweron_server(self, server=None, server_id=None):
    """
    Poweron a VM. It is possible to pass the VM object or simply the ID
    of the VM that we want to turn on.

    Args:
        server: VM Object that represent the VM to power on,
        server_id: Int or Str representing the ID of the VM to power on.
    Returns:
        True if the API reports success, otherwise False.
    """
    # BUG FIX: guard against server being None as well — the original
    # dereferenced server.sid before the "no server" check, raising
    # AttributeError instead of the intended exception.
    sid = server_id if server_id is not None else getattr(server, 'sid', None)
    if sid is None:
        raise Exception('No Server Specified.')
    json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
    json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
    # BUG FIX: `is 'True'` compared identity against a string literal and was
    # effectively always False; accept the boolean True or the string 'True'.
    return json_obj['Success'] in (True, 'True')
def get_hypervisors(self):
    """
    Initialize the internal list containing each template available for each
    hypervisor.
    :return: [bool] True in case of success, otherwise False
    """
    json_scheme = self.gen_def_json_scheme('GetHypervisors')
    json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
    self.json_templates = json_obj
    d = dict(json_obj)
    for elem in d['Value']:
        # Map the numeric hypervisor type to its short name (LC/SMART/VW/HV).
        hv = self.hypervisors[elem['HypervisorType']]
        for inner_elem in elem['Templates']:
            o = Template(hv)
            o.template_id = inner_elem['Id']
            o.descr = inner_elem['Description']
            o.id_code = inner_elem['IdentificationCode']
            o.name = inner_elem['Name']
            o.enabled = inner_elem['Enabled']
            if hv != 'SMART':
                # Pro templates carry per-resource bounds keyed by type id.
                for rb in inner_elem['ResourceBounds']:
                    resource_type = rb['ResourceType']
                    if resource_type == 1:
                        o.resource_bounds.max_cpu = rb['Max']
                    if resource_type == 2:
                        o.resource_bounds.max_memory = rb['Max']
                    if resource_type == 3:
                        o.resource_bounds.hdd0 = rb['Max']
                    if resource_type == 7:
                        o.resource_bounds.hdd1 = rb['Max']
                    if resource_type == 8:
                        o.resource_bounds.hdd2 = rb['Max']
                    if resource_type == 9:
                        o.resource_bounds.hdd3 = rb['Max']
            self.templates.append(o)
    # NOTE(review): `is 'True'` is an identity test against a string literal
    # and is effectively always False — confirm and compare with == instead.
    return True if json_obj['Success'] is 'True' else False
def get_servers(self):
"""
Create the list of Server object inside the Datacenter objects.
Build an internal list of VM Objects (pro or smart) as iterator.
:return: bool
"""
json_scheme = self.gen_def_json_scheme('GetServers')
json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
self.json_servers = json_obj
# if this method is called I assume that i must re-read the data
# so i reinitialize the vmlist
self.vmlist = VMList()
# getting all instanced IP in case the list is empty
if len(self.iplist) <= 0:
self.get_ip()
for elem in dict(json_obj)["Value"]:
if elem['HypervisorType'] is 4:
s = Smart(interface=self, sid=elem['ServerId'])
else:
s = Pro(interface=self, sid=elem['ServerId'])
s.vm_name = elem['Name']
s.cpu_qty = elem['CPUQuantity']
s.ram_qty = elem['RAMQuantity']
s.status = elem['ServerStatus']
s.datacenter_id = elem['DatacenterId']
s.wcf_baseurl = self.wcf_baseurl
s.auth = self.auth
s.hd_qty = elem['HDQuantity']
s.hd_total_size = elem['HDTotalSize']
if elem['HypervisorType'] is 4:
ssd = self.get_server_detail(elem['ServerId'])
try:
s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
except TypeError:
s.ip_addr = 'Not retrieved.'
else:
s.ip_addr = []
for ip in self.iplist:
if ip.serverid == s.sid:
s.ip_addr.append(ip)
self.vmlist.append(s)
return True if json_obj['Success'] is True else False
def find_template(self, name=None, hv=None):
"""
Return a list of templates that could have one or more elements.
Args:
name: name of the template to find.
hv: the ID of the hypervisor to search the template in
Returns:
A list of templates object. If hv is None will return all the
templates matching the name if every hypervisor type. Otherwise
if name is None will return all templates of an hypervisor.
Raises:
ValidationError: if name and hv are None
"""
if len(self.templates) <= 0:
self.get_hypervisors()
if name is not None and hv is not None:
template_list = filter(
lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
)
elif name is not None and hv is None:
template_list = filter(
lambda x: name in x.descr, self.templates
)
elif name is None and hv is not None:
template_list = filter(
lambda x: x.hypervisor == self.hypervisors[hv], self.templates
)
else:
raise Exception('Error, no pattern defined')
if sys.version_info.major < (3):
return template_list
else:
return(list(template_list))
def get_vm(self, pattern=None):
if len(self.vmlist) <= 0:
self.get_servers()
if pattern is None:
return self.vmlist
else:
return self.vmlist.find(pattern)
def get_ip_by_vm(self, vm):
self.get_ip() # call get ip list to create the internal list of IPs.
vm_id = self.get_vm(vm)[0].sid
for ip in self.iplist:
if ip.serverid == vm_id:
return ip
return 'IPNOTFOUND'
def purchase_ip(self, debug=False):
"""
Return an ip object representing a new bought IP
@param debug [Boolean] if true, request and response will be printed
@return (Ip): Ip object
"""
json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
try:
ip = Ip()
ip.ip_addr = json_obj['Value']['Value']
ip.resid = json_obj['Value']['ResourceId']
return ip
except:
raise Exception('Unknown error retrieving IP.')
def purchase_vlan(self, vlan_name, debug=False):
"""
Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created
"""
vlan_name = {'VLanName': vlan_name}
json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
if debug is True:
self.logger.debug(json_obj)
if json_obj['Success'] is False:
raise Exception("Cannot purchase new vlan.")
vlan = Vlan()
vlan.name = json_obj['Value']['Name']
vlan.resource_id = json_obj['Value']['ResourceId']
vlan.vlan_code = json_obj['Value']['VlanCode']
return vlan
def remove_vlan(self, vlan_resource_id):
"""
Remove a VLAN
:param vlan_resource_id:
:return:
"""
vlan_id = {'VLanResourceId': vlan_resource_id}
json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def get_vlan(self, vlan_name=None):
json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
if vlan_name is not None:
raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
else:
raw_vlans = json_obj['Value']
vlans = []
for raw_vlan in raw_vlans:
v = Vlan()
v.name = raw_vlan['Name']
v.vlan_code = raw_vlan['VlanCode']
v.resource_id = raw_vlan['ResourceId']
vlans.append(v)
return vlans
def remove_ip(self, ip_id):
"""
Delete an Ip from the boughs ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
"""
ip_id = ' "IpAddressResourceId": %s' % ip_id
json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
pprint(json_obj)
return True if json_obj['Success'] is True else False
def get_package_id(self, name):
"""
Retrieve the smart package id given is English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size choosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId
def get_ip(self):
"""
Retrieve a complete list of bought ip address related only to PRO Servers.
It create an internal object (Iplist) representing all of the ips object
iterated form the WS.
@param: None
@return: None
"""
json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
self.iplist = IpList()
for ip in json_obj['Value']:
r = Ip()
r.ip_addr = ip['Value']
r.resid = ip['ResourceId']
r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
self.iplist.append(r)
def delete_vm(self, server=None, server_id=None):
self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
sid = server_id if server_id is not None else server.sid
self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
if sid is None:
raise Exception('NoServerSpecified')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
print('Deletion enqueued successfully for server_id: %s' % sid)
return True if json_obj['Success'] is 'True' else False
def get_jobs(self):
json_scheme = self.gen_def_json_scheme('GetJobs')
return self.call_method_post(method='GetJobs', json_scheme=json_scheme)
def find_job(self, vm_name):
jobs_list = self.get_jobs()
if jobs_list['Value'] is None:
_i = 0
while jobs_list['Value'] is not None:
_i += 1
jobs_list = self.get_jobs()
if _i > 10:
return 'JOBNOTFOUND'
if len(jobs_list['Value']) <= 0:
return 'JOBNOTFOUND'
for job in jobs_list['Value']:
if vm_name in job['ServerName']:
return job
return 'JOBNOTFOUND'
def get_virtual_datacenter(self):
json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
return json_obj
def get_server_detail(self, server_id):
json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
return json_obj['Value']
def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
if gateway is not None:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "true",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": gateway,
"IP": ip,
"SubNetMask": subnet_mask
}]
}
}
else:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": None,
"IP": None,
"SubNetMask": None
}]
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def detach_vlan(self, network_adapter_id, vlan_resource_id):
vlan_request = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def create_snapshot(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Create"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Restore"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def delete_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Delete"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def archive_vm(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
archive_request = {
"ArchiveVirtualServer": {
"ServerId": server_id
}
}
json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
restore_request = {
"Server": {
"ServerId": server_id,
"CPUQuantity": cpu_qty,
"RAMQuantity": ram_qty
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.poweroff_server | python | def poweroff_server(self, server=None, server_id=None):
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False | Poweroff a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj['Success'] is 'True' else False | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L69-L84 | [
"def gen_def_json_scheme(self, req, method_fields=None):\n \"\"\"\n Generate the scheme for the json request.\n :param req: String representing the name of the method to call\n :param method_fields: A dictionary containing the method-specified fields\n :rtype : json object representing the method call\n \"\"\"\n json_dict = dict(\n ApplicationId=req,\n RequestId=req,\n SessionId=req,\n Password=self.auth.password,\n Username=self.auth.username\n )\n if method_fields is not None:\n json_dict.update(method_fields)\n self.logger.debug(json.dumps(json_dict))\n return json.dumps(json_dict)\n",
"def call_method_post(self, method, json_scheme, debug=False):\n url = '{}/{}'.format(self.wcf_baseurl, method)\n headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))}\n response = Http.post(url=url, data=json_scheme, headers=headers)\n parsed_response = json.loads(response.content.decode('utf-8'))\n if response.status_code != 200:\n from ArubaCloud.base.Errors import MalformedJsonRequest\n raise MalformedJsonRequest(\"Request: {}, Status Code: {}\".format(json_scheme, response.status_code))\n if parsed_response['Success'] is False:\n from ArubaCloud.base.Errors import RequestFailed\n raise RequestFailed(\"Request: {}, Response: {}\".format(json_scheme, parsed_response))\n if debug is True:\n msg = \"Response Message: {}\\nHTTP Status Code: {}\".format(parsed_response, response.status_code)\n self.logger.debug(msg)\n print(msg)\n return parsed_response\n"
] | class CloudInterface(JsonInterface):
templates = []
vmlist = VMList()
iplist = IpList()
json_templates = None
json_servers = None
ip_resource = None
hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}
def __init__(self, dc, debug_level=logging.INFO):
super(CloudInterface, self).__init__()
assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
self.logger = ArubaLog(level=debug_level, log_to_file=False)
self.logger.name = self.__class__
self.auth = None
def login(self, username, password, load=True):
"""
Set the authentication data in the object, and if load is True
(default is True) it also retrieve the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: define if pre cache the objects.
@return: None
"""
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers()
def poweron_server(self, server=None, server_id=None):
"""
Poweron a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power on,
server_id: Int or Str representing the ID of the VM to power on.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def get_hypervisors(self):
"""
Initialize the internal list containing each template available for each
hypervisor.
:return: [bool] True in case of success, otherwise False
"""
json_scheme = self.gen_def_json_scheme('GetHypervisors')
json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
self.json_templates = json_obj
d = dict(json_obj)
for elem in d['Value']:
hv = self.hypervisors[elem['HypervisorType']]
for inner_elem in elem['Templates']:
o = Template(hv)
o.template_id = inner_elem['Id']
o.descr = inner_elem['Description']
o.id_code = inner_elem['IdentificationCode']
o.name = inner_elem['Name']
o.enabled = inner_elem['Enabled']
if hv != 'SMART':
for rb in inner_elem['ResourceBounds']:
resource_type = rb['ResourceType']
if resource_type == 1:
o.resource_bounds.max_cpu = rb['Max']
if resource_type == 2:
o.resource_bounds.max_memory = rb['Max']
if resource_type == 3:
o.resource_bounds.hdd0 = rb['Max']
if resource_type == 7:
o.resource_bounds.hdd1 = rb['Max']
if resource_type == 8:
o.resource_bounds.hdd2 = rb['Max']
if resource_type == 9:
o.resource_bounds.hdd3 = rb['Max']
self.templates.append(o)
return True if json_obj['Success'] is 'True' else False
def get_servers(self):
"""
Create the list of Server object inside the Datacenter objects.
Build an internal list of VM Objects (pro or smart) as iterator.
:return: bool
"""
json_scheme = self.gen_def_json_scheme('GetServers')
json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
self.json_servers = json_obj
# if this method is called I assume that i must re-read the data
# so i reinitialize the vmlist
self.vmlist = VMList()
# getting all instanced IP in case the list is empty
if len(self.iplist) <= 0:
self.get_ip()
for elem in dict(json_obj)["Value"]:
if elem['HypervisorType'] is 4:
s = Smart(interface=self, sid=elem['ServerId'])
else:
s = Pro(interface=self, sid=elem['ServerId'])
s.vm_name = elem['Name']
s.cpu_qty = elem['CPUQuantity']
s.ram_qty = elem['RAMQuantity']
s.status = elem['ServerStatus']
s.datacenter_id = elem['DatacenterId']
s.wcf_baseurl = self.wcf_baseurl
s.auth = self.auth
s.hd_qty = elem['HDQuantity']
s.hd_total_size = elem['HDTotalSize']
if elem['HypervisorType'] is 4:
ssd = self.get_server_detail(elem['ServerId'])
try:
s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
except TypeError:
s.ip_addr = 'Not retrieved.'
else:
s.ip_addr = []
for ip in self.iplist:
if ip.serverid == s.sid:
s.ip_addr.append(ip)
self.vmlist.append(s)
return True if json_obj['Success'] is True else False
def find_template(self, name=None, hv=None):
"""
Return a list of templates that could have one or more elements.
Args:
name: name of the template to find.
hv: the ID of the hypervisor to search the template in
Returns:
A list of templates object. If hv is None will return all the
templates matching the name if every hypervisor type. Otherwise
if name is None will return all templates of an hypervisor.
Raises:
ValidationError: if name and hv are None
"""
if len(self.templates) <= 0:
self.get_hypervisors()
if name is not None and hv is not None:
template_list = filter(
lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
)
elif name is not None and hv is None:
template_list = filter(
lambda x: name in x.descr, self.templates
)
elif name is None and hv is not None:
template_list = filter(
lambda x: x.hypervisor == self.hypervisors[hv], self.templates
)
else:
raise Exception('Error, no pattern defined')
if sys.version_info.major < (3):
return template_list
else:
return(list(template_list))
def get_vm(self, pattern=None):
if len(self.vmlist) <= 0:
self.get_servers()
if pattern is None:
return self.vmlist
else:
return self.vmlist.find(pattern)
def get_ip_by_vm(self, vm):
self.get_ip() # call get ip list to create the internal list of IPs.
vm_id = self.get_vm(vm)[0].sid
for ip in self.iplist:
if ip.serverid == vm_id:
return ip
return 'IPNOTFOUND'
def purchase_ip(self, debug=False):
"""
Return an ip object representing a new bought IP
@param debug [Boolean] if true, request and response will be printed
@return (Ip): Ip object
"""
json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
try:
ip = Ip()
ip.ip_addr = json_obj['Value']['Value']
ip.resid = json_obj['Value']['ResourceId']
return ip
except:
raise Exception('Unknown error retrieving IP.')
def purchase_vlan(self, vlan_name, debug=False):
"""
Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created
"""
vlan_name = {'VLanName': vlan_name}
json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
if debug is True:
self.logger.debug(json_obj)
if json_obj['Success'] is False:
raise Exception("Cannot purchase new vlan.")
vlan = Vlan()
vlan.name = json_obj['Value']['Name']
vlan.resource_id = json_obj['Value']['ResourceId']
vlan.vlan_code = json_obj['Value']['VlanCode']
return vlan
def remove_vlan(self, vlan_resource_id):
"""
Remove a VLAN
:param vlan_resource_id:
:return:
"""
vlan_id = {'VLanResourceId': vlan_resource_id}
json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def get_vlan(self, vlan_name=None):
json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
if vlan_name is not None:
raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
else:
raw_vlans = json_obj['Value']
vlans = []
for raw_vlan in raw_vlans:
v = Vlan()
v.name = raw_vlan['Name']
v.vlan_code = raw_vlan['VlanCode']
v.resource_id = raw_vlan['ResourceId']
vlans.append(v)
return vlans
def remove_ip(self, ip_id):
"""
Delete an Ip from the boughs ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
"""
ip_id = ' "IpAddressResourceId": %s' % ip_id
json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
pprint(json_obj)
return True if json_obj['Success'] is True else False
def get_package_id(self, name):
"""
Retrieve the smart package id given is English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size choosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId
def get_ip(self):
"""
Retrieve a complete list of bought ip address related only to PRO Servers.
It create an internal object (Iplist) representing all of the ips object
iterated form the WS.
@param: None
@return: None
"""
json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
self.iplist = IpList()
for ip in json_obj['Value']:
r = Ip()
r.ip_addr = ip['Value']
r.resid = ip['ResourceId']
r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
self.iplist.append(r)
def delete_vm(self, server=None, server_id=None):
self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
sid = server_id if server_id is not None else server.sid
self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
if sid is None:
raise Exception('NoServerSpecified')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
print('Deletion enqueued successfully for server_id: %s' % sid)
return True if json_obj['Success'] is 'True' else False
def get_jobs(self):
json_scheme = self.gen_def_json_scheme('GetJobs')
return self.call_method_post(method='GetJobs', json_scheme=json_scheme)
def find_job(self, vm_name):
jobs_list = self.get_jobs()
if jobs_list['Value'] is None:
_i = 0
while jobs_list['Value'] is not None:
_i += 1
jobs_list = self.get_jobs()
if _i > 10:
return 'JOBNOTFOUND'
if len(jobs_list['Value']) <= 0:
return 'JOBNOTFOUND'
for job in jobs_list['Value']:
if vm_name in job['ServerName']:
return job
return 'JOBNOTFOUND'
def get_virtual_datacenter(self):
json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
return json_obj
def get_server_detail(self, server_id):
json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
return json_obj['Value']
def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
if gateway is not None:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "true",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": gateway,
"IP": ip,
"SubNetMask": subnet_mask
}]
}
}
else:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": None,
"IP": None,
"SubNetMask": None
}]
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def detach_vlan(self, network_adapter_id, vlan_resource_id):
vlan_request = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def create_snapshot(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Create"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Restore"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def delete_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Delete"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def archive_vm(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
archive_request = {
"ArchiveVirtualServer": {
"ServerId": server_id
}
}
json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
restore_request = {
"Server": {
"ServerId": server_id,
"CPUQuantity": cpu_qty,
"RAMQuantity": ram_qty
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.get_hypervisors | python | def get_hypervisors(self):
json_scheme = self.gen_def_json_scheme('GetHypervisors')
json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
self.json_templates = json_obj
d = dict(json_obj)
for elem in d['Value']:
hv = self.hypervisors[elem['HypervisorType']]
for inner_elem in elem['Templates']:
o = Template(hv)
o.template_id = inner_elem['Id']
o.descr = inner_elem['Description']
o.id_code = inner_elem['IdentificationCode']
o.name = inner_elem['Name']
o.enabled = inner_elem['Enabled']
if hv != 'SMART':
for rb in inner_elem['ResourceBounds']:
resource_type = rb['ResourceType']
if resource_type == 1:
o.resource_bounds.max_cpu = rb['Max']
if resource_type == 2:
o.resource_bounds.max_memory = rb['Max']
if resource_type == 3:
o.resource_bounds.hdd0 = rb['Max']
if resource_type == 7:
o.resource_bounds.hdd1 = rb['Max']
if resource_type == 8:
o.resource_bounds.hdd2 = rb['Max']
if resource_type == 9:
o.resource_bounds.hdd3 = rb['Max']
self.templates.append(o)
return True if json_obj['Success'] is 'True' else False | Initialize the internal list containing each template available for each
hypervisor.
:return: [bool] True in case of success, otherwise False | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L103-L139 | [
"def gen_def_json_scheme(self, req, method_fields=None):\n \"\"\"\n Generate the scheme for the json request.\n :param req: String representing the name of the method to call\n :param method_fields: A dictionary containing the method-specified fields\n :rtype : json object representing the method call\n \"\"\"\n json_dict = dict(\n ApplicationId=req,\n RequestId=req,\n SessionId=req,\n Password=self.auth.password,\n Username=self.auth.username\n )\n if method_fields is not None:\n json_dict.update(method_fields)\n self.logger.debug(json.dumps(json_dict))\n return json.dumps(json_dict)\n",
"def call_method_post(self, method, json_scheme, debug=False):\n url = '{}/{}'.format(self.wcf_baseurl, method)\n headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))}\n response = Http.post(url=url, data=json_scheme, headers=headers)\n parsed_response = json.loads(response.content.decode('utf-8'))\n if response.status_code != 200:\n from ArubaCloud.base.Errors import MalformedJsonRequest\n raise MalformedJsonRequest(\"Request: {}, Status Code: {}\".format(json_scheme, response.status_code))\n if parsed_response['Success'] is False:\n from ArubaCloud.base.Errors import RequestFailed\n raise RequestFailed(\"Request: {}, Response: {}\".format(json_scheme, parsed_response))\n if debug is True:\n msg = \"Response Message: {}\\nHTTP Status Code: {}\".format(parsed_response, response.status_code)\n self.logger.debug(msg)\n print(msg)\n return parsed_response\n"
] | class CloudInterface(JsonInterface):
templates = []
vmlist = VMList()
iplist = IpList()
json_templates = None
json_servers = None
ip_resource = None
hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}
def __init__(self, dc, debug_level=logging.INFO):
super(CloudInterface, self).__init__()
assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
self.logger = ArubaLog(level=debug_level, log_to_file=False)
self.logger.name = self.__class__
self.auth = None
def login(self, username, password, load=True):
"""
Set the authentication data in the object, and if load is True
(default is True) it also retrieve the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: define if pre cache the objects.
@return: None
"""
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers()
def poweroff_server(self, server=None, server_id=None):
"""
Poweroff a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def poweron_server(self, server=None, server_id=None):
"""
Poweron a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power on,
server_id: Int or Str representing the ID of the VM to power on.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def get_servers(self):
"""
Create the list of Server object inside the Datacenter objects.
Build an internal list of VM Objects (pro or smart) as iterator.
:return: bool
"""
json_scheme = self.gen_def_json_scheme('GetServers')
json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
self.json_servers = json_obj
# if this method is called I assume that i must re-read the data
# so i reinitialize the vmlist
self.vmlist = VMList()
# getting all instanced IP in case the list is empty
if len(self.iplist) <= 0:
self.get_ip()
for elem in dict(json_obj)["Value"]:
if elem['HypervisorType'] is 4:
s = Smart(interface=self, sid=elem['ServerId'])
else:
s = Pro(interface=self, sid=elem['ServerId'])
s.vm_name = elem['Name']
s.cpu_qty = elem['CPUQuantity']
s.ram_qty = elem['RAMQuantity']
s.status = elem['ServerStatus']
s.datacenter_id = elem['DatacenterId']
s.wcf_baseurl = self.wcf_baseurl
s.auth = self.auth
s.hd_qty = elem['HDQuantity']
s.hd_total_size = elem['HDTotalSize']
if elem['HypervisorType'] is 4:
ssd = self.get_server_detail(elem['ServerId'])
try:
s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
except TypeError:
s.ip_addr = 'Not retrieved.'
else:
s.ip_addr = []
for ip in self.iplist:
if ip.serverid == s.sid:
s.ip_addr.append(ip)
self.vmlist.append(s)
return True if json_obj['Success'] is True else False
def find_template(self, name=None, hv=None):
"""
Return a list of templates that could have one or more elements.
Args:
name: name of the template to find.
hv: the ID of the hypervisor to search the template in
Returns:
A list of templates object. If hv is None will return all the
templates matching the name if every hypervisor type. Otherwise
if name is None will return all templates of an hypervisor.
Raises:
ValidationError: if name and hv are None
"""
if len(self.templates) <= 0:
self.get_hypervisors()
if name is not None and hv is not None:
template_list = filter(
lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
)
elif name is not None and hv is None:
template_list = filter(
lambda x: name in x.descr, self.templates
)
elif name is None and hv is not None:
template_list = filter(
lambda x: x.hypervisor == self.hypervisors[hv], self.templates
)
else:
raise Exception('Error, no pattern defined')
if sys.version_info.major < (3):
return template_list
else:
return(list(template_list))
def get_vm(self, pattern=None):
if len(self.vmlist) <= 0:
self.get_servers()
if pattern is None:
return self.vmlist
else:
return self.vmlist.find(pattern)
def get_ip_by_vm(self, vm):
self.get_ip() # call get ip list to create the internal list of IPs.
vm_id = self.get_vm(vm)[0].sid
for ip in self.iplist:
if ip.serverid == vm_id:
return ip
return 'IPNOTFOUND'
def purchase_ip(self, debug=False):
"""
Return an ip object representing a new bought IP
@param debug [Boolean] if true, request and response will be printed
@return (Ip): Ip object
"""
json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
try:
ip = Ip()
ip.ip_addr = json_obj['Value']['Value']
ip.resid = json_obj['Value']['ResourceId']
return ip
except:
raise Exception('Unknown error retrieving IP.')
def purchase_vlan(self, vlan_name, debug=False):
"""
Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created
"""
vlan_name = {'VLanName': vlan_name}
json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
if debug is True:
self.logger.debug(json_obj)
if json_obj['Success'] is False:
raise Exception("Cannot purchase new vlan.")
vlan = Vlan()
vlan.name = json_obj['Value']['Name']
vlan.resource_id = json_obj['Value']['ResourceId']
vlan.vlan_code = json_obj['Value']['VlanCode']
return vlan
def remove_vlan(self, vlan_resource_id):
"""
Remove a VLAN
:param vlan_resource_id:
:return:
"""
vlan_id = {'VLanResourceId': vlan_resource_id}
json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def get_vlan(self, vlan_name=None):
json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
if vlan_name is not None:
raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
else:
raw_vlans = json_obj['Value']
vlans = []
for raw_vlan in raw_vlans:
v = Vlan()
v.name = raw_vlan['Name']
v.vlan_code = raw_vlan['VlanCode']
v.resource_id = raw_vlan['ResourceId']
vlans.append(v)
return vlans
def remove_ip(self, ip_id):
"""
Delete an Ip from the boughs ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
"""
ip_id = ' "IpAddressResourceId": %s' % ip_id
json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
pprint(json_obj)
return True if json_obj['Success'] is True else False
def get_package_id(self, name):
"""
Retrieve the smart package id given is English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size choosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId
def get_ip(self):
"""
Retrieve a complete list of bought ip address related only to PRO Servers.
It create an internal object (Iplist) representing all of the ips object
iterated form the WS.
@param: None
@return: None
"""
json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
self.iplist = IpList()
for ip in json_obj['Value']:
r = Ip()
r.ip_addr = ip['Value']
r.resid = ip['ResourceId']
r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
self.iplist.append(r)
def delete_vm(self, server=None, server_id=None):
self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
sid = server_id if server_id is not None else server.sid
self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
if sid is None:
raise Exception('NoServerSpecified')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
print('Deletion enqueued successfully for server_id: %s' % sid)
return True if json_obj['Success'] is 'True' else False
def get_jobs(self):
json_scheme = self.gen_def_json_scheme('GetJobs')
return self.call_method_post(method='GetJobs', json_scheme=json_scheme)
def find_job(self, vm_name):
jobs_list = self.get_jobs()
if jobs_list['Value'] is None:
_i = 0
while jobs_list['Value'] is not None:
_i += 1
jobs_list = self.get_jobs()
if _i > 10:
return 'JOBNOTFOUND'
if len(jobs_list['Value']) <= 0:
return 'JOBNOTFOUND'
for job in jobs_list['Value']:
if vm_name in job['ServerName']:
return job
return 'JOBNOTFOUND'
def get_virtual_datacenter(self):
json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
return json_obj
def get_server_detail(self, server_id):
json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
return json_obj['Value']
def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
if gateway is not None:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "true",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": gateway,
"IP": ip,
"SubNetMask": subnet_mask
}]
}
}
else:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": None,
"IP": None,
"SubNetMask": None
}]
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def detach_vlan(self, network_adapter_id, vlan_resource_id):
vlan_request = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def create_snapshot(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Create"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Restore"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def delete_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Delete"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def archive_vm(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
archive_request = {
"ArchiveVirtualServer": {
"ServerId": server_id
}
}
json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
restore_request = {
"Server": {
"ServerId": server_id,
"CPUQuantity": cpu_qty,
"RAMQuantity": ram_qty
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.get_servers | python | def get_servers(self):
json_scheme = self.gen_def_json_scheme('GetServers')
json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
self.json_servers = json_obj
# if this method is called I assume that i must re-read the data
# so i reinitialize the vmlist
self.vmlist = VMList()
# getting all instanced IP in case the list is empty
if len(self.iplist) <= 0:
self.get_ip()
for elem in dict(json_obj)["Value"]:
if elem['HypervisorType'] is 4:
s = Smart(interface=self, sid=elem['ServerId'])
else:
s = Pro(interface=self, sid=elem['ServerId'])
s.vm_name = elem['Name']
s.cpu_qty = elem['CPUQuantity']
s.ram_qty = elem['RAMQuantity']
s.status = elem['ServerStatus']
s.datacenter_id = elem['DatacenterId']
s.wcf_baseurl = self.wcf_baseurl
s.auth = self.auth
s.hd_qty = elem['HDQuantity']
s.hd_total_size = elem['HDTotalSize']
if elem['HypervisorType'] is 4:
ssd = self.get_server_detail(elem['ServerId'])
try:
s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
except TypeError:
s.ip_addr = 'Not retrieved.'
else:
s.ip_addr = []
for ip in self.iplist:
if ip.serverid == s.sid:
s.ip_addr.append(ip)
self.vmlist.append(s)
return True if json_obj['Success'] is True else False | Create the list of Server object inside the Datacenter objects.
Build an internal list of VM Objects (pro or smart) as iterator.
:return: bool | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L141-L182 | [
"def get_ip(self):\n \"\"\"\n Retrieve a complete list of bought ip address related only to PRO Servers.\n It create an internal object (Iplist) representing all of the ips object\n iterated form the WS.\n @param: None\n @return: None\n \"\"\"\n json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')\n json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)\n self.iplist = IpList()\n for ip in json_obj['Value']:\n r = Ip()\n r.ip_addr = ip['Value']\n r.resid = ip['ResourceId']\n r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None\n self.iplist.append(r)\n",
"def get_server_detail(self, server_id):\n json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))\n json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)\n return json_obj['Value']\n",
"def gen_def_json_scheme(self, req, method_fields=None):\n \"\"\"\n Generate the scheme for the json request.\n :param req: String representing the name of the method to call\n :param method_fields: A dictionary containing the method-specified fields\n :rtype : json object representing the method call\n \"\"\"\n json_dict = dict(\n ApplicationId=req,\n RequestId=req,\n SessionId=req,\n Password=self.auth.password,\n Username=self.auth.username\n )\n if method_fields is not None:\n json_dict.update(method_fields)\n self.logger.debug(json.dumps(json_dict))\n return json.dumps(json_dict)\n",
"def call_method_post(self, method, json_scheme, debug=False):\n url = '{}/{}'.format(self.wcf_baseurl, method)\n headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))}\n response = Http.post(url=url, data=json_scheme, headers=headers)\n parsed_response = json.loads(response.content.decode('utf-8'))\n if response.status_code != 200:\n from ArubaCloud.base.Errors import MalformedJsonRequest\n raise MalformedJsonRequest(\"Request: {}, Status Code: {}\".format(json_scheme, response.status_code))\n if parsed_response['Success'] is False:\n from ArubaCloud.base.Errors import RequestFailed\n raise RequestFailed(\"Request: {}, Response: {}\".format(json_scheme, parsed_response))\n if debug is True:\n msg = \"Response Message: {}\\nHTTP Status Code: {}\".format(parsed_response, response.status_code)\n self.logger.debug(msg)\n print(msg)\n return parsed_response\n"
] | class CloudInterface(JsonInterface):
templates = []
vmlist = VMList()
iplist = IpList()
json_templates = None
json_servers = None
ip_resource = None
hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}
def __init__(self, dc, debug_level=logging.INFO):
super(CloudInterface, self).__init__()
assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
self.logger = ArubaLog(level=debug_level, log_to_file=False)
self.logger.name = self.__class__
self.auth = None
def login(self, username, password, load=True):
"""
Set the authentication data in the object, and if load is True
(default is True) it also retrieve the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: define if pre cache the objects.
@return: None
"""
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers()
def poweroff_server(self, server=None, server_id=None):
"""
Poweroff a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def poweron_server(self, server=None, server_id=None):
"""
Poweron a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power on,
server_id: Int or Str representing the ID of the VM to power on.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def get_hypervisors(self):
"""
Initialize the internal list containing each template available for each
hypervisor.
:return: [bool] True in case of success, otherwise False
"""
json_scheme = self.gen_def_json_scheme('GetHypervisors')
json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
self.json_templates = json_obj
d = dict(json_obj)
for elem in d['Value']:
hv = self.hypervisors[elem['HypervisorType']]
for inner_elem in elem['Templates']:
o = Template(hv)
o.template_id = inner_elem['Id']
o.descr = inner_elem['Description']
o.id_code = inner_elem['IdentificationCode']
o.name = inner_elem['Name']
o.enabled = inner_elem['Enabled']
if hv != 'SMART':
for rb in inner_elem['ResourceBounds']:
resource_type = rb['ResourceType']
if resource_type == 1:
o.resource_bounds.max_cpu = rb['Max']
if resource_type == 2:
o.resource_bounds.max_memory = rb['Max']
if resource_type == 3:
o.resource_bounds.hdd0 = rb['Max']
if resource_type == 7:
o.resource_bounds.hdd1 = rb['Max']
if resource_type == 8:
o.resource_bounds.hdd2 = rb['Max']
if resource_type == 9:
o.resource_bounds.hdd3 = rb['Max']
self.templates.append(o)
return True if json_obj['Success'] is 'True' else False
def find_template(self, name=None, hv=None):
"""
Return a list of templates that could have one or more elements.
Args:
name: name of the template to find.
hv: the ID of the hypervisor to search the template in
Returns:
A list of templates object. If hv is None will return all the
templates matching the name if every hypervisor type. Otherwise
if name is None will return all templates of an hypervisor.
Raises:
ValidationError: if name and hv are None
"""
if len(self.templates) <= 0:
self.get_hypervisors()
if name is not None and hv is not None:
template_list = filter(
lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
)
elif name is not None and hv is None:
template_list = filter(
lambda x: name in x.descr, self.templates
)
elif name is None and hv is not None:
template_list = filter(
lambda x: x.hypervisor == self.hypervisors[hv], self.templates
)
else:
raise Exception('Error, no pattern defined')
if sys.version_info.major < (3):
return template_list
else:
return(list(template_list))
def get_vm(self, pattern=None):
if len(self.vmlist) <= 0:
self.get_servers()
if pattern is None:
return self.vmlist
else:
return self.vmlist.find(pattern)
def get_ip_by_vm(self, vm):
self.get_ip() # call get ip list to create the internal list of IPs.
vm_id = self.get_vm(vm)[0].sid
for ip in self.iplist:
if ip.serverid == vm_id:
return ip
return 'IPNOTFOUND'
def purchase_ip(self, debug=False):
"""
Return an ip object representing a new bought IP
@param debug [Boolean] if true, request and response will be printed
@return (Ip): Ip object
"""
json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
try:
ip = Ip()
ip.ip_addr = json_obj['Value']['Value']
ip.resid = json_obj['Value']['ResourceId']
return ip
except:
raise Exception('Unknown error retrieving IP.')
def purchase_vlan(self, vlan_name, debug=False):
"""
Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created
"""
vlan_name = {'VLanName': vlan_name}
json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
if debug is True:
self.logger.debug(json_obj)
if json_obj['Success'] is False:
raise Exception("Cannot purchase new vlan.")
vlan = Vlan()
vlan.name = json_obj['Value']['Name']
vlan.resource_id = json_obj['Value']['ResourceId']
vlan.vlan_code = json_obj['Value']['VlanCode']
return vlan
def remove_vlan(self, vlan_resource_id):
"""
Remove a VLAN
:param vlan_resource_id:
:return:
"""
vlan_id = {'VLanResourceId': vlan_resource_id}
json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def get_vlan(self, vlan_name=None):
json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
if vlan_name is not None:
raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
else:
raw_vlans = json_obj['Value']
vlans = []
for raw_vlan in raw_vlans:
v = Vlan()
v.name = raw_vlan['Name']
v.vlan_code = raw_vlan['VlanCode']
v.resource_id = raw_vlan['ResourceId']
vlans.append(v)
return vlans
def remove_ip(self, ip_id):
"""
Delete an Ip from the boughs ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
"""
ip_id = ' "IpAddressResourceId": %s' % ip_id
json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
pprint(json_obj)
return True if json_obj['Success'] is True else False
def get_package_id(self, name):
"""
Retrieve the smart package id given is English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size choosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId
def get_ip(self):
"""
Retrieve a complete list of bought ip address related only to PRO Servers.
It create an internal object (Iplist) representing all of the ips object
iterated form the WS.
@param: None
@return: None
"""
json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
self.iplist = IpList()
for ip in json_obj['Value']:
r = Ip()
r.ip_addr = ip['Value']
r.resid = ip['ResourceId']
r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
self.iplist.append(r)
def delete_vm(self, server=None, server_id=None):
self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
sid = server_id if server_id is not None else server.sid
self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
if sid is None:
raise Exception('NoServerSpecified')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
print('Deletion enqueued successfully for server_id: %s' % sid)
return True if json_obj['Success'] is 'True' else False
def get_jobs(self):
json_scheme = self.gen_def_json_scheme('GetJobs')
return self.call_method_post(method='GetJobs', json_scheme=json_scheme)
def find_job(self, vm_name):
jobs_list = self.get_jobs()
if jobs_list['Value'] is None:
_i = 0
while jobs_list['Value'] is not None:
_i += 1
jobs_list = self.get_jobs()
if _i > 10:
return 'JOBNOTFOUND'
if len(jobs_list['Value']) <= 0:
return 'JOBNOTFOUND'
for job in jobs_list['Value']:
if vm_name in job['ServerName']:
return job
return 'JOBNOTFOUND'
def get_virtual_datacenter(self):
json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
return json_obj
def get_server_detail(self, server_id):
json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
return json_obj['Value']
def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
if gateway is not None:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "true",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": gateway,
"IP": ip,
"SubNetMask": subnet_mask
}]
}
}
else:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": None,
"IP": None,
"SubNetMask": None
}]
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def detach_vlan(self, network_adapter_id, vlan_resource_id):
vlan_request = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def create_snapshot(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Create"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Restore"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def delete_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Delete"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def archive_vm(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
archive_request = {
"ArchiveVirtualServer": {
"ServerId": server_id
}
}
json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
restore_request = {
"Server": {
"ServerId": server_id,
"CPUQuantity": cpu_qty,
"RAMQuantity": ram_qty
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.find_template | python | def find_template(self, name=None, hv=None):
if len(self.templates) <= 0:
self.get_hypervisors()
if name is not None and hv is not None:
template_list = filter(
lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
)
elif name is not None and hv is None:
template_list = filter(
lambda x: name in x.descr, self.templates
)
elif name is None and hv is not None:
template_list = filter(
lambda x: x.hypervisor == self.hypervisors[hv], self.templates
)
else:
raise Exception('Error, no pattern defined')
if sys.version_info.major < (3):
return template_list
else:
return(list(template_list)) | Return a list of templates that could have one or more elements.
Args:
name: name of the template to find.
hv: the ID of the hypervisor to search the template in
Returns:
A list of templates object. If hv is None will return all the
templates matching the name if every hypervisor type. Otherwise
if name is None will return all templates of an hypervisor.
Raises:
ValidationError: if name and hv are None | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L184-L216 | [
"def get_hypervisors(self):\n \"\"\"\n Initialize the internal list containing each template available for each\n hypervisor.\n\n :return: [bool] True in case of success, otherwise False\n \"\"\"\n json_scheme = self.gen_def_json_scheme('GetHypervisors')\n json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)\n self.json_templates = json_obj\n d = dict(json_obj)\n for elem in d['Value']:\n hv = self.hypervisors[elem['HypervisorType']]\n for inner_elem in elem['Templates']:\n o = Template(hv)\n o.template_id = inner_elem['Id']\n o.descr = inner_elem['Description']\n o.id_code = inner_elem['IdentificationCode']\n o.name = inner_elem['Name']\n o.enabled = inner_elem['Enabled']\n if hv != 'SMART':\n for rb in inner_elem['ResourceBounds']:\n resource_type = rb['ResourceType']\n if resource_type == 1:\n o.resource_bounds.max_cpu = rb['Max']\n if resource_type == 2:\n o.resource_bounds.max_memory = rb['Max']\n if resource_type == 3:\n o.resource_bounds.hdd0 = rb['Max']\n if resource_type == 7:\n o.resource_bounds.hdd1 = rb['Max']\n if resource_type == 8:\n o.resource_bounds.hdd2 = rb['Max']\n if resource_type == 9:\n o.resource_bounds.hdd3 = rb['Max']\n self.templates.append(o)\n return True if json_obj['Success'] is 'True' else False\n"
] | class CloudInterface(JsonInterface):
templates = []
vmlist = VMList()
iplist = IpList()
json_templates = None
json_servers = None
ip_resource = None
hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}
def __init__(self, dc, debug_level=logging.INFO):
super(CloudInterface, self).__init__()
assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
self.logger = ArubaLog(level=debug_level, log_to_file=False)
self.logger.name = self.__class__
self.auth = None
def login(self, username, password, load=True):
"""
Set the authentication data in the object, and if load is True
(default is True) it also retrieve the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: define if pre cache the objects.
@return: None
"""
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers()
def poweroff_server(self, server=None, server_id=None):
"""
Poweroff a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def poweron_server(self, server=None, server_id=None):
"""
Poweron a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power on,
server_id: Int or Str representing the ID of the VM to power on.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def get_hypervisors(self):
"""
Initialize the internal list containing each template available for each
hypervisor.
:return: [bool] True in case of success, otherwise False
"""
json_scheme = self.gen_def_json_scheme('GetHypervisors')
json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
self.json_templates = json_obj
d = dict(json_obj)
for elem in d['Value']:
hv = self.hypervisors[elem['HypervisorType']]
for inner_elem in elem['Templates']:
o = Template(hv)
o.template_id = inner_elem['Id']
o.descr = inner_elem['Description']
o.id_code = inner_elem['IdentificationCode']
o.name = inner_elem['Name']
o.enabled = inner_elem['Enabled']
if hv != 'SMART':
for rb in inner_elem['ResourceBounds']:
resource_type = rb['ResourceType']
if resource_type == 1:
o.resource_bounds.max_cpu = rb['Max']
if resource_type == 2:
o.resource_bounds.max_memory = rb['Max']
if resource_type == 3:
o.resource_bounds.hdd0 = rb['Max']
if resource_type == 7:
o.resource_bounds.hdd1 = rb['Max']
if resource_type == 8:
o.resource_bounds.hdd2 = rb['Max']
if resource_type == 9:
o.resource_bounds.hdd3 = rb['Max']
self.templates.append(o)
return True if json_obj['Success'] is 'True' else False
def get_servers(self):
"""
Create the list of Server object inside the Datacenter objects.
Build an internal list of VM Objects (pro or smart) as iterator.
:return: bool
"""
json_scheme = self.gen_def_json_scheme('GetServers')
json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
self.json_servers = json_obj
# if this method is called I assume that i must re-read the data
# so i reinitialize the vmlist
self.vmlist = VMList()
# getting all instanced IP in case the list is empty
if len(self.iplist) <= 0:
self.get_ip()
for elem in dict(json_obj)["Value"]:
if elem['HypervisorType'] is 4:
s = Smart(interface=self, sid=elem['ServerId'])
else:
s = Pro(interface=self, sid=elem['ServerId'])
s.vm_name = elem['Name']
s.cpu_qty = elem['CPUQuantity']
s.ram_qty = elem['RAMQuantity']
s.status = elem['ServerStatus']
s.datacenter_id = elem['DatacenterId']
s.wcf_baseurl = self.wcf_baseurl
s.auth = self.auth
s.hd_qty = elem['HDQuantity']
s.hd_total_size = elem['HDTotalSize']
if elem['HypervisorType'] is 4:
ssd = self.get_server_detail(elem['ServerId'])
try:
s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
except TypeError:
s.ip_addr = 'Not retrieved.'
else:
s.ip_addr = []
for ip in self.iplist:
if ip.serverid == s.sid:
s.ip_addr.append(ip)
self.vmlist.append(s)
return True if json_obj['Success'] is True else False
def get_vm(self, pattern=None):
if len(self.vmlist) <= 0:
self.get_servers()
if pattern is None:
return self.vmlist
else:
return self.vmlist.find(pattern)
def get_ip_by_vm(self, vm):
self.get_ip() # call get ip list to create the internal list of IPs.
vm_id = self.get_vm(vm)[0].sid
for ip in self.iplist:
if ip.serverid == vm_id:
return ip
return 'IPNOTFOUND'
def purchase_ip(self, debug=False):
"""
Return an ip object representing a new bought IP
@param debug [Boolean] if true, request and response will be printed
@return (Ip): Ip object
"""
json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
try:
ip = Ip()
ip.ip_addr = json_obj['Value']['Value']
ip.resid = json_obj['Value']['ResourceId']
return ip
except:
raise Exception('Unknown error retrieving IP.')
def purchase_vlan(self, vlan_name, debug=False):
"""
Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created
"""
vlan_name = {'VLanName': vlan_name}
json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
if debug is True:
self.logger.debug(json_obj)
if json_obj['Success'] is False:
raise Exception("Cannot purchase new vlan.")
vlan = Vlan()
vlan.name = json_obj['Value']['Name']
vlan.resource_id = json_obj['Value']['ResourceId']
vlan.vlan_code = json_obj['Value']['VlanCode']
return vlan
def remove_vlan(self, vlan_resource_id):
"""
Remove a VLAN
:param vlan_resource_id:
:return:
"""
vlan_id = {'VLanResourceId': vlan_resource_id}
json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def get_vlan(self, vlan_name=None):
json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
if vlan_name is not None:
raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
else:
raw_vlans = json_obj['Value']
vlans = []
for raw_vlan in raw_vlans:
v = Vlan()
v.name = raw_vlan['Name']
v.vlan_code = raw_vlan['VlanCode']
v.resource_id = raw_vlan['ResourceId']
vlans.append(v)
return vlans
def remove_ip(self, ip_id):
"""
Delete an Ip from the boughs ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
"""
ip_id = ' "IpAddressResourceId": %s' % ip_id
json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
pprint(json_obj)
return True if json_obj['Success'] is True else False
def get_package_id(self, name):
"""
Retrieve the smart package id given is English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size choosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId
def get_ip(self):
"""
Retrieve a complete list of bought ip address related only to PRO Servers.
It create an internal object (Iplist) representing all of the ips object
iterated form the WS.
@param: None
@return: None
"""
json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
self.iplist = IpList()
for ip in json_obj['Value']:
r = Ip()
r.ip_addr = ip['Value']
r.resid = ip['ResourceId']
r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
self.iplist.append(r)
def delete_vm(self, server=None, server_id=None):
self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
sid = server_id if server_id is not None else server.sid
self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
if sid is None:
raise Exception('NoServerSpecified')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
print('Deletion enqueued successfully for server_id: %s' % sid)
return True if json_obj['Success'] is 'True' else False
def get_jobs(self):
json_scheme = self.gen_def_json_scheme('GetJobs')
return self.call_method_post(method='GetJobs', json_scheme=json_scheme)
def find_job(self, vm_name):
jobs_list = self.get_jobs()
if jobs_list['Value'] is None:
_i = 0
while jobs_list['Value'] is not None:
_i += 1
jobs_list = self.get_jobs()
if _i > 10:
return 'JOBNOTFOUND'
if len(jobs_list['Value']) <= 0:
return 'JOBNOTFOUND'
for job in jobs_list['Value']:
if vm_name in job['ServerName']:
return job
return 'JOBNOTFOUND'
def get_virtual_datacenter(self):
json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
return json_obj
def get_server_detail(self, server_id):
json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
return json_obj['Value']
def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
if gateway is not None:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "true",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": gateway,
"IP": ip,
"SubNetMask": subnet_mask
}]
}
}
else:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": None,
"IP": None,
"SubNetMask": None
}]
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def detach_vlan(self, network_adapter_id, vlan_resource_id):
vlan_request = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def create_snapshot(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Create"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Restore"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def delete_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Delete"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def archive_vm(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
archive_request = {
"ArchiveVirtualServer": {
"ServerId": server_id
}
}
json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
restore_request = {
"Server": {
"ServerId": server_id,
"CPUQuantity": cpu_qty,
"RAMQuantity": ram_qty
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.purchase_ip | python | def purchase_ip(self, debug=False):
json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
try:
ip = Ip()
ip.ip_addr = json_obj['Value']['Value']
ip.resid = json_obj['Value']['ResourceId']
return ip
except:
raise Exception('Unknown error retrieving IP.') | Return an ip object representing a new bought IP
@param debug [Boolean] if true, request and response will be printed
@return (Ip): Ip object | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L234-L248 | [
"def gen_def_json_scheme(self, req, method_fields=None):\n \"\"\"\n Generate the scheme for the json request.\n :param req: String representing the name of the method to call\n :param method_fields: A dictionary containing the method-specified fields\n :rtype : json object representing the method call\n \"\"\"\n json_dict = dict(\n ApplicationId=req,\n RequestId=req,\n SessionId=req,\n Password=self.auth.password,\n Username=self.auth.username\n )\n if method_fields is not None:\n json_dict.update(method_fields)\n self.logger.debug(json.dumps(json_dict))\n return json.dumps(json_dict)\n",
"def call_method_post(self, method, json_scheme, debug=False):\n url = '{}/{}'.format(self.wcf_baseurl, method)\n headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))}\n response = Http.post(url=url, data=json_scheme, headers=headers)\n parsed_response = json.loads(response.content.decode('utf-8'))\n if response.status_code != 200:\n from ArubaCloud.base.Errors import MalformedJsonRequest\n raise MalformedJsonRequest(\"Request: {}, Status Code: {}\".format(json_scheme, response.status_code))\n if parsed_response['Success'] is False:\n from ArubaCloud.base.Errors import RequestFailed\n raise RequestFailed(\"Request: {}, Response: {}\".format(json_scheme, parsed_response))\n if debug is True:\n msg = \"Response Message: {}\\nHTTP Status Code: {}\".format(parsed_response, response.status_code)\n self.logger.debug(msg)\n print(msg)\n return parsed_response\n"
] | class CloudInterface(JsonInterface):
templates = []
vmlist = VMList()
iplist = IpList()
json_templates = None
json_servers = None
ip_resource = None
hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}
def __init__(self, dc, debug_level=logging.INFO):
super(CloudInterface, self).__init__()
assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
self.logger = ArubaLog(level=debug_level, log_to_file=False)
self.logger.name = self.__class__
self.auth = None
def login(self, username, password, load=True):
"""
Set the authentication data in the object, and if load is True
(default is True) it also retrieve the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: define if pre cache the objects.
@return: None
"""
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers()
def poweroff_server(self, server=None, server_id=None):
"""
Poweroff a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def poweron_server(self, server=None, server_id=None):
"""
Poweron a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power on,
server_id: Int or Str representing the ID of the VM to power on.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def get_hypervisors(self):
"""
Initialize the internal list containing each template available for each
hypervisor.
:return: [bool] True in case of success, otherwise False
"""
json_scheme = self.gen_def_json_scheme('GetHypervisors')
json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
self.json_templates = json_obj
d = dict(json_obj)
for elem in d['Value']:
hv = self.hypervisors[elem['HypervisorType']]
for inner_elem in elem['Templates']:
o = Template(hv)
o.template_id = inner_elem['Id']
o.descr = inner_elem['Description']
o.id_code = inner_elem['IdentificationCode']
o.name = inner_elem['Name']
o.enabled = inner_elem['Enabled']
if hv != 'SMART':
for rb in inner_elem['ResourceBounds']:
resource_type = rb['ResourceType']
if resource_type == 1:
o.resource_bounds.max_cpu = rb['Max']
if resource_type == 2:
o.resource_bounds.max_memory = rb['Max']
if resource_type == 3:
o.resource_bounds.hdd0 = rb['Max']
if resource_type == 7:
o.resource_bounds.hdd1 = rb['Max']
if resource_type == 8:
o.resource_bounds.hdd2 = rb['Max']
if resource_type == 9:
o.resource_bounds.hdd3 = rb['Max']
self.templates.append(o)
return True if json_obj['Success'] is 'True' else False
def get_servers(self):
"""
Create the list of Server object inside the Datacenter objects.
Build an internal list of VM Objects (pro or smart) as iterator.
:return: bool
"""
json_scheme = self.gen_def_json_scheme('GetServers')
json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
self.json_servers = json_obj
# if this method is called I assume that i must re-read the data
# so i reinitialize the vmlist
self.vmlist = VMList()
# getting all instanced IP in case the list is empty
if len(self.iplist) <= 0:
self.get_ip()
for elem in dict(json_obj)["Value"]:
if elem['HypervisorType'] is 4:
s = Smart(interface=self, sid=elem['ServerId'])
else:
s = Pro(interface=self, sid=elem['ServerId'])
s.vm_name = elem['Name']
s.cpu_qty = elem['CPUQuantity']
s.ram_qty = elem['RAMQuantity']
s.status = elem['ServerStatus']
s.datacenter_id = elem['DatacenterId']
s.wcf_baseurl = self.wcf_baseurl
s.auth = self.auth
s.hd_qty = elem['HDQuantity']
s.hd_total_size = elem['HDTotalSize']
if elem['HypervisorType'] is 4:
ssd = self.get_server_detail(elem['ServerId'])
try:
s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
except TypeError:
s.ip_addr = 'Not retrieved.'
else:
s.ip_addr = []
for ip in self.iplist:
if ip.serverid == s.sid:
s.ip_addr.append(ip)
self.vmlist.append(s)
return True if json_obj['Success'] is True else False
def find_template(self, name=None, hv=None):
"""
Return a list of templates that could have one or more elements.
Args:
name: name of the template to find.
hv: the ID of the hypervisor to search the template in
Returns:
A list of templates object. If hv is None will return all the
templates matching the name if every hypervisor type. Otherwise
if name is None will return all templates of an hypervisor.
Raises:
ValidationError: if name and hv are None
"""
if len(self.templates) <= 0:
self.get_hypervisors()
if name is not None and hv is not None:
template_list = filter(
lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
)
elif name is not None and hv is None:
template_list = filter(
lambda x: name in x.descr, self.templates
)
elif name is None and hv is not None:
template_list = filter(
lambda x: x.hypervisor == self.hypervisors[hv], self.templates
)
else:
raise Exception('Error, no pattern defined')
if sys.version_info.major < (3):
return template_list
else:
return(list(template_list))
def get_vm(self, pattern=None):
if len(self.vmlist) <= 0:
self.get_servers()
if pattern is None:
return self.vmlist
else:
return self.vmlist.find(pattern)
def get_ip_by_vm(self, vm):
self.get_ip() # call get ip list to create the internal list of IPs.
vm_id = self.get_vm(vm)[0].sid
for ip in self.iplist:
if ip.serverid == vm_id:
return ip
return 'IPNOTFOUND'
def purchase_vlan(self, vlan_name, debug=False):
"""
Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created
"""
vlan_name = {'VLanName': vlan_name}
json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
if debug is True:
self.logger.debug(json_obj)
if json_obj['Success'] is False:
raise Exception("Cannot purchase new vlan.")
vlan = Vlan()
vlan.name = json_obj['Value']['Name']
vlan.resource_id = json_obj['Value']['ResourceId']
vlan.vlan_code = json_obj['Value']['VlanCode']
return vlan
def remove_vlan(self, vlan_resource_id):
"""
Remove a VLAN
:param vlan_resource_id:
:return:
"""
vlan_id = {'VLanResourceId': vlan_resource_id}
json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def get_vlan(self, vlan_name=None):
json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
if vlan_name is not None:
raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
else:
raw_vlans = json_obj['Value']
vlans = []
for raw_vlan in raw_vlans:
v = Vlan()
v.name = raw_vlan['Name']
v.vlan_code = raw_vlan['VlanCode']
v.resource_id = raw_vlan['ResourceId']
vlans.append(v)
return vlans
def remove_ip(self, ip_id):
"""
Delete an Ip from the boughs ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
"""
ip_id = ' "IpAddressResourceId": %s' % ip_id
json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
pprint(json_obj)
return True if json_obj['Success'] is True else False
def get_package_id(self, name):
"""
Retrieve the smart package id given is English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size choosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId
def get_ip(self):
"""
Retrieve a complete list of bought ip address related only to PRO Servers.
It create an internal object (Iplist) representing all of the ips object
iterated form the WS.
@param: None
@return: None
"""
json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
self.iplist = IpList()
for ip in json_obj['Value']:
r = Ip()
r.ip_addr = ip['Value']
r.resid = ip['ResourceId']
r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
self.iplist.append(r)
def delete_vm(self, server=None, server_id=None):
self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
sid = server_id if server_id is not None else server.sid
self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
if sid is None:
raise Exception('NoServerSpecified')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
print('Deletion enqueued successfully for server_id: %s' % sid)
return True if json_obj['Success'] is 'True' else False
def get_jobs(self):
json_scheme = self.gen_def_json_scheme('GetJobs')
return self.call_method_post(method='GetJobs', json_scheme=json_scheme)
def find_job(self, vm_name):
jobs_list = self.get_jobs()
if jobs_list['Value'] is None:
_i = 0
while jobs_list['Value'] is not None:
_i += 1
jobs_list = self.get_jobs()
if _i > 10:
return 'JOBNOTFOUND'
if len(jobs_list['Value']) <= 0:
return 'JOBNOTFOUND'
for job in jobs_list['Value']:
if vm_name in job['ServerName']:
return job
return 'JOBNOTFOUND'
def get_virtual_datacenter(self):
json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
return json_obj
def get_server_detail(self, server_id):
json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
return json_obj['Value']
def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
if gateway is not None:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "true",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": gateway,
"IP": ip,
"SubNetMask": subnet_mask
}]
}
}
else:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": None,
"IP": None,
"SubNetMask": None
}]
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def detach_vlan(self, network_adapter_id, vlan_resource_id):
vlan_request = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def create_snapshot(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Create"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Restore"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def delete_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Delete"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def archive_vm(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
archive_request = {
"ArchiveVirtualServer": {
"ServerId": server_id
}
}
json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
restore_request = {
"Server": {
"ServerId": server_id,
"CPUQuantity": cpu_qty,
"RAMQuantity": ram_qty
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.purchase_vlan | python | def purchase_vlan(self, vlan_name, debug=False):
vlan_name = {'VLanName': vlan_name}
json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
if debug is True:
self.logger.debug(json_obj)
if json_obj['Success'] is False:
raise Exception("Cannot purchase new vlan.")
vlan = Vlan()
vlan.name = json_obj['Value']['Name']
vlan.resource_id = json_obj['Value']['ResourceId']
vlan.vlan_code = json_obj['Value']['VlanCode']
return vlan | Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L250-L268 | [
"def gen_def_json_scheme(self, req, method_fields=None):\n \"\"\"\n Generate the scheme for the json request.\n :param req: String representing the name of the method to call\n :param method_fields: A dictionary containing the method-specified fields\n :rtype : json object representing the method call\n \"\"\"\n json_dict = dict(\n ApplicationId=req,\n RequestId=req,\n SessionId=req,\n Password=self.auth.password,\n Username=self.auth.username\n )\n if method_fields is not None:\n json_dict.update(method_fields)\n self.logger.debug(json.dumps(json_dict))\n return json.dumps(json_dict)\n",
"def call_method_post(self, method, json_scheme, debug=False):\n url = '{}/{}'.format(self.wcf_baseurl, method)\n headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))}\n response = Http.post(url=url, data=json_scheme, headers=headers)\n parsed_response = json.loads(response.content.decode('utf-8'))\n if response.status_code != 200:\n from ArubaCloud.base.Errors import MalformedJsonRequest\n raise MalformedJsonRequest(\"Request: {}, Status Code: {}\".format(json_scheme, response.status_code))\n if parsed_response['Success'] is False:\n from ArubaCloud.base.Errors import RequestFailed\n raise RequestFailed(\"Request: {}, Response: {}\".format(json_scheme, parsed_response))\n if debug is True:\n msg = \"Response Message: {}\\nHTTP Status Code: {}\".format(parsed_response, response.status_code)\n self.logger.debug(msg)\n print(msg)\n return parsed_response\n"
] | class CloudInterface(JsonInterface):
templates = []
vmlist = VMList()
iplist = IpList()
json_templates = None
json_servers = None
ip_resource = None
hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}
def __init__(self, dc, debug_level=logging.INFO):
super(CloudInterface, self).__init__()
assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
self.logger = ArubaLog(level=debug_level, log_to_file=False)
self.logger.name = self.__class__
self.auth = None
def login(self, username, password, load=True):
"""
Set the authentication data in the object, and if load is True
(default is True) it also retrieve the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: define if pre cache the objects.
@return: None
"""
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers()
def poweroff_server(self, server=None, server_id=None):
"""
Poweroff a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def poweron_server(self, server=None, server_id=None):
"""
Poweron a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power on,
server_id: Int or Str representing the ID of the VM to power on.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def get_hypervisors(self):
"""
Initialize the internal list containing each template available for each
hypervisor.
:return: [bool] True in case of success, otherwise False
"""
json_scheme = self.gen_def_json_scheme('GetHypervisors')
json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
self.json_templates = json_obj
d = dict(json_obj)
for elem in d['Value']:
hv = self.hypervisors[elem['HypervisorType']]
for inner_elem in elem['Templates']:
o = Template(hv)
o.template_id = inner_elem['Id']
o.descr = inner_elem['Description']
o.id_code = inner_elem['IdentificationCode']
o.name = inner_elem['Name']
o.enabled = inner_elem['Enabled']
if hv != 'SMART':
for rb in inner_elem['ResourceBounds']:
resource_type = rb['ResourceType']
if resource_type == 1:
o.resource_bounds.max_cpu = rb['Max']
if resource_type == 2:
o.resource_bounds.max_memory = rb['Max']
if resource_type == 3:
o.resource_bounds.hdd0 = rb['Max']
if resource_type == 7:
o.resource_bounds.hdd1 = rb['Max']
if resource_type == 8:
o.resource_bounds.hdd2 = rb['Max']
if resource_type == 9:
o.resource_bounds.hdd3 = rb['Max']
self.templates.append(o)
return True if json_obj['Success'] is 'True' else False
def get_servers(self):
"""
Create the list of Server object inside the Datacenter objects.
Build an internal list of VM Objects (pro or smart) as iterator.
:return: bool
"""
json_scheme = self.gen_def_json_scheme('GetServers')
json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
self.json_servers = json_obj
# if this method is called I assume that i must re-read the data
# so i reinitialize the vmlist
self.vmlist = VMList()
# getting all instanced IP in case the list is empty
if len(self.iplist) <= 0:
self.get_ip()
for elem in dict(json_obj)["Value"]:
if elem['HypervisorType'] is 4:
s = Smart(interface=self, sid=elem['ServerId'])
else:
s = Pro(interface=self, sid=elem['ServerId'])
s.vm_name = elem['Name']
s.cpu_qty = elem['CPUQuantity']
s.ram_qty = elem['RAMQuantity']
s.status = elem['ServerStatus']
s.datacenter_id = elem['DatacenterId']
s.wcf_baseurl = self.wcf_baseurl
s.auth = self.auth
s.hd_qty = elem['HDQuantity']
s.hd_total_size = elem['HDTotalSize']
if elem['HypervisorType'] is 4:
ssd = self.get_server_detail(elem['ServerId'])
try:
s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
except TypeError:
s.ip_addr = 'Not retrieved.'
else:
s.ip_addr = []
for ip in self.iplist:
if ip.serverid == s.sid:
s.ip_addr.append(ip)
self.vmlist.append(s)
return True if json_obj['Success'] is True else False
def find_template(self, name=None, hv=None):
"""
Return a list of templates that could have one or more elements.
Args:
name: name of the template to find.
hv: the ID of the hypervisor to search the template in
Returns:
A list of templates object. If hv is None will return all the
templates matching the name if every hypervisor type. Otherwise
if name is None will return all templates of an hypervisor.
Raises:
ValidationError: if name and hv are None
"""
if len(self.templates) <= 0:
self.get_hypervisors()
if name is not None and hv is not None:
template_list = filter(
lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
)
elif name is not None and hv is None:
template_list = filter(
lambda x: name in x.descr, self.templates
)
elif name is None and hv is not None:
template_list = filter(
lambda x: x.hypervisor == self.hypervisors[hv], self.templates
)
else:
raise Exception('Error, no pattern defined')
if sys.version_info.major < (3):
return template_list
else:
return(list(template_list))
def get_vm(self, pattern=None):
if len(self.vmlist) <= 0:
self.get_servers()
if pattern is None:
return self.vmlist
else:
return self.vmlist.find(pattern)
def get_ip_by_vm(self, vm):
self.get_ip() # call get ip list to create the internal list of IPs.
vm_id = self.get_vm(vm)[0].sid
for ip in self.iplist:
if ip.serverid == vm_id:
return ip
return 'IPNOTFOUND'
def purchase_ip(self, debug=False):
"""
Return an ip object representing a new bought IP
@param debug [Boolean] if true, request and response will be printed
@return (Ip): Ip object
"""
json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
try:
ip = Ip()
ip.ip_addr = json_obj['Value']['Value']
ip.resid = json_obj['Value']['ResourceId']
return ip
except:
raise Exception('Unknown error retrieving IP.')
def remove_vlan(self, vlan_resource_id):
"""
Remove a VLAN
:param vlan_resource_id:
:return:
"""
vlan_id = {'VLanResourceId': vlan_resource_id}
json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def get_vlan(self, vlan_name=None):
json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
if vlan_name is not None:
raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
else:
raw_vlans = json_obj['Value']
vlans = []
for raw_vlan in raw_vlans:
v = Vlan()
v.name = raw_vlan['Name']
v.vlan_code = raw_vlan['VlanCode']
v.resource_id = raw_vlan['ResourceId']
vlans.append(v)
return vlans
def remove_ip(self, ip_id):
"""
Delete an Ip from the boughs ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
"""
ip_id = ' "IpAddressResourceId": %s' % ip_id
json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
pprint(json_obj)
return True if json_obj['Success'] is True else False
def get_package_id(self, name):
"""
Retrieve the smart package id given is English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size choosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId
def get_ip(self):
"""
Retrieve a complete list of bought ip address related only to PRO Servers.
It create an internal object (Iplist) representing all of the ips object
iterated form the WS.
@param: None
@return: None
"""
json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
self.iplist = IpList()
for ip in json_obj['Value']:
r = Ip()
r.ip_addr = ip['Value']
r.resid = ip['ResourceId']
r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
self.iplist.append(r)
def delete_vm(self, server=None, server_id=None):
self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
sid = server_id if server_id is not None else server.sid
self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
if sid is None:
raise Exception('NoServerSpecified')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
print('Deletion enqueued successfully for server_id: %s' % sid)
return True if json_obj['Success'] is 'True' else False
def get_jobs(self):
json_scheme = self.gen_def_json_scheme('GetJobs')
return self.call_method_post(method='GetJobs', json_scheme=json_scheme)
def find_job(self, vm_name):
jobs_list = self.get_jobs()
if jobs_list['Value'] is None:
_i = 0
while jobs_list['Value'] is not None:
_i += 1
jobs_list = self.get_jobs()
if _i > 10:
return 'JOBNOTFOUND'
if len(jobs_list['Value']) <= 0:
return 'JOBNOTFOUND'
for job in jobs_list['Value']:
if vm_name in job['ServerName']:
return job
return 'JOBNOTFOUND'
def get_virtual_datacenter(self):
json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
return json_obj
def get_server_detail(self, server_id):
json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
return json_obj['Value']
def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
if gateway is not None:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "true",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": gateway,
"IP": ip,
"SubNetMask": subnet_mask
}]
}
}
else:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": None,
"IP": None,
"SubNetMask": None
}]
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def detach_vlan(self, network_adapter_id, vlan_resource_id):
vlan_request = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def create_snapshot(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Create"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Restore"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def delete_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Delete"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def archive_vm(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
archive_request = {
"ArchiveVirtualServer": {
"ServerId": server_id
}
}
json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
restore_request = {
"Server": {
"ServerId": server_id,
"CPUQuantity": cpu_qty,
"RAMQuantity": ram_qty
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.remove_vlan | python | def remove_vlan(self, vlan_resource_id):
vlan_id = {'VLanResourceId': vlan_resource_id}
json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False | Remove a VLAN
:param vlan_resource_id:
:return: | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L270-L279 | [
"def gen_def_json_scheme(self, req, method_fields=None):\n \"\"\"\n Generate the scheme for the json request.\n :param req: String representing the name of the method to call\n :param method_fields: A dictionary containing the method-specified fields\n :rtype : json object representing the method call\n \"\"\"\n json_dict = dict(\n ApplicationId=req,\n RequestId=req,\n SessionId=req,\n Password=self.auth.password,\n Username=self.auth.username\n )\n if method_fields is not None:\n json_dict.update(method_fields)\n self.logger.debug(json.dumps(json_dict))\n return json.dumps(json_dict)\n",
"def call_method_post(self, method, json_scheme, debug=False):\n url = '{}/{}'.format(self.wcf_baseurl, method)\n headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))}\n response = Http.post(url=url, data=json_scheme, headers=headers)\n parsed_response = json.loads(response.content.decode('utf-8'))\n if response.status_code != 200:\n from ArubaCloud.base.Errors import MalformedJsonRequest\n raise MalformedJsonRequest(\"Request: {}, Status Code: {}\".format(json_scheme, response.status_code))\n if parsed_response['Success'] is False:\n from ArubaCloud.base.Errors import RequestFailed\n raise RequestFailed(\"Request: {}, Response: {}\".format(json_scheme, parsed_response))\n if debug is True:\n msg = \"Response Message: {}\\nHTTP Status Code: {}\".format(parsed_response, response.status_code)\n self.logger.debug(msg)\n print(msg)\n return parsed_response\n"
] | class CloudInterface(JsonInterface):
templates = []
vmlist = VMList()
iplist = IpList()
json_templates = None
json_servers = None
ip_resource = None
hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}
def __init__(self, dc, debug_level=logging.INFO):
super(CloudInterface, self).__init__()
assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
self.logger = ArubaLog(level=debug_level, log_to_file=False)
self.logger.name = self.__class__
self.auth = None
def login(self, username, password, load=True):
"""
Set the authentication data in the object, and if load is True
(default is True) it also retrieve the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: define if pre cache the objects.
@return: None
"""
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers()
def poweroff_server(self, server=None, server_id=None):
"""
Poweroff a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def poweron_server(self, server=None, server_id=None):
"""
Poweron a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power on,
server_id: Int or Str representing the ID of the VM to power on.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def get_hypervisors(self):
"""
Initialize the internal list containing each template available for each
hypervisor.
:return: [bool] True in case of success, otherwise False
"""
json_scheme = self.gen_def_json_scheme('GetHypervisors')
json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
self.json_templates = json_obj
d = dict(json_obj)
for elem in d['Value']:
hv = self.hypervisors[elem['HypervisorType']]
for inner_elem in elem['Templates']:
o = Template(hv)
o.template_id = inner_elem['Id']
o.descr = inner_elem['Description']
o.id_code = inner_elem['IdentificationCode']
o.name = inner_elem['Name']
o.enabled = inner_elem['Enabled']
if hv != 'SMART':
for rb in inner_elem['ResourceBounds']:
resource_type = rb['ResourceType']
if resource_type == 1:
o.resource_bounds.max_cpu = rb['Max']
if resource_type == 2:
o.resource_bounds.max_memory = rb['Max']
if resource_type == 3:
o.resource_bounds.hdd0 = rb['Max']
if resource_type == 7:
o.resource_bounds.hdd1 = rb['Max']
if resource_type == 8:
o.resource_bounds.hdd2 = rb['Max']
if resource_type == 9:
o.resource_bounds.hdd3 = rb['Max']
self.templates.append(o)
return True if json_obj['Success'] is 'True' else False
def get_servers(self):
"""
Create the list of Server object inside the Datacenter objects.
Build an internal list of VM Objects (pro or smart) as iterator.
:return: bool
"""
json_scheme = self.gen_def_json_scheme('GetServers')
json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
self.json_servers = json_obj
# if this method is called I assume that i must re-read the data
# so i reinitialize the vmlist
self.vmlist = VMList()
# getting all instanced IP in case the list is empty
if len(self.iplist) <= 0:
self.get_ip()
for elem in dict(json_obj)["Value"]:
if elem['HypervisorType'] is 4:
s = Smart(interface=self, sid=elem['ServerId'])
else:
s = Pro(interface=self, sid=elem['ServerId'])
s.vm_name = elem['Name']
s.cpu_qty = elem['CPUQuantity']
s.ram_qty = elem['RAMQuantity']
s.status = elem['ServerStatus']
s.datacenter_id = elem['DatacenterId']
s.wcf_baseurl = self.wcf_baseurl
s.auth = self.auth
s.hd_qty = elem['HDQuantity']
s.hd_total_size = elem['HDTotalSize']
if elem['HypervisorType'] is 4:
ssd = self.get_server_detail(elem['ServerId'])
try:
s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
except TypeError:
s.ip_addr = 'Not retrieved.'
else:
s.ip_addr = []
for ip in self.iplist:
if ip.serverid == s.sid:
s.ip_addr.append(ip)
self.vmlist.append(s)
return True if json_obj['Success'] is True else False
def find_template(self, name=None, hv=None):
"""
Return a list of templates that could have one or more elements.
Args:
name: name of the template to find.
hv: the ID of the hypervisor to search the template in
Returns:
A list of templates object. If hv is None will return all the
templates matching the name if every hypervisor type. Otherwise
if name is None will return all templates of an hypervisor.
Raises:
ValidationError: if name and hv are None
"""
if len(self.templates) <= 0:
self.get_hypervisors()
if name is not None and hv is not None:
template_list = filter(
lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
)
elif name is not None and hv is None:
template_list = filter(
lambda x: name in x.descr, self.templates
)
elif name is None and hv is not None:
template_list = filter(
lambda x: x.hypervisor == self.hypervisors[hv], self.templates
)
else:
raise Exception('Error, no pattern defined')
if sys.version_info.major < (3):
return template_list
else:
return(list(template_list))
def get_vm(self, pattern=None):
if len(self.vmlist) <= 0:
self.get_servers()
if pattern is None:
return self.vmlist
else:
return self.vmlist.find(pattern)
def get_ip_by_vm(self, vm):
self.get_ip() # call get ip list to create the internal list of IPs.
vm_id = self.get_vm(vm)[0].sid
for ip in self.iplist:
if ip.serverid == vm_id:
return ip
return 'IPNOTFOUND'
def purchase_ip(self, debug=False):
"""
Return an ip object representing a new bought IP
@param debug [Boolean] if true, request and response will be printed
@return (Ip): Ip object
"""
json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
try:
ip = Ip()
ip.ip_addr = json_obj['Value']['Value']
ip.resid = json_obj['Value']['ResourceId']
return ip
except:
raise Exception('Unknown error retrieving IP.')
def purchase_vlan(self, vlan_name, debug=False):
"""
Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created
"""
vlan_name = {'VLanName': vlan_name}
json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
if debug is True:
self.logger.debug(json_obj)
if json_obj['Success'] is False:
raise Exception("Cannot purchase new vlan.")
vlan = Vlan()
vlan.name = json_obj['Value']['Name']
vlan.resource_id = json_obj['Value']['ResourceId']
vlan.vlan_code = json_obj['Value']['VlanCode']
return vlan
def get_vlan(self, vlan_name=None):
json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
if vlan_name is not None:
raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
else:
raw_vlans = json_obj['Value']
vlans = []
for raw_vlan in raw_vlans:
v = Vlan()
v.name = raw_vlan['Name']
v.vlan_code = raw_vlan['VlanCode']
v.resource_id = raw_vlan['ResourceId']
vlans.append(v)
return vlans
def remove_ip(self, ip_id):
"""
Delete an Ip from the boughs ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
"""
ip_id = ' "IpAddressResourceId": %s' % ip_id
json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
pprint(json_obj)
return True if json_obj['Success'] is True else False
def get_package_id(self, name):
"""
Retrieve the smart package id given is English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size choosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId
def get_ip(self):
"""
Retrieve a complete list of bought ip address related only to PRO Servers.
It create an internal object (Iplist) representing all of the ips object
iterated form the WS.
@param: None
@return: None
"""
json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
self.iplist = IpList()
for ip in json_obj['Value']:
r = Ip()
r.ip_addr = ip['Value']
r.resid = ip['ResourceId']
r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
self.iplist.append(r)
def delete_vm(self, server=None, server_id=None):
self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
sid = server_id if server_id is not None else server.sid
self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
if sid is None:
raise Exception('NoServerSpecified')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
print('Deletion enqueued successfully for server_id: %s' % sid)
return True if json_obj['Success'] is 'True' else False
def get_jobs(self):
json_scheme = self.gen_def_json_scheme('GetJobs')
return self.call_method_post(method='GetJobs', json_scheme=json_scheme)
def find_job(self, vm_name):
jobs_list = self.get_jobs()
if jobs_list['Value'] is None:
_i = 0
while jobs_list['Value'] is not None:
_i += 1
jobs_list = self.get_jobs()
if _i > 10:
return 'JOBNOTFOUND'
if len(jobs_list['Value']) <= 0:
return 'JOBNOTFOUND'
for job in jobs_list['Value']:
if vm_name in job['ServerName']:
return job
return 'JOBNOTFOUND'
def get_virtual_datacenter(self):
json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
return json_obj
def get_server_detail(self, server_id):
json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
return json_obj['Value']
def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
if gateway is not None:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "true",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": gateway,
"IP": ip,
"SubNetMask": subnet_mask
}]
}
}
else:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": None,
"IP": None,
"SubNetMask": None
}]
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def detach_vlan(self, network_adapter_id, vlan_resource_id):
vlan_request = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def create_snapshot(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Create"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Restore"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def delete_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Delete"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def archive_vm(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
archive_request = {
"ArchiveVirtualServer": {
"ServerId": server_id
}
}
json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
restore_request = {
"Server": {
"ServerId": server_id,
"CPUQuantity": cpu_qty,
"RAMQuantity": ram_qty
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.remove_ip | python | def remove_ip(self, ip_id):
ip_id = ' "IpAddressResourceId": %s' % ip_id
json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
pprint(json_obj)
return True if json_obj['Success'] is True else False | Delete an Ip from the bought ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
"def gen_def_json_scheme(self, req, method_fields=None):\n \"\"\"\n Generate the scheme for the json request.\n :param req: String representing the name of the method to call\n :param method_fields: A dictionary containing the method-specified fields\n :rtype : json object representing the method call\n \"\"\"\n json_dict = dict(\n ApplicationId=req,\n RequestId=req,\n SessionId=req,\n Password=self.auth.password,\n Username=self.auth.username\n )\n if method_fields is not None:\n json_dict.update(method_fields)\n self.logger.debug(json.dumps(json_dict))\n return json.dumps(json_dict)\n",
"def call_method_post(self, method, json_scheme, debug=False):\n url = '{}/{}'.format(self.wcf_baseurl, method)\n headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))}\n response = Http.post(url=url, data=json_scheme, headers=headers)\n parsed_response = json.loads(response.content.decode('utf-8'))\n if response.status_code != 200:\n from ArubaCloud.base.Errors import MalformedJsonRequest\n raise MalformedJsonRequest(\"Request: {}, Status Code: {}\".format(json_scheme, response.status_code))\n if parsed_response['Success'] is False:\n from ArubaCloud.base.Errors import RequestFailed\n raise RequestFailed(\"Request: {}, Response: {}\".format(json_scheme, parsed_response))\n if debug is True:\n msg = \"Response Message: {}\\nHTTP Status Code: {}\".format(parsed_response, response.status_code)\n self.logger.debug(msg)\n print(msg)\n return parsed_response\n"
class CloudInterface(JsonInterface):
    """Client for the Aruba Cloud "WsEndUser" JSON API of a single datacenter.

    Wraps authentication, server power/lifecycle operations, template and
    hypervisor discovery, IP and VLAN management, and job inspection.
    """

    # Class-level defaults kept for backward compatibility with callers that
    # read them off the class; __init__ shadows them with per-instance copies
    # so that state is not shared between CloudInterface instances.
    templates = []
    vmlist = VMList()
    iplist = IpList()
    json_templates = None
    json_servers = None
    ip_resource = None
    # WS hypervisor type codes -> short names.
    hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}

    def __init__(self, dc, debug_level=logging.INFO):
        """
        @param (int) dc: datacenter number, used to build the endpoint URL
        @param debug_level: logging level for the internal logger
        """
        super(CloudInterface, self).__init__()
        assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
        self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
        self.logger = ArubaLog(level=debug_level, log_to_file=False)
        self.logger.name = self.__class__
        self.auth = None
        # Per-instance state: without these, every instance would mutate the
        # shared class-level lists declared above.
        self.templates = []
        self.vmlist = VMList()
        self.iplist = IpList()

    def login(self, username, password, load=True):
        """
        Set the authentication data in the object, and if load is True
        (default is True) it also retrieves the ip list and the vm list
        in order to build the internal objects list.
        @param (str) username: username of the cloud
        @param (str) password: password of the cloud
        @param (bool) load: define if pre cache the objects.
        @return: None
        """
        self.auth = Auth(username, password)
        if load is True:
            self.get_ip()
            self.get_servers()

    def poweroff_server(self, server=None, server_id=None):
        """
        Poweroff a VM. It is possible to pass the VM object or simply the ID
        of the VM that we want to turn off.
        Args:
            server: VM Object that represents the VM to power off,
            server_id: Int or Str representing the ID of the VM to power off.
        Returns:
            True if json_obj['Success'] is True else False
        """
        sid = server_id if server_id is not None else server.sid
        if sid is None:
            raise Exception('No Server Specified.')
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
        json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
        # Bug fix: the WS returns a boolean, not the string 'True'; the old
        # identity check against 'True' always evaluated to False.
        return json_obj['Success'] is True

    def poweron_server(self, server=None, server_id=None):
        """
        Poweron a VM. It is possible to pass the VM object or simply the ID
        of the VM that we want to turn on.
        Args:
            server: VM Object that represents the VM to power on,
            server_id: Int or Str representing the ID of the VM to power on.
        Returns:
            True if json_obj['Success'] is True else False
        """
        sid = server_id if server_id is not None else server.sid
        if sid is None:
            raise Exception('No Server Specified.')
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
        json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
        # Bug fix: compare with the boolean True, not the string 'True'.
        return json_obj['Success'] is True

    def get_hypervisors(self):
        """
        Initialize the internal list containing each template available for each
        hypervisor.
        :return: [bool] True in case of success, otherwise False
        """
        json_scheme = self.gen_def_json_scheme('GetHypervisors')
        json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
        self.json_templates = json_obj
        d = dict(json_obj)
        for elem in d['Value']:
            hv = self.hypervisors[elem['HypervisorType']]
            for inner_elem in elem['Templates']:
                o = Template(hv)
                o.template_id = inner_elem['Id']
                o.descr = inner_elem['Description']
                o.id_code = inner_elem['IdentificationCode']
                o.name = inner_elem['Name']
                o.enabled = inner_elem['Enabled']
                if hv != 'SMART':
                    # PRO templates carry resource bounds; map each known
                    # resource type code to the matching attribute.
                    for rb in inner_elem['ResourceBounds']:
                        resource_type = rb['ResourceType']
                        if resource_type == 1:
                            o.resource_bounds.max_cpu = rb['Max']
                        if resource_type == 2:
                            o.resource_bounds.max_memory = rb['Max']
                        if resource_type == 3:
                            o.resource_bounds.hdd0 = rb['Max']
                        if resource_type == 7:
                            o.resource_bounds.hdd1 = rb['Max']
                        if resource_type == 8:
                            o.resource_bounds.hdd2 = rb['Max']
                        if resource_type == 9:
                            o.resource_bounds.hdd3 = rb['Max']
                self.templates.append(o)
        # Bug fix: compare with the boolean True, not the string 'True'.
        return json_obj['Success'] is True

    def get_servers(self):
        """
        Create the list of Server object inside the Datacenter objects.
        Build an internal list of VM Objects (pro or smart) as iterator.
        :return: bool
        """
        json_scheme = self.gen_def_json_scheme('GetServers')
        json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
        self.json_servers = json_obj
        # if this method is called I assume that i must re-read the data
        # so i reinitialize the vmlist
        self.vmlist = VMList()
        # getting all instanced IP in case the list is empty
        if len(self.iplist) <= 0:
            self.get_ip()
        for elem in dict(json_obj)["Value"]:
            # Bug fix: use equality, not identity, for int literals.
            if elem['HypervisorType'] == 4:
                s = Smart(interface=self, sid=elem['ServerId'])
            else:
                s = Pro(interface=self, sid=elem['ServerId'])
            s.vm_name = elem['Name']
            s.cpu_qty = elem['CPUQuantity']
            s.ram_qty = elem['RAMQuantity']
            s.status = elem['ServerStatus']
            s.datacenter_id = elem['DatacenterId']
            s.wcf_baseurl = self.wcf_baseurl
            s.auth = self.auth
            s.hd_qty = elem['HDQuantity']
            s.hd_total_size = elem['HDTotalSize']
            if elem['HypervisorType'] == 4:
                # SMART servers expose a single IP through the detail call.
                ssd = self.get_server_detail(elem['ServerId'])
                try:
                    s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
                except TypeError:
                    s.ip_addr = 'Not retrieved.'
            else:
                # PRO servers may own several purchased IPs.
                s.ip_addr = []
                for ip in self.iplist:
                    if ip.serverid == s.sid:
                        s.ip_addr.append(ip)
            self.vmlist.append(s)
        return json_obj['Success'] is True

    def find_template(self, name=None, hv=None):
        """
        Return a list of templates that could have one or more elements.
        Args:
            name: name of the template to find.
            hv: the ID of the hypervisor to search the template in
        Returns:
            A list of templates object. If hv is None will return all the
            templates matching the name in every hypervisor type. Otherwise
            if name is None will return all templates of an hypervisor.
        Raises:
            Exception: if name and hv are both None
        """
        if len(self.templates) <= 0:
            self.get_hypervisors()
        if name is not None and hv is not None:
            template_list = filter(
                lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
            )
        elif name is not None and hv is None:
            template_list = filter(
                lambda x: name in x.descr, self.templates
            )
        elif name is None and hv is not None:
            template_list = filter(
                lambda x: x.hypervisor == self.hypervisors[hv], self.templates
            )
        else:
            raise Exception('Error, no pattern defined')
        # On Python 2, filter already returns a list; on 3 it is lazy.
        if sys.version_info.major < 3:
            return template_list
        else:
            return list(template_list)

    def get_vm(self, pattern=None):
        """Return the cached VM list, optionally filtered by name pattern."""
        if len(self.vmlist) <= 0:
            self.get_servers()
        if pattern is None:
            return self.vmlist
        else:
            return self.vmlist.find(pattern)

    def get_ip_by_vm(self, vm):
        """Return the Ip object bound to the VM named *vm*, or 'IPNOTFOUND'."""
        self.get_ip()  # call get ip list to create the internal list of IPs.
        vm_id = self.get_vm(vm)[0].sid
        for ip in self.iplist:
            if ip.serverid == vm_id:
                return ip
        return 'IPNOTFOUND'

    def purchase_ip(self, debug=False):
        """
        Return an ip object representing a new bought IP
        @param debug [Boolean] if true, request and response will be printed
        @return (Ip): Ip object
        """
        json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
        json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
        # Narrowed from a bare except: only a malformed response should be
        # remapped; any other failure propagates with its real traceback.
        try:
            ip = Ip()
            ip.ip_addr = json_obj['Value']['Value']
            ip.resid = json_obj['Value']['ResourceId']
            return ip
        except (KeyError, TypeError) as exc:
            raise Exception('Unknown error retrieving IP.') from exc

    def purchase_vlan(self, vlan_name, debug=False):
        """
        Purchase a new VLAN.
        :param debug: Log the json response if True
        :param vlan_name: String representing the name of the vlan (virtual switch)
        :return: a Vlan Object representing the vlan created
        """
        vlan_name = {'VLanName': vlan_name}
        json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
        json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
        if debug is True:
            self.logger.debug(json_obj)
        if json_obj['Success'] is False:
            raise Exception("Cannot purchase new vlan.")
        vlan = Vlan()
        vlan.name = json_obj['Value']['Name']
        vlan.resource_id = json_obj['Value']['ResourceId']
        vlan.vlan_code = json_obj['Value']['VlanCode']
        return vlan

    def remove_vlan(self, vlan_resource_id):
        """
        Remove a VLAN
        :param vlan_resource_id: resource id of the vlan to remove
        :return: True on success, otherwise False
        """
        vlan_id = {'VLanResourceId': vlan_resource_id}
        json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
        json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def get_vlan(self, vlan_name=None):
        """Return the purchased VLANs, optionally filtered by name."""
        json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
        json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
        if vlan_name is not None:
            raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
        else:
            raw_vlans = json_obj['Value']
        vlans = []
        for raw_vlan in raw_vlans:
            v = Vlan()
            v.name = raw_vlan['Name']
            v.vlan_code = raw_vlan['VlanCode']
            v.resource_id = raw_vlan['ResourceId']
            vlans.append(v)
        return vlans

    def get_package_id(self, name):
        """
        Retrieve the smart package id given its English name
        @param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
        @return: The package id that depends on the Data center and the size chosen.
        """
        json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
        # NOTE(review): the trailing space in the method name below is kept
        # as-is — presumably the endpoint tolerates it; confirm before removing.
        json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
        for package in json_obj['Value']:
            packageId = package['PackageID']
            for description in package['Descriptions']:
                languageID = description['LanguageID']
                packageName = description['Text']
                # LanguageID 2 is the English description.
                if languageID == 2 and packageName.lower() == name.lower():
                    return packageId

    def get_ip(self):
        """
        Retrieve a complete list of bought ip address related only to PRO Servers.
        It creates an internal object (IpList) representing all of the ip objects
        iterated from the WS.
        @param: None
        @return: None
        """
        json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
        # NOTE(review): trailing space in the method name kept as-is; see
        # get_package_id above.
        json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
        self.iplist = IpList()
        for ip in json_obj['Value']:
            r = Ip()
            r.ip_addr = ip['Value']
            r.resid = ip['ResourceId']
            r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
            self.iplist.append(r)

    def delete_vm(self, server=None, server_id=None):
        """Enqueue the deletion of a VM given its object or its id."""
        self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
        sid = server_id if server_id is not None else server.sid
        self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
        if sid is None:
            raise Exception('NoServerSpecified')
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
        json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
        print('Deletion enqueued successfully for server_id: %s' % sid)
        # Bug fix: compare with the boolean True, not the string 'True'.
        return json_obj['Success'] is True

    def get_jobs(self):
        """Return the raw jobs list as parsed from the WS."""
        json_scheme = self.gen_def_json_scheme('GetJobs')
        return self.call_method_post(method='GetJobs', json_scheme=json_scheme)

    def find_job(self, vm_name):
        """Return the job whose ServerName contains *vm_name*, or 'JOBNOTFOUND'."""
        jobs_list = self.get_jobs()
        if jobs_list['Value'] is None:
            # Bug fix: retry while the value is still None (the original
            # condition was inverted and could fall through to len(None)).
            _i = 0
            while jobs_list['Value'] is None:
                _i += 1
                jobs_list = self.get_jobs()
                if _i > 10:
                    return 'JOBNOTFOUND'
        if len(jobs_list['Value']) <= 0:
            return 'JOBNOTFOUND'
        for job in jobs_list['Value']:
            if vm_name in job['ServerName']:
                return job
        return 'JOBNOTFOUND'

    def get_virtual_datacenter(self):
        """Return the raw virtual datacenter description."""
        json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
        json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
        return json_obj

    def get_server_detail(self, server_id):
        """Return the 'Value' payload of GetServerDetails for *server_id*."""
        json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
        json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
        return json_obj['Value']

    def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
        """Associate a VLAN with a network adapter, optionally with a private IP."""
        # A gateway implies a concrete private IP configuration to push onto
        # the virtual machine; otherwise only the association is requested.
        if gateway is not None:
            set_on_vm = "true"
            private_ips = [{
                "GateWay": gateway,
                "IP": ip,
                "SubNetMask": subnet_mask
            }]
        else:
            set_on_vm = "false"
            private_ips = [{
                "GateWay": None,
                "IP": None,
                "SubNetMask": None
            }]
        additional_fields = {
            "VLanRequest": {
                "NetworkAdapterId": network_adapter_id,
                "SetOnVirtualMachine": set_on_vm,
                "VLanResourceId": vlan_resource_id,
                "PrivateIps": private_ips
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
        json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def detach_vlan(self, network_adapter_id, vlan_resource_id):
        """Deassociate a VLAN from a network adapter."""
        vlan_request = {
            "VLanRequest": {
                "NetworkAdapterId": network_adapter_id,
                "SetOnVirtualMachine": "false",
                "VLanResourceId": vlan_resource_id
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
        json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def create_snapshot(self, dc, server_id=None):
        """Enqueue a snapshot creation for a PRO server.

        @param (int) dc: datacenter number hosting the server
        @param server_id: id of the server to snapshot
        @return: True on success; False for SMART servers or on failure
        """
        sid = CloudInterface(dc).get_server_detail(server_id)
        # Bug fix: snapshots are not available on SMART (type 4) servers; the
        # original code fell through with json_scheme undefined (NameError).
        if sid['HypervisorType'] == 4:
            return False
        snapshot_request = {
            "Snapshot": {
                "ServerId": server_id,
                "SnapshotOperationTypes": "Create"
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
        json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def restore_snapshot(self, server_id=None):
        """Enqueue the restore of the server snapshot."""
        snapshot_request = {
            "Snapshot": {
                "ServerId": server_id,
                "SnapshotOperationTypes": "Restore"
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
        json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def delete_snapshot(self, server_id=None):
        """Enqueue the deletion of the server snapshot."""
        snapshot_request = {
            "Snapshot": {
                "ServerId": server_id,
                "SnapshotOperationTypes": "Delete"
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
        json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def archive_vm(self, dc, server_id=None):
        """Enqueue the archiving of a PRO server.

        @return: True on success; False for SMART servers or on failure
        """
        sid = CloudInterface(dc).get_server_detail(server_id)
        # Bug fix: archiving only applies to non-SMART servers; the original
        # code referenced json_scheme that was undefined for type 4 servers.
        if sid['HypervisorType'] == 4:
            return False
        archive_request = {
            "ArchiveVirtualServer": {
                "ServerId": server_id
            }
        }
        json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
        json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
        """Enqueue the restore of an archived server with the given resources."""
        restore_request = {
            "Server": {
                "ServerId": server_id,
                "CPUQuantity": cpu_qty,
                "RAMQuantity": ram_qty
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
        json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
        return json_obj['Success'] is True
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.get_package_id | python | def get_package_id(self, name):
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId | Retrieve the smart package id given its English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size chosen.
"def gen_def_json_scheme(self, req, method_fields=None):\n \"\"\"\n Generate the scheme for the json request.\n :param req: String representing the name of the method to call\n :param method_fields: A dictionary containing the method-specified fields\n :rtype : json object representing the method call\n \"\"\"\n json_dict = dict(\n ApplicationId=req,\n RequestId=req,\n SessionId=req,\n Password=self.auth.password,\n Username=self.auth.username\n )\n if method_fields is not None:\n json_dict.update(method_fields)\n self.logger.debug(json.dumps(json_dict))\n return json.dumps(json_dict)\n",
"def call_method_post(self, method, json_scheme, debug=False):\n url = '{}/{}'.format(self.wcf_baseurl, method)\n headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))}\n response = Http.post(url=url, data=json_scheme, headers=headers)\n parsed_response = json.loads(response.content.decode('utf-8'))\n if response.status_code != 200:\n from ArubaCloud.base.Errors import MalformedJsonRequest\n raise MalformedJsonRequest(\"Request: {}, Status Code: {}\".format(json_scheme, response.status_code))\n if parsed_response['Success'] is False:\n from ArubaCloud.base.Errors import RequestFailed\n raise RequestFailed(\"Request: {}, Response: {}\".format(json_scheme, parsed_response))\n if debug is True:\n msg = \"Response Message: {}\\nHTTP Status Code: {}\".format(parsed_response, response.status_code)\n self.logger.debug(msg)\n print(msg)\n return parsed_response\n"
class CloudInterface(JsonInterface):
    """Client for the Aruba Cloud "WsEndUser" JSON API of a single datacenter.

    Wraps authentication, server power/lifecycle operations, template and
    hypervisor discovery, IP and VLAN management, and job inspection.
    """

    # Class-level defaults kept for backward compatibility with callers that
    # read them off the class; __init__ shadows them with per-instance copies
    # so that state is not shared between CloudInterface instances.
    templates = []
    vmlist = VMList()
    iplist = IpList()
    json_templates = None
    json_servers = None
    ip_resource = None
    # WS hypervisor type codes -> short names.
    hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}

    def __init__(self, dc, debug_level=logging.INFO):
        """
        @param (int) dc: datacenter number, used to build the endpoint URL
        @param debug_level: logging level for the internal logger
        """
        super(CloudInterface, self).__init__()
        assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
        self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
        self.logger = ArubaLog(level=debug_level, log_to_file=False)
        self.logger.name = self.__class__
        self.auth = None
        # Per-instance state: without these, every instance would mutate the
        # shared class-level lists declared above.
        self.templates = []
        self.vmlist = VMList()
        self.iplist = IpList()

    def login(self, username, password, load=True):
        """
        Set the authentication data in the object, and if load is True
        (default is True) it also retrieves the ip list and the vm list
        in order to build the internal objects list.
        @param (str) username: username of the cloud
        @param (str) password: password of the cloud
        @param (bool) load: define if pre cache the objects.
        @return: None
        """
        self.auth = Auth(username, password)
        if load is True:
            self.get_ip()
            self.get_servers()

    def poweroff_server(self, server=None, server_id=None):
        """
        Poweroff a VM. It is possible to pass the VM object or simply the ID
        of the VM that we want to turn off.
        Args:
            server: VM Object that represents the VM to power off,
            server_id: Int or Str representing the ID of the VM to power off.
        Returns:
            True if json_obj['Success'] is True else False
        """
        sid = server_id if server_id is not None else server.sid
        if sid is None:
            raise Exception('No Server Specified.')
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
        json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
        # Bug fix: the WS returns a boolean, not the string 'True'; the old
        # identity check against 'True' always evaluated to False.
        return json_obj['Success'] is True

    def poweron_server(self, server=None, server_id=None):
        """
        Poweron a VM. It is possible to pass the VM object or simply the ID
        of the VM that we want to turn on.
        Args:
            server: VM Object that represents the VM to power on,
            server_id: Int or Str representing the ID of the VM to power on.
        Returns:
            True if json_obj['Success'] is True else False
        """
        sid = server_id if server_id is not None else server.sid
        if sid is None:
            raise Exception('No Server Specified.')
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
        json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
        # Bug fix: compare with the boolean True, not the string 'True'.
        return json_obj['Success'] is True

    def get_hypervisors(self):
        """
        Initialize the internal list containing each template available for each
        hypervisor.
        :return: [bool] True in case of success, otherwise False
        """
        json_scheme = self.gen_def_json_scheme('GetHypervisors')
        json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
        self.json_templates = json_obj
        d = dict(json_obj)
        for elem in d['Value']:
            hv = self.hypervisors[elem['HypervisorType']]
            for inner_elem in elem['Templates']:
                o = Template(hv)
                o.template_id = inner_elem['Id']
                o.descr = inner_elem['Description']
                o.id_code = inner_elem['IdentificationCode']
                o.name = inner_elem['Name']
                o.enabled = inner_elem['Enabled']
                if hv != 'SMART':
                    # PRO templates carry resource bounds; map each known
                    # resource type code to the matching attribute.
                    for rb in inner_elem['ResourceBounds']:
                        resource_type = rb['ResourceType']
                        if resource_type == 1:
                            o.resource_bounds.max_cpu = rb['Max']
                        if resource_type == 2:
                            o.resource_bounds.max_memory = rb['Max']
                        if resource_type == 3:
                            o.resource_bounds.hdd0 = rb['Max']
                        if resource_type == 7:
                            o.resource_bounds.hdd1 = rb['Max']
                        if resource_type == 8:
                            o.resource_bounds.hdd2 = rb['Max']
                        if resource_type == 9:
                            o.resource_bounds.hdd3 = rb['Max']
                self.templates.append(o)
        # Bug fix: compare with the boolean True, not the string 'True'.
        return json_obj['Success'] is True

    def get_servers(self):
        """
        Create the list of Server object inside the Datacenter objects.
        Build an internal list of VM Objects (pro or smart) as iterator.
        :return: bool
        """
        json_scheme = self.gen_def_json_scheme('GetServers')
        json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
        self.json_servers = json_obj
        # if this method is called I assume that i must re-read the data
        # so i reinitialize the vmlist
        self.vmlist = VMList()
        # getting all instanced IP in case the list is empty
        if len(self.iplist) <= 0:
            self.get_ip()
        for elem in dict(json_obj)["Value"]:
            # Bug fix: use equality, not identity, for int literals.
            if elem['HypervisorType'] == 4:
                s = Smart(interface=self, sid=elem['ServerId'])
            else:
                s = Pro(interface=self, sid=elem['ServerId'])
            s.vm_name = elem['Name']
            s.cpu_qty = elem['CPUQuantity']
            s.ram_qty = elem['RAMQuantity']
            s.status = elem['ServerStatus']
            s.datacenter_id = elem['DatacenterId']
            s.wcf_baseurl = self.wcf_baseurl
            s.auth = self.auth
            s.hd_qty = elem['HDQuantity']
            s.hd_total_size = elem['HDTotalSize']
            if elem['HypervisorType'] == 4:
                # SMART servers expose a single IP through the detail call.
                ssd = self.get_server_detail(elem['ServerId'])
                try:
                    s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
                except TypeError:
                    s.ip_addr = 'Not retrieved.'
            else:
                # PRO servers may own several purchased IPs.
                s.ip_addr = []
                for ip in self.iplist:
                    if ip.serverid == s.sid:
                        s.ip_addr.append(ip)
            self.vmlist.append(s)
        return json_obj['Success'] is True

    def find_template(self, name=None, hv=None):
        """
        Return a list of templates that could have one or more elements.
        Args:
            name: name of the template to find.
            hv: the ID of the hypervisor to search the template in
        Returns:
            A list of templates object. If hv is None will return all the
            templates matching the name in every hypervisor type. Otherwise
            if name is None will return all templates of an hypervisor.
        Raises:
            Exception: if name and hv are both None
        """
        if len(self.templates) <= 0:
            self.get_hypervisors()
        if name is not None and hv is not None:
            template_list = filter(
                lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
            )
        elif name is not None and hv is None:
            template_list = filter(
                lambda x: name in x.descr, self.templates
            )
        elif name is None and hv is not None:
            template_list = filter(
                lambda x: x.hypervisor == self.hypervisors[hv], self.templates
            )
        else:
            raise Exception('Error, no pattern defined')
        # On Python 2, filter already returns a list; on 3 it is lazy.
        if sys.version_info.major < 3:
            return template_list
        else:
            return list(template_list)

    def get_vm(self, pattern=None):
        """Return the cached VM list, optionally filtered by name pattern."""
        if len(self.vmlist) <= 0:
            self.get_servers()
        if pattern is None:
            return self.vmlist
        else:
            return self.vmlist.find(pattern)

    def get_ip_by_vm(self, vm):
        """Return the Ip object bound to the VM named *vm*, or 'IPNOTFOUND'."""
        self.get_ip()  # call get ip list to create the internal list of IPs.
        vm_id = self.get_vm(vm)[0].sid
        for ip in self.iplist:
            if ip.serverid == vm_id:
                return ip
        return 'IPNOTFOUND'

    def purchase_ip(self, debug=False):
        """
        Return an ip object representing a new bought IP
        @param debug [Boolean] if true, request and response will be printed
        @return (Ip): Ip object
        """
        json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
        json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
        # Narrowed from a bare except: only a malformed response should be
        # remapped; any other failure propagates with its real traceback.
        try:
            ip = Ip()
            ip.ip_addr = json_obj['Value']['Value']
            ip.resid = json_obj['Value']['ResourceId']
            return ip
        except (KeyError, TypeError) as exc:
            raise Exception('Unknown error retrieving IP.') from exc

    def purchase_vlan(self, vlan_name, debug=False):
        """
        Purchase a new VLAN.
        :param debug: Log the json response if True
        :param vlan_name: String representing the name of the vlan (virtual switch)
        :return: a Vlan Object representing the vlan created
        """
        vlan_name = {'VLanName': vlan_name}
        json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
        json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
        if debug is True:
            self.logger.debug(json_obj)
        if json_obj['Success'] is False:
            raise Exception("Cannot purchase new vlan.")
        vlan = Vlan()
        vlan.name = json_obj['Value']['Name']
        vlan.resource_id = json_obj['Value']['ResourceId']
        vlan.vlan_code = json_obj['Value']['VlanCode']
        return vlan

    def remove_vlan(self, vlan_resource_id):
        """
        Remove a VLAN
        :param vlan_resource_id: resource id of the vlan to remove
        :return: True on success, otherwise False
        """
        vlan_id = {'VLanResourceId': vlan_resource_id}
        json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
        json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def get_vlan(self, vlan_name=None):
        """Return the purchased VLANs, optionally filtered by name."""
        json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
        json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
        if vlan_name is not None:
            raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
        else:
            raw_vlans = json_obj['Value']
        vlans = []
        for raw_vlan in raw_vlans:
            v = Vlan()
            v.name = raw_vlan['Name']
            v.vlan_code = raw_vlan['VlanCode']
            v.resource_id = raw_vlan['ResourceId']
            vlans.append(v)
        return vlans

    def remove_ip(self, ip_id):
        """
        Delete an Ip from the bought ip list
        @param (str) ip_id: a string representing the resource id of the IP
        @return: True if json method had success else False
        """
        # Bug fix: gen_def_json_scheme expects a dict of method fields; the
        # original passed a raw string fragment, which dict.update() rejects.
        json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', dict(IpAddressResourceId=ip_id))
        json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
        pprint(json_obj)
        return json_obj['Success'] is True

    def get_ip(self):
        """
        Retrieve a complete list of bought ip address related only to PRO Servers.
        It creates an internal object (IpList) representing all of the ip objects
        iterated from the WS.
        @param: None
        @return: None
        """
        json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
        # NOTE(review): the trailing space in the method name below is kept
        # as-is — presumably the endpoint tolerates it; confirm before removing.
        json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
        self.iplist = IpList()
        for ip in json_obj['Value']:
            r = Ip()
            r.ip_addr = ip['Value']
            r.resid = ip['ResourceId']
            r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
            self.iplist.append(r)

    def delete_vm(self, server=None, server_id=None):
        """Enqueue the deletion of a VM given its object or its id."""
        self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
        sid = server_id if server_id is not None else server.sid
        self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
        if sid is None:
            raise Exception('NoServerSpecified')
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
        json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
        print('Deletion enqueued successfully for server_id: %s' % sid)
        # Bug fix: compare with the boolean True, not the string 'True'.
        return json_obj['Success'] is True

    def get_jobs(self):
        """Return the raw jobs list as parsed from the WS."""
        json_scheme = self.gen_def_json_scheme('GetJobs')
        return self.call_method_post(method='GetJobs', json_scheme=json_scheme)

    def find_job(self, vm_name):
        """Return the job whose ServerName contains *vm_name*, or 'JOBNOTFOUND'."""
        jobs_list = self.get_jobs()
        if jobs_list['Value'] is None:
            # Bug fix: retry while the value is still None (the original
            # condition was inverted and could fall through to len(None)).
            _i = 0
            while jobs_list['Value'] is None:
                _i += 1
                jobs_list = self.get_jobs()
                if _i > 10:
                    return 'JOBNOTFOUND'
        if len(jobs_list['Value']) <= 0:
            return 'JOBNOTFOUND'
        for job in jobs_list['Value']:
            if vm_name in job['ServerName']:
                return job
        return 'JOBNOTFOUND'

    def get_virtual_datacenter(self):
        """Return the raw virtual datacenter description."""
        json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
        json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
        return json_obj

    def get_server_detail(self, server_id):
        """Return the 'Value' payload of GetServerDetails for *server_id*."""
        json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
        json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
        return json_obj['Value']

    def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
        """Associate a VLAN with a network adapter, optionally with a private IP."""
        # A gateway implies a concrete private IP configuration to push onto
        # the virtual machine; otherwise only the association is requested.
        if gateway is not None:
            set_on_vm = "true"
            private_ips = [{
                "GateWay": gateway,
                "IP": ip,
                "SubNetMask": subnet_mask
            }]
        else:
            set_on_vm = "false"
            private_ips = [{
                "GateWay": None,
                "IP": None,
                "SubNetMask": None
            }]
        additional_fields = {
            "VLanRequest": {
                "NetworkAdapterId": network_adapter_id,
                "SetOnVirtualMachine": set_on_vm,
                "VLanResourceId": vlan_resource_id,
                "PrivateIps": private_ips
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
        json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def detach_vlan(self, network_adapter_id, vlan_resource_id):
        """Deassociate a VLAN from a network adapter."""
        vlan_request = {
            "VLanRequest": {
                "NetworkAdapterId": network_adapter_id,
                "SetOnVirtualMachine": "false",
                "VLanResourceId": vlan_resource_id
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
        json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def create_snapshot(self, dc, server_id=None):
        """Enqueue a snapshot creation for a PRO server.

        @param (int) dc: datacenter number hosting the server
        @param server_id: id of the server to snapshot
        @return: True on success; False for SMART servers or on failure
        """
        sid = CloudInterface(dc).get_server_detail(server_id)
        # Bug fix: snapshots are not available on SMART (type 4) servers; the
        # original code fell through with json_scheme undefined (NameError).
        if sid['HypervisorType'] == 4:
            return False
        snapshot_request = {
            "Snapshot": {
                "ServerId": server_id,
                "SnapshotOperationTypes": "Create"
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
        json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def restore_snapshot(self, server_id=None):
        """Enqueue the restore of the server snapshot."""
        snapshot_request = {
            "Snapshot": {
                "ServerId": server_id,
                "SnapshotOperationTypes": "Restore"
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
        json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def delete_snapshot(self, server_id=None):
        """Enqueue the deletion of the server snapshot."""
        snapshot_request = {
            "Snapshot": {
                "ServerId": server_id,
                "SnapshotOperationTypes": "Delete"
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
        json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def archive_vm(self, dc, server_id=None):
        """Enqueue the archiving of a PRO server.

        @return: True on success; False for SMART servers or on failure
        """
        sid = CloudInterface(dc).get_server_detail(server_id)
        # Bug fix: archiving only applies to non-SMART servers; the original
        # code referenced json_scheme that was undefined for type 4 servers.
        if sid['HypervisorType'] == 4:
            return False
        archive_request = {
            "ArchiveVirtualServer": {
                "ServerId": server_id
            }
        }
        json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
        json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
        return json_obj['Success'] is True

    def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
        """Enqueue the restore of an archived server with the given resources."""
        restore_request = {
            "Server": {
                "ServerId": server_id,
                "CPUQuantity": cpu_qty,
                "RAMQuantity": ram_qty
            }
        }
        json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
        json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
        return json_obj['Success'] is True
|
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | CloudInterface.get_ip | python | def get_ip(self):
json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
self.iplist = IpList()
for ip in json_obj['Value']:
r = Ip()
r.ip_addr = ip['Value']
r.resid = ip['ResourceId']
r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
self.iplist.append(r) | Retrieve a complete list of bought ip address related only to PRO Servers.
It create an internal object (Iplist) representing all of the ips object
iterated form the WS.
@param: None
@return: None | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L325-L341 | [
"def gen_def_json_scheme(self, req, method_fields=None):\n \"\"\"\n Generate the scheme for the json request.\n :param req: String representing the name of the method to call\n :param method_fields: A dictionary containing the method-specified fields\n :rtype : json object representing the method call\n \"\"\"\n json_dict = dict(\n ApplicationId=req,\n RequestId=req,\n SessionId=req,\n Password=self.auth.password,\n Username=self.auth.username\n )\n if method_fields is not None:\n json_dict.update(method_fields)\n self.logger.debug(json.dumps(json_dict))\n return json.dumps(json_dict)\n",
"def call_method_post(self, method, json_scheme, debug=False):\n url = '{}/{}'.format(self.wcf_baseurl, method)\n headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))}\n response = Http.post(url=url, data=json_scheme, headers=headers)\n parsed_response = json.loads(response.content.decode('utf-8'))\n if response.status_code != 200:\n from ArubaCloud.base.Errors import MalformedJsonRequest\n raise MalformedJsonRequest(\"Request: {}, Status Code: {}\".format(json_scheme, response.status_code))\n if parsed_response['Success'] is False:\n from ArubaCloud.base.Errors import RequestFailed\n raise RequestFailed(\"Request: {}, Response: {}\".format(json_scheme, parsed_response))\n if debug is True:\n msg = \"Response Message: {}\\nHTTP Status Code: {}\".format(parsed_response, response.status_code)\n self.logger.debug(msg)\n print(msg)\n return parsed_response\n"
] | class CloudInterface(JsonInterface):
templates = []
vmlist = VMList()
iplist = IpList()
json_templates = None
json_servers = None
ip_resource = None
hypervisors = {3: "LC", 4: "SMART", 2: "VW", 1: "HV"}
def __init__(self, dc, debug_level=logging.INFO):
super(CloudInterface, self).__init__()
assert isinstance(dc, int), Exception('dc must be an integer and must be not null.')
self.wcf_baseurl = 'https://api.dc%s.computing.cloud.it/WsEndUser/v2.9/WsEndUser.svc/json' % (str(dc))
self.logger = ArubaLog(level=debug_level, log_to_file=False)
self.logger.name = self.__class__
self.auth = None
def login(self, username, password, load=True):
"""
Set the authentication data in the object, and if load is True
(default is True) it also retrieve the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: define if pre cache the objects.
@return: None
"""
self.auth = Auth(username, password)
if load is True:
self.get_ip()
self.get_servers()
def poweroff_server(self, server=None, server_id=None):
"""
Poweroff a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def poweron_server(self, server=None, server_id=None):
"""
Poweron a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power on,
server_id: Int or Str representing the ID of the VM to power on.
Returns:
return True if json_obj['Success'] is 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerStart', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerStart', json_scheme=json_scheme)
return True if json_obj['Success'] is 'True' else False
def get_hypervisors(self):
"""
Initialize the internal list containing each template available for each
hypervisor.
:return: [bool] True in case of success, otherwise False
"""
json_scheme = self.gen_def_json_scheme('GetHypervisors')
json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
self.json_templates = json_obj
d = dict(json_obj)
for elem in d['Value']:
hv = self.hypervisors[elem['HypervisorType']]
for inner_elem in elem['Templates']:
o = Template(hv)
o.template_id = inner_elem['Id']
o.descr = inner_elem['Description']
o.id_code = inner_elem['IdentificationCode']
o.name = inner_elem['Name']
o.enabled = inner_elem['Enabled']
if hv != 'SMART':
for rb in inner_elem['ResourceBounds']:
resource_type = rb['ResourceType']
if resource_type == 1:
o.resource_bounds.max_cpu = rb['Max']
if resource_type == 2:
o.resource_bounds.max_memory = rb['Max']
if resource_type == 3:
o.resource_bounds.hdd0 = rb['Max']
if resource_type == 7:
o.resource_bounds.hdd1 = rb['Max']
if resource_type == 8:
o.resource_bounds.hdd2 = rb['Max']
if resource_type == 9:
o.resource_bounds.hdd3 = rb['Max']
self.templates.append(o)
return True if json_obj['Success'] is 'True' else False
def get_servers(self):
"""
Create the list of Server object inside the Datacenter objects.
Build an internal list of VM Objects (pro or smart) as iterator.
:return: bool
"""
json_scheme = self.gen_def_json_scheme('GetServers')
json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
self.json_servers = json_obj
# if this method is called I assume that i must re-read the data
# so i reinitialize the vmlist
self.vmlist = VMList()
# getting all instanced IP in case the list is empty
if len(self.iplist) <= 0:
self.get_ip()
for elem in dict(json_obj)["Value"]:
if elem['HypervisorType'] is 4:
s = Smart(interface=self, sid=elem['ServerId'])
else:
s = Pro(interface=self, sid=elem['ServerId'])
s.vm_name = elem['Name']
s.cpu_qty = elem['CPUQuantity']
s.ram_qty = elem['RAMQuantity']
s.status = elem['ServerStatus']
s.datacenter_id = elem['DatacenterId']
s.wcf_baseurl = self.wcf_baseurl
s.auth = self.auth
s.hd_qty = elem['HDQuantity']
s.hd_total_size = elem['HDTotalSize']
if elem['HypervisorType'] is 4:
ssd = self.get_server_detail(elem['ServerId'])
try:
s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
except TypeError:
s.ip_addr = 'Not retrieved.'
else:
s.ip_addr = []
for ip in self.iplist:
if ip.serverid == s.sid:
s.ip_addr.append(ip)
self.vmlist.append(s)
return True if json_obj['Success'] is True else False
def find_template(self, name=None, hv=None):
"""
Return a list of templates that could have one or more elements.
Args:
name: name of the template to find.
hv: the ID of the hypervisor to search the template in
Returns:
A list of templates object. If hv is None will return all the
templates matching the name if every hypervisor type. Otherwise
if name is None will return all templates of an hypervisor.
Raises:
ValidationError: if name and hv are None
"""
if len(self.templates) <= 0:
self.get_hypervisors()
if name is not None and hv is not None:
template_list = filter(
lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates
)
elif name is not None and hv is None:
template_list = filter(
lambda x: name in x.descr, self.templates
)
elif name is None and hv is not None:
template_list = filter(
lambda x: x.hypervisor == self.hypervisors[hv], self.templates
)
else:
raise Exception('Error, no pattern defined')
if sys.version_info.major < (3):
return template_list
else:
return(list(template_list))
def get_vm(self, pattern=None):
if len(self.vmlist) <= 0:
self.get_servers()
if pattern is None:
return self.vmlist
else:
return self.vmlist.find(pattern)
def get_ip_by_vm(self, vm):
self.get_ip() # call get ip list to create the internal list of IPs.
vm_id = self.get_vm(vm)[0].sid
for ip in self.iplist:
if ip.serverid == vm_id:
return ip
return 'IPNOTFOUND'
def purchase_ip(self, debug=False):
"""
Return an ip object representing a new bought IP
@param debug [Boolean] if true, request and response will be printed
@return (Ip): Ip object
"""
json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
try:
ip = Ip()
ip.ip_addr = json_obj['Value']['Value']
ip.resid = json_obj['Value']['ResourceId']
return ip
except:
raise Exception('Unknown error retrieving IP.')
def purchase_vlan(self, vlan_name, debug=False):
"""
Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created
"""
vlan_name = {'VLanName': vlan_name}
json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
if debug is True:
self.logger.debug(json_obj)
if json_obj['Success'] is False:
raise Exception("Cannot purchase new vlan.")
vlan = Vlan()
vlan.name = json_obj['Value']['Name']
vlan.resource_id = json_obj['Value']['ResourceId']
vlan.vlan_code = json_obj['Value']['VlanCode']
return vlan
def remove_vlan(self, vlan_resource_id):
"""
Remove a VLAN
:param vlan_resource_id:
:return:
"""
vlan_id = {'VLanResourceId': vlan_resource_id}
json_scheme = self.gen_def_json_scheme('SetRemoveVLan', vlan_id)
json_obj = self.call_method_post(method='SetRemoveVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def get_vlan(self, vlan_name=None):
json_scheme = self.gen_def_json_scheme('GetPurchasedVLans')
json_obj = self.call_method_post(method='GetPurchasedVLans', json_scheme=json_scheme)
if vlan_name is not None:
raw_vlans = filter(lambda x: vlan_name in x['Name'], json_obj['Value'])
else:
raw_vlans = json_obj['Value']
vlans = []
for raw_vlan in raw_vlans:
v = Vlan()
v.name = raw_vlan['Name']
v.vlan_code = raw_vlan['VlanCode']
v.resource_id = raw_vlan['ResourceId']
vlans.append(v)
return vlans
def remove_ip(self, ip_id):
"""
Delete an Ip from the boughs ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
"""
ip_id = ' "IpAddressResourceId": %s' % ip_id
json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
pprint(json_obj)
return True if json_obj['Success'] is True else False
def get_package_id(self, name):
"""
Retrieve the smart package id given is English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size choosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId
def delete_vm(self, server=None, server_id=None):
self.logger.debug('%s: Deleting: %s' % (self.__class__.__name__, server))
sid = server_id if server_id is not None else server.sid
self.logger.debug('%s: Deleting SID: %s' % (self.__class__.__name__, sid))
if sid is None:
raise Exception('NoServerSpecified')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerDeletion', dict(ServerId=sid))
json_obj = self.call_method_post(method='SetEnqueueServerDeletion', json_scheme=json_scheme)
print('Deletion enqueued successfully for server_id: %s' % sid)
return True if json_obj['Success'] is 'True' else False
def get_jobs(self):
json_scheme = self.gen_def_json_scheme('GetJobs')
return self.call_method_post(method='GetJobs', json_scheme=json_scheme)
def find_job(self, vm_name):
jobs_list = self.get_jobs()
if jobs_list['Value'] is None:
_i = 0
while jobs_list['Value'] is not None:
_i += 1
jobs_list = self.get_jobs()
if _i > 10:
return 'JOBNOTFOUND'
if len(jobs_list['Value']) <= 0:
return 'JOBNOTFOUND'
for job in jobs_list['Value']:
if vm_name in job['ServerName']:
return job
return 'JOBNOTFOUND'
def get_virtual_datacenter(self):
json_scheme = self.gen_def_json_scheme('GetVirtualDatacenter')
json_obj = self.call_method_post(method='GetVirtualDatacenter', json_scheme=json_scheme)
return json_obj
def get_server_detail(self, server_id):
json_scheme = self.gen_def_json_scheme('GetServerDetails', dict(ServerId=server_id))
json_obj = self.call_method_post(method='GetServerDetails', json_scheme=json_scheme)
return json_obj['Value']
def attach_vlan(self, network_adapter_id, vlan_resource_id, ip=None, subnet_mask=None, gateway=None):
if gateway is not None:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "true",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": gateway,
"IP": ip,
"SubNetMask": subnet_mask
}]
}
}
else:
additional_fields = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id,
"PrivateIps": [{
"GateWay": None,
"IP": None,
"SubNetMask": None
}]
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueAssociateVLan', method_fields=additional_fields)
json_obj = self.call_method_post(method='SetEnqueueAssociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def detach_vlan(self, network_adapter_id, vlan_resource_id):
vlan_request = {
"VLanRequest": {
"NetworkAdapterId": network_adapter_id,
"SetOnVirtualMachine": "false",
"VLanResourceId": vlan_resource_id
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueDeassociateVLan', method_fields=vlan_request)
json_obj = self.call_method_post(method='SetEnqueueDeassociateVLan', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def create_snapshot(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Create"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Restore"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def delete_snapshot(self, server_id=None):
snapshot_request = {
"Snapshot": {
"ServerId": server_id,
"SnapshotOperationTypes": "Delete"
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerSnapshot', method_fields=snapshot_request)
json_obj = self.call_method_post(method='SetEnqueueServerSnapshot', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def archive_vm(self, dc, server_id=None):
sid = CloudInterface(dc).get_server_detail(server_id)
if sid['HypervisorType'] is not 4:
archive_request = {
"ArchiveVirtualServer": {
"ServerId": server_id
}
}
json_scheme = self.gen_def_json_scheme('ArchiveVirtualServer', method_fields=archive_request)
json_obj = self.call_method_post(method='ArchiveVirtualServer', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
def restore_vm(self, server_id=None, cpu_qty=None, ram_qty=None):
restore_request = {
"Server": {
"ServerId": server_id,
"CPUQuantity": cpu_qty,
"RAMQuantity": ram_qty
}
}
json_scheme = self.gen_def_json_scheme('SetEnqueueServerRestore', method_fields=restore_request)
json_obj = self.call_method_post(method='SetEnqueueServerRestore', json_scheme=json_scheme)
return True if json_obj['Success'] is True else False
|
Arubacloud/pyArubaCloud | ArubaCloud/base/__init__.py | JsonInterfaceBase.gen_def_json_scheme | python | def gen_def_json_scheme(self, req, method_fields=None):
json_dict = dict(
ApplicationId=req,
RequestId=req,
SessionId=req,
Password=self.auth.password,
Username=self.auth.username
)
if method_fields is not None:
json_dict.update(method_fields)
self.logger.debug(json.dumps(json_dict))
return json.dumps(json_dict) | Generate the scheme for the json request.
:param req: String representing the name of the method to call
:param method_fields: A dictionary containing the method-specified fields
:rtype : json object representing the method call | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/base/__init__.py#L14-L31 | null | class JsonInterfaceBase(object):
__metaclass__ = ABCMeta
def __init__(self):
pass
def call_method_post(self, method, json_scheme, debug=False):
url = '{}/{}'.format(self.wcf_baseurl, method)
headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))}
response = Http.post(url=url, data=json_scheme, headers=headers)
parsed_response = json.loads(response.content.decode('utf-8'))
if response.status_code != 200:
from ArubaCloud.base.Errors import MalformedJsonRequest
raise MalformedJsonRequest("Request: {}, Status Code: {}".format(json_scheme, response.status_code))
if parsed_response['Success'] is False:
from ArubaCloud.base.Errors import RequestFailed
raise RequestFailed("Request: {}, Response: {}".format(json_scheme, parsed_response))
if debug is True:
msg = "Response Message: {}\nHTTP Status Code: {}".format(parsed_response, response.status_code)
self.logger.debug(msg)
print(msg)
return parsed_response
|
Arubacloud/pyArubaCloud | ArubaCloud/base/__init__.py | Request._commit | python | def _commit(self):
assert self.uri is not None, Exception("BadArgument: uri property cannot be None")
url = '{}/{}'.format(self.uri, self.__class__.__name__)
serialized_json = jsonpickle.encode(self, unpicklable=False, )
headers = {'Content-Type': 'application/json', 'Content-Length': str(len(serialized_json))}
response = Http.post(url=url, data=serialized_json, headers=headers)
if response.status_code != 200:
from ArubaCloud.base.Errors import MalformedJsonRequest
raise MalformedJsonRequest("Request: {}, Status Code: {}".format(serialized_json, response.status_code))
content = jsonpickle.decode(response.content.decode("utf-8"))
if content['ResultCode'] == 17:
from ArubaCloud.base.Errors import OperationAlreadyEnqueued
raise OperationAlreadyEnqueued("{} already enqueued".format(self.__class__.__name__))
if content['Success'] is False:
from ArubaCloud.base.Errors import RequestFailed
raise RequestFailed("Request: {}, Response: {}".format(serialized_json, response.content))
return content | :return: (dict) Response object content | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/base/__init__.py#L95-L114 | [
"def post(url, data=None, json=None, logger=None, **kwargs):\n if logger is not None:\n Http._log_request(logger, data=data, headers=kwargs.get('headers', None))\n response = requests.post(url, data=data, json=json, **kwargs)\n Http._log_response(logger, response)\n return response\n"
] | class Request(IRequest):
def __init__(self, logger=None, Username=str(), Password=str(), SessionId=None, ApplicationId=None, RequestId=None,
uri=None):
"""
:type logger: ArubaLog
:type Username: str
:type Password: str
:type SessionId: str
:type ApplicationId: str
:type RequestId: str
:type uri: str
:param logger: Logger object
:param Username: ArubaCloud Service Login Username
:param Password: ArubaCloud Service Login Password
:param SessionId: Can be Null, otherwise the current SessionId
:param ApplicationId: Same as RequestId
:param RequestId: The name of the Request
:param uri: WCF base URI
"""
super(Request, self).__init__()
self.logger = logger
self.Username = Username
self.Password = Password
self.SessionId = SessionId if SessionId is not None else self.__class__.__name__
self.ApplicationId = ApplicationId if ApplicationId is not None else self.__class__.__name__
self.RequestId = RequestId if RequestId is not None else self.__class__.__name__
self.uri = uri
@abstractmethod
def commit(self):
raise NotImplementedError("commit method must be implemented in the real request implementation class")
def __getstate__(self):
"""
Internal method to remove non serializable object before the object serialization
:return: (Request) A copy of the state of the object after removing unwanted fields
"""
state = self.__dict__.copy()
del state['logger']
del state['uri']
return state
def __setstate__(self, state):
self.__dict__.update(state)
|
Arubacloud/pyArubaCloud | ArubaCloud/SharedStorage/SharedStorage.py | SharedStorage.get | python | def get(self):
request = self._call(GetSharedStorages)
response = request.commit()
return response['Value'] | Retrieve the current configured SharedStorages entries
:return: [list] List containing the current SharedStorages entries | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/SharedStorage/SharedStorage.py#L13-L20 | [
"def _call(self, method, *args, **kwargs):\n return method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)\n"
] | class SharedStorage(ArubaCloudService):
def __init__(self, ws_uri, username, password):
super(SharedStorage, self).__init__(ws_uri, username, password)
def _call(self, method, *args, **kwargs):
return method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)
def purchase_iscsi(self, quantity, iqn, name, protocol=SharedStorageProtocolType.ISCSI):
"""
:type quantity: int
:type iqn: list[str]
:type name: str
:type protocol: SharedStorageProtocols
:param quantity: Amount of GB
:param iqn: List of IQN represented in string format
:param name: Name of the resource
:param protocol: Protocol to use
:return:
"""
iqns = []
for _iqn in iqn:
iqns.append(SharedStorageIQN(Value=_iqn))
request = self._call(SetEnqueuePurchaseSharedStorage, Quantity=quantity, SharedStorageName=name,
SharedStorageIQNs=iqns, SharedStorageProtocolType=protocol)
response = request.commit()
return response['Value']
|
Arubacloud/pyArubaCloud | ArubaCloud/SharedStorage/SharedStorage.py | SharedStorage.purchase_iscsi | python | def purchase_iscsi(self, quantity, iqn, name, protocol=SharedStorageProtocolType.ISCSI):
iqns = []
for _iqn in iqn:
iqns.append(SharedStorageIQN(Value=_iqn))
request = self._call(SetEnqueuePurchaseSharedStorage, Quantity=quantity, SharedStorageName=name,
SharedStorageIQNs=iqns, SharedStorageProtocolType=protocol)
response = request.commit()
return response['Value'] | :type quantity: int
:type iqn: list[str]
:type name: str
:type protocol: SharedStorageProtocols
:param quantity: Amount of GB
:param iqn: List of IQN represented in string format
:param name: Name of the resource
:param protocol: Protocol to use
:return: | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/SharedStorage/SharedStorage.py#L22-L40 | [
"def _call(self, method, *args, **kwargs):\n return method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)\n"
] | class SharedStorage(ArubaCloudService):
def __init__(self, ws_uri, username, password):
super(SharedStorage, self).__init__(ws_uri, username, password)
def _call(self, method, *args, **kwargs):
return method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)
def get(self):
"""
Retrieve the current configured SharedStorages entries
:return: [list] List containing the current SharedStorages entries
"""
request = self._call(GetSharedStorages)
response = request.commit()
return response['Value']
|
Arubacloud/pyArubaCloud | ArubaCloud/ReverseDns/ReverseDns.py | ReverseDns.get | python | def get(self, addresses):
request = self._call(GetReverseDns.GetReverseDns, IPs=addresses)
response = request.commit()
return response['Value'] | :type addresses: list[str]
:param addresses: (list[str]) List of addresses to retrieve their reverse dns
Retrieve the current configured ReverseDns entries
:return: (list) List containing the current ReverseDns Addresses | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/ReverseDns/ReverseDns.py#L12-L21 | [
"def commit(self):\n return self._commit()\n",
"def _call(self, method, *args, **kwargs):\n return method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)\n"
] | class ReverseDns(ArubaCloudService):
def __init__(self, ws_uri, username, password):
super(ReverseDns, self).__init__(ws_uri, username, password)
def _call(self, method, *args, **kwargs):
return method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)
def set(self, address, host_name):
"""
Assign one or more PTR record to a single IP Address
:type address: str
:type host_name: list[str]
:param address: (str) The IP address to configure
:param host_name: (list[str]) The list of strings representing PTR records
:return: (bool) True in case of success, False in case of failure
"""
request = self._call(SetEnqueueSetReverseDns.SetEnqueueSetReverseDns, IP=address, Hosts=host_name)
response = request.commit()
return response['Success']
def reset(self, addresses):
"""
Remove all PTR records from the given address
:type addresses: List[str]
:param addresses: (List[str]) The IP Address to reset
:return: (bool) True in case of success, False in case of failure
"""
request = self._call(SetEnqueueResetReverseDns.SetEnqueueResetReverseDns, IPs=addresses)
response = request.commit()
return response['Success']
|
Arubacloud/pyArubaCloud | ArubaCloud/ReverseDns/ReverseDns.py | ReverseDns.set | python | def set(self, address, host_name):
request = self._call(SetEnqueueSetReverseDns.SetEnqueueSetReverseDns, IP=address, Hosts=host_name)
response = request.commit()
return response['Success'] | Assign one or more PTR record to a single IP Address
:type address: str
:type host_name: list[str]
:param address: (str) The IP address to configure
:param host_name: (list[str]) The list of strings representing PTR records
:return: (bool) True in case of success, False in case of failure | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/ReverseDns/ReverseDns.py#L23-L34 | [
"def _call(self, method, *args, **kwargs):\n return method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)\n",
"def commit(self):\n return self._commit()\n"
] | class ReverseDns(ArubaCloudService):
def __init__(self, ws_uri, username, password):
super(ReverseDns, self).__init__(ws_uri, username, password)
def _call(self, method, *args, **kwargs):
return method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)
def get(self, addresses):
"""
:type addresses: list[str]
:param addresses: (list[str]) List of addresses to retrieve their reverse dns
Retrieve the current configured ReverseDns entries
:return: (list) List containing the current ReverseDns Addresses
"""
request = self._call(GetReverseDns.GetReverseDns, IPs=addresses)
response = request.commit()
return response['Value']
def reset(self, addresses):
"""
Remove all PTR records from the given address
:type addresses: List[str]
:param addresses: (List[str]) The IP Address to reset
:return: (bool) True in case of success, False in case of failure
"""
request = self._call(SetEnqueueResetReverseDns.SetEnqueueResetReverseDns, IPs=addresses)
response = request.commit()
return response['Success']
|
Arubacloud/pyArubaCloud | ArubaCloud/ReverseDns/ReverseDns.py | ReverseDns.reset | python | def reset(self, addresses):
request = self._call(SetEnqueueResetReverseDns.SetEnqueueResetReverseDns, IPs=addresses)
response = request.commit()
return response['Success'] | Remove all PTR records from the given address
:type addresses: List[str]
:param addresses: (List[str]) The IP Address to reset
:return: (bool) True in case of success, False in case of failure | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/ReverseDns/ReverseDns.py#L36-L45 | [
"def _call(self, method, *args, **kwargs):\n return method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)\n",
"def commit(self):\n return self._commit()\n"
] | class ReverseDns(ArubaCloudService):
def __init__(self, ws_uri, username, password):
super(ReverseDns, self).__init__(ws_uri, username, password)
def _call(self, method, *args, **kwargs):
return method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)
def get(self, addresses):
"""
:type addresses: list[str]
:param addresses: (list[str]) List of addresses to retrieve their reverse dns
Retrieve the current configured ReverseDns entries
:return: (list) List containing the current ReverseDns Addresses
"""
request = self._call(GetReverseDns.GetReverseDns, IPs=addresses)
response = request.commit()
return response['Value']
def set(self, address, host_name):
"""
Assign one or more PTR record to a single IP Address
:type address: str
:type host_name: list[str]
:param address: (str) The IP address to configure
:param host_name: (list[str]) The list of strings representing PTR records
:return: (bool) True in case of success, False in case of failure
"""
request = self._call(SetEnqueueSetReverseDns.SetEnqueueSetReverseDns, IP=address, Hosts=host_name)
response = request.commit()
return response['Success']
|
Arubacloud/pyArubaCloud | ArubaCloud/Compute/LoadBalancer/LoadBalancer.py | LoadBalancer.create | python | def create(self, healthCheckNotification, instance, ipAddressResourceId, name, notificationContacts, rules,
loadBalancerClassOfServiceID=1, *args, **kwargs):
response = self._call(method=SetEnqueueLoadBalancerCreation,
healthCheckNotification=healthCheckNotification,
instance=instance,
ipAddressResourceId=ipAddressResourceId,
name=name,
notificationContacts=notificationContacts,
rules=rules,
loadBalancerClassOfServiceID=loadBalancerClassOfServiceID,
*args, **kwargs) | :type healthCheckNotification: bool
:type instance: list[Instance]
:type ipAddressResourceId: list[int]
:type loadBalancerClassOfServiceID: int
:type name: str
:type notificationContacts: NotificationContacts or list[NotificationContact]
:type rules: Rules
:param healthCheckNotification: Enable or disable notifications
:param instance: List of balanced IP Addresses (VM or server)
:param ipAddressResourceId: ID of the IP Address resource of the Load Balancer
:param loadBalancerClassOfServiceID: default 1
:param name: Name of the Load Balancer
:param notificationContacts: Nullable if notificationContacts is false
:param rules: List of NewLoadBalancerRule object containing the list of rules to be configured with the service | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/Compute/LoadBalancer/LoadBalancer.py#L15-L41 | [
"def _call(self, method, *args, **kwargs):\n request = method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)\n response = request.commit()\n return response['Value']\n"
] | class LoadBalancer(ArubaCloudService):
def __init__(self, *args, **kwargs):
super(LoadBalancer, self).__init__(*args, **kwargs)
def _call(self, method, *args, **kwargs):
request = method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)
response = request.commit()
return response['Value']
def get(self):
"""
Get the current active and inactive Load Balancer within the Datacenter
:return: (list) List of each LoadBalancer present in the Datacenter
"""
return self._call(GetLoadBalancers)
def get_notifications(self, startDate, endDate, loadBalancerID, loadBalancerRuleID):
"""
Get the load balancer notifications for a specific rule within a specifying window time frame
:type startDate: datetime
:type endDate: datetime
:type loadBalancerID: int
:type loadBalancerRuleID: int
:param startDate: From Date
:param endDate: To Date
:param loadBalancerID: ID of the Laod Balancer
:param loadBalancerRuleID: ID of the Load Balancer Rule
"""
return self._call(GetLoadBalancerNotifications, startDate=startDate, endDate=endDate,
loadBalancerID=loadBalancerID, loadBalancerRuleID=loadBalancerRuleID)
def start(self, loadBalancerID):
"""
Start a Load Balancer instance
:type loadBalancerID: int
:param loadBalancerID: ID of the Load Balancer to start
:return:
"""
return self._call(SetEnqueueLoadBalancerStart, loadBalancerID=loadBalancerID)
def stop(self, loadBalancerID):
"""
Stop a Load Balancer instance
:type loadBalancerID: int
:param loadBalancerID: ID of the Load Balancer to stop
:return:
"""
return self._call(SetEnqueueLoadBalancerPowerOff, loadBalancerID=loadBalancerID)
def delete(self, loadBalancerID):
"""
Enqueue a Load Balancer Deletion action
:type loadBalancerID: int
:param loadBalancerID: ID of the Load Balancer to be deleted
:return:
"""
return self._call(SetEnqueueLoadBalancerDeletion, loadBalancerID=loadBalancerID)
|
Arubacloud/pyArubaCloud | ArubaCloud/Compute/LoadBalancer/LoadBalancer.py | LoadBalancer.get_notifications | python | def get_notifications(self, startDate, endDate, loadBalancerID, loadBalancerRuleID):
return self._call(GetLoadBalancerNotifications, startDate=startDate, endDate=endDate,
loadBalancerID=loadBalancerID, loadBalancerRuleID=loadBalancerRuleID) | Get the load balancer notifications for a specific rule within a specifying window time frame
:type startDate: datetime
:type endDate: datetime
:type loadBalancerID: int
:type loadBalancerRuleID: int
:param startDate: From Date
:param endDate: To Date
:param loadBalancerID: ID of the Laod Balancer
:param loadBalancerRuleID: ID of the Load Balancer Rule | train | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/Compute/LoadBalancer/LoadBalancer.py#L50-L63 | [
"def _call(self, method, *args, **kwargs):\n request = method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)\n response = request.commit()\n return response['Value']\n"
] | class LoadBalancer(ArubaCloudService):
def __init__(self, *args, **kwargs):
super(LoadBalancer, self).__init__(*args, **kwargs)
def _call(self, method, *args, **kwargs):
request = method(Username=self.username, Password=self.password, uri=self.ws_uri, *args, **kwargs)
response = request.commit()
return response['Value']
def create(self, healthCheckNotification, instance, ipAddressResourceId, name, notificationContacts, rules,
loadBalancerClassOfServiceID=1, *args, **kwargs):
"""
:type healthCheckNotification: bool
:type instance: list[Instance]
:type ipAddressResourceId: list[int]
:type loadBalancerClassOfServiceID: int
:type name: str
:type notificationContacts: NotificationContacts or list[NotificationContact]
:type rules: Rules
:param healthCheckNotification: Enable or disable notifications
:param instance: List of balanced IP Addresses (VM or server)
:param ipAddressResourceId: ID of the IP Address resource of the Load Balancer
:param loadBalancerClassOfServiceID: default 1
:param name: Name of the Load Balancer
:param notificationContacts: Nullable if notificationContacts is false
:param rules: List of NewLoadBalancerRule object containing the list of rules to be configured with the service
"""
response = self._call(method=SetEnqueueLoadBalancerCreation,
healthCheckNotification=healthCheckNotification,
instance=instance,
ipAddressResourceId=ipAddressResourceId,
name=name,
notificationContacts=notificationContacts,
rules=rules,
loadBalancerClassOfServiceID=loadBalancerClassOfServiceID,
*args, **kwargs)
def get(self):
"""
Get the current active and inactive Load Balancer within the Datacenter
:return: (list) List of each LoadBalancer present in the Datacenter
"""
return self._call(GetLoadBalancers)
def start(self, loadBalancerID):
"""
Start a Load Balancer instance
:type loadBalancerID: int
:param loadBalancerID: ID of the Load Balancer to start
:return:
"""
return self._call(SetEnqueueLoadBalancerStart, loadBalancerID=loadBalancerID)
def stop(self, loadBalancerID):
"""
Stop a Load Balancer instance
:type loadBalancerID: int
:param loadBalancerID: ID of the Load Balancer to stop
:return:
"""
return self._call(SetEnqueueLoadBalancerPowerOff, loadBalancerID=loadBalancerID)
def delete(self, loadBalancerID):
"""
Enqueue a Load Balancer Deletion action
:type loadBalancerID: int
:param loadBalancerID: ID of the Load Balancer to be deleted
:return:
"""
return self._call(SetEnqueueLoadBalancerDeletion, loadBalancerID=loadBalancerID)
|
wroberts/pygermanet | pygermanet/germanet.py | load_germanet | python | def load_germanet(host = None, port = None, database_name = 'germanet'):
'''
Loads a GermaNet instance connected to the given MongoDB instance.
Arguments:
- `host`: the hostname of the MongoDB instance
- `port`: the port number of the MongoDB instance
- `database_name`: the name of the GermaNet database on the
MongoDB instance
'''
client = MongoClient(host, port)
germanet_db = client[database_name]
return GermaNet(germanet_db) | Loads a GermaNet instance connected to the given MongoDB instance.
Arguments:
- `host`: the hostname of the MongoDB instance
- `port`: the port number of the MongoDB instance
- `database_name`: the name of the GermaNet database on the
MongoDB instance | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L664-L676 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
germanet.py
(c) Will Roberts 21 March, 2014
GermaNet interface.
'''
from __future__ import division
from builtins import dict, int
from functools import reduce
from pymongo import MongoClient
import functools
import math
import sys
try:
import repoze.lru
except ImportError:
pass
LONG_POS_TO_SHORT = {
'verben': 'v',
'nomen': 'n',
'adj': 'j',
}
SHORT_POS_TO_LONG = dict((v, k) for (k, v) in LONG_POS_TO_SHORT.items())
DEFAULT_CACHE_SIZE = 100
GERMANET_METAINFO_IGNORE_KEYS = set(['_id'])
class GermaNet(object):
'''A class representing the GermaNet database.'''
def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
'''
Creates a new GermaNet object.
Arguments:
- `mongo_db`: a pymongo.database.Database object containing
the GermaNet lexicon
'''
self._mongo_db = mongo_db
self._lemma_cache = None
self._synset_cache = None
self.max_min_depths = {}
try:
self.__dict__.update((k, v) for (k, v)
in self._mongo_db.metainfo.find_one().items()
if k not in GERMANET_METAINFO_IGNORE_KEYS)
except AttributeError:
# ignore error generated if metainfo is not included in
# the mongo DB
pass
try:
self._lemma_cache = repoze.lru.LRUCache(cache_size)
self._synset_cache = repoze.lru.LRUCache(cache_size)
except NameError:
pass
@property
def cache_size(self):
'''
Return the current cache size used to reduce the number of
database access operations.
'''
if self._lemma_cache is not None:
return self._lemma_cache.size
return 0
@cache_size.setter
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value)
def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict)
def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict)
def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos)))
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset
def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma
def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word]
# rename some of the fields in the MongoDB dictionary
SYNSET_MEMBER_REWRITES = {
'lexunits': '_lexunits',
'rels': '_rels',
}
@functools.total_ordering
class Synset(object):
'''A class representing a synset in GermaNet.'''
def __init__(self, germanet, db_dict):
'''
Creates a new Synset object from a BSON dictionary retrieved
from MongoDB.
Arguments:
- `germanet`: a GermaNet object
- `db_dict`:
'''
self._germanet = germanet
self._id = None
self._rels = []
self.category = None
self.gn_class = None
self.id = None
self.infocont = 0.
self._lexunits = None
self.__dict__.update((SYNSET_MEMBER_REWRITES.get(k, k), v)
for (k, v) in db_dict.items())
@property
def lemmas(self):
'''
Returns the list of Lemma objects contained in this Synset.
'''
return [self._germanet.get_lemma_by_id(lemma)
for lemma in self._lexunits]
@property
def pos(self):
'''
Returns the part of speech of this Synset as a single
character. Nouns are represented by 'n', verbs by 'v', and
adjectives by 'j'.
'''
return LONG_POS_TO_SHORT[self.category]
def rels(self, rel_name = None):
'''
Returns a list of lexical relations in this Synset. If
`rel_name` is specified, returns a list of Synsets which are
reachable from this one by relations with the given name. If
`rel_name` is not specified, returns a list of all the lexical
relations of this Synset, as tuples of (rel_name, synset).
Arguments:
- `rel_name`:
'''
if rel_name is not None:
return [self._germanet.get_synset_by_id(mongo_id)
for (name, mongo_id) in self._rels if name == rel_name]
else:
return [(name, self._germanet.get_synset_by_id(mongo_id))
for (name, mongo_id) in self._rels]
@property
def causes(self): return self.rels('causes')
@property
def entails(self): return self.rels('entails')
@property
def component_holonyms(self): return self.rels('has_component_holonym')
@property
def component_meronyms(self): return self.rels('has_component_meronym')
@property
def hypernyms(self): return self.rels('has_hypernym')
@property
def hyponyms(self): return self.rels('has_hyponym')
@property
def member_holonyms(self): return self.rels('has_member_holonym')
@property
def member_meronyms(self): return self.rels('has_member_meronym')
@property
def portion_holonyms(self): return self.rels('has_portion_holonym')
@property
def portion_meronyms(self): return self.rels('has_portion_meronym')
@property
def substance_holonyms(self): return self.rels('has_substance_holonym')
@property
def substance_meronyms(self): return self.rels('has_substance_meronym')
@property
def entailed_bys(self): return self.rels('is_entailed_by')
@property
def related_tos(self): return self.rels('is_related_to')
@property
def hypernym_paths(self):
'''
Returns a list of paths following hypernym links from this
synset to the GermaNet root node.
'''
hypernyms = self.hypernyms
if hypernyms:
return reduce(list.__add__, [[path + [self]
for path in hypernym.hypernym_paths]
for hypernym in hypernyms], [])
else:
return [[self]]
@property
def hypernym_distances(self):
'''
Returns a list of synsets on the path from this synset to the root
node, counting the distance of each node on the way.
'''
retval = dict()
for (synset, dist) in reduce(
set.union,
[[(synset, idx) for (idx, synset) in enumerate(reversed(path))]
for path in self.hypernym_paths],
set()):
if synset not in retval or dist < retval[synset]:
retval[synset] = dist
return set(retval.items())
@property
def root_hypernyms(self):
'''
Get the topmost hypernym(s) of this synset in GermaNet.
Mostly GNROOT.n.1
'''
return sorted(set([path[0] for path in self.hypernym_paths]))
@property
def max_depth(self):
'''
The length of the longest hypernym path from this synset to
the root.
'''
return max([len(path) for path in self.hypernym_paths])
@property
def min_depth(self):
'''
The length of the shortest hypernym path from this synset to
the root.
'''
return min([len(path) for path in self.hypernym_paths])
def __repr__(self):
reprstr = u'Synset({0}.{1}.{2})'.format(
self.lemmas[0].orthForm,
self.pos,
self.lemmas[0].sense)
if sys.version_info.major < 3:
return reprstr.encode('utf-8')
return reprstr
def __hash__(self):
return hash(self._id)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._id == other._id
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, self.__class__):
return ((self.lemmas[0].orthForm, self.pos, self.lemmas[0].sense) <
(other.lemmas[0].orthForm, other.pos,
other.lemmas[0].sense))
else:
return False
def _common_hypernyms(self, other):
'''Helper method for common_hypernyms.'''
if not isinstance(other, Synset):
return dict()
self_dists = dict(self.hypernym_distances)
other_dists = dict(other.hypernym_distances)
common = dict((synset, 0) for synset in (set(self_dists) &
set(other_dists)))
# update the distance values
for synset in common:
common[synset] = self_dists[synset] + other_dists[synset]
return common
def common_hypernyms(self, other):
'''
Finds the set of hypernyms common to both this synset and
``other``.
Arguments:
- `other`: another synset
'''
return set(synset for (synset, dist) in
self._common_hypernyms(other).items())
def lowest_common_hypernyms(self, other):
'''
Finds the set of hypernyms common to both this synset and
``other`` which are lowest in the GermaNet hierarchy (furthest
away from GNROOT).
Arguments:
- `other`: another synset
'''
if not isinstance(other, Synset):
return set()
self_hypers = set(synset for path in self.hypernym_paths
for synset in path)
other_hypers = set(synset for path in other.hypernym_paths
for synset in path)
common_hypers = self_hypers & other_hypers
common_hypers = [(synset.min_depth, synset)
for synset in common_hypers]
if not common_hypers:
return set()
max_depth = max(x[0] for x in common_hypers)
return set(synset for (depth, synset) in common_hypers
if depth == max_depth)
def nearest_common_hypernyms(self, other):
'''
Finds the set of hypernyms common to both this synset and
``other`` which are closest to the two synsets (the hypernyms
which the minimum path length joining the two synsets passes
through).
Arguments:
- `other`: another synset
'''
common_hypers = [(dist, synset) for (synset, dist) in
list(self._common_hypernyms(other).items())]
if not common_hypers:
return set()
min_dist = min(x[0] for x in common_hypers)
return set(synset for (dist, synset) in common_hypers
if dist == min_dist)
def shortest_path_length(self, other):
'''
Returns the length of the shortest path linking this synset with
``other`` via a common hypernym. If no path exists, the
method returns None.
Arguments:
- `other`:
'''
if self == other:
return 0
common_hypers = self._common_hypernyms(other)
if not common_hypers:
return None
return min(common_hypers.values())
# --------------------------------------------------
# Semantic similarity
# --------------------------------------------------
def sim_lch(self, other):
'''
Computes the Leacock-Chodorow similarity score between this synset
and the synset ``other``.
Arguments:
- `other`:
'''
if not isinstance(other, Synset):
return 0.
if self.category != other.category:
return 0.
path_length = self.shortest_path_length(other)
if path_length is None:
return 0.
return -math.log(
(path_length + 1) /
(2. * self._germanet.max_min_depths[self.category]))
def sim_res(self, other):
'''
Computes the Resnik similarity score between this synset and the
synset ``other``.
Arguments:
- `other`:
'''
if not isinstance(other, Synset):
return 0.
# find the lowest concept which subsumes both this synset and
# ``other``;
#common_hypers = self.lowest_common_hypernyms(other)
# specifically, we choose the hypernym "closest" to this
# synset and ``other``, not the hypernym which is furthest
# away from GNROOT (as is done by lowest_common_hypernyms)
common_hypers = self.nearest_common_hypernyms(other)
if not common_hypers:
return 0.
# infocont is actually the probability
infoconts = [synset.infocont for synset in common_hypers]
# filter out zero counts
infoconts = [x for x in infoconts if x != 0]
if not infoconts:
return 0.
# we take the lowest probability subsumer
least_prob = min(infoconts)
# information content is the negative log
return -math.log(least_prob)
def dist_jcn(self, other):
'''
Computes the Jiang-Conrath semantic distance between this synset
and the synset ``other``.
Arguments:
- `other`:
'''
ic1 = self.infocont
ic2 = other.infocont
if ic1 == 0 or ic2 == 0:
return 0.
ic1 = -math.log(ic1)
ic2 = -math.log(ic2)
ic_lcs = self.sim_res(other)
return ic1 + ic2 - 2. * ic_lcs
def sim_lin(self, other):
'''
Computes the Lin similarity score between this synset and the
synset ``other``.
Arguments:
- `other`:
'''
ic1 = self.infocont
ic2 = other.infocont
if ic1 == 0 or ic2 == 0:
return 0.
ic1 = -math.log(ic1)
ic2 = -math.log(ic2)
ic_lcs = self.sim_res(other)
return 2. * ic_lcs / (ic1 + ic2)
# rename some of the fields in the MongoDB dictionary
LEMMA_MEMBER_REWRITES = {
'synset': '_synset',
'rels': '_rels',
}
@functools.total_ordering
class Lemma(object):
'''A class representing a lexical unit in GermaNet.'''
def __init__(self, germanet, db_dict):
'''
Creates a new Lemma object from a BSON dictionary retrieved
from MongoDB.
Arguments:
- `germanet`: a GermaNet object
- `db_dict`:
'''
self._germanet = germanet
self._id = None
self._rels = []
self.artificial = None
self.category = None
self.examples = None
self.frames = None
self.id = None
self.namedEntity = None
self.oldOrthForm = None
self.oldOrthVar = None
self.orthForm = None
self.orthVar = None
self.paraphrases = []
self.sense = None
self.source = None
self.styleMarking = None
self._synset = None
self.__dict__.update((LEMMA_MEMBER_REWRITES.get(k, k), v)
for (k, v) in db_dict.items())
@property
def synset(self):
'''Returns the Synset that this Lemma is contained in.'''
return self._germanet.get_synset_by_id(self._synset)
@property
def pos(self):
'''
Returns the part of speech of this Lemma as a single
character. Nouns are represented by 'n', verbs by 'v', and
adjectives by 'j'.
'''
return LONG_POS_TO_SHORT[self.category]
def rels(self, rel_name = None):
'''
Returns a list of lexical relations in this Lemma. If
`rel_name` is specified, returns a list of Lemmas which are
reachable from this one by relations with the given name. If
`rel_name` is not specified, returns a list of all the lexical
relations of this Lemma, as tuples of (rel_name, lemma).
Arguments:
- `rel_name`:
'''
if rel_name is not None:
return [self._germanet.get_lemma_by_id(mongo_id)
for (name, mongo_id) in self._rels if name == rel_name]
else:
return [(name, self._germanet.get_lemma_by_id(mongo_id))
for (name, mongo_id) in self._rels]
@property
def antonyms(self): return self.rels('has_antonym')
@property
def participles(self): return self.rels('has_participle')
@property
def pertainyms(self): return self.rels('has_pertainym')
def __repr__(self):
reprstr = u'Lemma({0}.{1}.{2}.{3})'.format(
self.synset.lemmas[0].orthForm,
self.synset.pos,
self.synset.lemmas[0].sense,
self.orthForm)
if sys.version_info.major < 3:
return reprstr.encode('utf-8')
return reprstr
def __hash__(self):
return hash(self._id)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._id == other._id
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, self.__class__):
return ((self.orthForm, self.pos, self.sense) <
(other.orthForm, other.pos, other.sense))
else:
return False
|
wroberts/pygermanet | pygermanet/germanet.py | GermaNet.cache_size | python | def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value) | Set the cache size used to reduce the number of database
access operations. | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L75-L83 | null | class GermaNet(object):
'''A class representing the GermaNet database.'''
def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
'''
Creates a new GermaNet object.
Arguments:
- `mongo_db`: a pymongo.database.Database object containing
the GermaNet lexicon
'''
self._mongo_db = mongo_db
self._lemma_cache = None
self._synset_cache = None
self.max_min_depths = {}
try:
self.__dict__.update((k, v) for (k, v)
in self._mongo_db.metainfo.find_one().items()
if k not in GERMANET_METAINFO_IGNORE_KEYS)
except AttributeError:
# ignore error generated if metainfo is not included in
# the mongo DB
pass
try:
self._lemma_cache = repoze.lru.LRUCache(cache_size)
self._synset_cache = repoze.lru.LRUCache(cache_size)
except NameError:
pass
@property
def cache_size(self):
'''
Return the current cache size used to reduce the number of
database access operations.
'''
if self._lemma_cache is not None:
return self._lemma_cache.size
return 0
@cache_size.setter
def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict)
def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict)
def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos)))
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset
def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma
def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word]
|
wroberts/pygermanet | pygermanet/germanet.py | GermaNet.all_lemmas | python | def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict) | A generator over all the lemmas in the GermaNet database. | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L85-L90 | null | class GermaNet(object):
'''A class representing the GermaNet database.'''
def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
'''
Creates a new GermaNet object.
Arguments:
- `mongo_db`: a pymongo.database.Database object containing
the GermaNet lexicon
'''
self._mongo_db = mongo_db
self._lemma_cache = None
self._synset_cache = None
self.max_min_depths = {}
try:
self.__dict__.update((k, v) for (k, v)
in self._mongo_db.metainfo.find_one().items()
if k not in GERMANET_METAINFO_IGNORE_KEYS)
except AttributeError:
# ignore error generated if metainfo is not included in
# the mongo DB
pass
try:
self._lemma_cache = repoze.lru.LRUCache(cache_size)
self._synset_cache = repoze.lru.LRUCache(cache_size)
except NameError:
pass
@property
def cache_size(self):
'''
Return the current cache size used to reduce the number of
database access operations.
'''
if self._lemma_cache is not None:
return self._lemma_cache.size
return 0
@cache_size.setter
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value)
def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict)
def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos)))
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset
def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma
def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word]
|
wroberts/pygermanet | pygermanet/germanet.py | GermaNet.lemmas | python | def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts]) | Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`: | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L92-L108 | null | class GermaNet(object):
'''A class representing the GermaNet database.'''
    def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
        '''
        Creates a new GermaNet object.

        Arguments:
        - `mongo_db`: a pymongo.database.Database object containing
          the GermaNet lexicon
        - `cache_size`: number of entries to retain in each LRU cache
          (defaults to DEFAULT_CACHE_SIZE); caching is only active when
          the optional repoze.lru package is importable
        '''
        self._mongo_db = mongo_db
        # Caches stay None when repoze.lru is unavailable (see the
        # NameError handler below).
        self._lemma_cache = None
        self._synset_cache = None
        self.max_min_depths = {}
        try:
            # Copy the metainfo document's key/value pairs onto this
            # object, skipping keys in GERMANET_METAINFO_IGNORE_KEYS.
            self.__dict__.update((k, v) for (k, v)
                                 in self._mongo_db.metainfo.find_one().items()
                                 if k not in GERMANET_METAINFO_IGNORE_KEYS)
        except AttributeError:
            # ignore error generated if metainfo is not included in
            # the mongo DB (find_one() returned None)
            pass
        try:
            self._lemma_cache = repoze.lru.LRUCache(cache_size)
            self._synset_cache = repoze.lru.LRUCache(cache_size)
        except NameError:
            # repoze.lru is an optional dependency; without it the
            # caches remain disabled (None).
            pass
@property
def cache_size(self):
'''
Return the current cache size used to reduce the number of
database access operations.
'''
if self._lemma_cache is not None:
return self._lemma_cache.size
return 0
@cache_size.setter
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value)
def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict)
def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict)
def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos)))
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset
def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma
def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word]
|
wroberts/pygermanet | pygermanet/germanet.py | GermaNet.all_synsets | python | def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict) | A generator over all the synsets in the GermaNet database. | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L110-L115 | null | class GermaNet(object):
'''A class representing the GermaNet database.'''
def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
'''
Creates a new GermaNet object.
Arguments:
- `mongo_db`: a pymongo.database.Database object containing
the GermaNet lexicon
'''
self._mongo_db = mongo_db
self._lemma_cache = None
self._synset_cache = None
self.max_min_depths = {}
try:
self.__dict__.update((k, v) for (k, v)
in self._mongo_db.metainfo.find_one().items()
if k not in GERMANET_METAINFO_IGNORE_KEYS)
except AttributeError:
# ignore error generated if metainfo is not included in
# the mongo DB
pass
try:
self._lemma_cache = repoze.lru.LRUCache(cache_size)
self._synset_cache = repoze.lru.LRUCache(cache_size)
except NameError:
pass
@property
def cache_size(self):
'''
Return the current cache size used to reduce the number of
database access operations.
'''
if self._lemma_cache is not None:
return self._lemma_cache.size
return 0
@cache_size.setter
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value)
def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict)
def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos)))
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset
def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma
def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word]
|
wroberts/pygermanet | pygermanet/germanet.py | GermaNet.synsets | python | def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos))) | Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`: | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L117-L126 | [
"def lemmas(self, lemma, pos = None):\n '''\n Looks up lemmas in the GermaNet database.\n\n Arguments:\n - `lemma`:\n - `pos`:\n '''\n if pos is not None:\n if pos not in SHORT_POS_TO_LONG:\n return None\n pos = SHORT_POS_TO_LONG[pos]\n lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,\n 'category': pos})\n else:\n lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})\n return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])\n"
] | class GermaNet(object):
'''A class representing the GermaNet database.'''
def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
'''
Creates a new GermaNet object.
Arguments:
- `mongo_db`: a pymongo.database.Database object containing
the GermaNet lexicon
'''
self._mongo_db = mongo_db
self._lemma_cache = None
self._synset_cache = None
self.max_min_depths = {}
try:
self.__dict__.update((k, v) for (k, v)
in self._mongo_db.metainfo.find_one().items()
if k not in GERMANET_METAINFO_IGNORE_KEYS)
except AttributeError:
# ignore error generated if metainfo is not included in
# the mongo DB
pass
try:
self._lemma_cache = repoze.lru.LRUCache(cache_size)
self._synset_cache = repoze.lru.LRUCache(cache_size)
except NameError:
pass
@property
def cache_size(self):
'''
Return the current cache size used to reduce the number of
database access operations.
'''
if self._lemma_cache is not None:
return self._lemma_cache.size
return 0
@cache_size.setter
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value)
def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict)
def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict)
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset
def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma
def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word]
|
wroberts/pygermanet | pygermanet/germanet.py | GermaNet.synset | python | def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset | Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2) | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L128-L151 | null | class GermaNet(object):
'''A class representing the GermaNet database.'''
def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
'''
Creates a new GermaNet object.
Arguments:
- `mongo_db`: a pymongo.database.Database object containing
the GermaNet lexicon
'''
self._mongo_db = mongo_db
self._lemma_cache = None
self._synset_cache = None
self.max_min_depths = {}
try:
self.__dict__.update((k, v) for (k, v)
in self._mongo_db.metainfo.find_one().items()
if k not in GERMANET_METAINFO_IGNORE_KEYS)
except AttributeError:
# ignore error generated if metainfo is not included in
# the mongo DB
pass
try:
self._lemma_cache = repoze.lru.LRUCache(cache_size)
self._synset_cache = repoze.lru.LRUCache(cache_size)
except NameError:
pass
@property
def cache_size(self):
'''
Return the current cache size used to reduce the number of
database access operations.
'''
if self._lemma_cache is not None:
return self._lemma_cache.size
return 0
@cache_size.setter
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value)
def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict)
def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict)
def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos)))
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset
def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma
def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word]
|
wroberts/pygermanet | pygermanet/germanet.py | GermaNet.get_synset_by_id | python | def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset | Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L153-L171 | null | class GermaNet(object):
'''A class representing the GermaNet database.'''
def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
'''
Creates a new GermaNet object.
Arguments:
- `mongo_db`: a pymongo.database.Database object containing
the GermaNet lexicon
'''
self._mongo_db = mongo_db
self._lemma_cache = None
self._synset_cache = None
self.max_min_depths = {}
try:
self.__dict__.update((k, v) for (k, v)
in self._mongo_db.metainfo.find_one().items()
if k not in GERMANET_METAINFO_IGNORE_KEYS)
except AttributeError:
# ignore error generated if metainfo is not included in
# the mongo DB
pass
try:
self._lemma_cache = repoze.lru.LRUCache(cache_size)
self._synset_cache = repoze.lru.LRUCache(cache_size)
except NameError:
pass
@property
def cache_size(self):
'''
Return the current cache size used to reduce the number of
database access operations.
'''
if self._lemma_cache is not None:
return self._lemma_cache.size
return 0
@cache_size.setter
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value)
def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict)
def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict)
def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos)))
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset
def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma
def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word]
|
wroberts/pygermanet | pygermanet/germanet.py | GermaNet.get_lemma_by_id | python | def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma | Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L173-L191 | null | class GermaNet(object):
'''A class representing the GermaNet database.'''
def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
'''
Creates a new GermaNet object.
Arguments:
- `mongo_db`: a pymongo.database.Database object containing
the GermaNet lexicon
'''
self._mongo_db = mongo_db
self._lemma_cache = None
self._synset_cache = None
self.max_min_depths = {}
try:
self.__dict__.update((k, v) for (k, v)
in self._mongo_db.metainfo.find_one().items()
if k not in GERMANET_METAINFO_IGNORE_KEYS)
except AttributeError:
# ignore error generated if metainfo is not included in
# the mongo DB
pass
try:
self._lemma_cache = repoze.lru.LRUCache(cache_size)
self._synset_cache = repoze.lru.LRUCache(cache_size)
except NameError:
pass
@property
def cache_size(self):
'''
Return the current cache size used to reduce the number of
database access operations.
'''
if self._lemma_cache is not None:
return self._lemma_cache.size
return 0
@cache_size.setter
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value)
def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict)
def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict)
def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos)))
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset
def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word]
|
wroberts/pygermanet | pygermanet/germanet.py | GermaNet.lemmatise | python | def lemmatise(self, word):
'''
Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123']
'''
lemmas = list(self._mongo_db.lemmatiser.find({'word': word}))
if lemmas:
return [lemma['lemma'] for lemma in lemmas]
else:
return [word] | Tries to find the base form (lemma) of the given word, using
the data provided by the Projekt deutscher Wortschatz. This
method returns a list of potential lemmas.
>>> gn.lemmatise(u'Männer')
[u'Mann']
>>> gn.lemmatise(u'XYZ123')
[u'XYZ123'] | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/germanet.py#L193-L208 | null | class GermaNet(object):
'''A class representing the GermaNet database.'''
def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
'''
Creates a new GermaNet object.
Arguments:
- `mongo_db`: a pymongo.database.Database object containing
the GermaNet lexicon
'''
self._mongo_db = mongo_db
self._lemma_cache = None
self._synset_cache = None
self.max_min_depths = {}
try:
self.__dict__.update((k, v) for (k, v)
in self._mongo_db.metainfo.find_one().items()
if k not in GERMANET_METAINFO_IGNORE_KEYS)
except AttributeError:
# ignore error generated if metainfo is not included in
# the mongo DB
pass
try:
self._lemma_cache = repoze.lru.LRUCache(cache_size)
self._synset_cache = repoze.lru.LRUCache(cache_size)
except NameError:
pass
@property
def cache_size(self):
'''
Return the current cache size used to reduce the number of
database access operations.
'''
if self._lemma_cache is not None:
return self._lemma_cache.size
return 0
@cache_size.setter
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value)
def all_lemmas(self):
'''
A generator over all the lemmas in the GermaNet database.
'''
for lemma_dict in self._mongo_db.lexunits.find():
yield Lemma(self, lemma_dict)
def lemmas(self, lemma, pos = None):
'''
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
'category': pos})
else:
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
def all_synsets(self):
'''
A generator over all the synsets in the GermaNet database.
'''
for synset_dict in self._mongo_db.synsets.find():
yield Synset(self, synset_dict)
def synsets(self, lemma, pos = None):
'''
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
- `pos`:
'''
return sorted(set(lemma_obj.synset
for lemma_obj in self.lemmas(lemma, pos)))
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset
def get_lemma_by_id(self, mongo_id):
'''
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemma_dict is not None:
lemma = Lemma(self, lemma_dict)
if self._lemma_cache is not None:
self._lemma_cache.put(mongo_id, lemma)
return lemma
|
wroberts/pygermanet | pygermanet/mongo_import.py | find_germanet_xml_files | python | def find_germanet_xml_files(xml_path):
'''
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
'''
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if
re.match(r'(adj|nomen|verben)\.',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(lex_files))
if not lex_files:
print('ERROR: cannot find lexical information files')
# sort out the GermaNet relations file
gn_rels_file = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower() == 'gn_relations.xml']
xml_files = sorted(set(xml_files) - set(gn_rels_file))
if not gn_rels_file:
print('ERROR: cannot find relations file gn_relations.xml')
gn_rels_file = None
else:
if 1 < len(gn_rels_file):
print ('WARNING: more than one relations file gn_relations.xml, '
'taking first match')
gn_rels_file = gn_rels_file[0]
# sort out the wiktionary paraphrase files
wiktionary_files = [xml_file for xml_file in xml_files if
re.match(r'wiktionaryparaphrases-',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(wiktionary_files))
if not wiktionary_files:
print('WARNING: cannot find wiktionary paraphrase files')
# sort out the interlingual index file
ili_files = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower().startswith(
'interlingualindex')]
xml_files = sorted(set(xml_files) - set(ili_files))
if not ili_files:
print('WARNING: cannot find interlingual index file')
if xml_files:
print('WARNING: unrecognised xml files:', xml_files)
return lex_files, gn_rels_file, wiktionary_files, ili_files | Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L30-L85 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc, node, recognised_attribs, reqd_attribs=None):
    '''
    Error checking of XML input: check that the given node has certain
    required attributes, and does not have any unrecognised
    attributes.  Problems are reported by printing warnings; the
    function itself returns nothing.

    Arguments:
    - `loc`: a string with some information about the location of the
      error in the XML file
    - `node`: the node to check
    - `recognised_attribs`: a set of node attributes which we know how
      to handle
    - `reqd_attribs`: a set of node attributes which we require to be
      present; if this argument is None, it will take the same value
      as `recognised_attribs`
    '''
    # by default, every recognised attribute is also required
    required = recognised_attribs if reqd_attribs is None else reqd_attribs
    present = set(node.keys())
    missing = required - present
    if missing:
        print(loc, 'missing <{0}> attributes'.format(node.tag), missing)
    unexpected = present - recognised_attribs
    if unexpected:
        print(loc, 'unrecognised <{0}> properties'.format(node.tag),
              unexpected)
# Attribute whitelists for the GermaNet lexicon XML elements, used by
# read_lexical_file() via warn_attribs().
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
                       'source', 'sense', 'id'])
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
# maps the yes/no attribute values used in the XML to Python booleans
MAP_YESNO_TO_BOOL = {
    'yes': True,
    'no': False,
}
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Each returned dictionary describes one <synset> element; its
    'lexunits' key holds a list of dictionaries describing the
    synset's <lexUnit> children.  Malformed input is reported by
    printing warnings.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        synset_dict = dict(synset.items())
        # location string used to prefix all warnings for this synset
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                # NOTE: the inner loops below reuse the name `child`,
                # shadowing the outer loop variable
                for child in lexunit:
                    if child.tag in ['orthForm',
                                     'orthVar',
                                     'oldOrthForm',
                                     'oldOrthVar']:
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        # NOTE(review): when `text` is empty the next
                        # line raises IndexError despite the warning
                        # above -- confirm whether that can occur
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                if 'exframe' in example_dict:
                                    print(lexloc,
                                          'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                        # NOTE(review): compound_dict is built here but
                        # never attached to lexunit_dict -- compound
                        # information appears to be discarded; confirm
                        # whether this is intended
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
                # NOTE(review): paraphrase_text is not stored on
                # synset_dict; verify whether it should be
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
# Attributes on <lex_rel>/<con_rel> elements in gn_relations.xml; the
# "inv" (inverse relation name) attribute is optional.
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
# legal values of the "dir" attribute for each relation kind
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
    '''
    Reads the GermaNet relation file ``gn_relations.xml`` which lists
    all the relations holding between lexical units and synsets.

    Arguments:
    - `filename`: path to the gn_relations.xml file

    Returns a pair (lex_rels, con_rels): two lists of attribute
    dictionaries, one entry per <lex_rel>/<con_rel> element.
    '''
    with open(filename, 'rb') as relation_file:
        tree = etree.parse(relation_file)
    root = tree.getroot()
    assert root.tag == 'relations'
    lex_rels = []
    con_rels = []
    for node in root:
        if node.tag == 'lex_rel':
            if 0 < len(node):
                print('<lex_rel> has unexpected child node')
            attrs = dict(node.items())
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if attrs['dir'] not in LEX_REL_DIRS:
                print('unrecognized <lex_rel> dir', attrs['dir'])
            if attrs['dir'] == 'both' and 'inv' not in attrs:
                print('<lex_rel> has dir=both but does not specify inv')
            lex_rels.append(attrs)
        elif node.tag == 'con_rel':
            if 0 < len(node):
                print('<con_rel> has unexpected child node')
            attrs = dict(node.items())
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if attrs['dir'] not in CON_REL_DIRS:
                print('unrecognised <con_rel> dir', attrs['dir'])
            if (attrs['dir'] in ['both', 'revert'] and
                    'inv' not in attrs):
                print('<con_rel> has dir={0} but does not specify inv'.format(
                    attrs['dir']))
            con_rels.append(attrs)
        else:
            print('unrecognised child of <relations>', node)
    return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
# attributes recognised on <wiktionaryParaphrase> elements
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
                          'wiktionarySense', 'wiktionarySenseId'])
def read_paraphrase_file(filename):
    '''
    Reads in a GermaNet wiktionary paraphrase file and returns its
    contents as a list of dictionary structures.

    One dictionary per <wiktionaryParaphrase> element is returned;
    where possible, the 'edited' value is converted to a bool and
    'wiktionarySenseId' to an int.  Malformed input is reported by
    printing warnings.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    assert doc.getroot().tag == 'wiktionaryParaphrases'
    paraphrases = []
    for child in doc.getroot():
        if child.tag == 'wiktionaryParaphrase':
            paraphrase = child
            warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
            if 0 < len(paraphrase):
                print('unrecognised child of <wiktionaryParaphrase>',
                      list(paraphrase))
            paraphrase_dict = dict(paraphrase.items())
            # normalise the yes/no "edited" flag to a boolean
            if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
                print('<paraphrase> attribute "edited" has unexpected value',
                      paraphrase_dict['edited'])
            else:
                paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
                    paraphrase_dict['edited']]
            # normalise the sense identifier to an integer
            if not paraphrase_dict['wiktionarySenseId'].isdigit():
                # bug fix: report the offending senseId value rather
                # than the unrelated "edited" attribute
                print('<paraphrase> attribute "wiktionarySenseId" has '
                      'non-integer value',
                      paraphrase_dict['wiktionarySenseId'])
            else:
                paraphrase_dict['wiktionarySenseId'] = \
                    int(paraphrase_dict['wiktionarySenseId'], 10)
            paraphrases.append(paraphrase_dict)
        else:
            print('unknown child of <wiktionaryParaphrases>', child)
    return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords ('class' would be unusable as an attribute name on
# the Synset objects built from these documents)
SYNSET_KEY_REWRITES = {
    'class': 'gn_class',
}
def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database (collections `synsets`
    and `lexunits`), then creates lookup indices on both collections.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexical
      information
    '''
    # drop the database collections if they already exist
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()
    # inject data from XML files into the database
    for lex_file in lex_files:
        synsets = read_lexical_file(lex_file)
        for synset in synsets:
            # rename keys that clash with Python keywords
            # (class -> gn_class)
            synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
                          for (key, value) in synset.items())
            lexunits = synset['lexunits']
            # store lexunits in their own collection; the synset
            # document keeps only their ObjectIds
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            # back-link each lexunit to its synset
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)
    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING),
                                       ('sense', DESCENDING)])
    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
def insert_relation_information(germanet_db, gn_rels_file):
    '''
    Reads in the given GermaNet relation file and inserts its contents
    into the given MongoDB database: each lexunit/synset document gets
    a sorted 'rels' list of (relation_name, target_ObjectId) pairs.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `gn_rels_file`: path to the gn_relations.xml file
    '''
    lex_rels, con_rels = read_relation_file(gn_rels_file)
    # cache the lexunits while we work on them
    lexunits = {}
    for lex_rel in lex_rels:
        if lex_rel['from'] not in lexunits:
            lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['from']})
        from_lexunit = lexunits[lex_rel['from']]
        if lex_rel['to'] not in lexunits:
            lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['to']})
        to_lexunit = lexunits[lex_rel['to']]
        # record the forward relation as a (name, target-id) pair
        if 'rels' not in from_lexunit:
            from_lexunit['rels'] = set()
        from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
        # for bidirectional relations, also record the inverse
        if lex_rel['dir'] == 'both':
            if 'rels' not in to_lexunit:
                to_lexunit['rels'] = set()
            to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
    # write back the modified lexunits, with 'rels' as a sorted list
    # NOTE(review): indentation of the save() call was ambiguous in
    # the original layout; as written, unmodified cached units (targets
    # of one-directional relations) are not rewritten -- confirm
    for lexunit in lexunits.values():
        if 'rels' in lexunit:
            lexunit['rels'] = sorted(lexunit['rels'])
            germanet_db.lexunits.save(lexunit)
    # cache the synsets while we work on them
    synsets = {}
    for con_rel in con_rels:
        if con_rel['from'] not in synsets:
            synsets[con_rel['from']] = germanet_db.synsets.find_one(
                {'id': con_rel['from']})
        from_synset = synsets[con_rel['from']]
        if con_rel['to'] not in synsets:
            synsets[con_rel['to']] = germanet_db.synsets.find_one(
                {'id': con_rel['to']})
        to_synset = synsets[con_rel['to']]
        if 'rels' not in from_synset:
            from_synset['rels'] = set()
        from_synset['rels'].add((con_rel['name'], to_synset['_id']))
        if con_rel['dir'] in ['both', 'revert']:
            if 'rels' not in to_synset:
                to_synset['rels'] = set()
            to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
    for synset in synsets.values():
        if 'rels' in synset:
            synset['rels'] = sorted(synset['rels'])
            germanet_db.synsets.save(synset)
    print('Inserted {0} lexical relations, {1} synset relations.'.format(
        len(lex_rels), len(con_rels)))
def insert_paraphrase_information(germanet_db, wiktionary_files):
    '''
    Reads in the given wiktionary paraphrase files and attaches their
    contents to the matching lexunit documents in the given MongoDB
    database (as a 'paraphrases' list per lexunit).

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `wiktionary_files`: a list of paths to wiktionary paraphrase
      XML files
    '''
    num_paraphrases = 0
    # cache the lexunits while we work on them
    lexunits = {}
    for filename in wiktionary_files:
        paraphrases = read_paraphrase_file(filename)
        num_paraphrases += len(paraphrases)
        for paraphrase in paraphrases:
            if paraphrase['lexUnitId'] not in lexunits:
                lexunits[paraphrase['lexUnitId']] = \
                    germanet_db.lexunits.find_one(
                        {'id': paraphrase['lexUnitId']})
            lexunit = lexunits[paraphrase['lexUnitId']]
            if 'paraphrases' not in lexunit:
                lexunit['paraphrases'] = []
            lexunit['paraphrases'].append(paraphrase)
    # write all touched lexunits back to the database
    for lexunit in lexunits.values():
        germanet_db.lexunits.save(lexunit)
    print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
# gzipped word<TAB>lemma mapping shipped alongside this module
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'
def insert_lemmatisation_data(germanet_db):
    '''
    Creates the lemmatiser collection in the given MongoDB instance
    using the data derived from the Projekt deutscher Wortschatz.

    Each line of the gzipped data file is a tab-separated
    (word, lemma) pair; one document per pair is inserted and the
    collection is indexed on 'word'.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    # drop the database collection if it already exists
    germanet_db.lemmatiser.drop()
    num_lemmas = 0
    input_file = gzip.open(os.path.join(os.path.dirname(__file__),
                                        LEMMATISATION_FILE))
    for line in input_file:
        # the data file is Latin-1 encoded
        line = line.decode('iso-8859-1').strip().split('\t')
        assert len(line) == 2
        germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line))))
        num_lemmas += 1
    input_file.close()
    # index the collection on 'word'
    germanet_db.lemmatiser.create_index('word')
    print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
# gzipped tab-separated (count, pos, word) frequency table from SDEWAC
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'
def insert_infocontent_data(germanet_db):
    '''
    For every synset in GermaNet, inserts count information derived
    from SDEWAC: each synset document gets an 'infocont' value, its
    (smoothed) relative frequency mass, for use in information-content
    based similarity measures.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # use add one smoothing
    gn_counts = defaultdict(lambda: 1.)
    total_count = 1
    input_file = gzip.open(os.path.join(os.path.dirname(__file__),
                                        WORD_COUNT_FILE))
    num_lines_read = 0
    num_lines = 0
    for line in input_file:
        line = line.decode('utf-8').strip().split('\t')
        num_lines += 1
        # skip malformed lines (anything but exactly 3 fields)
        if len(line) != 3:
            continue
        count, pos, word = line
        num_lines_read += 1
        count = int(count)
        synsets = set(gnet.synsets(word, pos))
        if not synsets:
            continue
        # Although Resnik (1995) suggests dividing count by the number
        # of synsets, Patwardhan et al (2003) argue against doing
        # this.
        count = float(count) / len(synsets)
        for synset in synsets:
            total_count += count
            # distribute the count mass over all hypernym paths so
            # that ancestors accumulate the counts of their descendants
            paths = synset.hypernym_paths
            scount = float(count) / len(paths)
            for path in paths:
                for ss in path:
                    gn_counts[ss._id] += scount
    print('Read {0} of {1} lines from count file.'.format(num_lines_read,
                                                          num_lines))
    print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
    print('Total count is {0}'.format(total_count))
    input_file.close()
    # update all the synset records in GermaNet
    num_updates = 0
    for synset in germanet_db.synsets.find():
        synset['infocont'] = gn_counts[synset['_id']] / total_count
        germanet_db.synsets.save(synset)
        num_updates += 1
    print('Updated {0} synsets.'.format(num_updates))
def compute_max_min_depth(germanet_db):
    '''
    For every part of speech in GermaNet, computes the maximum
    min_depth in that hierarchy and stores the result in the database's
    metainfo document under 'max_min_depths'.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # -1 lets any real depth value replace the initial entry
    max_min_depths = defaultdict(lambda: -1)
    for synset in gnet.all_synsets():
        min_depth = synset.min_depth
        if max_min_depths[synset.category] < min_depth:
            max_min_depths[synset.category] = min_depth
    # create the metainfo document on first run
    if germanet_db.metainfo.count() == 0:
        germanet_db.metainfo.insert({})
    metainfo = germanet_db.metainfo.find_one()
    metainfo['max_min_depths'] = max_min_depths
    germanet_db.metainfo.save(metainfo)
    print('Computed maximum min_depth for all parts of speech:')
    # NOTE(review): .encode() here looks like a Python 2 remnant; under
    # Python 3 this prints a bytes repr -- confirm intended
    print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
                     sorted(max_min_depths.items())).encode('utf-8'))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
    '''Main function: parse command-line options, connect to MongoDB,
    and run the full GermaNet import pipeline.'''
    usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
             'XML_PATH the directory containing the '
             'GermaNet .xml files')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--host', default=None,
                      help='hostname or IP address of the MongoDB instance '
                      'where the GermaNet database will be inserted '
                      '(default: %default)')
    parser.add_option('--port', type='int', default=None,
                      help='port number of the MongoDB instance where the '
                      'GermaNet database will be inserted (default: %default)')
    parser.add_option('--database', dest='database_name', default='germanet',
                      help='the name of the database on the MongoDB instance '
                      'where GermaNet will be stored (default: %default)')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("incorrect number of arguments")
        # NOTE(review): parser.error() already exits the process, so
        # this sys.exit(1) is effectively dead code
        sys.exit(1)
    xml_path = args[0]
    client = MongoClient(options.host, options.port)
    germanet_db = client[options.database_name]
    # run the import steps in dependency order
    lex_files, gn_rels_file, wiktionary_files, ili_files = \
        find_germanet_xml_files(xml_path)
    insert_lexical_information(germanet_db, lex_files)
    insert_relation_information(germanet_db, gn_rels_file)
    insert_paraphrase_information(germanet_db, wiktionary_files)
    insert_lemmatisation_data(germanet_db)
    insert_infocontent_data(germanet_db)
    compute_max_min_depth(germanet_db)
    client.close()
if __name__ == '__main__' and sys.argv != ['']:
main()
|
wroberts/pygermanet | pygermanet/mongo_import.py | warn_attribs | python | def warn_attribs(loc,
node,
recognised_attribs,
reqd_attribs=None):
'''
Error checking of XML input: check that the given node has certain
required attributes, and does not have any unrecognised
attributes.
Arguments:
- `loc`: a string with some information about the location of the
error in the XML file
- `node`: the node to check
- `recognised_attribs`: a set of node attributes which we know how
to handle
- `reqd_attribs`: a set of node attributes which we require to be
present; if this argument is None, it will take the same value
as `recognised_attribs`
'''
if reqd_attribs is None:
reqd_attribs = recognised_attribs
found_attribs = set(node.keys())
if reqd_attribs - found_attribs:
print(loc, 'missing <{0}> attributes'.format(node.tag),
reqd_attribs - found_attribs)
if found_attribs - recognised_attribs:
print(loc, 'unrecognised <{0}> properties'.format(node.tag),
found_attribs - recognised_attribs) | Error checking of XML input: check that the given node has certain
required attributes, and does not have any unrecognised
attributes.
Arguments:
- `loc`: a string with some information about the location of the
error in the XML file
- `node`: the node to check
- `recognised_attribs`: a set of node attributes which we know how
to handle
- `reqd_attribs`: a set of node attributes which we require to be
present; if this argument is None, it will take the same value
as `recognised_attribs` | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L92-L119 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
    '''
    Globs the XML files contained in the given directory and sorts
    them into sections for import into the MongoDB database.

    Arguments:
    - `xml_path`: the path to the directory containing the GermaNet
      XML files

    Returns a tuple (lex_files, gn_rels_file, wiktionary_files,
    ili_files); gn_rels_file is a single path (or None), the other
    elements are lists of paths.
    '''
    remaining = sorted(glob.glob(os.path.join(xml_path, '*.xml')))

    def split_off(predicate):
        # partition `remaining` into (matching, rest), both sorted
        matched = [fname for fname in remaining if predicate(fname)]
        return matched, sorted(set(remaining) - set(matched))

    # sort out the lexical files
    lex_files, remaining = split_off(
        lambda fname: re.match(r'(adj|nomen|verben)\.',
                               os.path.basename(fname).lower()))
    if not lex_files:
        print('ERROR: cannot find lexical information files')

    # sort out the GermaNet relations file
    gn_rels_file, remaining = split_off(
        lambda fname: os.path.basename(fname).lower() == 'gn_relations.xml')
    if not gn_rels_file:
        print('ERROR: cannot find relations file gn_relations.xml')
        gn_rels_file = None
    else:
        if 1 < len(gn_rels_file):
            print('WARNING: more than one relations file gn_relations.xml, '
                  'taking first match')
        gn_rels_file = gn_rels_file[0]

    # sort out the wiktionary paraphrase files
    wiktionary_files, remaining = split_off(
        lambda fname: re.match(r'wiktionaryparaphrases-',
                               os.path.basename(fname).lower()))
    if not wiktionary_files:
        print('WARNING: cannot find wiktionary paraphrase files')

    # sort out the interlingual index file
    ili_files, remaining = split_off(
        lambda fname: os.path.basename(fname).lower().startswith(
            'interlingualindex'))
    if not ili_files:
        print('WARNING: cannot find interlingual index file')

    if remaining:
        print('WARNING: unrecognised xml files:', remaining)
    return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
# Attribute whitelists for the GermaNet lexicon XML elements, used by
# read_lexical_file() via warn_attribs().
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
                       'source', 'sense', 'id'])
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
# maps the yes/no attribute values used in the XML to Python booleans
MAP_YESNO_TO_BOOL = {
    'yes': True,
    'no': False,
}
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Each returned dictionary describes one <synset> element; its
    'lexunits' key holds a list of dictionaries describing the
    synset's <lexUnit> children.  Malformed input is reported by
    printing warnings.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        synset_dict = dict(synset.items())
        # location string used to prefix all warnings for this synset
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                # NOTE: the inner loops below reuse the name `child`,
                # shadowing the outer loop variable
                for child in lexunit:
                    if child.tag in ['orthForm',
                                     'orthVar',
                                     'oldOrthForm',
                                     'oldOrthVar']:
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        # NOTE(review): when `text` is empty the next
                        # line raises IndexError despite the warning
                        # above -- confirm whether that can occur
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                if 'exframe' in example_dict:
                                    print(lexloc,
                                          'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                        # NOTE(review): compound_dict is built here but
                        # never attached to lexunit_dict -- compound
                        # information appears to be discarded; confirm
                        # whether this is intended
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
                # NOTE(review): paraphrase_text is not stored on
                # synset_dict; verify whether it should be
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
# Attributes on <lex_rel>/<con_rel> elements in gn_relations.xml; the
# "inv" (inverse relation name) attribute is optional.
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
# legal values of the "dir" attribute for each relation kind
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
'''
Reads the GermaNet relation file ``gn_relations.xml`` which lists
all the relations holding between lexical units and synsets.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
lex_rels = []
con_rels = []
assert doc.getroot().tag == 'relations'
for child in doc.getroot():
if child.tag == 'lex_rel':
if 0 < len(child):
print('<lex_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in LEX_REL_DIRS:
print('unrecognized <lex_rel> dir', child_dict['dir'])
if child_dict['dir'] == 'both' and 'inv' not in child_dict:
print('<lex_rel> has dir=both but does not specify inv')
lex_rels.append(child_dict)
elif child.tag == 'con_rel':
if 0 < len(child):
print('<con_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in CON_REL_DIRS:
print('unrecognised <con_rel> dir', child_dict['dir'])
if (child_dict['dir'] in ['both', 'revert'] and
'inv' not in child_dict):
print('<con_rel> has dir={0} but does not specify inv'.format(
child_dict['dir']))
con_rels.append(child_dict)
else:
print('unrecognised child of <relations>', child)
continue
return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
# Attributes expected on a <wiktionaryParaphrase> element.
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
                          'wiktionarySense', 'wiktionarySenseId'])


def read_paraphrase_file(filename):
    '''
    Reads in a GermaNet wiktionary paraphrase file and returns its
    contents as a list of dictionary structures.

    Arguments:
    - `filename`: path to a wiktionaryParaphrases XML file
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    assert doc.getroot().tag == 'wiktionaryParaphrases'
    paraphrases = []
    for child in doc.getroot():
        if child.tag == 'wiktionaryParaphrase':
            paraphrase = child
            warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
            if 0 < len(paraphrase):
                print('unrecognised child of <wiktionaryParaphrase>',
                      list(paraphrase))
            paraphrase_dict = dict(paraphrase.items())
            # normalise the 'edited' yes/no flag to a boolean
            if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
                print('<paraphrase> attribute "edited" has unexpected value',
                      paraphrase_dict['edited'])
            else:
                paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
                    paraphrase_dict['edited']]
            # normalise the wiktionary sense number to an integer
            if not paraphrase_dict['wiktionarySenseId'].isdigit():
                # BUGFIX: report the offending senseId value; the original
                # mistakenly printed the unrelated 'edited' attribute here
                print('<paraphrase> attribute "wiktionarySenseId" has '
                      'non-integer value',
                      paraphrase_dict['wiktionarySenseId'])
            else:
                paraphrase_dict['wiktionarySenseId'] = \
                    int(paraphrase_dict['wiktionarySenseId'], 10)
            paraphrases.append(paraphrase_dict)
        else:
            print('unknown child of <wiktionaryParaphrases>', child)
    return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords
# we need to change the names of some synset keys because they are
# Python keywords
SYNSET_KEY_REWRITES = {
    'class': 'gn_class',
}


def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexical
      information
    '''
    # drop the database collections if they already exist
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()
    # inject data from XML files into the database
    for lex_file in lex_files:
        synsets = read_lexical_file(lex_file)
        for synset in synsets:
            # rename keys that clash with Python keywords (e.g. 'class')
            synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
                          for (key, value) in synset.items())
            lexunits = synset['lexunits']
            # NOTE(review): legacy pymongo bulk insert(); presumably this
            # returns the inserted ids, so the synset record holds
            # references rather than embedded documents — confirm
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            # back-link each lexunit to its parent synset and copy the
            # category down for direct lookup on the lexunit
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)
    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING),
                                       ('sense', DESCENDING)])
    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
def insert_relation_information(germanet_db, gn_rels_file):
    '''
    Reads in the given GermaNet relation file and inserts its contents
    into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `gn_rels_file`: path to the gn_relations.xml file
    '''
    lex_rels, con_rels = read_relation_file(gn_rels_file)
    # cache the lexunits while we work on them
    lexunits = {}
    for lex_rel in lex_rels:
        # fetch (and memoise) the source and target lexunit records
        if lex_rel['from'] not in lexunits:
            lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['from']})
        from_lexunit = lexunits[lex_rel['from']]
        if lex_rel['to'] not in lexunits:
            lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['to']})
        to_lexunit = lexunits[lex_rel['to']]
        if 'rels' not in from_lexunit:
            from_lexunit['rels'] = set()
        # relations are accumulated as (relation_name, target_id) pairs
        from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
        # a bidirectional relation also records the inverse edge
        if lex_rel['dir'] == 'both':
            if 'rels' not in to_lexunit:
                to_lexunit['rels'] = set()
            to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
    for lexunit in lexunits.values():
        if 'rels' in lexunit:
            # sets are not BSON-serialisable; persist a sorted list
            lexunit['rels'] = sorted(lexunit['rels'])
        germanet_db.lexunits.save(lexunit)
    # cache the synsets while we work on them
    synsets = {}
    for con_rel in con_rels:
        if con_rel['from'] not in synsets:
            synsets[con_rel['from']] = germanet_db.synsets.find_one(
                {'id': con_rel['from']})
        from_synset = synsets[con_rel['from']]
        if con_rel['to'] not in synsets:
            synsets[con_rel['to']] = germanet_db.synsets.find_one(
                {'id': con_rel['to']})
        to_synset = synsets[con_rel['to']]
        if 'rels' not in from_synset:
            from_synset['rels'] = set()
        from_synset['rels'].add((con_rel['name'], to_synset['_id']))
        # 'both' and 'revert' directions also record the inverse edge
        if con_rel['dir'] in ['both', 'revert']:
            if 'rels' not in to_synset:
                to_synset['rels'] = set()
            to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
    for synset in synsets.values():
        if 'rels' in synset:
            synset['rels'] = sorted(synset['rels'])
        germanet_db.synsets.save(synset)
    print('Inserted {0} lexical relations, {1} synset relations.'.format(
        len(lex_rels), len(con_rels)))
def insert_paraphrase_information(germanet_db, wiktionary_files):
    '''
    Reads in the given GermaNet wiktionary paraphrase files and
    attaches their contents to the lexunit records in the given
    MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `wiktionary_files`: a list of paths to wiktionary paraphrase
      XML files
    '''
    num_paraphrases = 0
    # cache the lexunits while we work on them
    lexunits = {}
    for filename in wiktionary_files:
        paraphrases = read_paraphrase_file(filename)
        num_paraphrases += len(paraphrases)
        for paraphrase in paraphrases:
            if paraphrase['lexUnitId'] not in lexunits:
                lexunits[paraphrase['lexUnitId']] = \
                    germanet_db.lexunits.find_one(
                        {'id': paraphrase['lexUnitId']})
            lexunit = lexunits[paraphrase['lexUnitId']]
            # NOTE(review): find_one returns None for an unknown
            # lexUnitId, which would raise TypeError below — presumably
            # the paraphrase files only reference known ids; confirm
            if 'paraphrases' not in lexunit:
                lexunit['paraphrases'] = []
            lexunit['paraphrases'].append(paraphrase)
    for lexunit in lexunits.values():
        germanet_db.lexunits.save(lexunit)
    print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
# gzipped word -> lemma mapping shipped alongside this module
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'


def insert_lemmatisation_data(germanet_db):
    '''
    Creates the lemmatiser collection in the given MongoDB instance
    using the data derived from the Projekt deutscher Wortschatz.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    # drop the database collection if it already exists
    germanet_db.lemmatiser.drop()
    num_lemmas = 0
    # context manager guarantees the gzip handle is closed even if a
    # malformed line raises (the original only closed it on success)
    with gzip.open(os.path.join(os.path.dirname(__file__),
                                LEMMATISATION_FILE)) as input_file:
        for line in input_file:
            # each line is "<word>\t<lemma>", encoded in latin-1;
            # unpacking raises ValueError on a malformed line (the
            # original used a bare assert, stripped under -O)
            word, lemma = line.decode('iso-8859-1').strip().split('\t')
            germanet_db.lemmatiser.insert({'word': word, 'lemma': lemma})
            num_lemmas += 1
    # index the collection on 'word'
    germanet_db.lemmatiser.create_index('word')
    print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
# gzipped TSV of SDEWAC corpus counts: "<count>\t<pos>\t<word>"
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'


def insert_infocontent_data(germanet_db):
    '''
    For every synset in GermaNet, inserts count information derived
    from SDEWAC (used to compute information content for similarity
    measures).

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # use add one smoothing
    gn_counts = defaultdict(lambda: 1.)
    total_count = 1
    input_file = gzip.open(os.path.join(os.path.dirname(__file__),
                                        WORD_COUNT_FILE))
    num_lines_read = 0
    num_lines = 0
    for line in input_file:
        line = line.decode('utf-8').strip().split('\t')
        num_lines += 1
        # skip malformed lines (anything that is not count/pos/word)
        if len(line) != 3:
            continue
        count, pos, word = line
        num_lines_read += 1
        count = int(count)
        synsets = set(gnet.synsets(word, pos))
        if not synsets:
            continue
        # Although Resnik (1995) suggests dividing count by the number
        # of synsets, Patwardhan et al (2003) argue against doing
        # this.
        # NOTE(review): the comment above says dividing is argued
        # against, yet the line below does divide — confirm intent
        count = float(count) / len(synsets)
        for synset in synsets:
            total_count += count
            paths = synset.hypernym_paths
            # spread the count evenly across hypernym paths, crediting
            # every ancestor synset on each path
            scount = float(count) / len(paths)
            for path in paths:
                for ss in path:
                    gn_counts[ss._id] += scount
    print('Read {0} of {1} lines from count file.'.format(num_lines_read,
                                                          num_lines))
    print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
    print('Total count is {0}'.format(total_count))
    input_file.close()
    # update all the synset records in GermaNet
    num_updates = 0
    for synset in germanet_db.synsets.find():
        # relative frequency of this synset within the smoothed total
        synset['infocont'] = gn_counts[synset['_id']] / total_count
        germanet_db.synsets.save(synset)
        num_updates += 1
    print('Updated {0} synsets.'.format(num_updates))
def compute_max_min_depth(germanet_db):
    '''
    For every part of speech in GermaNet, computes the maximum
    min_depth in that hierarchy and stores it in the metainfo
    collection.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # deepest minimum depth seen per category; -1 so any real depth wins
    max_min_depths = defaultdict(lambda: -1)
    for synset in gnet.all_synsets():
        min_depth = synset.min_depth
        if max_min_depths[synset.category] < min_depth:
            max_min_depths[synset.category] = min_depth
    # store the result on the single metainfo document (created on demand)
    if germanet_db.metainfo.count() == 0:
        germanet_db.metainfo.insert({})
    metainfo = germanet_db.metainfo.find_one()
    metainfo['max_min_depths'] = max_min_depths
    germanet_db.metainfo.save(metainfo)
    print('Computed maximum min_depth for all parts of speech:')
    # NOTE(review): .encode('utf-8') prints a bytes repr on Python 3 —
    # presumably a Python 2 leftover; confirm before changing
    print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
                     sorted(max_min_depths.items())).encode('utf-8'))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
    '''Command-line entry point: parse options and import the GermaNet
    XML data found at XML_PATH into a MongoDB database.'''
    usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
             'XML_PATH the directory containing the '
             'GermaNet .xml files')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--host', default=None,
                      help='hostname or IP address of the MongoDB instance '
                      'where the GermaNet database will be inserted '
                      '(default: %default)')
    parser.add_option('--port', type='int', default=None,
                      help='port number of the MongoDB instance where the '
                      'GermaNet database will be inserted (default: %default)')
    parser.add_option('--database', dest='database_name', default='germanet',
                      help='the name of the database on the MongoDB instance '
                      'where GermaNet will be stored (default: %default)')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        # parser.error() prints the message and exits the process; the
        # original's trailing sys.exit(1) was unreachable and is removed
        parser.error("incorrect number of arguments")
    xml_path = args[0]
    client = MongoClient(options.host, options.port)
    germanet_db = client[options.database_name]
    # locate the input files, then run each import stage in order
    lex_files, gn_rels_file, wiktionary_files, ili_files = \
        find_germanet_xml_files(xml_path)
    insert_lexical_information(germanet_db, lex_files)
    insert_relation_information(germanet_db, gn_rels_file)
    insert_paraphrase_information(germanet_db, wiktionary_files)
    insert_lemmatisation_data(germanet_db)
    insert_infocontent_data(germanet_db)
    compute_max_min_depth(germanet_db)
    client.close()


if __name__ == '__main__' and sys.argv != ['']:
    main()
|
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        synset_dict = dict(synset.items())
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                # NOTE: this loop reuses the name `child`, shadowing the
                # <synset>-level loop variable; preserved as-is
                for child in lexunit:
                    if child.tag in ['orthForm',
                                     'orthVar',
                                     'oldOrthForm',
                                     'oldOrthVar']:
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        # NOTE(review): with no <text> child this prints a
                        # warning but text[0] below still raises IndexError —
                        # presumably inputs always carry <text>; confirm
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                if 'exframe' in example_dict:
                                    print(lexloc,
                                          'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                        # NOTE(review): compound_dict is built but never
                        # attached to lexunit_dict in this copy — confirm
                        # against upstream whether it should be stored
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
- `filename`: the name of the XML file to read | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L132-L275 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
    '''
    Globs the XML files contained in the given directory and sorts
    them into sections for import into the MongoDB database.

    Arguments:
    - `xml_path`: the path to the directory containing the GermaNet
      XML files

    Returns a 4-tuple ``(lex_files, gn_rels_file, wiktionary_files,
    ili_files)``; ``gn_rels_file`` is a single path or None.
    '''
    remaining = sorted(glob.glob(os.path.join(xml_path, '*.xml')))

    def take(predicate):
        # Split `remaining` into (matches, leftovers), matching on the
        # lowercased basename of each path.
        matched = [path for path in remaining
                   if predicate(os.path.basename(path).lower())]
        return matched, sorted(set(remaining) - set(matched))

    # lexical information files: adj.*, nomen.*, verben.*
    lex_files, remaining = take(
        lambda name: re.match(r'(adj|nomen|verben)\.', name))
    if not lex_files:
        print('ERROR: cannot find lexical information files')

    # the single GermaNet relations file
    rel_candidates, remaining = take(
        lambda name: name == 'gn_relations.xml')
    if not rel_candidates:
        print('ERROR: cannot find relations file gn_relations.xml')
        gn_rels_file = None
    else:
        if 1 < len(rel_candidates):
            print ('WARNING: more than one relations file gn_relations.xml, '
                   'taking first match')
        gn_rels_file = rel_candidates[0]

    # the wiktionary paraphrase files
    wiktionary_files, remaining = take(
        lambda name: re.match(r'wiktionaryparaphrases-', name))
    if not wiktionary_files:
        print('WARNING: cannot find wiktionary paraphrase files')

    # the interlingual index files
    ili_files, remaining = take(
        lambda name: name.startswith('interlingualindex'))
    if not ili_files:
        print('WARNING: cannot find interlingual index file')

    # anything left over is unexpected
    if remaining:
        print('WARNING: unrecognised xml files:', remaining)
    return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc,
                 node,
                 recognised_attribs,
                 reqd_attribs=None):
    '''
    Error checking of XML input: print a warning if the given node is
    missing any required attribute, or carries any attribute we do not
    know how to handle.

    Arguments:
    - `loc`: a string locating the error in the XML file
    - `node`: the XML node to check
    - `recognised_attribs`: set of attribute names we can handle
    - `reqd_attribs`: set of attribute names that must be present;
      defaults to `recognised_attribs` when None
    '''
    if reqd_attribs is None:
        reqd_attribs = recognised_attribs
    present = set(node.keys())
    missing = reqd_attribs - present
    unknown = present - recognised_attribs
    if missing:
        print(loc, 'missing <{0}> attributes'.format(node.tag), missing)
    if unknown:
        print(loc, 'unrecognised <{0}> properties'.format(node.tag), unknown)
# Attribute sets used with warn_attribs() to validate GermaNet XML elements.
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
                       'source', 'sense', 'id'])
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
# maps the 'yes'/'no' strings used in the XML onto Python booleans
MAP_YESNO_TO_BOOL = {
    'yes': True,
    'no': False,
}
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
'''
Reads the GermaNet relation file ``gn_relations.xml`` which lists
all the relations holding between lexical units and synsets.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
lex_rels = []
con_rels = []
assert doc.getroot().tag == 'relations'
for child in doc.getroot():
if child.tag == 'lex_rel':
if 0 < len(child):
print('<lex_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in LEX_REL_DIRS:
print('unrecognized <lex_rel> dir', child_dict['dir'])
if child_dict['dir'] == 'both' and 'inv' not in child_dict:
print('<lex_rel> has dir=both but does not specify inv')
lex_rels.append(child_dict)
elif child.tag == 'con_rel':
if 0 < len(child):
print('<con_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in CON_REL_DIRS:
print('unrecognised <con_rel> dir', child_dict['dir'])
if (child_dict['dir'] in ['both', 'revert'] and
'inv' not in child_dict):
print('<con_rel> has dir={0} but does not specify inv'.format(
child_dict['dir']))
con_rels.append(child_dict)
else:
print('unrecognised child of <relations>', child)
continue
return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
'wiktionarySense', 'wiktionarySenseId'])
def read_paraphrase_file(filename):
'''
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
assert doc.getroot().tag == 'wiktionaryParaphrases'
paraphrases = []
for child in doc.getroot():
if child.tag == 'wiktionaryParaphrase':
paraphrase = child
warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
if 0 < len(paraphrase):
print('unrecognised child of <wiktionaryParaphrase>',
list(paraphrase))
paraphrase_dict = dict(paraphrase.items())
if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
print('<paraphrase> attribute "edited" has unexpected value',
paraphrase_dict['edited'])
else:
paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
paraphrase_dict['edited']]
if not paraphrase_dict['wiktionarySenseId'].isdigit():
print('<paraphrase> attribute "wiktionarySenseId" has '
'non-integer value', paraphrase_dict['edited'])
else:
paraphrase_dict['wiktionarySenseId'] = \
int(paraphrase_dict['wiktionarySenseId'], 10)
paraphrases.append(paraphrase_dict)
else:
print('unknown child of <wiktionaryParaphrases>', child)
return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords
SYNSET_KEY_REWRITES = {
'class': 'gn_class',
}
def insert_lexical_information(germanet_db, lex_files):
'''
Reads in the given lexical information files and inserts their
contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `lex_files`: a list of paths to XML files containing lexial
information
'''
# drop the database collections if they already exist
germanet_db.lexunits.drop()
germanet_db.synsets.drop()
# inject data from XML files into the database
for lex_file in lex_files:
synsets = read_lexical_file(lex_file)
for synset in synsets:
synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
for (key, value) in synset.items())
lexunits = synset['lexunits']
synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
synset_id = germanet_db.synsets.insert(synset)
for lexunit in lexunits:
lexunit['synset'] = synset_id
lexunit['category'] = synset['category']
germanet_db.lexunits.save(lexunit)
# index the two collections by id
germanet_db.synsets.create_index('id')
germanet_db.lexunits.create_index('id')
# also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
germanet_db.lexunits.create_index([('orthForm', DESCENDING),
('category', DESCENDING)])
germanet_db.lexunits.create_index([('orthForm', DESCENDING),
('category', DESCENDING),
('sense', DESCENDING)])
print('Inserted {0} synsets, {1} lexical units.'.format(
germanet_db.synsets.count(),
germanet_db.lexunits.count()))
def insert_relation_information(germanet_db, gn_rels_file):
'''
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `gn_rels_file`:
'''
lex_rels, con_rels = read_relation_file(gn_rels_file)
# cache the lexunits while we work on them
lexunits = {}
for lex_rel in lex_rels:
if lex_rel['from'] not in lexunits:
lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
{'id': lex_rel['from']})
from_lexunit = lexunits[lex_rel['from']]
if lex_rel['to'] not in lexunits:
lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
{'id': lex_rel['to']})
to_lexunit = lexunits[lex_rel['to']]
if 'rels' not in from_lexunit:
from_lexunit['rels'] = set()
from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
if lex_rel['dir'] == 'both':
if 'rels' not in to_lexunit:
to_lexunit['rels'] = set()
to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
for lexunit in lexunits.values():
if 'rels' in lexunit:
lexunit['rels'] = sorted(lexunit['rels'])
germanet_db.lexunits.save(lexunit)
# cache the synsets while we work on them
synsets = {}
for con_rel in con_rels:
if con_rel['from'] not in synsets:
synsets[con_rel['from']] = germanet_db.synsets.find_one(
{'id': con_rel['from']})
from_synset = synsets[con_rel['from']]
if con_rel['to'] not in synsets:
synsets[con_rel['to']] = germanet_db.synsets.find_one(
{'id': con_rel['to']})
to_synset = synsets[con_rel['to']]
if 'rels' not in from_synset:
from_synset['rels'] = set()
from_synset['rels'].add((con_rel['name'], to_synset['_id']))
if con_rel['dir'] in ['both', 'revert']:
if 'rels' not in to_synset:
to_synset['rels'] = set()
to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
for synset in synsets.values():
if 'rels' in synset:
synset['rels'] = sorted(synset['rels'])
germanet_db.synsets.save(synset)
print('Inserted {0} lexical relations, {1} synset relations.'.format(
len(lex_rels), len(con_rels)))
def insert_paraphrase_information(germanet_db, wiktionary_files):
'''
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `wiktionary_files`:
'''
num_paraphrases = 0
# cache the lexunits while we work on them
lexunits = {}
for filename in wiktionary_files:
paraphrases = read_paraphrase_file(filename)
num_paraphrases += len(paraphrases)
for paraphrase in paraphrases:
if paraphrase['lexUnitId'] not in lexunits:
lexunits[paraphrase['lexUnitId']] = \
germanet_db.lexunits.find_one(
{'id': paraphrase['lexUnitId']})
lexunit = lexunits[paraphrase['lexUnitId']]
if 'paraphrases' not in lexunit:
lexunit['paraphrases'] = []
lexunit['paraphrases'].append(paraphrase)
for lexunit in lexunits.values():
germanet_db.lexunits.save(lexunit)
print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'
def insert_lemmatisation_data(germanet_db):
'''
Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
# drop the database collection if it already exists
germanet_db.lemmatiser.drop()
num_lemmas = 0
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
LEMMATISATION_FILE))
for line in input_file:
line = line.decode('iso-8859-1').strip().split('\t')
assert len(line) == 2
germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line))))
num_lemmas += 1
input_file.close()
# index the collection on 'word'
germanet_db.lemmatiser.create_index('word')
print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'
def insert_infocontent_data(germanet_db):
'''
For every synset in GermaNet, inserts count information derived
from SDEWAC.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
gnet = germanet.GermaNet(germanet_db)
# use add one smoothing
gn_counts = defaultdict(lambda: 1.)
total_count = 1
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
WORD_COUNT_FILE))
num_lines_read = 0
num_lines = 0
for line in input_file:
line = line.decode('utf-8').strip().split('\t')
num_lines += 1
if len(line) != 3:
continue
count, pos, word = line
num_lines_read += 1
count = int(count)
synsets = set(gnet.synsets(word, pos))
if not synsets:
continue
# Although Resnik (1995) suggests dividing count by the number
# of synsets, Patwardhan et al (2003) argue against doing
# this.
count = float(count) / len(synsets)
for synset in synsets:
total_count += count
paths = synset.hypernym_paths
scount = float(count) / len(paths)
for path in paths:
for ss in path:
gn_counts[ss._id] += scount
print('Read {0} of {1} lines from count file.'.format(num_lines_read,
num_lines))
print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
print('Total count is {0}'.format(total_count))
input_file.close()
# update all the synset records in GermaNet
num_updates = 0
for synset in germanet_db.synsets.find():
synset['infocont'] = gn_counts[synset['_id']] / total_count
germanet_db.synsets.save(synset)
num_updates += 1
print('Updated {0} synsets.'.format(num_updates))
def compute_max_min_depth(germanet_db):
'''
For every part of speech in GermaNet, computes the maximum
min_depth in that hierarchy.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
gnet = germanet.GermaNet(germanet_db)
max_min_depths = defaultdict(lambda: -1)
for synset in gnet.all_synsets():
min_depth = synset.min_depth
if max_min_depths[synset.category] < min_depth:
max_min_depths[synset.category] = min_depth
if germanet_db.metainfo.count() == 0:
germanet_db.metainfo.insert({})
metainfo = germanet_db.metainfo.find_one()
metainfo['max_min_depths'] = max_min_depths
germanet_db.metainfo.save(metainfo)
print('Computed maximum min_depth for all parts of speech:')
print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
sorted(max_min_depths.items())).encode('utf-8'))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
    '''Command-line entry point: parse arguments and run the import.'''
    usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
             'XML_PATH the directory containing the '
             'GermaNet .xml files')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--host', default=None,
                      help='hostname or IP address of the MongoDB instance '
                           'where the GermaNet database will be inserted '
                           '(default: %default)')
    parser.add_option('--port', type='int', default=None,
                      help='port number of the MongoDB instance where the '
                           'GermaNet database will be inserted (default: %default)')
    parser.add_option('--database', dest='database_name', default='germanet',
                      help='the name of the database on the MongoDB instance '
                           'where GermaNet will be stored (default: %default)')
    opts, args = parser.parse_args()
    if len(args) != 1:
        parser.error("incorrect number of arguments")
        sys.exit(1)
    client = MongoClient(opts.host, opts.port)
    germanet_db = client[opts.database_name]
    (lex_files, gn_rels_file,
     wiktionary_files, ili_files) = find_germanet_xml_files(args[0])
    # load the data in dependency order
    insert_lexical_information(germanet_db, lex_files)
    insert_relation_information(germanet_db, gn_rels_file)
    insert_paraphrase_information(germanet_db, wiktionary_files)
    insert_lemmatisation_data(germanet_db)
    insert_infocontent_data(germanet_db)
    compute_max_min_depth(germanet_db)
    client.close()
# Script entry point.  NOTE(review): the extra sys.argv != [''] test
# presumably guards against execution with an empty argv (e.g. under some
# embedded interpreters) -- confirm against original project history.
if __name__ == '__main__' and sys.argv != ['']:
    main()
|
wroberts/pygermanet | pygermanet/mongo_import.py | read_relation_file | python | def read_relation_file(filename):
'''
Reads the GermaNet relation file ``gn_relations.xml`` which lists
all the relations holding between lexical units and synsets.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
lex_rels = []
con_rels = []
assert doc.getroot().tag == 'relations'
for child in doc.getroot():
if child.tag == 'lex_rel':
if 0 < len(child):
print('<lex_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in LEX_REL_DIRS:
print('unrecognized <lex_rel> dir', child_dict['dir'])
if child_dict['dir'] == 'both' and 'inv' not in child_dict:
print('<lex_rel> has dir=both but does not specify inv')
lex_rels.append(child_dict)
elif child.tag == 'con_rel':
if 0 < len(child):
print('<con_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in CON_REL_DIRS:
print('unrecognised <con_rel> dir', child_dict['dir'])
if (child_dict['dir'] in ['both', 'revert'] and
'inv' not in child_dict):
print('<con_rel> has dir={0} but does not specify inv'.format(
child_dict['dir']))
con_rels.append(child_dict)
else:
print('unrecognised child of <relations>', child)
continue
return lex_rels, con_rels | Reads the GermaNet relation file ``gn_relations.xml`` which lists
all the relations holding between lexical units and synsets.
Arguments:
- `filename`: | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L288-L329 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
    '''
    Globs the XML files contained in the given directory and sorts
    them into sections for import into the MongoDB database.

    Arguments:
    - `xml_path`: the path to the directory containing the GermaNet
      XML files
    '''
    remaining = sorted(glob.glob(os.path.join(xml_path, '*.xml')))

    def claim(pred):
        # split off the files whose lower-cased basename matches pred,
        # returning (matched, files still unclaimed)
        matched = [f for f in remaining
                   if pred(os.path.basename(f).lower())]
        return matched, sorted(set(remaining) - set(matched))

    # lexical information files: adj.*, nomen.*, verben.*
    lex_files, remaining = claim(
        lambda name: re.match(r'(adj|nomen|verben)\.', name) is not None)
    if not lex_files:
        print('ERROR: cannot find lexical information files')
    # the GermaNet relation file (exactly one expected)
    rels, remaining = claim(lambda name: name == 'gn_relations.xml')
    if not rels:
        print('ERROR: cannot find relations file gn_relations.xml')
        gn_rels_file = None
    else:
        if 1 < len(rels):
            print('WARNING: more than one relations file gn_relations.xml, '
                  'taking first match')
        gn_rels_file = rels[0]
    # wiktionary paraphrase files
    wiktionary_files, remaining = claim(
        lambda name: re.match(r'wiktionaryparaphrases-', name) is not None)
    if not wiktionary_files:
        print('WARNING: cannot find wiktionary paraphrase files')
    # interlingual index files
    ili_files, remaining = claim(
        lambda name: name.startswith('interlingualindex'))
    if not ili_files:
        print('WARNING: cannot find interlingual index file')
    if remaining:
        print('WARNING: unrecognised xml files:', remaining)
    return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc,
                 node,
                 recognised_attribs,
                 reqd_attribs=None):
    '''
    Error checking of XML input: print a warning for every required
    attribute missing from `node`, and for every attribute present on
    `node` that is not recognised.

    Arguments:
    - `loc`: a string with some information about the location of the
      error in the XML file
    - `node`: the node to check
    - `recognised_attribs`: a set of node attributes which we know how
      to handle
    - `reqd_attribs`: a set of node attributes which we require to be
      present; if this argument is None, it will take the same value
      as `recognised_attribs`
    '''
    if reqd_attribs is None:
        reqd_attribs = recognised_attribs
    present = set(node.keys())
    missing = reqd_attribs - present
    unknown = present - recognised_attribs
    if missing:
        print(loc, 'missing <{0}> attributes'.format(node.tag), missing)
    if unknown:
        print(loc, 'unrecognised <{0}> properties'.format(node.tag), unknown)
# Attributes expected on <synset> elements.
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
# Attributes expected on <lexUnit> elements.
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
                       'source', 'sense', 'id'])
# Attributes expected on compound <modifier> and <head> elements.
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
# Maps the yes/no strings used in the XML to Python booleans.
MAP_YESNO_TO_BOOL = {
    'yes': True,
    'no': False,
}
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Each returned dict holds one <synset>'s attributes plus a
    'lexunits' list of per-<lexUnit> dicts (with 'examples' and
    'frames' lists).  Malformed input is reported on stdout but does
    not abort the parse.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        synset_dict = dict(synset.items())
        # location string used to prefix all warnings for this synset
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                # NOTE: the loop variable `child` is deliberately reused
                # (shadowed) at several nesting levels below.
                for child in lexunit:
                    if child.tag in ['orthForm',
                                    'orthVar',
                                    'oldOrthForm',
                                    'oldOrthVar']:
                        # orthographic variants: stored as plain strings
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                if 'exframe' in example_dict:
                                    print(lexloc,
                                          'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                        # NOTE(review): compound_dict is built but never
                        # attached to lexunit_dict in the visible code --
                        # confirm whether this is intentional.
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                # NOTE(review): paraphrase_text is checked but not stored
                # on synset_dict in the visible code -- confirm.
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
# Attributes on <lex_rel>/<con_rel> elements: 'dir', 'from', 'name' and
# 'to' are required; 'inv' (the name of the inverse relation) is optional.
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
# Legal values of the 'dir' attribute for lexical / conceptual relations.
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
# Attributes expected on <wiktionaryParaphrase> elements.
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
                          'wiktionarySense', 'wiktionarySenseId'])
def read_paraphrase_file(filename):
    '''
    Reads in a GermaNet wiktionary paraphrase file and returns its
    contents as a list of dictionary structures.

    Arguments:
    - `filename`: path to the wiktionary paraphrase XML file

    Returns a list of dicts, one per <wiktionaryParaphrase> element,
    with 'edited' converted to bool and 'wiktionarySenseId' converted
    to int wherever the values are well-formed.
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    assert doc.getroot().tag == 'wiktionaryParaphrases'
    paraphrases = []
    for child in doc.getroot():
        if child.tag != 'wiktionaryParaphrase':
            print('unknown child of <wiktionaryParaphrases>', child)
            continue
        paraphrase = child
        warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
        if 0 < len(paraphrase):
            print('unrecognised child of <wiktionaryParaphrase>',
                  list(paraphrase))
        paraphrase_dict = dict(paraphrase.items())
        # normalise the 'edited' flag to a boolean
        if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
            print('<paraphrase> attribute "edited" has unexpected value',
                  paraphrase_dict['edited'])
        else:
            paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
                paraphrase_dict['edited']]
        # normalise the sense ID to an integer
        if not paraphrase_dict['wiktionarySenseId'].isdigit():
            # BUG FIX: report the offending sense ID value (the original
            # printed paraphrase_dict['edited'] here by copy-paste error)
            print('<paraphrase> attribute "wiktionarySenseId" has '
                  'non-integer value', paraphrase_dict['wiktionarySenseId'])
        else:
            paraphrase_dict['wiktionarySenseId'] = \
                int(paraphrase_dict['wiktionarySenseId'], 10)
        paraphrases.append(paraphrase_dict)
    return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords
# Rename synset keys that would collide with reserved words before
# storing the documents ('class' is a Python keyword).
SYNSET_KEY_REWRITES = {
    'class': 'gn_class',
}
def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database.

    Side effects: drops and rebuilds the `synsets` and `lexunits`
    collections and creates their indexes.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexial
      information
    '''
    # drop the database collections if they already exist
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()
    # inject data from XML files into the database
    for lex_file in lex_files:
        synsets = read_lexical_file(lex_file)
        for synset in synsets:
            # rewrite keys that collide with Python keywords (e.g. 'class')
            synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
                          for (key, value) in synset.items())
            lexunits = synset['lexunits']
            # store the lexunits in their own collection; the synset
            # document keeps only their ObjectIds
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            # back-link each lexunit to its synset and denormalise the
            # part-of-speech category onto it
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)
    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING),
                                       ('sense', DESCENDING)])
    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
def insert_relation_information(germanet_db, gn_rels_file):
    '''
    Reads in the given GermaNet relation file and inserts its contents
    into the given MongoDB database.

    Each related document gains a 'rels' list of
    (relation_name, target_ObjectId) pairs.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `gn_rels_file`: path to the gn_relations.xml file
    '''
    lex_rels, con_rels = read_relation_file(gn_rels_file)
    # cache the lexunits while we work on them
    lexunits = {}
    for lex_rel in lex_rels:
        if lex_rel['from'] not in lexunits:
            lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['from']})
        from_lexunit = lexunits[lex_rel['from']]
        if lex_rel['to'] not in lexunits:
            lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['to']})
        to_lexunit = lexunits[lex_rel['to']]
        if 'rels' not in from_lexunit:
            from_lexunit['rels'] = set()
        from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
        # bidirectional relations also record the inverse edge
        if lex_rel['dir'] == 'both':
            if 'rels' not in to_lexunit:
                to_lexunit['rels'] = set()
            to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
    # convert the sets to sorted lists (BSON-serialisable) and save
    for lexunit in lexunits.values():
        if 'rels' in lexunit:
            lexunit['rels'] = sorted(lexunit['rels'])
            germanet_db.lexunits.save(lexunit)
    # cache the synsets while we work on them
    synsets = {}
    for con_rel in con_rels:
        if con_rel['from'] not in synsets:
            synsets[con_rel['from']] = germanet_db.synsets.find_one(
                {'id': con_rel['from']})
        from_synset = synsets[con_rel['from']]
        if con_rel['to'] not in synsets:
            synsets[con_rel['to']] = germanet_db.synsets.find_one(
                {'id': con_rel['to']})
        to_synset = synsets[con_rel['to']]
        if 'rels' not in from_synset:
            from_synset['rels'] = set()
        from_synset['rels'].add((con_rel['name'], to_synset['_id']))
        # 'both' and 'revert' relations also record the inverse edge
        if con_rel['dir'] in ['both', 'revert']:
            if 'rels' not in to_synset:
                to_synset['rels'] = set()
            to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
    for synset in synsets.values():
        if 'rels' in synset:
            synset['rels'] = sorted(synset['rels'])
            germanet_db.synsets.save(synset)
    print('Inserted {0} lexical relations, {1} synset relations.'.format(
        len(lex_rels), len(con_rels)))
def insert_paraphrase_information(germanet_db, wiktionary_files):
    '''
    Reads the given wiktionary paraphrase files and attaches each
    paraphrase to its lexical unit in the MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `wiktionary_files`: paths to the wiktionary paraphrase XML files
    '''
    num_paraphrases = 0
    # cache of lexunit documents, keyed by lexunit id
    cache = {}
    for filename in wiktionary_files:
        for paraphrase in read_paraphrase_file(filename):
            num_paraphrases += 1
            unit_id = paraphrase['lexUnitId']
            if unit_id not in cache:
                cache[unit_id] = germanet_db.lexunits.find_one(
                    {'id': unit_id})
            lexunit = cache[unit_id]
            lexunit.setdefault('paraphrases', []).append(paraphrase)
    # write the modified lexunit documents back
    for lexunit in cache.values():
        germanet_db.lexunits.save(lexunit)
    print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
# gzipped word->lemma table shipped alongside this module
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'

def insert_lemmatisation_data(germanet_db):
    '''
    (Re)creates the lemmatiser collection in the given MongoDB
    database using the data derived from the Projekt deutscher
    Wortschatz.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    # start from a clean collection
    germanet_db.lemmatiser.drop()
    num_lemmas = 0
    path = os.path.join(os.path.dirname(__file__), LEMMATISATION_FILE)
    with gzip.open(path) as input_file:
        for line in input_file:
            fields = line.decode('iso-8859-1').strip().split('\t')
            assert len(fields) == 2
            germanet_db.lemmatiser.insert(
                {'word': fields[0], 'lemma': fields[1]})
            num_lemmas += 1
    # index the collection on 'word'
    germanet_db.lemmatiser.create_index('word')
    print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
# gzipped SDEWAC word-count table (count TAB pos TAB word) shipped with
# this module
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'

def insert_infocontent_data(germanet_db):
    '''
    For every synset in GermaNet, inserts count information derived
    from SDEWAC.

    Stores the relative frequency on each synset document under the
    'infocont' key.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # use add one smoothing
    gn_counts = defaultdict(lambda: 1.)
    total_count = 1
    input_file = gzip.open(os.path.join(os.path.dirname(__file__),
                                        WORD_COUNT_FILE))
    num_lines_read = 0
    num_lines = 0
    for line in input_file:
        line = line.decode('utf-8').strip().split('\t')
        num_lines += 1
        # skip malformed rows (expect: count, pos, word)
        if len(line) != 3:
            continue
        count, pos, word = line
        num_lines_read += 1
        count = int(count)
        synsets = set(gnet.synsets(word, pos))
        if not synsets:
            continue
        # Although Resnik (1995) suggests dividing count by the number
        # of synsets, Patwardhan et al (2003) argue against doing
        # this.
        count = float(count) / len(synsets)
        for synset in synsets:
            total_count += count
            # distribute the count equally over the synset's hypernym
            # paths, crediting every ancestor on each path
            paths = synset.hypernym_paths
            scount = float(count) / len(paths)
            for path in paths:
                for ss in path:
                    gn_counts[ss._id] += scount
    print('Read {0} of {1} lines from count file.'.format(num_lines_read,
                                                          num_lines))
    print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
    print('Total count is {0}'.format(total_count))
    input_file.close()
    # update all the synset records in GermaNet
    num_updates = 0
    for synset in germanet_db.synsets.find():
        synset['infocont'] = gn_counts[synset['_id']] / total_count
        germanet_db.synsets.save(synset)
        num_updates += 1
    print('Updated {0} synsets.'.format(num_updates))
def compute_max_min_depth(germanet_db):
    '''
    For every part of speech in GermaNet, computes the maximum
    min_depth in that hierarchy and stores the result in the metainfo
    collection.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    net = germanet.GermaNet(germanet_db)
    max_min_depths = defaultdict(lambda: -1)
    for ss in net.all_synsets():
        depth = ss.min_depth
        if depth > max_min_depths[ss.category]:
            max_min_depths[ss.category] = depth
    # ensure there is a metainfo document to attach the result to
    if germanet_db.metainfo.count() == 0:
        germanet_db.metainfo.insert({})
    metainfo = germanet_db.metainfo.find_one()
    metainfo['max_min_depths'] = max_min_depths
    germanet_db.metainfo.save(metainfo)
    print('Computed maximum min_depth for all parts of speech:')
    print(u', '.join(u'{0}: {1}'.format(k, v)
                     for (k, v) in sorted(max_min_depths.items())).encode('utf-8'))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
    '''Command-line entry point: parse arguments and run the import.'''
    usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
             'XML_PATH the directory containing the '
             'GermaNet .xml files')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--host', default=None,
                      help='hostname or IP address of the MongoDB instance '
                           'where the GermaNet database will be inserted '
                           '(default: %default)')
    parser.add_option('--port', type='int', default=None,
                      help='port number of the MongoDB instance where the '
                           'GermaNet database will be inserted (default: %default)')
    parser.add_option('--database', dest='database_name', default='germanet',
                      help='the name of the database on the MongoDB instance '
                           'where GermaNet will be stored (default: %default)')
    opts, args = parser.parse_args()
    if len(args) != 1:
        parser.error("incorrect number of arguments")
        sys.exit(1)
    client = MongoClient(opts.host, opts.port)
    germanet_db = client[opts.database_name]
    (lex_files, gn_rels_file,
     wiktionary_files, ili_files) = find_germanet_xml_files(args[0])
    # load the data in dependency order
    insert_lexical_information(germanet_db, lex_files)
    insert_relation_information(germanet_db, gn_rels_file)
    insert_paraphrase_information(germanet_db, wiktionary_files)
    insert_lemmatisation_data(germanet_db)
    insert_infocontent_data(germanet_db)
    compute_max_min_depth(germanet_db)
    client.close()
# Script entry point.  NOTE(review): the extra sys.argv != [''] test
# presumably guards against execution with an empty argv (e.g. under some
# embedded interpreters) -- confirm against original project history.
if __name__ == '__main__' and sys.argv != ['']:
    main()
|
wroberts/pygermanet | pygermanet/mongo_import.py | read_paraphrase_file | python | def read_paraphrase_file(filename):
'''
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
assert doc.getroot().tag == 'wiktionaryParaphrases'
paraphrases = []
for child in doc.getroot():
if child.tag == 'wiktionaryParaphrase':
paraphrase = child
warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
if 0 < len(paraphrase):
print('unrecognised child of <wiktionaryParaphrase>',
list(paraphrase))
paraphrase_dict = dict(paraphrase.items())
if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
print('<paraphrase> attribute "edited" has unexpected value',
paraphrase_dict['edited'])
else:
paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
paraphrase_dict['edited']]
if not paraphrase_dict['wiktionarySenseId'].isdigit():
print('<paraphrase> attribute "wiktionarySenseId" has '
'non-integer value', paraphrase_dict['edited'])
else:
paraphrase_dict['wiktionarySenseId'] = \
int(paraphrase_dict['wiktionarySenseId'], 10)
paraphrases.append(paraphrase_dict)
else:
print('unknown child of <wiktionaryParaphrases>', child)
return paraphrases | Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`: | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L339-L376 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
    '''
    Globs the XML files contained in the given directory and sorts
    them into sections for import into the MongoDB database.

    Arguments:
    - `xml_path`: the path to the directory containing the GermaNet
      XML files
    '''
    remaining = sorted(glob.glob(os.path.join(xml_path, '*.xml')))

    def claim(pred):
        # split off the files whose lower-cased basename matches pred,
        # returning (matched, files still unclaimed)
        matched = [f for f in remaining
                   if pred(os.path.basename(f).lower())]
        return matched, sorted(set(remaining) - set(matched))

    # lexical information files: adj.*, nomen.*, verben.*
    lex_files, remaining = claim(
        lambda name: re.match(r'(adj|nomen|verben)\.', name) is not None)
    if not lex_files:
        print('ERROR: cannot find lexical information files')
    # the GermaNet relation file (exactly one expected)
    rels, remaining = claim(lambda name: name == 'gn_relations.xml')
    if not rels:
        print('ERROR: cannot find relations file gn_relations.xml')
        gn_rels_file = None
    else:
        if 1 < len(rels):
            print('WARNING: more than one relations file gn_relations.xml, '
                  'taking first match')
        gn_rels_file = rels[0]
    # wiktionary paraphrase files
    wiktionary_files, remaining = claim(
        lambda name: re.match(r'wiktionaryparaphrases-', name) is not None)
    if not wiktionary_files:
        print('WARNING: cannot find wiktionary paraphrase files')
    # interlingual index files
    ili_files, remaining = claim(
        lambda name: name.startswith('interlingualindex'))
    if not ili_files:
        print('WARNING: cannot find interlingual index file')
    if remaining:
        print('WARNING: unrecognised xml files:', remaining)
    return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc,
                 node,
                 recognised_attribs,
                 reqd_attribs=None):
    '''
    Error checking of XML input: print a warning for every required
    attribute missing from `node`, and for every attribute present on
    `node` that is not recognised.

    Arguments:
    - `loc`: a string with some information about the location of the
      error in the XML file
    - `node`: the node to check
    - `recognised_attribs`: a set of node attributes which we know how
      to handle
    - `reqd_attribs`: a set of node attributes which we require to be
      present; if this argument is None, it will take the same value
      as `recognised_attribs`
    '''
    if reqd_attribs is None:
        reqd_attribs = recognised_attribs
    present = set(node.keys())
    missing = reqd_attribs - present
    unknown = present - recognised_attribs
    if missing:
        print(loc, 'missing <{0}> attributes'.format(node.tag), missing)
    if unknown:
        print(loc, 'unrecognised <{0}> properties'.format(node.tag), unknown)
# Attributes expected on <synset> elements.
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
# Attributes expected on <lexUnit> elements.
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
                       'source', 'sense', 'id'])
# Attributes expected on compound <modifier> and <head> elements.
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
# Maps the yes/no strings used in the XML to Python booleans.
MAP_YESNO_TO_BOOL = {
    'yes': True,
    'no': False,
}
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Each returned dict holds one <synset>'s attributes plus a
    'lexunits' list of per-<lexUnit> dicts (with 'examples' and
    'frames' lists).  Malformed input is reported on stdout but does
    not abort the parse.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        synset_dict = dict(synset.items())
        # location string used to prefix all warnings for this synset
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                # NOTE: the loop variable `child` is deliberately reused
                # (shadowed) at several nesting levels below.
                for child in lexunit:
                    if child.tag in ['orthForm',
                                    'orthVar',
                                    'oldOrthForm',
                                    'oldOrthVar']:
                        # orthographic variants: stored as plain strings
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                if 'exframe' in example_dict:
                                    print(lexloc,
                                          'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                        # NOTE(review): compound_dict is built but never
                        # attached to lexunit_dict in the visible code --
                        # confirm whether this is intentional.
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                # NOTE(review): paraphrase_text is checked but not stored
                # on synset_dict in the visible code -- confirm.
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
# Attributes on <lex_rel>/<con_rel> elements: 'dir', 'from', 'name' and
# 'to' are required; 'inv' (the name of the inverse relation) is optional.
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
# Legal values of the 'dir' attribute for lexical / conceptual relations.
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
    '''
    Parse the GermaNet relation file ``gn_relations.xml``, which lists
    all the relations holding between lexical units and synsets.

    Arguments:
    - `filename`: path to the XML relation file

    Returns a pair ``(lex_rels, con_rels)`` of lists of attribute
    dictionaries, one per relation element.
    '''
    with open(filename, 'rb') as relation_file:
        tree = etree.parse(relation_file)
    root = tree.getroot()
    assert root.tag == 'relations'
    lex_rels = []
    con_rels = []
    for node in root:
        attribs = dict(node.items())
        if node.tag == 'lex_rel':
            # lexical relations must be leaves
            if 0 < len(node):
                print('<lex_rel> has unexpected child node')
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if attribs['dir'] not in LEX_REL_DIRS:
                print('unrecognized <lex_rel> dir', attribs['dir'])
            # bidirectional relations need an inverse relation name
            if attribs['dir'] == 'both' and 'inv' not in attribs:
                print('<lex_rel> has dir=both but does not specify inv')
            lex_rels.append(attribs)
        elif node.tag == 'con_rel':
            # conceptual relations must be leaves
            if 0 < len(node):
                print('<con_rel> has unexpected child node')
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if attribs['dir'] not in CON_REL_DIRS:
                print('unrecognised <con_rel> dir', attribs['dir'])
            # both 'both' and 'revert' imply an inverse relation name
            if (attribs['dir'] in ['both', 'revert'] and
                'inv' not in attribs):
                print('<con_rel> has dir={0} but does not specify inv'.format(
                    attribs['dir']))
            con_rels.append(attribs)
        else:
            print('unrecognised child of <relations>', node)
            continue
    return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
# attributes recognised on <wiktionaryParaphrase> elements
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
                          'wiktionarySense', 'wiktionarySenseId'])
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords ('class' would otherwise collide when the record is
# used as an object attribute)
SYNSET_KEY_REWRITES = {
    'class': 'gn_class',
}
def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexical
      information
    '''
    # start from empty collections
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()
    # inject data from XML files into the database
    for lex_file in lex_files:
        for raw_synset in read_lexical_file(lex_file):
            # rename keys that clash with Python keywords
            synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
                          for (key, value) in raw_synset.items())
            lexunits = synset['lexunits']
            # store the lexunits first; the synset keeps their ObjectIds
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            # back-link every lexunit to its synset and copy the category
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)
    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    for key_spec in ([('orthForm', DESCENDING)],
                     [('orthForm', DESCENDING),
                      ('category', DESCENDING)],
                     [('orthForm', DESCENDING),
                      ('category', DESCENDING),
                      ('sense', DESCENDING)]):
        germanet_db.lexunits.create_index(key_spec)
    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
def insert_relation_information(germanet_db, gn_rels_file):
    '''
    Reads in the given GermaNet relation file and inserts its contents
    into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `gn_rels_file`:
    '''
    lex_rels, con_rels = read_relation_file(gn_rels_file)

    def _apply_relations(collection, relations, invertible_dirs):
        # cache the records while we work on them so each document is
        # fetched and saved at most once
        cache = {}

        def _fetch(record_id):
            if record_id not in cache:
                cache[record_id] = collection.find_one({'id': record_id})
            return cache[record_id]

        for rel in relations:
            source = _fetch(rel['from'])
            target = _fetch(rel['to'])
            if 'rels' not in source:
                source['rels'] = set()
            source['rels'].add((rel['name'], target['_id']))
            # record the inverse relation when the direction calls for it
            if rel['dir'] in invertible_dirs:
                if 'rels' not in target:
                    target['rels'] = set()
                target['rels'].add((rel['inv'], source['_id']))
        # store every record that picked up relations
        for record in cache.values():
            if 'rels' in record:
                record['rels'] = sorted(record['rels'])
                collection.save(record)

    # lexical relations link lexunits; only dir=both has an inverse
    _apply_relations(germanet_db.lexunits, lex_rels, ['both'])
    # conceptual relations link synsets; both and revert have inverses
    _apply_relations(germanet_db.synsets, con_rels, ['both', 'revert'])
    print('Inserted {0} lexical relations, {1} synset relations.'.format(
        len(lex_rels), len(con_rels)))
def insert_paraphrase_information(germanet_db, wiktionary_files):
    '''
    Reads in the given wiktionary paraphrase files and attaches their
    contents to the lexunits stored in the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `wiktionary_files`: a list of paths to wiktionary paraphrase
      XML files
    '''
    num_paraphrases = 0
    # cache the lexunits while we work on them so each document is
    # fetched and saved only once
    lexunits = {}
    for filename in wiktionary_files:
        paraphrases = read_paraphrase_file(filename)
        num_paraphrases += len(paraphrases)
        for paraphrase in paraphrases:
            lexunit_id = paraphrase['lexUnitId']
            if lexunit_id not in lexunits:
                lexunits[lexunit_id] = germanet_db.lexunits.find_one(
                    {'id': lexunit_id})
            lexunit = lexunits[lexunit_id]
            if 'paraphrases' not in lexunit:
                lexunit['paraphrases'] = []
            lexunit['paraphrases'].append(paraphrase)
    for lexunit in lexunits.values():
        germanet_db.lexunits.save(lexunit)
    print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
# gzipped tab-separated word/lemma pairs bundled alongside this module
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'
def insert_lemmatisation_data(germanet_db):
    '''
    Creates the lemmatiser collection in the given MongoDB instance
    using the data derived from the Projekt deutscher Wortschatz.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    # drop the database collection if it already exists
    germanet_db.lemmatiser.drop()
    num_lemmas = 0
    # use a context manager so the gzip handle is closed even if a
    # malformed line trips the assertion below (the manual close() on
    # the success path only would otherwise leak the file handle)
    with gzip.open(os.path.join(os.path.dirname(__file__),
                                LEMMATISATION_FILE)) as input_file:
        for line in input_file:
            # each line is '<inflected form>\t<lemma>' in latin-1
            line = line.decode('iso-8859-1').strip().split('\t')
            assert len(line) == 2
            # dict() consumes the zip iterator directly; no list() needed
            germanet_db.lemmatiser.insert(dict(zip(('word', 'lemma'), line)))
            num_lemmas += 1
    # index the collection on 'word'
    germanet_db.lemmatiser.create_index('word')
    print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
# gzipped word counts (count<TAB>pos<TAB>word) derived from SDEWAC
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'
def insert_infocontent_data(germanet_db):
    '''
    For every synset in GermaNet, inserts count information derived
    from SDEWAC.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # use add one smoothing: every synset starts with a count of 1
    gn_counts = defaultdict(lambda: 1.)
    total_count = 1
    num_lines_read = 0
    num_lines = 0
    # a context manager guarantees the gzip handle is closed even if a
    # bad line raises inside the loop (e.g. in int(count)); the manual
    # close() only ran on the success path
    with gzip.open(os.path.join(os.path.dirname(__file__),
                                WORD_COUNT_FILE)) as input_file:
        for line in input_file:
            line = line.decode('utf-8').strip().split('\t')
            num_lines += 1
            # skip malformed lines
            if len(line) != 3:
                continue
            count, pos, word = line
            num_lines_read += 1
            count = int(count)
            synsets = set(gnet.synsets(word, pos))
            if not synsets:
                continue
            # Although Resnik (1995) suggests dividing count by the number
            # of synsets, Patwardhan et al (2003) argue against doing
            # this.
            count = float(count) / len(synsets)
            for synset in synsets:
                total_count += count
                paths = synset.hypernym_paths
                # spread the count evenly over all hypernym paths
                scount = float(count) / len(paths)
                for path in paths:
                    for ss in path:
                        gn_counts[ss._id] += scount
    print('Read {0} of {1} lines from count file.'.format(num_lines_read,
                                                          num_lines))
    print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
    print('Total count is {0}'.format(total_count))
    # update all the synset records in GermaNet with relative frequency
    num_updates = 0
    for synset in germanet_db.synsets.find():
        synset['infocont'] = gn_counts[synset['_id']] / total_count
        germanet_db.synsets.save(synset)
        num_updates += 1
    print('Updated {0} synsets.'.format(num_updates))
def compute_max_min_depth(germanet_db):
    '''
    For every part of speech in GermaNet, computes the maximum
    min_depth in that hierarchy.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # track the deepest min_depth seen so far for each part of speech
    max_min_depths = defaultdict(lambda: -1)
    for synset in gnet.all_synsets():
        category = synset.category
        depth = synset.min_depth
        if depth > max_min_depths[category]:
            max_min_depths[category] = depth
    # persist the result on the (single) metainfo document, creating it
    # if this is the first run
    if germanet_db.metainfo.count() == 0:
        germanet_db.metainfo.insert({})
    metainfo = germanet_db.metainfo.find_one()
    metainfo['max_min_depths'] = max_min_depths
    germanet_db.metainfo.save(metainfo)
    print('Computed maximum min_depth for all parts of speech:')
    print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
                     sorted(max_min_depths.items())).encode('utf-8'))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
    '''
    Command-line entry point: parses options, connects to MongoDB, and
    imports the GermaNet XML data found in XML_PATH.
    '''
    usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
             'XML_PATH the directory containing the '
             'GermaNet .xml files')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--host', default=None,
                      help='hostname or IP address of the MongoDB instance '
                      'where the GermaNet database will be inserted '
                      '(default: %default)')
    parser.add_option('--port', type='int', default=None,
                      help='port number of the MongoDB instance where the '
                      'GermaNet database will be inserted (default: %default)')
    parser.add_option('--database', dest='database_name', default='germanet',
                      help='the name of the database on the MongoDB instance '
                      'where GermaNet will be stored (default: %default)')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("incorrect number of arguments")
        # NOTE(review): parser.error() already exits the process, so this
        # sys.exit(1) is unreachable; kept for defensive clarity
        sys.exit(1)
    xml_path = args[0]
    # None host/port make MongoClient fall back to its built-in defaults
    client = MongoClient(options.host, options.port)
    germanet_db = client[options.database_name]
    # classify the XML files found under xml_path by their role
    lex_files, gn_rels_file, wiktionary_files, ili_files = \
        find_germanet_xml_files(xml_path)
    # lexical data must go in first: the later steps look up lexunits
    # and synsets by id
    insert_lexical_information(germanet_db, lex_files)
    insert_relation_information(germanet_db, gn_rels_file)
    insert_paraphrase_information(germanet_db, wiktionary_files)
    insert_lemmatisation_data(germanet_db)
    insert_infocontent_data(germanet_db)
    compute_max_min_depth(germanet_db)
    client.close()
# run only when executed as a script; the sys.argv check presumably guards
# against environments that import/run the module with an empty argv --
# TODO confirm original intent
if __name__ == '__main__' and sys.argv != ['']:
    main()
|
wroberts/pygermanet | pygermanet/mongo_import.py | insert_lexical_information | python | def insert_lexical_information(germanet_db, lex_files):
'''
Reads in the given lexical information files and inserts their
contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `lex_files`: a list of paths to XML files containing lexial
information
'''
# drop the database collections if they already exist
germanet_db.lexunits.drop()
germanet_db.synsets.drop()
# inject data from XML files into the database
for lex_file in lex_files:
synsets = read_lexical_file(lex_file)
for synset in synsets:
synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
for (key, value) in synset.items())
lexunits = synset['lexunits']
synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
synset_id = germanet_db.synsets.insert(synset)
for lexunit in lexunits:
lexunit['synset'] = synset_id
lexunit['category'] = synset['category']
germanet_db.lexunits.save(lexunit)
# index the two collections by id
germanet_db.synsets.create_index('id')
germanet_db.lexunits.create_index('id')
# also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
germanet_db.lexunits.create_index([('orthForm', DESCENDING),
('category', DESCENDING)])
germanet_db.lexunits.create_index([('orthForm', DESCENDING),
('category', DESCENDING),
('sense', DESCENDING)])
print('Inserted {0} synsets, {1} lexical units.'.format(
germanet_db.synsets.count(),
germanet_db.lexunits.count())) | Reads in the given lexical information files and inserts their
contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `lex_files`: a list of paths to XML files containing lexial
information | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L389-L427 | [
"def read_lexical_file(filename):\n '''\n Reads in a GermaNet lexical information file and returns its\n contents as a list of dictionary structures.\n\n Arguments:\n - `filename`: the name of the XML file to read\n '''\n with open(filename, 'rb') as input_file:\n doc = etree.parse(input_file)\n\n synsets = []\n assert doc.getroot().tag == 'synsets'\n for synset in doc.getroot():\n if synset.tag != 'synset':\n print('unrecognised child of <synsets>', synset)\n continue\n synset_dict = dict(synset.items())\n synloc = '{0} synset {1},'.format(filename,\n synset_dict.get('id', '???'))\n warn_attribs(synloc, synset, SYNSET_ATTRIBS)\n synset_dict['lexunits'] = []\n synsets.append(synset_dict)\n\n for child in synset:\n if child.tag == 'lexUnit':\n lexunit = child\n lexunit_dict = dict(lexunit.items())\n lexloc = synloc + ' lexUnit {0},'.format(\n lexunit_dict.get('id', '???'))\n warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)\n # convert some properties to booleans\n for key in ['styleMarking', 'artificial', 'namedEntity']:\n if key in lexunit_dict:\n if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:\n print(lexloc, ('lexunit property {0} has '\n 'non-boolean value').format(key),\n lexunit_dict[key])\n continue\n lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]\n # convert sense to integer number\n if 'sense' in lexunit_dict:\n if lexunit_dict['sense'].isdigit():\n lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)\n else:\n print(lexloc,\n 'lexunit property sense has non-numeric value',\n lexunit_dict['sense'])\n synset_dict['lexunits'].append(lexunit_dict)\n lexunit_dict['examples'] = []\n lexunit_dict['frames'] = []\n for child in lexunit:\n if child.tag in ['orthForm',\n 'orthVar',\n 'oldOrthForm',\n 'oldOrthVar']:\n warn_attribs(lexloc, child, set())\n if not child.text:\n print(lexloc, '{0} with no text'.format(child.tag))\n continue\n if child.tag in lexunit_dict:\n print(lexloc, 'more than one {0}'.format(child.tag))\n lexunit_dict[child.tag] = 
str(child.text)\n elif child.tag == 'example':\n example = child\n text = [child for child in example\n if child.tag == 'text']\n if len(text) != 1 or not text[0].text:\n print(lexloc, '<example> tag without text')\n example_dict = {'text': str(text[0].text)}\n for child in example:\n if child.tag == 'text':\n continue\n elif child.tag == 'exframe':\n if 'exframe' in example_dict:\n print(lexloc,\n 'more than one <exframe> '\n 'for <example>')\n warn_attribs(lexloc, child, set())\n if not child.text:\n print(lexloc, '<exframe> with no text')\n continue\n example_dict['exframe'] = str(child.text)\n else:\n print(lexloc,\n 'unrecognised child of <example>',\n child)\n lexunit_dict['examples'].append(example_dict)\n elif child.tag == 'frame':\n frame = child\n warn_attribs(lexloc, frame, set())\n if 0 < len(frame):\n print(lexloc, 'unrecognised <frame> children',\n list(frame))\n if not frame.text:\n print(lexloc, '<frame> without text')\n continue\n lexunit_dict['frames'].append(str(frame.text))\n elif child.tag == 'compound':\n compound = child\n warn_attribs(lexloc, compound, set())\n compound_dict = {}\n for child in compound:\n if child.tag == 'modifier':\n modifier_dict = dict(child.items())\n warn_attribs(lexloc, child,\n MODIFIER_ATTRIBS, set())\n if not child.text:\n print(lexloc, 'modifier without text')\n continue\n modifier_dict['text'] = str(child.text)\n if 'modifier' not in compound_dict:\n compound_dict['modifier'] = []\n compound_dict['modifier'].append(modifier_dict)\n elif child.tag == 'head':\n head_dict = dict(child.items())\n warn_attribs(lexloc, child, HEAD_ATTRIBS, set())\n if not child.text:\n print(lexloc, '<head> without text')\n continue\n head_dict['text'] = str(child.text)\n if 'head' in compound_dict:\n print(lexloc,\n 'more than one head in <compound>')\n compound_dict['head'] = head_dict\n else:\n print(lexloc,\n 'unrecognised child of <compound>',\n child)\n continue\n else:\n print(lexloc, 'unrecognised child of <lexUnit>', child)\n 
continue\n elif child.tag == 'paraphrase':\n paraphrase = child\n warn_attribs(synloc, paraphrase, set())\n paraphrase_text = str(paraphrase.text)\n if not paraphrase_text:\n print(synloc, 'WARNING: <paraphrase> tag with no text')\n else:\n print(synloc, 'unrecognised child of <synset>', child)\n continue\n\n return synsets\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
'''
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
'''
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if
re.match(r'(adj|nomen|verben)\.',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(lex_files))
if not lex_files:
print('ERROR: cannot find lexical information files')
# sort out the GermaNet relations file
gn_rels_file = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower() == 'gn_relations.xml']
xml_files = sorted(set(xml_files) - set(gn_rels_file))
if not gn_rels_file:
print('ERROR: cannot find relations file gn_relations.xml')
gn_rels_file = None
else:
if 1 < len(gn_rels_file):
print ('WARNING: more than one relations file gn_relations.xml, '
'taking first match')
gn_rels_file = gn_rels_file[0]
# sort out the wiktionary paraphrase files
wiktionary_files = [xml_file for xml_file in xml_files if
re.match(r'wiktionaryparaphrases-',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(wiktionary_files))
if not wiktionary_files:
print('WARNING: cannot find wiktionary paraphrase files')
# sort out the interlingual index file
ili_files = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower().startswith(
'interlingualindex')]
xml_files = sorted(set(xml_files) - set(ili_files))
if not ili_files:
print('WARNING: cannot find interlingual index file')
if xml_files:
print('WARNING: unrecognised xml files:', xml_files)
return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc,
node,
recognised_attribs,
reqd_attribs=None):
'''
Error checking of XML input: check that the given node has certain
required attributes, and does not have any unrecognised
attributes.
Arguments:
- `loc`: a string with some information about the location of the
error in the XML file
- `node`: the node to check
- `recognised_attribs`: a set of node attributes which we know how
to handle
- `reqd_attribs`: a set of node attributes which we require to be
present; if this argument is None, it will take the same value
as `recognised_attribs`
'''
if reqd_attribs is None:
reqd_attribs = recognised_attribs
found_attribs = set(node.keys())
if reqd_attribs - found_attribs:
print(loc, 'missing <{0}> attributes'.format(node.tag),
reqd_attribs - found_attribs)
if found_attribs - recognised_attribs:
print(loc, 'unrecognised <{0}> properties'.format(node.tag),
found_attribs - recognised_attribs)
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
'source', 'sense', 'id'])
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
MAP_YESNO_TO_BOOL = {
'yes': True,
'no': False,
}
def read_lexical_file(filename):
'''
Reads in a GermaNet lexical information file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`: the name of the XML file to read
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
synsets = []
assert doc.getroot().tag == 'synsets'
for synset in doc.getroot():
if synset.tag != 'synset':
print('unrecognised child of <synsets>', synset)
continue
synset_dict = dict(synset.items())
synloc = '{0} synset {1},'.format(filename,
synset_dict.get('id', '???'))
warn_attribs(synloc, synset, SYNSET_ATTRIBS)
synset_dict['lexunits'] = []
synsets.append(synset_dict)
for child in synset:
if child.tag == 'lexUnit':
lexunit = child
lexunit_dict = dict(lexunit.items())
lexloc = synloc + ' lexUnit {0},'.format(
lexunit_dict.get('id', '???'))
warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
# convert some properties to booleans
for key in ['styleMarking', 'artificial', 'namedEntity']:
if key in lexunit_dict:
if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
print(lexloc, ('lexunit property {0} has '
'non-boolean value').format(key),
lexunit_dict[key])
continue
lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
# convert sense to integer number
if 'sense' in lexunit_dict:
if lexunit_dict['sense'].isdigit():
lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
else:
print(lexloc,
'lexunit property sense has non-numeric value',
lexunit_dict['sense'])
synset_dict['lexunits'].append(lexunit_dict)
lexunit_dict['examples'] = []
lexunit_dict['frames'] = []
for child in lexunit:
if child.tag in ['orthForm',
'orthVar',
'oldOrthForm',
'oldOrthVar']:
warn_attribs(lexloc, child, set())
if not child.text:
print(lexloc, '{0} with no text'.format(child.tag))
continue
if child.tag in lexunit_dict:
print(lexloc, 'more than one {0}'.format(child.tag))
lexunit_dict[child.tag] = str(child.text)
elif child.tag == 'example':
example = child
text = [child for child in example
if child.tag == 'text']
if len(text) != 1 or not text[0].text:
print(lexloc, '<example> tag without text')
example_dict = {'text': str(text[0].text)}
for child in example:
if child.tag == 'text':
continue
elif child.tag == 'exframe':
if 'exframe' in example_dict:
print(lexloc,
'more than one <exframe> '
'for <example>')
warn_attribs(lexloc, child, set())
if not child.text:
print(lexloc, '<exframe> with no text')
continue
example_dict['exframe'] = str(child.text)
else:
print(lexloc,
'unrecognised child of <example>',
child)
lexunit_dict['examples'].append(example_dict)
elif child.tag == 'frame':
frame = child
warn_attribs(lexloc, frame, set())
if 0 < len(frame):
print(lexloc, 'unrecognised <frame> children',
list(frame))
if not frame.text:
print(lexloc, '<frame> without text')
continue
lexunit_dict['frames'].append(str(frame.text))
elif child.tag == 'compound':
compound = child
warn_attribs(lexloc, compound, set())
compound_dict = {}
for child in compound:
if child.tag == 'modifier':
modifier_dict = dict(child.items())
warn_attribs(lexloc, child,
MODIFIER_ATTRIBS, set())
if not child.text:
print(lexloc, 'modifier without text')
continue
modifier_dict['text'] = str(child.text)
if 'modifier' not in compound_dict:
compound_dict['modifier'] = []
compound_dict['modifier'].append(modifier_dict)
elif child.tag == 'head':
head_dict = dict(child.items())
warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
if not child.text:
print(lexloc, '<head> without text')
continue
head_dict['text'] = str(child.text)
if 'head' in compound_dict:
print(lexloc,
'more than one head in <compound>')
compound_dict['head'] = head_dict
else:
print(lexloc,
'unrecognised child of <compound>',
child)
continue
else:
print(lexloc, 'unrecognised child of <lexUnit>', child)
continue
elif child.tag == 'paraphrase':
paraphrase = child
warn_attribs(synloc, paraphrase, set())
paraphrase_text = str(paraphrase.text)
if not paraphrase_text:
print(synloc, 'WARNING: <paraphrase> tag with no text')
else:
print(synloc, 'unrecognised child of <synset>', child)
continue
return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
'''
Reads the GermaNet relation file ``gn_relations.xml`` which lists
all the relations holding between lexical units and synsets.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
lex_rels = []
con_rels = []
assert doc.getroot().tag == 'relations'
for child in doc.getroot():
if child.tag == 'lex_rel':
if 0 < len(child):
print('<lex_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in LEX_REL_DIRS:
print('unrecognized <lex_rel> dir', child_dict['dir'])
if child_dict['dir'] == 'both' and 'inv' not in child_dict:
print('<lex_rel> has dir=both but does not specify inv')
lex_rels.append(child_dict)
elif child.tag == 'con_rel':
if 0 < len(child):
print('<con_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in CON_REL_DIRS:
print('unrecognised <con_rel> dir', child_dict['dir'])
if (child_dict['dir'] in ['both', 'revert'] and
'inv' not in child_dict):
print('<con_rel> has dir={0} but does not specify inv'.format(
child_dict['dir']))
con_rels.append(child_dict)
else:
print('unrecognised child of <relations>', child)
continue
return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
'wiktionarySense', 'wiktionarySenseId'])
def read_paraphrase_file(filename):
'''
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
assert doc.getroot().tag == 'wiktionaryParaphrases'
paraphrases = []
for child in doc.getroot():
if child.tag == 'wiktionaryParaphrase':
paraphrase = child
warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
if 0 < len(paraphrase):
print('unrecognised child of <wiktionaryParaphrase>',
list(paraphrase))
paraphrase_dict = dict(paraphrase.items())
if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
print('<paraphrase> attribute "edited" has unexpected value',
paraphrase_dict['edited'])
else:
paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
paraphrase_dict['edited']]
if not paraphrase_dict['wiktionarySenseId'].isdigit():
print('<paraphrase> attribute "wiktionarySenseId" has '
'non-integer value', paraphrase_dict['edited'])
else:
paraphrase_dict['wiktionarySenseId'] = \
int(paraphrase_dict['wiktionarySenseId'], 10)
paraphrases.append(paraphrase_dict)
else:
print('unknown child of <wiktionaryParaphrases>', child)
return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords
SYNSET_KEY_REWRITES = {
'class': 'gn_class',
}
def insert_relation_information(germanet_db, gn_rels_file):
'''
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `gn_rels_file`:
'''
lex_rels, con_rels = read_relation_file(gn_rels_file)
# cache the lexunits while we work on them
lexunits = {}
for lex_rel in lex_rels:
if lex_rel['from'] not in lexunits:
lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
{'id': lex_rel['from']})
from_lexunit = lexunits[lex_rel['from']]
if lex_rel['to'] not in lexunits:
lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
{'id': lex_rel['to']})
to_lexunit = lexunits[lex_rel['to']]
if 'rels' not in from_lexunit:
from_lexunit['rels'] = set()
from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
if lex_rel['dir'] == 'both':
if 'rels' not in to_lexunit:
to_lexunit['rels'] = set()
to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
for lexunit in lexunits.values():
if 'rels' in lexunit:
lexunit['rels'] = sorted(lexunit['rels'])
germanet_db.lexunits.save(lexunit)
# cache the synsets while we work on them
synsets = {}
for con_rel in con_rels:
if con_rel['from'] not in synsets:
synsets[con_rel['from']] = germanet_db.synsets.find_one(
{'id': con_rel['from']})
from_synset = synsets[con_rel['from']]
if con_rel['to'] not in synsets:
synsets[con_rel['to']] = germanet_db.synsets.find_one(
{'id': con_rel['to']})
to_synset = synsets[con_rel['to']]
if 'rels' not in from_synset:
from_synset['rels'] = set()
from_synset['rels'].add((con_rel['name'], to_synset['_id']))
if con_rel['dir'] in ['both', 'revert']:
if 'rels' not in to_synset:
to_synset['rels'] = set()
to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
for synset in synsets.values():
if 'rels' in synset:
synset['rels'] = sorted(synset['rels'])
germanet_db.synsets.save(synset)
print('Inserted {0} lexical relations, {1} synset relations.'.format(
len(lex_rels), len(con_rels)))
def insert_paraphrase_information(germanet_db, wiktionary_files):
'''
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `wiktionary_files`:
'''
num_paraphrases = 0
# cache the lexunits while we work on them
lexunits = {}
for filename in wiktionary_files:
paraphrases = read_paraphrase_file(filename)
num_paraphrases += len(paraphrases)
for paraphrase in paraphrases:
if paraphrase['lexUnitId'] not in lexunits:
lexunits[paraphrase['lexUnitId']] = \
germanet_db.lexunits.find_one(
{'id': paraphrase['lexUnitId']})
lexunit = lexunits[paraphrase['lexUnitId']]
if 'paraphrases' not in lexunit:
lexunit['paraphrases'] = []
lexunit['paraphrases'].append(paraphrase)
for lexunit in lexunits.values():
germanet_db.lexunits.save(lexunit)
print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'
def insert_lemmatisation_data(germanet_db):
    '''
    Creates the lemmatiser collection in the given MongoDB instance
    using the data derived from the Projekt deutscher Wortschatz.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    # start from a clean collection
    germanet_db.lemmatiser.drop()
    num_lemmas = 0
    lemma_path = os.path.join(os.path.dirname(__file__), LEMMATISATION_FILE)
    input_file = gzip.open(lemma_path)
    for raw_line in input_file:
        # each line is an iso-8859-1 encoded "word<TAB>lemma" pair
        fields = raw_line.decode('iso-8859-1').strip().split('\t')
        assert len(fields) == 2
        word, lemma = fields
        germanet_db.lemmatiser.insert({'word': word, 'lemma': lemma})
        num_lemmas += 1
    input_file.close()
    # index the collection on 'word'
    germanet_db.lemmatiser.create_index('word')
    print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
# gzipped tab-separated (count, pos, word) triples derived from the
# SDEWAC corpus (read by insert_infocontent_data below)
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'
def insert_infocontent_data(germanet_db):
    '''
    For every synset in GermaNet, inserts count information derived
    from SDEWAC.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # add-one smoothing: every synset starts with a count of 1
    gn_counts = defaultdict(lambda: 1.)
    total_count = 1
    count_path = os.path.join(os.path.dirname(__file__), WORD_COUNT_FILE)
    input_file = gzip.open(count_path)
    num_lines_read = 0
    num_lines = 0
    for raw_line in input_file:
        fields = raw_line.decode('utf-8').strip().split('\t')
        num_lines += 1
        # skip malformed lines; a valid line is (count, pos, word)
        if len(fields) != 3:
            continue
        count, pos, word = fields
        num_lines_read += 1
        count = int(count)
        matched = set(gnet.synsets(word, pos))
        if not matched:
            continue
        # Although Resnik (1995) suggests dividing count by the number
        # of synsets, Patwardhan et al (2003) argue against doing
        # this.
        count = float(count) / len(matched)
        for synset in matched:
            total_count += count
            # distribute the synset's share of the count evenly over
            # all of its hypernym paths, crediting every ancestor
            paths = synset.hypernym_paths
            scount = float(count) / len(paths)
            for path in paths:
                for ss in path:
                    gn_counts[ss._id] += scount
    print('Read {0} of {1} lines from count file.'.format(num_lines_read,
                                                          num_lines))
    print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
    print('Total count is {0}'.format(total_count))
    input_file.close()
    # normalise counts into probabilities and store them on each synset
    num_updates = 0
    for synset in germanet_db.synsets.find():
        synset['infocont'] = gn_counts[synset['_id']] / total_count
        germanet_db.synsets.save(synset)
        num_updates += 1
    print('Updated {0} synsets.'.format(num_updates))
def compute_max_min_depth(germanet_db):
    '''
    For every part of speech in GermaNet, computes the maximum
    min_depth in that hierarchy.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    max_min_depths = defaultdict(lambda: -1)
    # track, per part of speech, the largest min_depth seen so far
    for synset in gnet.all_synsets():
        depth = synset.min_depth
        if depth > max_min_depths[synset.category]:
            max_min_depths[synset.category] = depth
    # the metainfo collection holds a single document; create it if absent
    if germanet_db.metainfo.count() == 0:
        germanet_db.metainfo.insert({})
    metainfo = germanet_db.metainfo.find_one()
    metainfo['max_min_depths'] = max_min_depths
    germanet_db.metainfo.save(metainfo)
    print('Computed maximum min_depth for all parts of speech:')
    print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
                     sorted(max_min_depths.items())).encode('utf-8'))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
    '''
    Main function: parses command-line arguments and imports the
    GermaNet XML files found at the given path into a MongoDB
    database.
    '''
    usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
             'XML_PATH the directory containing the '
             'GermaNet .xml files')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--host', default=None,
                      help='hostname or IP address of the MongoDB instance '
                      'where the GermaNet database will be inserted '
                      '(default: %default)')
    parser.add_option('--port', type='int', default=None,
                      help='port number of the MongoDB instance where the '
                      'GermaNet database will be inserted (default: %default)')
    parser.add_option('--database', dest='database_name', default='germanet',
                      help='the name of the database on the MongoDB instance '
                      'where GermaNet will be stored (default: %default)')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        # OptionParser.error() prints the message to stderr and exits
        # the process itself (status 2), so the sys.exit(1) that used
        # to follow here was unreachable dead code.
        parser.error("incorrect number of arguments")
    xml_path = args[0]
    client = MongoClient(options.host, options.port)
    germanet_db = client[options.database_name]
    # import the lexical data first; the later stages annotate the
    # synset/lexunit records created by insert_lexical_information
    lex_files, gn_rels_file, wiktionary_files, ili_files = \
        find_germanet_xml_files(xml_path)
    insert_lexical_information(germanet_db, lex_files)
    insert_relation_information(germanet_db, gn_rels_file)
    insert_paraphrase_information(germanet_db, wiktionary_files)
    insert_lemmatisation_data(germanet_db)
    insert_infocontent_data(germanet_db)
    compute_max_min_depth(germanet_db)
    client.close()
# Run the import when executed as a script.  The extra
# `sys.argv != ['']` test presumably guards against invocation from an
# embedded interpreter with an empty argv -- NOTE(review): confirm this
# guard is still needed.
if __name__ == '__main__' and sys.argv != ['']:
    main()
|
wroberts/pygermanet | pygermanet/mongo_import.py | insert_relation_information | python | def insert_relation_information(germanet_db, gn_rels_file):
'''
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `gn_rels_file`:
'''
lex_rels, con_rels = read_relation_file(gn_rels_file)
# cache the lexunits while we work on them
lexunits = {}
for lex_rel in lex_rels:
if lex_rel['from'] not in lexunits:
lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
{'id': lex_rel['from']})
from_lexunit = lexunits[lex_rel['from']]
if lex_rel['to'] not in lexunits:
lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
{'id': lex_rel['to']})
to_lexunit = lexunits[lex_rel['to']]
if 'rels' not in from_lexunit:
from_lexunit['rels'] = set()
from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
if lex_rel['dir'] == 'both':
if 'rels' not in to_lexunit:
to_lexunit['rels'] = set()
to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
for lexunit in lexunits.values():
if 'rels' in lexunit:
lexunit['rels'] = sorted(lexunit['rels'])
germanet_db.lexunits.save(lexunit)
# cache the synsets while we work on them
synsets = {}
for con_rel in con_rels:
if con_rel['from'] not in synsets:
synsets[con_rel['from']] = germanet_db.synsets.find_one(
{'id': con_rel['from']})
from_synset = synsets[con_rel['from']]
if con_rel['to'] not in synsets:
synsets[con_rel['to']] = germanet_db.synsets.find_one(
{'id': con_rel['to']})
to_synset = synsets[con_rel['to']]
if 'rels' not in from_synset:
from_synset['rels'] = set()
from_synset['rels'].add((con_rel['name'], to_synset['_id']))
if con_rel['dir'] in ['both', 'revert']:
if 'rels' not in to_synset:
to_synset['rels'] = set()
to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
for synset in synsets.values():
if 'rels' in synset:
synset['rels'] = sorted(synset['rels'])
germanet_db.synsets.save(synset)
print('Inserted {0} lexical relations, {1} synset relations.'.format(
len(lex_rels), len(con_rels))) | Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `gn_rels_file`: | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L429-L487 | [
"def read_relation_file(filename):\n '''\n Reads the GermaNet relation file ``gn_relations.xml`` which lists\n all the relations holding between lexical units and synsets.\n\n Arguments:\n - `filename`:\n '''\n with open(filename, 'rb') as input_file:\n doc = etree.parse(input_file)\n\n lex_rels = []\n con_rels = []\n assert doc.getroot().tag == 'relations'\n for child in doc.getroot():\n if child.tag == 'lex_rel':\n if 0 < len(child):\n print('<lex_rel> has unexpected child node')\n child_dict = dict(child.items())\n warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)\n if child_dict['dir'] not in LEX_REL_DIRS:\n print('unrecognized <lex_rel> dir', child_dict['dir'])\n if child_dict['dir'] == 'both' and 'inv' not in child_dict:\n print('<lex_rel> has dir=both but does not specify inv')\n lex_rels.append(child_dict)\n elif child.tag == 'con_rel':\n if 0 < len(child):\n print('<con_rel> has unexpected child node')\n child_dict = dict(child.items())\n warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)\n if child_dict['dir'] not in CON_REL_DIRS:\n print('unrecognised <con_rel> dir', child_dict['dir'])\n if (child_dict['dir'] in ['both', 'revert'] and\n 'inv' not in child_dict):\n print('<con_rel> has dir={0} but does not specify inv'.format(\n child_dict['dir']))\n con_rels.append(child_dict)\n else:\n print('unrecognised child of <relations>', child)\n continue\n\n return lex_rels, con_rels\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
'''
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
'''
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if
re.match(r'(adj|nomen|verben)\.',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(lex_files))
if not lex_files:
print('ERROR: cannot find lexical information files')
# sort out the GermaNet relations file
gn_rels_file = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower() == 'gn_relations.xml']
xml_files = sorted(set(xml_files) - set(gn_rels_file))
if not gn_rels_file:
print('ERROR: cannot find relations file gn_relations.xml')
gn_rels_file = None
else:
if 1 < len(gn_rels_file):
print ('WARNING: more than one relations file gn_relations.xml, '
'taking first match')
gn_rels_file = gn_rels_file[0]
# sort out the wiktionary paraphrase files
wiktionary_files = [xml_file for xml_file in xml_files if
re.match(r'wiktionaryparaphrases-',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(wiktionary_files))
if not wiktionary_files:
print('WARNING: cannot find wiktionary paraphrase files')
# sort out the interlingual index file
ili_files = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower().startswith(
'interlingualindex')]
xml_files = sorted(set(xml_files) - set(ili_files))
if not ili_files:
print('WARNING: cannot find interlingual index file')
if xml_files:
print('WARNING: unrecognised xml files:', xml_files)
return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc,
node,
recognised_attribs,
reqd_attribs=None):
'''
Error checking of XML input: check that the given node has certain
required attributes, and does not have any unrecognised
attributes.
Arguments:
- `loc`: a string with some information about the location of the
error in the XML file
- `node`: the node to check
- `recognised_attribs`: a set of node attributes which we know how
to handle
- `reqd_attribs`: a set of node attributes which we require to be
present; if this argument is None, it will take the same value
as `recognised_attribs`
'''
if reqd_attribs is None:
reqd_attribs = recognised_attribs
found_attribs = set(node.keys())
if reqd_attribs - found_attribs:
print(loc, 'missing <{0}> attributes'.format(node.tag),
reqd_attribs - found_attribs)
if found_attribs - recognised_attribs:
print(loc, 'unrecognised <{0}> properties'.format(node.tag),
found_attribs - recognised_attribs)
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
'source', 'sense', 'id'])
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
MAP_YESNO_TO_BOOL = {
'yes': True,
'no': False,
}
def read_lexical_file(filename):
'''
Reads in a GermaNet lexical information file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`: the name of the XML file to read
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
synsets = []
assert doc.getroot().tag == 'synsets'
for synset in doc.getroot():
if synset.tag != 'synset':
print('unrecognised child of <synsets>', synset)
continue
synset_dict = dict(synset.items())
synloc = '{0} synset {1},'.format(filename,
synset_dict.get('id', '???'))
warn_attribs(synloc, synset, SYNSET_ATTRIBS)
synset_dict['lexunits'] = []
synsets.append(synset_dict)
for child in synset:
if child.tag == 'lexUnit':
lexunit = child
lexunit_dict = dict(lexunit.items())
lexloc = synloc + ' lexUnit {0},'.format(
lexunit_dict.get('id', '???'))
warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
# convert some properties to booleans
for key in ['styleMarking', 'artificial', 'namedEntity']:
if key in lexunit_dict:
if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
print(lexloc, ('lexunit property {0} has '
'non-boolean value').format(key),
lexunit_dict[key])
continue
lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
# convert sense to integer number
if 'sense' in lexunit_dict:
if lexunit_dict['sense'].isdigit():
lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
else:
print(lexloc,
'lexunit property sense has non-numeric value',
lexunit_dict['sense'])
synset_dict['lexunits'].append(lexunit_dict)
lexunit_dict['examples'] = []
lexunit_dict['frames'] = []
for child in lexunit:
if child.tag in ['orthForm',
'orthVar',
'oldOrthForm',
'oldOrthVar']:
warn_attribs(lexloc, child, set())
if not child.text:
print(lexloc, '{0} with no text'.format(child.tag))
continue
if child.tag in lexunit_dict:
print(lexloc, 'more than one {0}'.format(child.tag))
lexunit_dict[child.tag] = str(child.text)
elif child.tag == 'example':
example = child
text = [child for child in example
if child.tag == 'text']
if len(text) != 1 or not text[0].text:
print(lexloc, '<example> tag without text')
example_dict = {'text': str(text[0].text)}
for child in example:
if child.tag == 'text':
continue
elif child.tag == 'exframe':
if 'exframe' in example_dict:
print(lexloc,
'more than one <exframe> '
'for <example>')
warn_attribs(lexloc, child, set())
if not child.text:
print(lexloc, '<exframe> with no text')
continue
example_dict['exframe'] = str(child.text)
else:
print(lexloc,
'unrecognised child of <example>',
child)
lexunit_dict['examples'].append(example_dict)
elif child.tag == 'frame':
frame = child
warn_attribs(lexloc, frame, set())
if 0 < len(frame):
print(lexloc, 'unrecognised <frame> children',
list(frame))
if not frame.text:
print(lexloc, '<frame> without text')
continue
lexunit_dict['frames'].append(str(frame.text))
elif child.tag == 'compound':
compound = child
warn_attribs(lexloc, compound, set())
compound_dict = {}
for child in compound:
if child.tag == 'modifier':
modifier_dict = dict(child.items())
warn_attribs(lexloc, child,
MODIFIER_ATTRIBS, set())
if not child.text:
print(lexloc, 'modifier without text')
continue
modifier_dict['text'] = str(child.text)
if 'modifier' not in compound_dict:
compound_dict['modifier'] = []
compound_dict['modifier'].append(modifier_dict)
elif child.tag == 'head':
head_dict = dict(child.items())
warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
if not child.text:
print(lexloc, '<head> without text')
continue
head_dict['text'] = str(child.text)
if 'head' in compound_dict:
print(lexloc,
'more than one head in <compound>')
compound_dict['head'] = head_dict
else:
print(lexloc,
'unrecognised child of <compound>',
child)
continue
else:
print(lexloc, 'unrecognised child of <lexUnit>', child)
continue
elif child.tag == 'paraphrase':
paraphrase = child
warn_attribs(synloc, paraphrase, set())
paraphrase_text = str(paraphrase.text)
if not paraphrase_text:
print(synloc, 'WARNING: <paraphrase> tag with no text')
else:
print(synloc, 'unrecognised child of <synset>', child)
continue
return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
'''
Reads the GermaNet relation file ``gn_relations.xml`` which lists
all the relations holding between lexical units and synsets.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
lex_rels = []
con_rels = []
assert doc.getroot().tag == 'relations'
for child in doc.getroot():
if child.tag == 'lex_rel':
if 0 < len(child):
print('<lex_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in LEX_REL_DIRS:
print('unrecognized <lex_rel> dir', child_dict['dir'])
if child_dict['dir'] == 'both' and 'inv' not in child_dict:
print('<lex_rel> has dir=both but does not specify inv')
lex_rels.append(child_dict)
elif child.tag == 'con_rel':
if 0 < len(child):
print('<con_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in CON_REL_DIRS:
print('unrecognised <con_rel> dir', child_dict['dir'])
if (child_dict['dir'] in ['both', 'revert'] and
'inv' not in child_dict):
print('<con_rel> has dir={0} but does not specify inv'.format(
child_dict['dir']))
con_rels.append(child_dict)
else:
print('unrecognised child of <relations>', child)
continue
return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
'wiktionarySense', 'wiktionarySenseId'])
def read_paraphrase_file(filename):
'''
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
assert doc.getroot().tag == 'wiktionaryParaphrases'
paraphrases = []
for child in doc.getroot():
if child.tag == 'wiktionaryParaphrase':
paraphrase = child
warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
if 0 < len(paraphrase):
print('unrecognised child of <wiktionaryParaphrase>',
list(paraphrase))
paraphrase_dict = dict(paraphrase.items())
if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
print('<paraphrase> attribute "edited" has unexpected value',
paraphrase_dict['edited'])
else:
paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
paraphrase_dict['edited']]
if not paraphrase_dict['wiktionarySenseId'].isdigit():
print('<paraphrase> attribute "wiktionarySenseId" has '
'non-integer value', paraphrase_dict['edited'])
else:
paraphrase_dict['wiktionarySenseId'] = \
int(paraphrase_dict['wiktionarySenseId'], 10)
paraphrases.append(paraphrase_dict)
else:
print('unknown child of <wiktionaryParaphrases>', child)
return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords
SYNSET_KEY_REWRITES = {
'class': 'gn_class',
}
def insert_lexical_information(germanet_db, lex_files):
'''
Reads in the given lexical information files and inserts their
contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `lex_files`: a list of paths to XML files containing lexial
information
'''
# drop the database collections if they already exist
germanet_db.lexunits.drop()
germanet_db.synsets.drop()
# inject data from XML files into the database
for lex_file in lex_files:
synsets = read_lexical_file(lex_file)
for synset in synsets:
synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
for (key, value) in synset.items())
lexunits = synset['lexunits']
synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
synset_id = germanet_db.synsets.insert(synset)
for lexunit in lexunits:
lexunit['synset'] = synset_id
lexunit['category'] = synset['category']
germanet_db.lexunits.save(lexunit)
# index the two collections by id
germanet_db.synsets.create_index('id')
germanet_db.lexunits.create_index('id')
# also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
germanet_db.lexunits.create_index([('orthForm', DESCENDING),
('category', DESCENDING)])
germanet_db.lexunits.create_index([('orthForm', DESCENDING),
('category', DESCENDING),
('sense', DESCENDING)])
print('Inserted {0} synsets, {1} lexical units.'.format(
germanet_db.synsets.count(),
germanet_db.lexunits.count()))
def insert_paraphrase_information(germanet_db, wiktionary_files):
'''
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `wiktionary_files`:
'''
num_paraphrases = 0
# cache the lexunits while we work on them
lexunits = {}
for filename in wiktionary_files:
paraphrases = read_paraphrase_file(filename)
num_paraphrases += len(paraphrases)
for paraphrase in paraphrases:
if paraphrase['lexUnitId'] not in lexunits:
lexunits[paraphrase['lexUnitId']] = \
germanet_db.lexunits.find_one(
{'id': paraphrase['lexUnitId']})
lexunit = lexunits[paraphrase['lexUnitId']]
if 'paraphrases' not in lexunit:
lexunit['paraphrases'] = []
lexunit['paraphrases'].append(paraphrase)
for lexunit in lexunits.values():
germanet_db.lexunits.save(lexunit)
print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'
def insert_lemmatisation_data(germanet_db):
'''
Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
# drop the database collection if it already exists
germanet_db.lemmatiser.drop()
num_lemmas = 0
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
LEMMATISATION_FILE))
for line in input_file:
line = line.decode('iso-8859-1').strip().split('\t')
assert len(line) == 2
germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line))))
num_lemmas += 1
input_file.close()
# index the collection on 'word'
germanet_db.lemmatiser.create_index('word')
print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'
def insert_infocontent_data(germanet_db):
'''
For every synset in GermaNet, inserts count information derived
from SDEWAC.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
gnet = germanet.GermaNet(germanet_db)
# use add one smoothing
gn_counts = defaultdict(lambda: 1.)
total_count = 1
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
WORD_COUNT_FILE))
num_lines_read = 0
num_lines = 0
for line in input_file:
line = line.decode('utf-8').strip().split('\t')
num_lines += 1
if len(line) != 3:
continue
count, pos, word = line
num_lines_read += 1
count = int(count)
synsets = set(gnet.synsets(word, pos))
if not synsets:
continue
# Although Resnik (1995) suggests dividing count by the number
# of synsets, Patwardhan et al (2003) argue against doing
# this.
count = float(count) / len(synsets)
for synset in synsets:
total_count += count
paths = synset.hypernym_paths
scount = float(count) / len(paths)
for path in paths:
for ss in path:
gn_counts[ss._id] += scount
print('Read {0} of {1} lines from count file.'.format(num_lines_read,
num_lines))
print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
print('Total count is {0}'.format(total_count))
input_file.close()
# update all the synset records in GermaNet
num_updates = 0
for synset in germanet_db.synsets.find():
synset['infocont'] = gn_counts[synset['_id']] / total_count
germanet_db.synsets.save(synset)
num_updates += 1
print('Updated {0} synsets.'.format(num_updates))
def compute_max_min_depth(germanet_db):
'''
For every part of speech in GermaNet, computes the maximum
min_depth in that hierarchy.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
gnet = germanet.GermaNet(germanet_db)
max_min_depths = defaultdict(lambda: -1)
for synset in gnet.all_synsets():
min_depth = synset.min_depth
if max_min_depths[synset.category] < min_depth:
max_min_depths[synset.category] = min_depth
if germanet_db.metainfo.count() == 0:
germanet_db.metainfo.insert({})
metainfo = germanet_db.metainfo.find_one()
metainfo['max_min_depths'] = max_min_depths
germanet_db.metainfo.save(metainfo)
print('Computed maximum min_depth for all parts of speech:')
print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
sorted(max_min_depths.items())).encode('utf-8'))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
'''Main function.'''
usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
'XML_PATH the directory containing the '
'GermaNet .xml files')
parser = optparse.OptionParser(usage=usage)
parser.add_option('--host', default=None,
help='hostname or IP address of the MongoDB instance '
'where the GermaNet database will be inserted '
'(default: %default)')
parser.add_option('--port', type='int', default=None,
help='port number of the MongoDB instance where the '
'GermaNet database will be inserted (default: %default)')
parser.add_option('--database', dest='database_name', default='germanet',
help='the name of the database on the MongoDB instance '
'where GermaNet will be stored (default: %default)')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
sys.exit(1)
xml_path = args[0]
client = MongoClient(options.host, options.port)
germanet_db = client[options.database_name]
lex_files, gn_rels_file, wiktionary_files, ili_files = \
find_germanet_xml_files(xml_path)
insert_lexical_information(germanet_db, lex_files)
insert_relation_information(germanet_db, gn_rels_file)
insert_paraphrase_information(germanet_db, wiktionary_files)
insert_lemmatisation_data(germanet_db)
insert_infocontent_data(germanet_db)
compute_max_min_depth(germanet_db)
client.close()
if __name__ == '__main__' and sys.argv != ['']:
main()
|
wroberts/pygermanet | pygermanet/mongo_import.py | insert_paraphrase_information | python | def insert_paraphrase_information(germanet_db, wiktionary_files):
'''
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `wiktionary_files`:
'''
num_paraphrases = 0
# cache the lexunits while we work on them
lexunits = {}
for filename in wiktionary_files:
paraphrases = read_paraphrase_file(filename)
num_paraphrases += len(paraphrases)
for paraphrase in paraphrases:
if paraphrase['lexUnitId'] not in lexunits:
lexunits[paraphrase['lexUnitId']] = \
germanet_db.lexunits.find_one(
{'id': paraphrase['lexUnitId']})
lexunit = lexunits[paraphrase['lexUnitId']]
if 'paraphrases' not in lexunit:
lexunit['paraphrases'] = []
lexunit['paraphrases'].append(paraphrase)
for lexunit in lexunits.values():
germanet_db.lexunits.save(lexunit)
print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases)) | Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `wiktionary_files`: | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L489-L516 | [
"def read_paraphrase_file(filename):\n '''\n Reads in a GermaNet wiktionary paraphrase file and returns its\n contents as a list of dictionary structures.\n\n Arguments:\n - `filename`:\n '''\n with open(filename, 'rb') as input_file:\n doc = etree.parse(input_file)\n\n assert doc.getroot().tag == 'wiktionaryParaphrases'\n paraphrases = []\n for child in doc.getroot():\n if child.tag == 'wiktionaryParaphrase':\n paraphrase = child\n warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)\n if 0 < len(paraphrase):\n print('unrecognised child of <wiktionaryParaphrase>',\n list(paraphrase))\n paraphrase_dict = dict(paraphrase.items())\n if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:\n print('<paraphrase> attribute \"edited\" has unexpected value',\n paraphrase_dict['edited'])\n else:\n paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[\n paraphrase_dict['edited']]\n if not paraphrase_dict['wiktionarySenseId'].isdigit():\n print('<paraphrase> attribute \"wiktionarySenseId\" has '\n 'non-integer value', paraphrase_dict['edited'])\n else:\n paraphrase_dict['wiktionarySenseId'] = \\\n int(paraphrase_dict['wiktionarySenseId'], 10)\n paraphrases.append(paraphrase_dict)\n else:\n print('unknown child of <wiktionaryParaphrases>', child)\n\n return paraphrases\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
'''
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
'''
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if
re.match(r'(adj|nomen|verben)\.',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(lex_files))
if not lex_files:
print('ERROR: cannot find lexical information files')
# sort out the GermaNet relations file
gn_rels_file = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower() == 'gn_relations.xml']
xml_files = sorted(set(xml_files) - set(gn_rels_file))
if not gn_rels_file:
print('ERROR: cannot find relations file gn_relations.xml')
gn_rels_file = None
else:
if 1 < len(gn_rels_file):
print ('WARNING: more than one relations file gn_relations.xml, '
'taking first match')
gn_rels_file = gn_rels_file[0]
# sort out the wiktionary paraphrase files
wiktionary_files = [xml_file for xml_file in xml_files if
re.match(r'wiktionaryparaphrases-',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(wiktionary_files))
if not wiktionary_files:
print('WARNING: cannot find wiktionary paraphrase files')
# sort out the interlingual index file
ili_files = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower().startswith(
'interlingualindex')]
xml_files = sorted(set(xml_files) - set(ili_files))
if not ili_files:
print('WARNING: cannot find interlingual index file')
if xml_files:
print('WARNING: unrecognised xml files:', xml_files)
return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc, node, recognised_attribs, reqd_attribs=None):
    '''
    Error checking of XML input: check that the given node has certain
    required attributes, and does not have any unrecognised
    attributes.  Problems are reported on stdout; nothing is raised.

    Arguments:
    - `loc`: a string with some information about the location of the
      error in the XML file
    - `node`: the node to check
    - `recognised_attribs`: a set of node attributes which we know how
      to handle
    - `reqd_attribs`: a set of node attributes which we require to be
      present; if this argument is None, it will take the same value
      as `recognised_attribs`
    '''
    if reqd_attribs is None:
        reqd_attribs = recognised_attribs
    present = set(node.keys())
    missing = reqd_attribs - present
    if missing:
        print(loc, 'missing <{0}> attributes'.format(node.tag), missing)
    unknown = present - recognised_attribs
    if unknown:
        print(loc, 'unrecognised <{0}> properties'.format(node.tag), unknown)
# sets of XML attributes we expect on each element type; consumed by
# warn_attribs() to flag missing or unknown attributes
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
                       'source', 'sense', 'id'])
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
# maps the 'yes'/'no' attribute strings used in the XML to Python booleans
MAP_YESNO_TO_BOOL = {
    'yes': True,
    'no': False,
}
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Each synset becomes a dict of its XML attributes plus a 'lexunits'
    list; each lexunit dict carries its XML attributes (with yes/no
    values converted to booleans and 'sense' to an int), any
    orthographic forms keyed by tag name, plus 'examples' and 'frames'
    lists.  Malformed input is reported on stdout and skipped where
    possible.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        synset_dict = dict(synset.items())
        # location prefix used in all warning messages for this synset
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            # leave the raw string value in place
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                # NOTE: the nested loops below deliberately rebind ``child``;
                # this is safe only because each enclosing for-loop reassigns
                # its own ``child`` on the next iteration.
                for child in lexunit:
                    if child.tag in ['orthForm',
                                     'orthVar',
                                     'oldOrthForm',
                                     'oldOrthVar']:
                        # orthographic forms are stored as plain strings on
                        # the lexunit dict, keyed by their tag name
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        # NOTE(review): if the <example> has no <text> child
                        # at all, the next line raises IndexError despite the
                        # warning above — confirm the data never triggers it.
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                # optional syntactic frame for this example
                                if 'exframe' in example_dict:
                                    print(lexloc,
                                          'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                # a compound may carry several modifiers
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                # but at most one head
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                        # NOTE(review): ``compound_dict`` is built but never
                        # attached to ``lexunit_dict``; the parsed compound
                        # information is discarded — confirm this is intended.
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
                # NOTE(review): the paraphrase text is validated but never
                # stored on ``synset_dict`` — confirm this is intended.
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
# attribute sets for <lex_rel>/<con_rel> elements and their legal 'dir' values
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
    '''
    Reads the GermaNet relation file ``gn_relations.xml`` which lists
    all the relations holding between lexical units and synsets.

    Returns a pair (lex_rels, con_rels) of lists of attribute dicts.

    Arguments:
    - `filename`: path to the gn_relations.xml file
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    assert doc.getroot().tag == 'relations'
    lex_rels = []
    con_rels = []
    for node in doc.getroot():
        if node.tag == 'lex_rel':
            if 0 < len(node):
                print('<lex_rel> has unexpected child node')
            rel = dict(node.items())
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if rel['dir'] not in LEX_REL_DIRS:
                print('unrecognized <lex_rel> dir', rel['dir'])
            # bidirectional relations must name their inverse relation
            if rel['dir'] == 'both' and 'inv' not in rel:
                print('<lex_rel> has dir=both but does not specify inv')
            lex_rels.append(rel)
        elif node.tag == 'con_rel':
            if 0 < len(node):
                print('<con_rel> has unexpected child node')
            rel = dict(node.items())
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if rel['dir'] not in CON_REL_DIRS:
                print('unrecognised <con_rel> dir', rel['dir'])
            # both 'both' and 'revert' require an inverse relation name
            if rel['dir'] in ['both', 'revert'] and 'inv' not in rel:
                print('<con_rel> has dir={0} but does not specify inv'.format(
                    rel['dir']))
            con_rels.append(rel)
        else:
            print('unrecognised child of <relations>', node)
    return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
# XML attributes expected on a <wiktionaryParaphrase> element
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
                          'wiktionarySense', 'wiktionarySenseId'])
def read_paraphrase_file(filename):
    '''
    Reads in a GermaNet wiktionary paraphrase file and returns its
    contents as a list of dictionary structures.

    The 'edited' attribute is converted to a boolean and
    'wiktionarySenseId' to an int where possible; malformed values are
    reported on stdout and left as strings.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    assert doc.getroot().tag == 'wiktionaryParaphrases'
    paraphrases = []
    for child in doc.getroot():
        if child.tag == 'wiktionaryParaphrase':
            paraphrase = child
            warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
            if 0 < len(paraphrase):
                print('unrecognised child of <wiktionaryParaphrase>',
                      list(paraphrase))
            paraphrase_dict = dict(paraphrase.items())
            # normalise the yes/no 'edited' flag to a boolean
            if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
                print('<paraphrase> attribute "edited" has unexpected value',
                      paraphrase_dict['edited'])
            else:
                paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
                    paraphrase_dict['edited']]
            # normalise the sense id to an integer
            if not paraphrase_dict['wiktionarySenseId'].isdigit():
                # BUGFIX: report the offending sense id (the original
                # message printed the 'edited' value here by mistake)
                print('<paraphrase> attribute "wiktionarySenseId" has '
                      'non-integer value',
                      paraphrase_dict['wiktionarySenseId'])
            else:
                paraphrase_dict['wiktionarySenseId'] = \
                    int(paraphrase_dict['wiktionarySenseId'], 10)
            paraphrases.append(paraphrase_dict)
        else:
            print('unknown child of <wiktionaryParaphrases>', child)
    return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords
SYNSET_KEY_REWRITES = {
    'class': 'gn_class',
}
def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexial
      information
    '''
    # start from a clean slate: drop the collections if they already exist
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()
    # inject data from the XML files into the database
    for lex_file in lex_files:
        for synset in read_lexical_file(lex_file):
            # rename keys that clash with Python keywords (e.g. 'class')
            synset = {SYNSET_KEY_REWRITES.get(key, key): value
                      for (key, value) in synset.items()}
            lexunits = synset['lexunits']
            # insert the lexunits first so the synset can hold their ids
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            # back-link each lexunit to its synset and copy the POS category
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)
    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING),
                                       ('sense', DESCENDING)])
    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
def insert_relation_information(germanet_db, gn_rels_file):
    '''
    Reads in the given GermaNet relation file and inserts its contents
    into the given MongoDB database.

    Relations are stored on each document under a 'rels' key as a
    sorted list of (relation_name, target ObjectId) pairs.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `gn_rels_file`: path to the gn_relations.xml file
    '''
    lex_rels, con_rels = read_relation_file(gn_rels_file)
    # cache the lexunits while we work on them
    lexunits = {}
    for lex_rel in lex_rels:
        # fetch (and cache) the source and target lexunit documents
        if lex_rel['from'] not in lexunits:
            lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['from']})
        from_lexunit = lexunits[lex_rel['from']]
        if lex_rel['to'] not in lexunits:
            lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['to']})
        to_lexunit = lexunits[lex_rel['to']]
        # a set de-duplicates repeated edges
        if 'rels' not in from_lexunit:
            from_lexunit['rels'] = set()
        from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
        # bidirectional relations also get the inverse edge on the target
        if lex_rel['dir'] == 'both':
            if 'rels' not in to_lexunit:
                to_lexunit['rels'] = set()
            to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
    # sets are not BSON-serialisable: convert to sorted lists, then save
    for lexunit in lexunits.values():
        if 'rels' in lexunit:
            lexunit['rels'] = sorted(lexunit['rels'])
        germanet_db.lexunits.save(lexunit)
    # cache the synsets while we work on them
    synsets = {}
    for con_rel in con_rels:
        # fetch (and cache) the source and target synset documents
        if con_rel['from'] not in synsets:
            synsets[con_rel['from']] = germanet_db.synsets.find_one(
                {'id': con_rel['from']})
        from_synset = synsets[con_rel['from']]
        if con_rel['to'] not in synsets:
            synsets[con_rel['to']] = germanet_db.synsets.find_one(
                {'id': con_rel['to']})
        to_synset = synsets[con_rel['to']]
        if 'rels' not in from_synset:
            from_synset['rels'] = set()
        from_synset['rels'].add((con_rel['name'], to_synset['_id']))
        # 'both' and 'revert' conceptual relations carry an inverse edge
        if con_rel['dir'] in ['both', 'revert']:
            if 'rels' not in to_synset:
                to_synset['rels'] = set()
            to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
    for synset in synsets.values():
        if 'rels' in synset:
            synset['rels'] = sorted(synset['rels'])
        germanet_db.synsets.save(synset)
    print('Inserted {0} lexical relations, {1} synset relations.'.format(
        len(lex_rels), len(con_rels)))
# gzipped word -> lemma mapping shipped alongside this module
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'
def insert_lemmatisation_data(germanet_db):
    '''
    Creates the lemmatiser collection in the given MongoDB instance
    using the data derived from the Projekt deutscher Wortschatz.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    # drop the database collection if it already exists
    germanet_db.lemmatiser.drop()
    num_lemmas = 0
    # use a with-block so the gzip handle is closed even if an insert
    # fails (the original leaked the handle on error)
    with gzip.open(os.path.join(os.path.dirname(__file__),
                                LEMMATISATION_FILE)) as input_file:
        for line in input_file:
            # each line is '<word>\t<lemma>', latin-1 encoded
            line = line.decode('iso-8859-1').strip().split('\t')
            assert len(line) == 2
            germanet_db.lemmatiser.insert(dict(zip(('word', 'lemma'), line)))
            num_lemmas += 1
    # index the collection on 'word'
    germanet_db.lemmatiser.create_index('word')
    print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
# gzipped TSV of '<count>\t<pos>\t<word>' rows derived from SDEWAC
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'
def insert_infocontent_data(germanet_db):
    '''
    For every synset in GermaNet, inserts count information derived
    from SDEWAC, stored on each synset document as 'infocont' (the
    synset's smoothed share of the total corpus count).

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # use add one smoothing: every synset starts with a count of 1
    gn_counts = defaultdict(lambda: 1.)
    total_count = 1
    num_lines_read = 0
    num_lines = 0
    # use a with-block so the gzip handle is closed even on error (the
    # original leaked the handle if a lookup raised)
    with gzip.open(os.path.join(os.path.dirname(__file__),
                                WORD_COUNT_FILE)) as input_file:
        for line in input_file:
            line = line.decode('utf-8').strip().split('\t')
            num_lines += 1
            if len(line) != 3:
                # skip malformed rows
                continue
            count, pos, word = line
            num_lines_read += 1
            count = int(count)
            synsets = set(gnet.synsets(word, pos))
            if not synsets:
                continue
            # Although Resnik (1995) suggests dividing count by the number
            # of synsets, Patwardhan et al (2003) argue against doing
            # this.
            # NOTE(review): the code below *does* divide by len(synsets),
            # which appears to contradict the comment above — confirm which
            # is intended.
            count = float(count) / len(synsets)
            for synset in synsets:
                total_count += count
                paths = synset.hypernym_paths
                # spread the count evenly over the synset's hypernym paths
                scount = float(count) / len(paths)
                for path in paths:
                    for ss in path:
                        gn_counts[ss._id] += scount
    print('Read {0} of {1} lines from count file.'.format(num_lines_read,
                                                          num_lines))
    print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
    print('Total count is {0}'.format(total_count))
    # update all the synset records in GermaNet
    num_updates = 0
    for synset in germanet_db.synsets.find():
        synset['infocont'] = gn_counts[synset['_id']] / total_count
        germanet_db.synsets.save(synset)
        num_updates += 1
    print('Updated {0} synsets.'.format(num_updates))
def compute_max_min_depth(germanet_db):
    '''
    For every part of speech in GermaNet, computes the maximum
    min_depth in that hierarchy and stores the result in the
    database's metainfo document under 'max_min_depths'.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # track, per part of speech, the largest min_depth seen so far
    max_min_depths = defaultdict(lambda: -1)
    for synset in gnet.all_synsets():
        category = synset.category
        max_min_depths[category] = max(max_min_depths[category],
                                       synset.min_depth)
    # persist the result on the (single-document) metainfo collection
    if germanet_db.metainfo.count() == 0:
        germanet_db.metainfo.insert({})
    metainfo = germanet_db.metainfo.find_one()
    metainfo['max_min_depths'] = max_min_depths
    germanet_db.metainfo.save(metainfo)
    print('Computed maximum min_depth for all parts of speech:')
    print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
                     sorted(max_min_depths.items())).encode('utf-8'))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
    '''
    Main function: parse command-line options, connect to MongoDB, and
    run all the import steps in dependency order.
    '''
    usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
             'XML_PATH the directory containing the '
             'GermaNet .xml files')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--host', default=None,
                      help='hostname or IP address of the MongoDB instance '
                      'where the GermaNet database will be inserted '
                      '(default: %default)')
    parser.add_option('--port', type='int', default=None,
                      help='port number of the MongoDB instance where the '
                      'GermaNet database will be inserted (default: %default)')
    parser.add_option('--database', dest='database_name', default='germanet',
                      help='the name of the database on the MongoDB instance '
                      'where GermaNet will be stored (default: %default)')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("incorrect number of arguments")
        # NOTE: parser.error() already exits with status 2 (per optparse
        # docs), so this sys.exit is effectively unreachable
        sys.exit(1)
    xml_path = args[0]
    # connect to mongo; None host/port fall back to pymongo defaults
    client = MongoClient(options.host, options.port)
    germanet_db = client[options.database_name]
    lex_files, gn_rels_file, wiktionary_files, ili_files = \
        find_germanet_xml_files(xml_path)
    insert_lexical_information(germanet_db, lex_files)
    insert_relation_information(germanet_db, gn_rels_file)
    insert_paraphrase_information(germanet_db, wiktionary_files)
    insert_lemmatisation_data(germanet_db)
    insert_infocontent_data(germanet_db)
    compute_max_min_depth(germanet_db)
    client.close()
# the extra sys.argv check presumably guards against running under an
# embedded interpreter with an empty argv — TODO confirm
if __name__ == '__main__' and sys.argv != ['']:
    main()
|
wroberts/pygermanet | pygermanet/mongo_import.py | insert_lemmatisation_data | python | def insert_lemmatisation_data(germanet_db):
'''
Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
# drop the database collection if it already exists
germanet_db.lemmatiser.drop()
num_lemmas = 0
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
LEMMATISATION_FILE))
for line in input_file:
line = line.decode('iso-8859-1').strip().split('\t')
assert len(line) == 2
germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line))))
num_lemmas += 1
input_file.close()
# index the collection on 'word'
germanet_db.lemmatiser.create_index('word')
print('Inserted {0} lemmatiser entries.'.format(num_lemmas)) | Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L520-L542 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
    '''
    Globs the XML files contained in the given directory and sorts
    them into sections for import into the MongoDB database.

    Returns a tuple (lex_files, gn_rels_file, wiktionary_files,
    ili_files); ``gn_rels_file`` is a single path or None.

    Arguments:
    - `xml_path`: the path to the directory containing the GermaNet
      XML files
    '''
    remaining = sorted(glob.glob(os.path.join(xml_path, '*.xml')))

    def _take(pred):
        # pull out of ``remaining`` the files whose lowercased basename
        # satisfies ``pred``; returns (matching, leftover), both sorted
        matching = [fname for fname in remaining
                    if pred(os.path.basename(fname).lower())]
        return matching, sorted(set(remaining) - set(matching))

    # the lexical information files (adj.*, nomen.*, verben.*)
    lex_files, remaining = _take(
        lambda base: re.match(r'(adj|nomen|verben)\.', base) is not None)
    if not lex_files:
        print('ERROR: cannot find lexical information files')

    # the GermaNet relations file
    gn_rels_file, remaining = _take(lambda base: base == 'gn_relations.xml')
    if not gn_rels_file:
        print('ERROR: cannot find relations file gn_relations.xml')
        gn_rels_file = None
    else:
        if 1 < len(gn_rels_file):
            print ('WARNING: more than one relations file gn_relations.xml, '
                   'taking first match')
        gn_rels_file = gn_rels_file[0]

    # the wiktionary paraphrase files
    wiktionary_files, remaining = _take(
        lambda base: re.match(r'wiktionaryparaphrases-', base) is not None)
    if not wiktionary_files:
        print('WARNING: cannot find wiktionary paraphrase files')

    # the interlingual index file
    ili_files, remaining = _take(
        lambda base: base.startswith('interlingualindex'))
    if not ili_files:
        print('WARNING: cannot find interlingual index file')

    # anything left over is unexpected
    if remaining:
        print('WARNING: unrecognised xml files:', remaining)

    return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc, node, recognised_attribs, reqd_attribs=None):
    '''
    Error checking of XML input: check that the given node has certain
    required attributes, and does not have any unrecognised
    attributes.  Problems are reported on stdout; nothing is raised.

    Arguments:
    - `loc`: a string with some information about the location of the
      error in the XML file
    - `node`: the node to check
    - `recognised_attribs`: a set of node attributes which we know how
      to handle
    - `reqd_attribs`: a set of node attributes which we require to be
      present; if this argument is None, it will take the same value
      as `recognised_attribs`
    '''
    if reqd_attribs is None:
        reqd_attribs = recognised_attribs
    present = set(node.keys())
    missing = reqd_attribs - present
    if missing:
        print(loc, 'missing <{0}> attributes'.format(node.tag), missing)
    unknown = present - recognised_attribs
    if unknown:
        print(loc, 'unrecognised <{0}> properties'.format(node.tag), unknown)
# sets of XML attributes we expect on each element type; consumed by
# warn_attribs() to flag missing or unknown attributes
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
                       'source', 'sense', 'id'])
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
# maps the 'yes'/'no' attribute strings used in the XML to Python booleans
MAP_YESNO_TO_BOOL = {
    'yes': True,
    'no': False,
}
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Each synset becomes a dict of its XML attributes plus a 'lexunits'
    list; each lexunit dict carries its XML attributes (with yes/no
    values converted to booleans and 'sense' to an int), any
    orthographic forms keyed by tag name, plus 'examples' and 'frames'
    lists.  Malformed input is reported on stdout and skipped where
    possible.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        synset_dict = dict(synset.items())
        # location prefix used in all warning messages for this synset
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            # leave the raw string value in place
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                # NOTE: the nested loops below deliberately rebind ``child``;
                # this is safe only because each enclosing for-loop reassigns
                # its own ``child`` on the next iteration.
                for child in lexunit:
                    if child.tag in ['orthForm',
                                     'orthVar',
                                     'oldOrthForm',
                                     'oldOrthVar']:
                        # orthographic forms are stored as plain strings on
                        # the lexunit dict, keyed by their tag name
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        # NOTE(review): if the <example> has no <text> child
                        # at all, the next line raises IndexError despite the
                        # warning above — confirm the data never triggers it.
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                # optional syntactic frame for this example
                                if 'exframe' in example_dict:
                                    print(lexloc,
                                          'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                # a compound may carry several modifiers
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                # but at most one head
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                        # NOTE(review): ``compound_dict`` is built but never
                        # attached to ``lexunit_dict``; the parsed compound
                        # information is discarded — confirm this is intended.
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
                # NOTE(review): the paraphrase text is validated but never
                # stored on ``synset_dict`` — confirm this is intended.
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
# attribute sets for <lex_rel>/<con_rel> elements and their legal 'dir' values
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
    '''
    Reads the GermaNet relation file ``gn_relations.xml`` which lists
    all the relations holding between lexical units and synsets.

    Returns a pair (lex_rels, con_rels) of lists of attribute dicts.

    Arguments:
    - `filename`: path to the gn_relations.xml file
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    assert doc.getroot().tag == 'relations'
    lex_rels = []
    con_rels = []
    for node in doc.getroot():
        if node.tag == 'lex_rel':
            if 0 < len(node):
                print('<lex_rel> has unexpected child node')
            rel = dict(node.items())
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if rel['dir'] not in LEX_REL_DIRS:
                print('unrecognized <lex_rel> dir', rel['dir'])
            # bidirectional relations must name their inverse relation
            if rel['dir'] == 'both' and 'inv' not in rel:
                print('<lex_rel> has dir=both but does not specify inv')
            lex_rels.append(rel)
        elif node.tag == 'con_rel':
            if 0 < len(node):
                print('<con_rel> has unexpected child node')
            rel = dict(node.items())
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if rel['dir'] not in CON_REL_DIRS:
                print('unrecognised <con_rel> dir', rel['dir'])
            # both 'both' and 'revert' require an inverse relation name
            if rel['dir'] in ['both', 'revert'] and 'inv' not in rel:
                print('<con_rel> has dir={0} but does not specify inv'.format(
                    rel['dir']))
            con_rels.append(rel)
        else:
            print('unrecognised child of <relations>', node)
    return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
# XML attributes expected on a <wiktionaryParaphrase> element
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
                          'wiktionarySense', 'wiktionarySenseId'])
def read_paraphrase_file(filename):
    '''
    Reads in a GermaNet wiktionary paraphrase file and returns its
    contents as a list of dictionary structures.

    The 'edited' attribute is converted to a boolean and
    'wiktionarySenseId' to an int where possible; malformed values are
    reported on stdout and left as strings.

    Arguments:
    - `filename`: the name of the XML file to read
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    assert doc.getroot().tag == 'wiktionaryParaphrases'
    paraphrases = []
    for child in doc.getroot():
        if child.tag == 'wiktionaryParaphrase':
            paraphrase = child
            warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
            if 0 < len(paraphrase):
                print('unrecognised child of <wiktionaryParaphrase>',
                      list(paraphrase))
            paraphrase_dict = dict(paraphrase.items())
            # normalise the yes/no 'edited' flag to a boolean
            if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
                print('<paraphrase> attribute "edited" has unexpected value',
                      paraphrase_dict['edited'])
            else:
                paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
                    paraphrase_dict['edited']]
            # normalise the sense id to an integer
            if not paraphrase_dict['wiktionarySenseId'].isdigit():
                # BUGFIX: report the offending sense id (the original
                # message printed the 'edited' value here by mistake)
                print('<paraphrase> attribute "wiktionarySenseId" has '
                      'non-integer value',
                      paraphrase_dict['wiktionarySenseId'])
            else:
                paraphrase_dict['wiktionarySenseId'] = \
                    int(paraphrase_dict['wiktionarySenseId'], 10)
            paraphrases.append(paraphrase_dict)
        else:
            print('unknown child of <wiktionaryParaphrases>', child)
    return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords
SYNSET_KEY_REWRITES = {
    'class': 'gn_class',
}
def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexial
      information
    '''
    # start from a clean slate: drop the collections if they already exist
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()
    # inject data from the XML files into the database
    for lex_file in lex_files:
        for synset in read_lexical_file(lex_file):
            # rename keys that clash with Python keywords (e.g. 'class')
            synset = {SYNSET_KEY_REWRITES.get(key, key): value
                      for (key, value) in synset.items()}
            lexunits = synset['lexunits']
            # insert the lexunits first so the synset can hold their ids
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            # back-link each lexunit to its synset and copy the POS category
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)
    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING),
                                       ('sense', DESCENDING)])
    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
def insert_relation_information(germanet_db, gn_rels_file):
    '''
    Reads in the given GermaNet relation file and inserts its contents
    into the given MongoDB database.

    Relations are stored on each document under a 'rels' key as a
    sorted list of (relation_name, target ObjectId) pairs.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `gn_rels_file`: path to the gn_relations.xml file
    '''
    lex_rels, con_rels = read_relation_file(gn_rels_file)
    # cache the lexunits while we work on them
    lexunits = {}
    for lex_rel in lex_rels:
        # fetch (and cache) the source and target lexunit documents
        if lex_rel['from'] not in lexunits:
            lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['from']})
        from_lexunit = lexunits[lex_rel['from']]
        if lex_rel['to'] not in lexunits:
            lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['to']})
        to_lexunit = lexunits[lex_rel['to']]
        # a set de-duplicates repeated edges
        if 'rels' not in from_lexunit:
            from_lexunit['rels'] = set()
        from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
        # bidirectional relations also get the inverse edge on the target
        if lex_rel['dir'] == 'both':
            if 'rels' not in to_lexunit:
                to_lexunit['rels'] = set()
            to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
    # sets are not BSON-serialisable: convert to sorted lists, then save
    for lexunit in lexunits.values():
        if 'rels' in lexunit:
            lexunit['rels'] = sorted(lexunit['rels'])
        germanet_db.lexunits.save(lexunit)
    # cache the synsets while we work on them
    synsets = {}
    for con_rel in con_rels:
        # fetch (and cache) the source and target synset documents
        if con_rel['from'] not in synsets:
            synsets[con_rel['from']] = germanet_db.synsets.find_one(
                {'id': con_rel['from']})
        from_synset = synsets[con_rel['from']]
        if con_rel['to'] not in synsets:
            synsets[con_rel['to']] = germanet_db.synsets.find_one(
                {'id': con_rel['to']})
        to_synset = synsets[con_rel['to']]
        if 'rels' not in from_synset:
            from_synset['rels'] = set()
        from_synset['rels'].add((con_rel['name'], to_synset['_id']))
        # 'both' and 'revert' conceptual relations carry an inverse edge
        if con_rel['dir'] in ['both', 'revert']:
            if 'rels' not in to_synset:
                to_synset['rels'] = set()
            to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
    for synset in synsets.values():
        if 'rels' in synset:
            synset['rels'] = sorted(synset['rels'])
        germanet_db.synsets.save(synset)
    print('Inserted {0} lexical relations, {1} synset relations.'.format(
        len(lex_rels), len(con_rels)))
def insert_paraphrase_information(germanet_db, wiktionary_files):
    '''
    Reads in the given GermaNet wiktionary paraphrase files and inserts
    their contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `wiktionary_files`: list of paths to wiktionary paraphrase XML files
    '''
    num_paraphrases = 0
    # fetch each lexunit document from the database at most once
    cache = {}
    for wiktionary_file in wiktionary_files:
        entries = read_paraphrase_file(wiktionary_file)
        num_paraphrases += len(entries)
        for entry in entries:
            unit_id = entry['lexUnitId']
            if unit_id not in cache:
                cache[unit_id] = germanet_db.lexunits.find_one(
                    {'id': unit_id})
            cache[unit_id].setdefault('paraphrases', []).append(entry)
    # write all modified lexunits back in one pass
    for unit in cache.values():
        germanet_db.lexunits.save(unit)
    print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
# gzipped word->lemma mapping shipped alongside this module; read by
# insert_lemmatisation_data (derived from the Projekt deutscher
# Wortschatz, per that function's docstring)
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
# gzipped tab-separated (count, pos, word) frequency table shipped
# alongside this module; read by insert_infocontent_data to derive
# information-content values from SDEWAC counts
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'
def insert_infocontent_data(germanet_db):
    '''
    For every synset in GermaNet, inserts count information derived
    from SDEWAC.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # add-one smoothing: every synset id starts with a pseudo-count of 1
    gn_counts = defaultdict(lambda: 1.)
    total_count = 1
    num_lines = 0
    num_lines_read = 0
    count_path = os.path.join(os.path.dirname(__file__), WORD_COUNT_FILE)
    with gzip.open(count_path) as count_file:
        for raw_line in count_file:
            fields = raw_line.decode('utf-8').strip().split('\t')
            num_lines += 1
            if len(fields) != 3:
                # skip malformed rows
                continue
            count, pos, word = fields
            num_lines_read += 1
            count = int(count)
            word_synsets = set(gnet.synsets(word, pos))
            if not word_synsets:
                continue
            # Although Resnik (1995) suggests dividing count by the number
            # of synsets, Patwardhan et al (2003) argue against doing
            # this.
            count = float(count) / len(word_synsets)
            for word_synset in word_synsets:
                total_count += count
                # distribute this synset's share evenly over all of its
                # hypernym paths, crediting every ancestor on each path
                paths = word_synset.hypernym_paths
                share = float(count) / len(paths)
                for path in paths:
                    for ancestor in path:
                        gn_counts[ancestor._id] += share
    print('Read {0} of {1} lines from count file.'.format(num_lines_read,
                                                          num_lines))
    print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
    print('Total count is {0}'.format(total_count))
    # update all the synset records in GermaNet with their relative
    # frequency (information content numerator)
    num_updates = 0
    for synset in germanet_db.synsets.find():
        synset['infocont'] = gn_counts[synset['_id']] / total_count
        germanet_db.synsets.save(synset)
        num_updates += 1
    print('Updated {0} synsets.'.format(num_updates))
def compute_max_min_depth(germanet_db):
    '''
    For every part of speech in GermaNet, computes the maximum
    min_depth in that hierarchy and stores the result in the
    database's metainfo collection.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # -1 sentinel guarantees the first observed depth always replaces it
    max_min_depths = defaultdict(lambda: -1)
    for synset in gnet.all_synsets():
        min_depth = synset.min_depth
        if max_min_depths[synset.category] < min_depth:
            max_min_depths[synset.category] = min_depth
    # create the singleton metainfo document on first run
    if germanet_db.metainfo.count() == 0:
        germanet_db.metainfo.insert({})
    metainfo = germanet_db.metainfo.find_one()
    metainfo['max_min_depths'] = max_min_depths
    germanet_db.metainfo.save(metainfo)
    print('Computed maximum min_depth for all parts of speech:')
    # BUGFIX: previously this printed the UTF-8-encoded bytes, which
    # under Python 3 renders as the bytes repr (b'...'); print the text
    # itself instead.  Category names are plain ASCII, so this is also
    # safe under Python 2.
    print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
                     sorted(max_min_depths.items())))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
    '''
    Command-line entry point: parses options, connects to MongoDB and
    imports the GermaNet XML files found in the given directory.
    '''
    usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
             'XML_PATH the directory containing the '
             'GermaNet .xml files')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--host', default=None,
                      help='hostname or IP address of the MongoDB instance '
                      'where the GermaNet database will be inserted '
                      '(default: %default)')
    parser.add_option('--port', type='int', default=None,
                      help='port number of the MongoDB instance where the '
                      'GermaNet database will be inserted (default: %default)')
    parser.add_option('--database', dest='database_name', default='germanet',
                      help='the name of the database on the MongoDB instance '
                      'where GermaNet will be stored (default: %default)')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        # parser.error() prints the message to stderr and raises
        # SystemExit(2), so the sys.exit(1) that used to follow here
        # was unreachable dead code and has been removed.
        parser.error("incorrect number of arguments")
    xml_path = args[0]
    client = MongoClient(options.host, options.port)
    germanet_db = client[options.database_name]
    # NOTE(review): ili_files (interlingual index) is collected but
    # never imported into the database anywhere in this module.
    lex_files, gn_rels_file, wiktionary_files, ili_files = \
        find_germanet_xml_files(xml_path)
    insert_lexical_information(germanet_db, lex_files)
    insert_relation_information(germanet_db, gn_rels_file)
    insert_paraphrase_information(germanet_db, wiktionary_files)
    insert_lemmatisation_data(germanet_db)
    insert_infocontent_data(germanet_db)
    compute_max_min_depth(germanet_db)
    client.close()
# Run only when executed as a script; the extra sys.argv != [''] check
# presumably skips execution in embedded interpreters where argv is a
# single empty string -- TODO confirm original intent.
if __name__ == '__main__' and sys.argv != ['']:
    main()
|
wroberts/pygermanet | pygermanet/mongo_import.py | insert_infocontent_data | python | def insert_infocontent_data(germanet_db):
'''
For every synset in GermaNet, inserts count information derived
from SDEWAC.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
gnet = germanet.GermaNet(germanet_db)
# use add one smoothing
gn_counts = defaultdict(lambda: 1.)
total_count = 1
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
WORD_COUNT_FILE))
num_lines_read = 0
num_lines = 0
for line in input_file:
line = line.decode('utf-8').strip().split('\t')
num_lines += 1
if len(line) != 3:
continue
count, pos, word = line
num_lines_read += 1
count = int(count)
synsets = set(gnet.synsets(word, pos))
if not synsets:
continue
# Although Resnik (1995) suggests dividing count by the number
# of synsets, Patwardhan et al (2003) argue against doing
# this.
count = float(count) / len(synsets)
for synset in synsets:
total_count += count
paths = synset.hypernym_paths
scount = float(count) / len(paths)
for path in paths:
for ss in path:
gn_counts[ss._id] += scount
print('Read {0} of {1} lines from count file.'.format(num_lines_read,
num_lines))
print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
print('Total count is {0}'.format(total_count))
input_file.close()
# update all the synset records in GermaNet
num_updates = 0
for synset in germanet_db.synsets.find():
synset['infocont'] = gn_counts[synset['_id']] / total_count
germanet_db.synsets.save(synset)
num_updates += 1
print('Updated {0} synsets.'.format(num_updates)) | For every synset in GermaNet, inserts count information derived
from SDEWAC.
Arguments:
- `germanet_db`: a pymongo.database.Database object | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L551-L600 | [
"def synsets(self, lemma, pos = None):\n '''\n Looks up synsets in the GermaNet database.\n\n Arguments:\n - `lemma`:\n - `pos`:\n '''\n return sorted(set(lemma_obj.synset\n for lemma_obj in self.lemmas(lemma, pos)))\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
'''
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
'''
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if
re.match(r'(adj|nomen|verben)\.',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(lex_files))
if not lex_files:
print('ERROR: cannot find lexical information files')
# sort out the GermaNet relations file
gn_rels_file = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower() == 'gn_relations.xml']
xml_files = sorted(set(xml_files) - set(gn_rels_file))
if not gn_rels_file:
print('ERROR: cannot find relations file gn_relations.xml')
gn_rels_file = None
else:
if 1 < len(gn_rels_file):
print ('WARNING: more than one relations file gn_relations.xml, '
'taking first match')
gn_rels_file = gn_rels_file[0]
# sort out the wiktionary paraphrase files
wiktionary_files = [xml_file for xml_file in xml_files if
re.match(r'wiktionaryparaphrases-',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(wiktionary_files))
if not wiktionary_files:
print('WARNING: cannot find wiktionary paraphrase files')
# sort out the interlingual index file
ili_files = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower().startswith(
'interlingualindex')]
xml_files = sorted(set(xml_files) - set(ili_files))
if not ili_files:
print('WARNING: cannot find interlingual index file')
if xml_files:
print('WARNING: unrecognised xml files:', xml_files)
return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc,
node,
recognised_attribs,
reqd_attribs=None):
'''
Error checking of XML input: check that the given node has certain
required attributes, and does not have any unrecognised
attributes.
Arguments:
- `loc`: a string with some information about the location of the
error in the XML file
- `node`: the node to check
- `recognised_attribs`: a set of node attributes which we know how
to handle
- `reqd_attribs`: a set of node attributes which we require to be
present; if this argument is None, it will take the same value
as `recognised_attribs`
'''
if reqd_attribs is None:
reqd_attribs = recognised_attribs
found_attribs = set(node.keys())
if reqd_attribs - found_attribs:
print(loc, 'missing <{0}> attributes'.format(node.tag),
reqd_attribs - found_attribs)
if found_attribs - recognised_attribs:
print(loc, 'unrecognised <{0}> properties'.format(node.tag),
found_attribs - recognised_attribs)
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
'source', 'sense', 'id'])
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
MAP_YESNO_TO_BOOL = {
'yes': True,
'no': False,
}
def read_lexical_file(filename):
'''
Reads in a GermaNet lexical information file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`: the name of the XML file to read
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
synsets = []
assert doc.getroot().tag == 'synsets'
for synset in doc.getroot():
if synset.tag != 'synset':
print('unrecognised child of <synsets>', synset)
continue
synset_dict = dict(synset.items())
synloc = '{0} synset {1},'.format(filename,
synset_dict.get('id', '???'))
warn_attribs(synloc, synset, SYNSET_ATTRIBS)
synset_dict['lexunits'] = []
synsets.append(synset_dict)
for child in synset:
if child.tag == 'lexUnit':
lexunit = child
lexunit_dict = dict(lexunit.items())
lexloc = synloc + ' lexUnit {0},'.format(
lexunit_dict.get('id', '???'))
warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
# convert some properties to booleans
for key in ['styleMarking', 'artificial', 'namedEntity']:
if key in lexunit_dict:
if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
print(lexloc, ('lexunit property {0} has '
'non-boolean value').format(key),
lexunit_dict[key])
continue
lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
# convert sense to integer number
if 'sense' in lexunit_dict:
if lexunit_dict['sense'].isdigit():
lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
else:
print(lexloc,
'lexunit property sense has non-numeric value',
lexunit_dict['sense'])
synset_dict['lexunits'].append(lexunit_dict)
lexunit_dict['examples'] = []
lexunit_dict['frames'] = []
for child in lexunit:
if child.tag in ['orthForm',
'orthVar',
'oldOrthForm',
'oldOrthVar']:
warn_attribs(lexloc, child, set())
if not child.text:
print(lexloc, '{0} with no text'.format(child.tag))
continue
if child.tag in lexunit_dict:
print(lexloc, 'more than one {0}'.format(child.tag))
lexunit_dict[child.tag] = str(child.text)
elif child.tag == 'example':
example = child
text = [child for child in example
if child.tag == 'text']
if len(text) != 1 or not text[0].text:
print(lexloc, '<example> tag without text')
example_dict = {'text': str(text[0].text)}
for child in example:
if child.tag == 'text':
continue
elif child.tag == 'exframe':
if 'exframe' in example_dict:
print(lexloc,
'more than one <exframe> '
'for <example>')
warn_attribs(lexloc, child, set())
if not child.text:
print(lexloc, '<exframe> with no text')
continue
example_dict['exframe'] = str(child.text)
else:
print(lexloc,
'unrecognised child of <example>',
child)
lexunit_dict['examples'].append(example_dict)
elif child.tag == 'frame':
frame = child
warn_attribs(lexloc, frame, set())
if 0 < len(frame):
print(lexloc, 'unrecognised <frame> children',
list(frame))
if not frame.text:
print(lexloc, '<frame> without text')
continue
lexunit_dict['frames'].append(str(frame.text))
elif child.tag == 'compound':
compound = child
warn_attribs(lexloc, compound, set())
compound_dict = {}
for child in compound:
if child.tag == 'modifier':
modifier_dict = dict(child.items())
warn_attribs(lexloc, child,
MODIFIER_ATTRIBS, set())
if not child.text:
print(lexloc, 'modifier without text')
continue
modifier_dict['text'] = str(child.text)
if 'modifier' not in compound_dict:
compound_dict['modifier'] = []
compound_dict['modifier'].append(modifier_dict)
elif child.tag == 'head':
head_dict = dict(child.items())
warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
if not child.text:
print(lexloc, '<head> without text')
continue
head_dict['text'] = str(child.text)
if 'head' in compound_dict:
print(lexloc,
'more than one head in <compound>')
compound_dict['head'] = head_dict
else:
print(lexloc,
'unrecognised child of <compound>',
child)
continue
else:
print(lexloc, 'unrecognised child of <lexUnit>', child)
continue
elif child.tag == 'paraphrase':
paraphrase = child
warn_attribs(synloc, paraphrase, set())
paraphrase_text = str(paraphrase.text)
if not paraphrase_text:
print(synloc, 'WARNING: <paraphrase> tag with no text')
else:
print(synloc, 'unrecognised child of <synset>', child)
continue
return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
'''
Reads the GermaNet relation file ``gn_relations.xml`` which lists
all the relations holding between lexical units and synsets.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
lex_rels = []
con_rels = []
assert doc.getroot().tag == 'relations'
for child in doc.getroot():
if child.tag == 'lex_rel':
if 0 < len(child):
print('<lex_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in LEX_REL_DIRS:
print('unrecognized <lex_rel> dir', child_dict['dir'])
if child_dict['dir'] == 'both' and 'inv' not in child_dict:
print('<lex_rel> has dir=both but does not specify inv')
lex_rels.append(child_dict)
elif child.tag == 'con_rel':
if 0 < len(child):
print('<con_rel> has unexpected child node')
child_dict = dict(child.items())
warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
if child_dict['dir'] not in CON_REL_DIRS:
print('unrecognised <con_rel> dir', child_dict['dir'])
if (child_dict['dir'] in ['both', 'revert'] and
'inv' not in child_dict):
print('<con_rel> has dir={0} but does not specify inv'.format(
child_dict['dir']))
con_rels.append(child_dict)
else:
print('unrecognised child of <relations>', child)
continue
return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
'wiktionarySense', 'wiktionarySenseId'])
def read_paraphrase_file(filename):
'''
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
assert doc.getroot().tag == 'wiktionaryParaphrases'
paraphrases = []
for child in doc.getroot():
if child.tag == 'wiktionaryParaphrase':
paraphrase = child
warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
if 0 < len(paraphrase):
print('unrecognised child of <wiktionaryParaphrase>',
list(paraphrase))
paraphrase_dict = dict(paraphrase.items())
if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
print('<paraphrase> attribute "edited" has unexpected value',
paraphrase_dict['edited'])
else:
paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
paraphrase_dict['edited']]
if not paraphrase_dict['wiktionarySenseId'].isdigit():
print('<paraphrase> attribute "wiktionarySenseId" has '
'non-integer value', paraphrase_dict['edited'])
else:
paraphrase_dict['wiktionarySenseId'] = \
int(paraphrase_dict['wiktionarySenseId'], 10)
paraphrases.append(paraphrase_dict)
else:
print('unknown child of <wiktionaryParaphrases>', child)
return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords
SYNSET_KEY_REWRITES = {
'class': 'gn_class',
}
def insert_lexical_information(germanet_db, lex_files):
'''
Reads in the given lexical information files and inserts their
contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `lex_files`: a list of paths to XML files containing lexial
information
'''
# drop the database collections if they already exist
germanet_db.lexunits.drop()
germanet_db.synsets.drop()
# inject data from XML files into the database
for lex_file in lex_files:
synsets = read_lexical_file(lex_file)
for synset in synsets:
synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
for (key, value) in synset.items())
lexunits = synset['lexunits']
synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
synset_id = germanet_db.synsets.insert(synset)
for lexunit in lexunits:
lexunit['synset'] = synset_id
lexunit['category'] = synset['category']
germanet_db.lexunits.save(lexunit)
# index the two collections by id
germanet_db.synsets.create_index('id')
germanet_db.lexunits.create_index('id')
# also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
germanet_db.lexunits.create_index([('orthForm', DESCENDING),
('category', DESCENDING)])
germanet_db.lexunits.create_index([('orthForm', DESCENDING),
('category', DESCENDING),
('sense', DESCENDING)])
print('Inserted {0} synsets, {1} lexical units.'.format(
germanet_db.synsets.count(),
germanet_db.lexunits.count()))
def insert_relation_information(germanet_db, gn_rels_file):
'''
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `gn_rels_file`:
'''
lex_rels, con_rels = read_relation_file(gn_rels_file)
# cache the lexunits while we work on them
lexunits = {}
for lex_rel in lex_rels:
if lex_rel['from'] not in lexunits:
lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
{'id': lex_rel['from']})
from_lexunit = lexunits[lex_rel['from']]
if lex_rel['to'] not in lexunits:
lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
{'id': lex_rel['to']})
to_lexunit = lexunits[lex_rel['to']]
if 'rels' not in from_lexunit:
from_lexunit['rels'] = set()
from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
if lex_rel['dir'] == 'both':
if 'rels' not in to_lexunit:
to_lexunit['rels'] = set()
to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
for lexunit in lexunits.values():
if 'rels' in lexunit:
lexunit['rels'] = sorted(lexunit['rels'])
germanet_db.lexunits.save(lexunit)
# cache the synsets while we work on them
synsets = {}
for con_rel in con_rels:
if con_rel['from'] not in synsets:
synsets[con_rel['from']] = germanet_db.synsets.find_one(
{'id': con_rel['from']})
from_synset = synsets[con_rel['from']]
if con_rel['to'] not in synsets:
synsets[con_rel['to']] = germanet_db.synsets.find_one(
{'id': con_rel['to']})
to_synset = synsets[con_rel['to']]
if 'rels' not in from_synset:
from_synset['rels'] = set()
from_synset['rels'].add((con_rel['name'], to_synset['_id']))
if con_rel['dir'] in ['both', 'revert']:
if 'rels' not in to_synset:
to_synset['rels'] = set()
to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
for synset in synsets.values():
if 'rels' in synset:
synset['rels'] = sorted(synset['rels'])
germanet_db.synsets.save(synset)
print('Inserted {0} lexical relations, {1} synset relations.'.format(
len(lex_rels), len(con_rels)))
def insert_paraphrase_information(germanet_db, wiktionary_files):
'''
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `wiktionary_files`:
'''
num_paraphrases = 0
# cache the lexunits while we work on them
lexunits = {}
for filename in wiktionary_files:
paraphrases = read_paraphrase_file(filename)
num_paraphrases += len(paraphrases)
for paraphrase in paraphrases:
if paraphrase['lexUnitId'] not in lexunits:
lexunits[paraphrase['lexUnitId']] = \
germanet_db.lexunits.find_one(
{'id': paraphrase['lexUnitId']})
lexunit = lexunits[paraphrase['lexUnitId']]
if 'paraphrases' not in lexunit:
lexunit['paraphrases'] = []
lexunit['paraphrases'].append(paraphrase)
for lexunit in lexunits.values():
germanet_db.lexunits.save(lexunit)
print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'
def insert_lemmatisation_data(germanet_db):
'''
Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
# drop the database collection if it already exists
germanet_db.lemmatiser.drop()
num_lemmas = 0
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
LEMMATISATION_FILE))
for line in input_file:
line = line.decode('iso-8859-1').strip().split('\t')
assert len(line) == 2
germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line))))
num_lemmas += 1
input_file.close()
# index the collection on 'word'
germanet_db.lemmatiser.create_index('word')
print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'
def compute_max_min_depth(germanet_db):
'''
For every part of speech in GermaNet, computes the maximum
min_depth in that hierarchy.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
gnet = germanet.GermaNet(germanet_db)
max_min_depths = defaultdict(lambda: -1)
for synset in gnet.all_synsets():
min_depth = synset.min_depth
if max_min_depths[synset.category] < min_depth:
max_min_depths[synset.category] = min_depth
if germanet_db.metainfo.count() == 0:
germanet_db.metainfo.insert({})
metainfo = germanet_db.metainfo.find_one()
metainfo['max_min_depths'] = max_min_depths
germanet_db.metainfo.save(metainfo)
print('Computed maximum min_depth for all parts of speech:')
print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
sorted(max_min_depths.items())).encode('utf-8'))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
'''Main function.'''
usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
'XML_PATH the directory containing the '
'GermaNet .xml files')
parser = optparse.OptionParser(usage=usage)
parser.add_option('--host', default=None,
help='hostname or IP address of the MongoDB instance '
'where the GermaNet database will be inserted '
'(default: %default)')
parser.add_option('--port', type='int', default=None,
help='port number of the MongoDB instance where the '
'GermaNet database will be inserted (default: %default)')
parser.add_option('--database', dest='database_name', default='germanet',
help='the name of the database on the MongoDB instance '
'where GermaNet will be stored (default: %default)')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
sys.exit(1)
xml_path = args[0]
client = MongoClient(options.host, options.port)
germanet_db = client[options.database_name]
lex_files, gn_rels_file, wiktionary_files, ili_files = \
find_germanet_xml_files(xml_path)
insert_lexical_information(germanet_db, lex_files)
insert_relation_information(germanet_db, gn_rels_file)
insert_paraphrase_information(germanet_db, wiktionary_files)
insert_lemmatisation_data(germanet_db)
insert_infocontent_data(germanet_db)
compute_max_min_depth(germanet_db)
client.close()
if __name__ == '__main__' and sys.argv != ['']:
main()
|
wroberts/pygermanet | pygermanet/mongo_import.py | compute_max_min_depth | python | def compute_max_min_depth(germanet_db):
'''
For every part of speech in GermaNet, computes the maximum
min_depth in that hierarchy.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
gnet = germanet.GermaNet(germanet_db)
max_min_depths = defaultdict(lambda: -1)
for synset in gnet.all_synsets():
min_depth = synset.min_depth
if max_min_depths[synset.category] < min_depth:
max_min_depths[synset.category] = min_depth
if germanet_db.metainfo.count() == 0:
germanet_db.metainfo.insert({})
metainfo = germanet_db.metainfo.find_one()
metainfo['max_min_depths'] = max_min_depths
germanet_db.metainfo.save(metainfo)
print('Computed maximum min_depth for all parts of speech:')
print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
sorted(max_min_depths.items())).encode('utf-8')) | For every part of speech in GermaNet, computes the maximum
min_depth in that hierarchy.
Arguments:
- `germanet_db`: a pymongo.database.Database object | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L602-L625 | [
"def all_synsets(self):\n '''\n A generator over all the synsets in the GermaNet database.\n '''\n for synset_dict in self._mongo_db.synsets.find():\n yield Synset(self, synset_dict)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
'''
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
'''
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if
re.match(r'(adj|nomen|verben)\.',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(lex_files))
if not lex_files:
print('ERROR: cannot find lexical information files')
# sort out the GermaNet relations file
gn_rels_file = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower() == 'gn_relations.xml']
xml_files = sorted(set(xml_files) - set(gn_rels_file))
if not gn_rels_file:
print('ERROR: cannot find relations file gn_relations.xml')
gn_rels_file = None
else:
if 1 < len(gn_rels_file):
print ('WARNING: more than one relations file gn_relations.xml, '
'taking first match')
gn_rels_file = gn_rels_file[0]
# sort out the wiktionary paraphrase files
wiktionary_files = [xml_file for xml_file in xml_files if
re.match(r'wiktionaryparaphrases-',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(wiktionary_files))
if not wiktionary_files:
print('WARNING: cannot find wiktionary paraphrase files')
# sort out the interlingual index file
ili_files = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower().startswith(
'interlingualindex')]
xml_files = sorted(set(xml_files) - set(ili_files))
if not ili_files:
print('WARNING: cannot find interlingual index file')
if xml_files:
print('WARNING: unrecognised xml files:', xml_files)
return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc,
node,
recognised_attribs,
reqd_attribs=None):
'''
Error checking of XML input: check that the given node has certain
required attributes, and does not have any unrecognised
attributes.
Arguments:
- `loc`: a string with some information about the location of the
error in the XML file
- `node`: the node to check
- `recognised_attribs`: a set of node attributes which we know how
to handle
- `reqd_attribs`: a set of node attributes which we require to be
present; if this argument is None, it will take the same value
as `recognised_attribs`
'''
if reqd_attribs is None:
reqd_attribs = recognised_attribs
found_attribs = set(node.keys())
if reqd_attribs - found_attribs:
print(loc, 'missing <{0}> attributes'.format(node.tag),
reqd_attribs - found_attribs)
if found_attribs - recognised_attribs:
print(loc, 'unrecognised <{0}> properties'.format(node.tag),
found_attribs - recognised_attribs)
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
'source', 'sense', 'id'])
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
MAP_YESNO_TO_BOOL = {
'yes': True,
'no': False,
}
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Arguments:
    - `filename`: the name of the XML file to read

    Returns a list of synset dictionaries; each one contains the
    synset's XML attributes plus a 'lexunits' list of lexical unit
    dictionaries (which in turn carry 'examples' and 'frames' lists).
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        # copy the element's XML attributes into a plain dict
        synset_dict = dict(synset.items())
        # location prefix used in warning messages for this synset
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            # leave the raw string value in place
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                for child in lexunit:
                    # orthographic form variants are stored directly on
                    # the lexunit dict under their tag name
                    if child.tag in ['orthForm',
                                     'orthVar',
                                     'oldOrthForm',
                                     'oldOrthVar']:
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        # NOTE(review): if there is no <text> child at
                        # all, the next line raises IndexError — confirm
                        # whether the input format guarantees one
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                if 'exframe' in example_dict:
                                    print(lexloc,
                                          'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        # NOTE(review): compound_dict is built up below
                        # but never attached to lexunit_dict in this
                        # code, so the parsed compound data is discarded
                        # — confirm whether an assignment is missing
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                # a compound may have several modifiers
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                # only a single head is expected
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                # NOTE(review): the paraphrase text is only validated
                # here, never stored — confirm this is intentional
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
# required and optional attributes on <lex_rel>/<con_rel> elements of
# the GermaNet relation file
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
# valid values of the `dir` attribute for lexical relations and for
# conceptual relations, respectively
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
    '''
    Reads the GermaNet relation file ``gn_relations.xml`` which lists
    all the relations holding between lexical units and synsets.

    Arguments:
    - `filename`: path to the gn_relations.xml file

    Returns a pair (lex_rels, con_rels): lists of attribute
    dictionaries for lexical and conceptual relations, respectively.
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    lex_rels = []
    con_rels = []
    root = doc.getroot()
    assert root.tag == 'relations'
    for node in root:
        if node.tag == 'lex_rel':
            if 0 < len(node):
                print('<lex_rel> has unexpected child node')
            rel = dict(node.items())
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if rel['dir'] not in LEX_REL_DIRS:
                print('unrecognized <lex_rel> dir', rel['dir'])
            # bidirectional relations need an inverse relation name
            if rel['dir'] == 'both' and 'inv' not in rel:
                print('<lex_rel> has dir=both but does not specify inv')
            lex_rels.append(rel)
        elif node.tag == 'con_rel':
            if 0 < len(node):
                print('<con_rel> has unexpected child node')
            rel = dict(node.items())
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if rel['dir'] not in CON_REL_DIRS:
                print('unrecognised <con_rel> dir', rel['dir'])
            # 'both' and 'revert' relations need an inverse name too
            if (rel['dir'] in ['both', 'revert'] and
                'inv' not in rel):
                print('<con_rel> has dir={0} but does not specify inv'.format(
                    rel['dir']))
            con_rels.append(rel)
        else:
            print('unrecognised child of <relations>', node)
            continue
    return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
# recognised attributes on <wiktionaryParaphrase> elements
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
                          'wiktionarySense', 'wiktionarySenseId'])
def read_paraphrase_file(filename):
    '''
    Reads in a GermaNet wiktionary paraphrase file and returns its
    contents as a list of dictionary structures.

    Arguments:
    - `filename`: the name of the XML file to read

    Returns a list of dictionaries, one per <wiktionaryParaphrase>
    element, with the 'edited' value converted to bool and
    'wiktionarySenseId' converted to int where the input allows it.
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    assert doc.getroot().tag == 'wiktionaryParaphrases'
    paraphrases = []
    for child in doc.getroot():
        if child.tag != 'wiktionaryParaphrase':
            print('unknown child of <wiktionaryParaphrases>', child)
            continue
        paraphrase = child
        warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
        if 0 < len(paraphrase):
            print('unrecognised child of <wiktionaryParaphrase>',
                  list(paraphrase))
        paraphrase_dict = dict(paraphrase.items())
        # normalise the 'edited' yes/no flag to a boolean
        if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
            print('<paraphrase> attribute "edited" has unexpected value',
                  paraphrase_dict['edited'])
        else:
            paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
                paraphrase_dict['edited']]
        # normalise the sense ID to an integer
        if not paraphrase_dict['wiktionarySenseId'].isdigit():
            # bug fix: report the offending sense ID value; the old
            # code printed the unrelated 'edited' attribute here
            print('<paraphrase> attribute "wiktionarySenseId" has '
                  'non-integer value',
                  paraphrase_dict['wiktionarySenseId'])
        else:
            paraphrase_dict['wiktionarySenseId'] = \
                int(paraphrase_dict['wiktionarySenseId'], 10)
        paraphrases.append(paraphrase_dict)
    return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords
# maps reserved-word synset keys to safe field names; applied by
# insert_lexical_information() before writing to MongoDB
SYNSET_KEY_REWRITES = {
    'class': 'gn_class',
}
def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexial
      information
    '''
    # start from a clean slate: drop any previously imported data
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()
    # inject data from XML files into the database
    for lex_file in lex_files:
        for synset_rec in read_lexical_file(lex_file):
            # rename keys that clash with Python keywords
            synset_rec = {SYNSET_KEY_REWRITES.get(key, key): value
                          for (key, value) in synset_rec.items()}
            units = synset_rec['lexunits']
            # replace the embedded lexunit dicts by their ObjectIds
            synset_rec['lexunits'] = germanet_db.lexunits.insert(units)
            synset_oid = germanet_db.synsets.insert(synset_rec)
            # backlink each lexunit to its synset and copy the category
            for unit in units:
                unit['synset'] = synset_oid
                unit['category'] = synset_rec['category']
                germanet_db.lexunits.save(unit)
    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    key_spec = []
    for field in ['orthForm', 'category', 'sense']:
        key_spec.append((field, DESCENDING))
        germanet_db.lexunits.create_index(list(key_spec))
    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
def insert_relation_information(germanet_db, gn_rels_file):
    '''
    Reads in the given GermaNet relation file and inserts its contents
    into the given MongoDB database.

    Lexical relations are attached to lexunit records and conceptual
    relations to synset records; in both cases they are stored as
    sorted lists of (relation name, target ObjectId) pairs under the
    'rels' key.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `gn_rels_file`: path to the gn_relations.xml file
    '''
    lex_rels, con_rels = read_relation_file(gn_rels_file)

    # cache the lexunits while we work on them
    lexunits = {}
    for lex_rel in lex_rels:
        # fetch both endpoint lexunits, at most once each
        if lex_rel['from'] not in lexunits:
            lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['from']})
        from_lexunit = lexunits[lex_rel['from']]
        if lex_rel['to'] not in lexunits:
            lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['to']})
        to_lexunit = lexunits[lex_rel['to']]
        # record the edge on the source lexunit; using a set suppresses
        # duplicate edges
        if 'rels' not in from_lexunit:
            from_lexunit['rels'] = set()
        from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
        # bidirectional relations also get the inverse edge
        if lex_rel['dir'] == 'both':
            if 'rels' not in to_lexunit:
                to_lexunit['rels'] = set()
            to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
    for lexunit in lexunits.values():
        if 'rels' in lexunit:
            # convert the set into a sorted list before saving
            lexunit['rels'] = sorted(lexunit['rels'])
        germanet_db.lexunits.save(lexunit)

    # cache the synsets while we work on them
    synsets = {}
    for con_rel in con_rels:
        # fetch both endpoint synsets, at most once each
        if con_rel['from'] not in synsets:
            synsets[con_rel['from']] = germanet_db.synsets.find_one(
                {'id': con_rel['from']})
        from_synset = synsets[con_rel['from']]
        if con_rel['to'] not in synsets:
            synsets[con_rel['to']] = germanet_db.synsets.find_one(
                {'id': con_rel['to']})
        to_synset = synsets[con_rel['to']]
        if 'rels' not in from_synset:
            from_synset['rels'] = set()
        from_synset['rels'].add((con_rel['name'], to_synset['_id']))
        # 'both' and 'revert' relations also get the inverse edge
        if con_rel['dir'] in ['both', 'revert']:
            if 'rels' not in to_synset:
                to_synset['rels'] = set()
            to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
    for synset in synsets.values():
        if 'rels' in synset:
            # convert the set into a sorted list before saving
            synset['rels'] = sorted(synset['rels'])
        germanet_db.synsets.save(synset)

    print('Inserted {0} lexical relations, {1} synset relations.'.format(
        len(lex_rels), len(con_rels)))
def insert_paraphrase_information(germanet_db, wiktionary_files):
    '''
    Reads in the given wiktionary paraphrase files and attaches their
    contents to the matching lexunit records in the given MongoDB
    database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `wiktionary_files`: a list of paths to wiktionary paraphrase
      XML files
    '''
    num_paraphrases = 0
    # fetch each lexunit record at most once and buffer the updates
    lexunit_cache = {}
    for wiktionary_file in wiktionary_files:
        records = read_paraphrase_file(wiktionary_file)
        num_paraphrases += len(records)
        for record in records:
            unit_id = record['lexUnitId']
            if unit_id not in lexunit_cache:
                lexunit_cache[unit_id] = germanet_db.lexunits.find_one(
                    {'id': unit_id})
            lexunit_cache[unit_id].setdefault('paraphrases',
                                              []).append(record)
    # write the modified lexunit records back to the database
    for lexunit in lexunit_cache.values():
        germanet_db.lexunits.save(lexunit)
    print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
# gzipped word-to-lemma table shipped alongside this module
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'
def insert_lemmatisation_data(germanet_db):
    '''
    Creates the lemmatiser collection in the given MongoDB instance
    using the data derived from the Projekt deutscher Wortschatz.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    # rebuild the collection from scratch
    germanet_db.lemmatiser.drop()
    num_lemmas = 0
    lemma_path = os.path.join(os.path.dirname(__file__),
                              LEMMATISATION_FILE)
    input_file = gzip.open(lemma_path)
    for raw_line in input_file:
        # latin-1 encoded, tab-separated: word<TAB>lemma
        fields = raw_line.decode('iso-8859-1').strip().split('\t')
        assert len(fields) == 2
        germanet_db.lemmatiser.insert(
            dict(list(zip(('word', 'lemma'), fields))))
        num_lemmas += 1
    input_file.close()
    # index the collection on 'word'
    germanet_db.lemmatiser.create_index('word')
    print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
# gzipped SDEWAC corpus counts; lines of count<TAB>pos<TAB>word
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'
def insert_infocontent_data(germanet_db):
    '''
    For every synset in GermaNet, inserts count information derived
    from SDEWAC.

    The smoothed counts are normalised to relative frequencies and
    stored on each synset record under the 'infocont' key.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # use add one smoothing
    gn_counts = defaultdict(lambda: 1.)
    total_count = 1
    input_file = gzip.open(os.path.join(os.path.dirname(__file__),
                                        WORD_COUNT_FILE))
    num_lines_read = 0
    num_lines = 0
    for line in input_file:
        # expected format: count<TAB>pos<TAB>word
        line = line.decode('utf-8').strip().split('\t')
        num_lines += 1
        if len(line) != 3:
            # skip malformed lines
            continue
        count, pos, word = line
        num_lines_read += 1
        count = int(count)
        # all synsets matching this (word, pos) pair — semantics of
        # gnet.synsets defined in the germanet module
        synsets = set(gnet.synsets(word, pos))
        if not synsets:
            continue
        # Although Resnik (1995) suggests dividing count by the number
        # of synsets, Patwardhan et al (2003) argue against doing
        # this.
        count = float(count) / len(synsets)
        for synset in synsets:
            total_count += count
            paths = synset.hypernym_paths
            # spread the count evenly over all hypernym paths, and
            # credit every synset on each path
            scount = float(count) / len(paths)
            for path in paths:
                for ss in path:
                    gn_counts[ss._id] += scount
    print('Read {0} of {1} lines from count file.'.format(num_lines_read,
                                                          num_lines))
    print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
    print('Total count is {0}'.format(total_count))
    input_file.close()
    # update all the synset records in GermaNet
    num_updates = 0
    for synset in germanet_db.synsets.find():
        # normalise the (smoothed) count to a relative frequency;
        # unseen synsets get the smoothed default of 1/total_count
        synset['infocont'] = gn_counts[synset['_id']] / total_count
        germanet_db.synsets.save(synset)
        num_updates += 1
    print('Updated {0} synsets.'.format(num_updates))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
def main():
    '''
    Command-line entry point: parses arguments and imports the
    GermaNet XML files found in XML_PATH into a MongoDB database.
    '''
    usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
             'XML_PATH the directory containing the '
             'GermaNet .xml files')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--host', default=None,
                      help='hostname or IP address of the MongoDB instance '
                      'where the GermaNet database will be inserted '
                      '(default: %default)')
    parser.add_option('--port', type='int', default=None,
                      help='port number of the MongoDB instance where the '
                      'GermaNet database will be inserted (default: %default)')
    parser.add_option('--database', dest='database_name', default='germanet',
                      help='the name of the database on the MongoDB instance '
                      'where GermaNet will be stored (default: %default)')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        # parser.error() prints the message and exits with status 2;
        # the trailing sys.exit(1) that used to follow it here was
        # unreachable dead code and has been removed
        parser.error("incorrect number of arguments")
    xml_path = args[0]

    client = MongoClient(options.host, options.port)
    germanet_db = client[options.database_name]
    # import the various data sets in dependency order
    lex_files, gn_rels_file, wiktionary_files, ili_files = \
        find_germanet_xml_files(xml_path)
    insert_lexical_information(germanet_db, lex_files)
    insert_relation_information(germanet_db, gn_rels_file)
    insert_paraphrase_information(germanet_db, wiktionary_files)
    insert_lemmatisation_data(germanet_db)
    insert_infocontent_data(germanet_db)
    compute_max_min_depth(germanet_db)
    client.close()
# NOTE(review): the sys.argv != [''] check presumably guards against
# execution in an environment where argv is a single empty string —
# confirm the original intent before simplifying
if __name__ == '__main__' and sys.argv != ['']:
    main()
|
wroberts/pygermanet | pygermanet/mongo_import.py | main | python | def main():
'''Main function.'''
usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
'XML_PATH the directory containing the '
'GermaNet .xml files')
parser = optparse.OptionParser(usage=usage)
parser.add_option('--host', default=None,
help='hostname or IP address of the MongoDB instance '
'where the GermaNet database will be inserted '
'(default: %default)')
parser.add_option('--port', type='int', default=None,
help='port number of the MongoDB instance where the '
'GermaNet database will be inserted (default: %default)')
parser.add_option('--database', dest='database_name', default='germanet',
help='the name of the database on the MongoDB instance '
'where GermaNet will be stored (default: %default)')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
sys.exit(1)
xml_path = args[0]
client = MongoClient(options.host, options.port)
germanet_db = client[options.database_name]
lex_files, gn_rels_file, wiktionary_files, ili_files = \
find_germanet_xml_files(xml_path)
insert_lexical_information(germanet_db, lex_files)
insert_relation_information(germanet_db, gn_rels_file)
insert_paraphrase_information(germanet_db, wiktionary_files)
insert_lemmatisation_data(germanet_db)
insert_infocontent_data(germanet_db)
compute_max_min_depth(germanet_db)
client.close() | Main function. | train | https://github.com/wroberts/pygermanet/blob/1818c20a7e8c431c4cfb5a570ed0d850bb6dd515/pygermanet/mongo_import.py#L632-L669 | [
"def find_germanet_xml_files(xml_path):\n '''\n Globs the XML files contained in the given directory and sorts\n them into sections for import into the MongoDB database.\n\n Arguments:\n - `xml_path`: the path to the directory containing the GermaNet\n XML files\n '''\n xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))\n\n # sort out the lexical files\n lex_files = [xml_file for xml_file in xml_files if\n re.match(r'(adj|nomen|verben)\\.',\n os.path.basename(xml_file).lower())]\n xml_files = sorted(set(xml_files) - set(lex_files))\n\n if not lex_files:\n print('ERROR: cannot find lexical information files')\n\n # sort out the GermaNet relations file\n gn_rels_file = [xml_file for xml_file in xml_files if\n os.path.basename(xml_file).lower() == 'gn_relations.xml']\n xml_files = sorted(set(xml_files) - set(gn_rels_file))\n\n if not gn_rels_file:\n print('ERROR: cannot find relations file gn_relations.xml')\n gn_rels_file = None\n else:\n if 1 < len(gn_rels_file):\n print ('WARNING: more than one relations file gn_relations.xml, '\n 'taking first match')\n gn_rels_file = gn_rels_file[0]\n\n # sort out the wiktionary paraphrase files\n wiktionary_files = [xml_file for xml_file in xml_files if\n re.match(r'wiktionaryparaphrases-',\n os.path.basename(xml_file).lower())]\n xml_files = sorted(set(xml_files) - set(wiktionary_files))\n\n if not wiktionary_files:\n print('WARNING: cannot find wiktionary paraphrase files')\n\n # sort out the interlingual index file\n ili_files = [xml_file for xml_file in xml_files if\n os.path.basename(xml_file).lower().startswith(\n 'interlingualindex')]\n xml_files = sorted(set(xml_files) - set(ili_files))\n\n if not ili_files:\n print('WARNING: cannot find interlingual index file')\n\n if xml_files:\n print('WARNING: unrecognised xml files:', xml_files)\n\n return lex_files, gn_rels_file, wiktionary_files, ili_files\n",
"def insert_lexical_information(germanet_db, lex_files):\n '''\n Reads in the given lexical information files and inserts their\n contents into the given MongoDB database.\n\n Arguments:\n - `germanet_db`: a pymongo.database.Database object\n - `lex_files`: a list of paths to XML files containing lexial\n information\n '''\n # drop the database collections if they already exist\n germanet_db.lexunits.drop()\n germanet_db.synsets.drop()\n # inject data from XML files into the database\n for lex_file in lex_files:\n synsets = read_lexical_file(lex_file)\n for synset in synsets:\n synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)\n for (key, value) in synset.items())\n lexunits = synset['lexunits']\n synset['lexunits'] = germanet_db.lexunits.insert(lexunits)\n synset_id = germanet_db.synsets.insert(synset)\n for lexunit in lexunits:\n lexunit['synset'] = synset_id\n lexunit['category'] = synset['category']\n germanet_db.lexunits.save(lexunit)\n # index the two collections by id\n germanet_db.synsets.create_index('id')\n germanet_db.lexunits.create_index('id')\n # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum\n germanet_db.lexunits.create_index([('orthForm', DESCENDING)])\n germanet_db.lexunits.create_index([('orthForm', DESCENDING),\n ('category', DESCENDING)])\n germanet_db.lexunits.create_index([('orthForm', DESCENDING),\n ('category', DESCENDING),\n ('sense', DESCENDING)])\n print('Inserted {0} synsets, {1} lexical units.'.format(\n germanet_db.synsets.count(),\n germanet_db.lexunits.count()))\n",
"def insert_relation_information(germanet_db, gn_rels_file):\n '''\n Reads in the given GermaNet relation file and inserts its contents\n into the given MongoDB database.\n\n Arguments:\n - `germanet_db`: a pymongo.database.Database object\n - `gn_rels_file`:\n '''\n lex_rels, con_rels = read_relation_file(gn_rels_file)\n\n # cache the lexunits while we work on them\n lexunits = {}\n for lex_rel in lex_rels:\n if lex_rel['from'] not in lexunits:\n lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(\n {'id': lex_rel['from']})\n from_lexunit = lexunits[lex_rel['from']]\n if lex_rel['to'] not in lexunits:\n lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(\n {'id': lex_rel['to']})\n to_lexunit = lexunits[lex_rel['to']]\n if 'rels' not in from_lexunit:\n from_lexunit['rels'] = set()\n from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))\n if lex_rel['dir'] == 'both':\n if 'rels' not in to_lexunit:\n to_lexunit['rels'] = set()\n to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))\n for lexunit in lexunits.values():\n if 'rels' in lexunit:\n lexunit['rels'] = sorted(lexunit['rels'])\n germanet_db.lexunits.save(lexunit)\n\n # cache the synsets while we work on them\n synsets = {}\n for con_rel in con_rels:\n if con_rel['from'] not in synsets:\n synsets[con_rel['from']] = germanet_db.synsets.find_one(\n {'id': con_rel['from']})\n from_synset = synsets[con_rel['from']]\n if con_rel['to'] not in synsets:\n synsets[con_rel['to']] = germanet_db.synsets.find_one(\n {'id': con_rel['to']})\n to_synset = synsets[con_rel['to']]\n if 'rels' not in from_synset:\n from_synset['rels'] = set()\n from_synset['rels'].add((con_rel['name'], to_synset['_id']))\n if con_rel['dir'] in ['both', 'revert']:\n if 'rels' not in to_synset:\n to_synset['rels'] = set()\n to_synset['rels'].add((con_rel['inv'], from_synset['_id']))\n for synset in synsets.values():\n if 'rels' in synset:\n synset['rels'] = sorted(synset['rels'])\n germanet_db.synsets.save(synset)\n\n 
print('Inserted {0} lexical relations, {1} synset relations.'.format(\n len(lex_rels), len(con_rels)))\n",
"def insert_paraphrase_information(germanet_db, wiktionary_files):\n '''\n Reads in the given GermaNet relation file and inserts its contents\n into the given MongoDB database.\n\n Arguments:\n - `germanet_db`: a pymongo.database.Database object\n - `wiktionary_files`:\n '''\n num_paraphrases = 0\n # cache the lexunits while we work on them\n lexunits = {}\n for filename in wiktionary_files:\n paraphrases = read_paraphrase_file(filename)\n num_paraphrases += len(paraphrases)\n for paraphrase in paraphrases:\n if paraphrase['lexUnitId'] not in lexunits:\n lexunits[paraphrase['lexUnitId']] = \\\n germanet_db.lexunits.find_one(\n {'id': paraphrase['lexUnitId']})\n lexunit = lexunits[paraphrase['lexUnitId']]\n if 'paraphrases' not in lexunit:\n lexunit['paraphrases'] = []\n lexunit['paraphrases'].append(paraphrase)\n for lexunit in lexunits.values():\n germanet_db.lexunits.save(lexunit)\n\n print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))\n",
"def insert_lemmatisation_data(germanet_db):\n '''\n Creates the lemmatiser collection in the given MongoDB instance\n using the data derived from the Projekt deutscher Wortschatz.\n\n Arguments:\n - `germanet_db`: a pymongo.database.Database object\n '''\n # drop the database collection if it already exists\n germanet_db.lemmatiser.drop()\n num_lemmas = 0\n input_file = gzip.open(os.path.join(os.path.dirname(__file__),\n LEMMATISATION_FILE))\n for line in input_file:\n line = line.decode('iso-8859-1').strip().split('\\t')\n assert len(line) == 2\n germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line))))\n num_lemmas += 1\n input_file.close()\n # index the collection on 'word'\n germanet_db.lemmatiser.create_index('word')\n\n print('Inserted {0} lemmatiser entries.'.format(num_lemmas))\n",
"def insert_infocontent_data(germanet_db):\n '''\n For every synset in GermaNet, inserts count information derived\n from SDEWAC.\n\n Arguments:\n - `germanet_db`: a pymongo.database.Database object\n '''\n gnet = germanet.GermaNet(germanet_db)\n # use add one smoothing\n gn_counts = defaultdict(lambda: 1.)\n total_count = 1\n input_file = gzip.open(os.path.join(os.path.dirname(__file__),\n WORD_COUNT_FILE))\n num_lines_read = 0\n num_lines = 0\n for line in input_file:\n line = line.decode('utf-8').strip().split('\\t')\n num_lines += 1\n if len(line) != 3:\n continue\n count, pos, word = line\n num_lines_read += 1\n count = int(count)\n synsets = set(gnet.synsets(word, pos))\n if not synsets:\n continue\n # Although Resnik (1995) suggests dividing count by the number\n # of synsets, Patwardhan et al (2003) argue against doing\n # this.\n count = float(count) / len(synsets)\n for synset in synsets:\n total_count += count\n paths = synset.hypernym_paths\n scount = float(count) / len(paths)\n for path in paths:\n for ss in path:\n gn_counts[ss._id] += scount\n print('Read {0} of {1} lines from count file.'.format(num_lines_read,\n num_lines))\n print('Recorded counts for {0} synsets.'.format(len(gn_counts)))\n print('Total count is {0}'.format(total_count))\n input_file.close()\n # update all the synset records in GermaNet\n num_updates = 0\n for synset in germanet_db.synsets.find():\n synset['infocont'] = gn_counts[synset['_id']] / total_count\n germanet_db.synsets.save(synset)\n num_updates += 1\n print('Updated {0} synsets.'.format(num_updates))\n",
"def compute_max_min_depth(germanet_db):\n '''\n For every part of speech in GermaNet, computes the maximum\n min_depth in that hierarchy.\n\n Arguments:\n - `germanet_db`: a pymongo.database.Database object\n '''\n gnet = germanet.GermaNet(germanet_db)\n max_min_depths = defaultdict(lambda: -1)\n for synset in gnet.all_synsets():\n min_depth = synset.min_depth\n if max_min_depths[synset.category] < min_depth:\n max_min_depths[synset.category] = min_depth\n\n if germanet_db.metainfo.count() == 0:\n germanet_db.metainfo.insert({})\n metainfo = germanet_db.metainfo.find_one()\n metainfo['max_min_depths'] = max_min_depths\n germanet_db.metainfo.save(metainfo)\n\n print('Computed maximum min_depth for all parts of speech:')\n print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in\n sorted(max_min_depths.items())).encode('utf-8'))\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mongo_import.py
(c) Will Roberts 21 March, 2014
A script to import the GermaNet lexicon into a MongoDB database.
'''
from __future__ import absolute_import, division, print_function
from . import germanet
from builtins import dict, int, str, zip
from collections import defaultdict
from io import open
from pymongo import DESCENDING, MongoClient
import glob
import gzip
import optparse
import os
import re
import sys
import xml.etree.ElementTree as etree
# ------------------------------------------------------------
# Find filenames
# ------------------------------------------------------------
def find_germanet_xml_files(xml_path):
    '''
    Globs the XML files contained in the given directory and sorts
    them into sections for import into the MongoDB database.

    Arguments:
    - `xml_path`: the path to the directory containing the GermaNet
      XML files

    Returns a tuple (lex_files, gn_rels_file, wiktionary_files,
    ili_files); `gn_rels_file` is a single path or None, the others
    are (possibly empty) sorted lists of paths.
    '''
    xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))

    def _split(files, pred):
        # partition `files` by a predicate on the lowercased basename,
        # returning (matching files, sorted remainder)
        matched = [f for f in files
                   if pred(os.path.basename(f).lower())]
        return matched, sorted(set(files) - set(matched))

    # lexical information files: adj.*, nomen.*, verben.*
    lex_files, xml_files = _split(
        xml_files, lambda name: re.match(r'(adj|nomen|verben)\.', name))
    if not lex_files:
        print('ERROR: cannot find lexical information files')

    # the GermaNet relations file
    gn_rels_file, xml_files = _split(
        xml_files, lambda name: name == 'gn_relations.xml')
    if not gn_rels_file:
        print('ERROR: cannot find relations file gn_relations.xml')
        gn_rels_file = None
    else:
        if 1 < len(gn_rels_file):
            print('WARNING: more than one relations file gn_relations.xml, '
                  'taking first match')
        gn_rels_file = gn_rels_file[0]

    # the wiktionary paraphrase files
    wiktionary_files, xml_files = _split(
        xml_files, lambda name: re.match(r'wiktionaryparaphrases-', name))
    if not wiktionary_files:
        print('WARNING: cannot find wiktionary paraphrase files')

    # the interlingual index file
    ili_files, xml_files = _split(
        xml_files, lambda name: name.startswith('interlingualindex'))
    if not ili_files:
        print('WARNING: cannot find interlingual index file')

    if xml_files:
        print('WARNING: unrecognised xml files:', xml_files)

    return lex_files, gn_rels_file, wiktionary_files, ili_files
# ------------------------------------------------------------
# Read lexical files
# ------------------------------------------------------------
def warn_attribs(loc,
                 node,
                 recognised_attribs,
                 reqd_attribs=None):
    '''
    Error checking of XML input: warn if the given node lacks any of
    its required attributes, or carries attributes we do not know how
    to handle.  Warnings are printed to stdout; nothing is raised.

    Arguments:
    - `loc`: a string describing where in the XML file the node lives
    - `node`: the XML node whose attributes are checked
    - `recognised_attribs`: the set of attribute names we can handle
    - `reqd_attribs`: the set of attribute names which must be
      present; when None, defaults to `recognised_attribs`
    '''
    if reqd_attribs is None:
        reqd_attribs = recognised_attribs
    present = set(node.keys())
    missing = reqd_attribs - present
    if missing:
        print(loc, 'missing <{0}> attributes'.format(node.tag), missing)
    unknown = present - recognised_attribs
    if unknown:
        print(loc, 'unrecognised <{0}> properties'.format(node.tag), unknown)
# Attribute names recognised on the corresponding XML elements; these
# sets are passed to warn_attribs() to flag unexpected or missing
# attributes during parsing.
SYNSET_ATTRIBS = set(['category', 'id', 'class'])
LEXUNIT_ATTRIBS = set(['styleMarking', 'namedEntity', 'artificial',
                       'source', 'sense', 'id'])
MODIFIER_ATTRIBS = set(['category', 'property'])
HEAD_ATTRIBS = set(['property'])
# maps the 'yes'/'no' strings used by GermaNet XML attributes to
# Python booleans
MAP_YESNO_TO_BOOL = {
    'yes': True,
    'no': False,
}
def read_lexical_file(filename):
    '''
    Reads in a GermaNet lexical information file and returns its
    contents as a list of dictionary structures.

    Arguments:
    - `filename`: the name of the XML file to read

    Returns a list of synset dicts; each synset dict has a 'lexunits'
    list of lexunit dicts, which in turn carry 'examples' and 'frames'.
    Malformed input is reported via print() and skipped where possible.
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        synset_dict = dict(synset.items())
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        # NOTE: the loops below deliberately reuse (shadow) the name
        # `child` at each nesting level; only the innermost binding is
        # live at any point.
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                for child in lexunit:
                    if child.tag in ['orthForm',
                                     'orthVar',
                                     'oldOrthForm',
                                     'oldOrthVar']:
                        # orthographic forms: plain text children, at most
                        # one of each kind per lexunit
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc, 'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        # NOTE(review): if no <text> child exists, the
                        # text[0] access below raises IndexError after the
                        # warning — confirm whether that is intended.
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                if 'exframe' in example_dict:
                                    print(lexloc,
                                          'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(modifier_dict)
                            elif child.tag == 'head':
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child, HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                        # NOTE(review): compound_dict is fully built here
                        # but never attached to lexunit_dict — compound
                        # data appears to be silently dropped; confirm
                        # against the upstream project.
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>', child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
# ------------------------------------------------------------
# Read relation file
# ------------------------------------------------------------
# Attributes required/optional on <lex_rel> and <con_rel> elements in the
# GermaNet relation file.
RELATION_ATTRIBS_REQD = set(['dir', 'from', 'name', 'to'])
RELATION_ATTRIBS_OPT = set(['inv'])
RELATION_ATTRIBS = RELATION_ATTRIBS_REQD | RELATION_ATTRIBS_OPT
# Legal values of the "dir" attribute for lexical vs. conceptual relations.
LEX_REL_DIRS = set(['both', 'one'])
CON_REL_DIRS = set(['both', 'revert', 'one'])
def read_relation_file(filename):
    '''
    Parse the GermaNet relation file ``gn_relations.xml``, which
    enumerates the relations holding between lexical units and the
    relations holding between synsets.

    Arguments:
    - `filename`: path to the relation XML file

    Returns a pair ``(lex_rels, con_rels)`` of lists of attribute dicts.
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    lex_rels = []
    con_rels = []
    assert doc.getroot().tag == 'relations'
    for node in doc.getroot():
        if node.tag == 'lex_rel':
            if len(node) > 0:
                print('<lex_rel> has unexpected child node')
            attrs = dict(node.items())
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            direction = attrs['dir']
            if direction not in LEX_REL_DIRS:
                print('unrecognized <lex_rel> dir', direction)
            # a bidirectional relation must name its inverse
            if direction == 'both' and 'inv' not in attrs:
                print('<lex_rel> has dir=both but does not specify inv')
            lex_rels.append(attrs)
        elif node.tag == 'con_rel':
            if len(node) > 0:
                print('<con_rel> has unexpected child node')
            attrs = dict(node.items())
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            direction = attrs['dir']
            if direction not in CON_REL_DIRS:
                print('unrecognised <con_rel> dir', direction)
            if direction in ['both', 'revert'] and 'inv' not in attrs:
                print('<con_rel> has dir={0} but does not specify inv'.format(
                    direction))
            con_rels.append(attrs)
        else:
            print('unrecognised child of <relations>', node)
            continue
    return lex_rels, con_rels
# ------------------------------------------------------------
# Read wiktionary paraphrase file
# ------------------------------------------------------------
# Attributes expected on each <wiktionaryParaphrase> element.
PARAPHRASE_ATTRIBS = set(['edited', 'lexUnitId', 'wiktionaryId',
                          'wiktionarySense', 'wiktionarySenseId'])
def read_paraphrase_file(filename):
    '''
    Reads in a GermaNet wiktionary paraphrase file and returns its
    contents as a list of dictionary structures.

    Arguments:
    - `filename`: path to the wiktionary paraphrase XML file

    Returns a list of dicts, one per <wiktionaryParaphrase> element,
    with 'edited' converted to bool and 'wiktionarySenseId' converted
    to int where the values are well-formed.
    '''
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)
    assert doc.getroot().tag == 'wiktionaryParaphrases'
    paraphrases = []
    for child in doc.getroot():
        if child.tag == 'wiktionaryParaphrase':
            paraphrase = child
            warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
            if 0 < len(paraphrase):
                print('unrecognised child of <wiktionaryParaphrase>',
                      list(paraphrase))
            paraphrase_dict = dict(paraphrase.items())
            if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
                print('<paraphrase> attribute "edited" has unexpected value',
                      paraphrase_dict['edited'])
            else:
                paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
                    paraphrase_dict['edited']]
            if not paraphrase_dict['wiktionarySenseId'].isdigit():
                # BUG FIX: report the offending senseId value; the original
                # mistakenly printed paraphrase_dict['edited'] here.
                print('<paraphrase> attribute "wiktionarySenseId" has '
                      'non-integer value',
                      paraphrase_dict['wiktionarySenseId'])
            else:
                paraphrase_dict['wiktionarySenseId'] = \
                    int(paraphrase_dict['wiktionarySenseId'], 10)
            paraphrases.append(paraphrase_dict)
        else:
            print('unknown child of <wiktionaryParaphrases>', child)
    return paraphrases
# ------------------------------------------------------------
# Mongo insertion
# ------------------------------------------------------------
# we need to change the names of some synset keys because they are
# Python keywords
SYNSET_KEY_REWRITES = {
    'class': 'gn_class',  # 'class' is a Python keyword; rename so the field is usable as an attribute
}
def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexical
      information
    '''
    # drop the database collections if they already exist
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()
    # inject data from XML files into the database
    for lex_file in lex_files:
        synsets = read_lexical_file(lex_file)
        for synset in synsets:
            # rename keys that collide with Python keywords (e.g. 'class')
            synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
                          for (key, value) in synset.items())
            lexunits = synset['lexunits']
            # store the lexunits separately; the synset keeps their ObjectIds
            # NOTE(review): insert()/save() are legacy pymongo APIs removed
            # in pymongo 4 — this module assumes an old pymongo version.
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            # back-link each lexunit to its synset and copy the category down
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)
    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING),
                                       ('sense', DESCENDING)])
    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
def insert_relation_information(germanet_db, gn_rels_file):
    '''
    Reads in the given GermaNet relation file and inserts its contents
    into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `gn_rels_file`: path to the gn_relations.xml file
    '''
    lex_rels, con_rels = read_relation_file(gn_rels_file)
    # cache the lexunits while we work on them
    lexunits = {}
    for lex_rel in lex_rels:
        if lex_rel['from'] not in lexunits:
            lexunits[lex_rel['from']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['from']})
        from_lexunit = lexunits[lex_rel['from']]
        if lex_rel['to'] not in lexunits:
            lexunits[lex_rel['to']] = germanet_db.lexunits.find_one(
                {'id': lex_rel['to']})
        to_lexunit = lexunits[lex_rel['to']]
        # record the relation as (name, target ObjectId); a set removes dupes
        if 'rels' not in from_lexunit:
            from_lexunit['rels'] = set()
        from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id']))
        # bidirectional relations also get the inverse edge
        if lex_rel['dir'] == 'both':
            if 'rels' not in to_lexunit:
                to_lexunit['rels'] = set()
            to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id']))
    for lexunit in lexunits.values():
        if 'rels' in lexunit:
            # sorted list: sets are not BSON-serialisable
            lexunit['rels'] = sorted(lexunit['rels'])
        germanet_db.lexunits.save(lexunit)
    # cache the synsets while we work on them
    synsets = {}
    for con_rel in con_rels:
        if con_rel['from'] not in synsets:
            synsets[con_rel['from']] = germanet_db.synsets.find_one(
                {'id': con_rel['from']})
        from_synset = synsets[con_rel['from']]
        if con_rel['to'] not in synsets:
            synsets[con_rel['to']] = germanet_db.synsets.find_one(
                {'id': con_rel['to']})
        to_synset = synsets[con_rel['to']]
        if 'rels' not in from_synset:
            from_synset['rels'] = set()
        from_synset['rels'].add((con_rel['name'], to_synset['_id']))
        # 'both' and 'revert' both imply an inverse edge on the target
        if con_rel['dir'] in ['both', 'revert']:
            if 'rels' not in to_synset:
                to_synset['rels'] = set()
            to_synset['rels'].add((con_rel['inv'], from_synset['_id']))
    for synset in synsets.values():
        if 'rels' in synset:
            synset['rels'] = sorted(synset['rels'])
        germanet_db.synsets.save(synset)
    print('Inserted {0} lexical relations, {1} synset relations.'.format(
        len(lex_rels), len(con_rels)))
def insert_paraphrase_information(germanet_db, wiktionary_files):
    '''
    Load wiktionary paraphrase files and attach their entries to the
    corresponding lexical unit records in the MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `wiktionary_files`: paths to wiktionary paraphrase XML files
    '''
    num_paraphrases = 0
    # fetch each lexunit document at most once while accumulating entries
    unit_cache = {}
    for wiktionary_file in wiktionary_files:
        entries = read_paraphrase_file(wiktionary_file)
        num_paraphrases += len(entries)
        for entry in entries:
            unit_id = entry['lexUnitId']
            if unit_id not in unit_cache:
                unit_cache[unit_id] = germanet_db.lexunits.find_one(
                    {'id': unit_id})
            unit = unit_cache[unit_id]
            unit.setdefault('paraphrases', []).append(entry)
    for unit in unit_cache.values():
        germanet_db.lexunits.save(unit)
    print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
# Gzipped tab-separated word/lemma pairs shipped alongside this module.
LEMMATISATION_FILE = 'baseforms_by_projekt_deutscher_wortschatz.txt.gz'

def insert_lemmatisation_data(germanet_db):
    '''
    Creates the lemmatiser collection in the given MongoDB instance
    using the data derived from the Projekt deutscher Wortschatz.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    # drop the database collection if it already exists
    germanet_db.lemmatiser.drop()
    num_lemmas = 0
    # FIX: use a context manager so the gzip handle is closed even if an
    # exception is raised mid-read (the original leaked it on error)
    with gzip.open(os.path.join(os.path.dirname(__file__),
                                LEMMATISATION_FILE)) as input_file:
        for line in input_file:
            # each line is ISO-8859-1 encoded: word<TAB>lemma
            line = line.decode('iso-8859-1').strip().split('\t')
            assert len(line) == 2
            germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'),
                                                        line))))
            num_lemmas += 1
    # index the collection on 'word'
    germanet_db.lemmatiser.create_index('word')
    print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
# ------------------------------------------------------------
# Information content for GermaNet similarity
# ------------------------------------------------------------
WORD_COUNT_FILE = 'sdewac-gn-words.tsv.gz'
def insert_infocontent_data(germanet_db):
    '''
    For every synset in GermaNet, inserts count information derived
    from SDEWAC.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    # use add one smoothing
    gn_counts = defaultdict(lambda: 1.)
    total_count = 1
    num_lines_read = 0
    num_lines = 0
    # FIX: context manager guarantees the gzip handle is closed even if an
    # exception is raised mid-read (the original only closed it on success)
    with gzip.open(os.path.join(os.path.dirname(__file__),
                                WORD_COUNT_FILE)) as input_file:
        for line in input_file:
            # each valid line is: count<TAB>pos<TAB>word
            line = line.decode('utf-8').strip().split('\t')
            num_lines += 1
            if len(line) != 3:
                continue
            count, pos, word = line
            num_lines_read += 1
            count = int(count)
            synsets = set(gnet.synsets(word, pos))
            if not synsets:
                continue
            # Although Resnik (1995) suggests dividing count by the number
            # of synsets, Patwardhan et al (2003) argue against doing
            # this.
            count = float(count) / len(synsets)
            for synset in synsets:
                total_count += count
                paths = synset.hypernym_paths
                # spread the count evenly over every hypernym path
                scount = float(count) / len(paths)
                for path in paths:
                    for ss in path:
                        gn_counts[ss._id] += scount
    print('Read {0} of {1} lines from count file.'.format(num_lines_read,
                                                          num_lines))
    print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
    print('Total count is {0}'.format(total_count))
    # update all the synset records in GermaNet
    num_updates = 0
    for synset in germanet_db.synsets.find():
        synset['infocont'] = gn_counts[synset['_id']] / total_count
        germanet_db.synsets.save(synset)
        num_updates += 1
    print('Updated {0} synsets.'.format(num_updates))
def compute_max_min_depth(germanet_db):
    '''
    For every part of speech in GermaNet, computes the maximum
    min_depth in that hierarchy and stores the result in the
    database's metainfo document.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    max_min_depths = defaultdict(lambda: -1)
    # one pass over all synsets, tracking the max per category (POS)
    for synset in gnet.all_synsets():
        min_depth = synset.min_depth
        if max_min_depths[synset.category] < min_depth:
            max_min_depths[synset.category] = min_depth
    # ensure a metainfo document exists before updating it
    if germanet_db.metainfo.count() == 0:
        germanet_db.metainfo.insert({})
    metainfo = germanet_db.metainfo.find_one()
    metainfo['max_min_depths'] = max_min_depths
    germanet_db.metainfo.save(metainfo)
    print('Computed maximum min_depth for all parts of speech:')
    # NOTE(review): under Python 3 this prints a bytes repr (b'...');
    # the trailing .encode('utf-8') looks like a Python 2 leftover —
    # confirm whether it can be dropped.
    print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
                     sorted(max_min_depths.items())).encode('utf-8'))
# ------------------------------------------------------------
# Main function
# ------------------------------------------------------------
# Entry-point guard. The extra sys.argv check presumably skips execution
# when the module is exec'd with an empty argv (e.g. by some build or
# documentation tools) — confirm against how the package invokes it.
if __name__ == '__main__' and sys.argv != ['']:
    main()
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | customWalker | python | def customWalker(node, space=''):
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt | A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L13-L44 | [
"def customWalker(node, space=''):\n \"\"\"\n A convenience function to ease debugging. It will print the node structure that's returned from CommonMark\n\n The usage would be something like:\n\n >>> content = Parser().parse('Some big text block\\n===================\\n\\nwith content\\n')\n >>> customWalker(content)\n document\n heading\n text\tSome big text block\n paragraph\n text\twith content\n\n Spaces are used to convey nesting\n \"\"\"\n txt = ''\n try:\n txt = node.literal\n except:\n pass\n\n if txt is None or txt == '':\n print('{}{}'.format(space, node.t))\n else:\n print('{}{}\\t{}'.format(space, node.t, txt))\n\n cur = node.first_child\n if cur:\n while cur is not None:\n customWalker(cur, space + ' ')\n cur = cur.nxt\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def paragraph(node):
    """
    Convert a CommonMark paragraph node (and everything nested inside
    it) into a docutils paragraph node.
    """
    raw_text = node.string_content if node.string_content is not None else ''
    # NOTE(review): ' '.join over a plain string interleaves a space
    # between every character — presumably intentional; confirm.
    para = nodes.paragraph('', ' '.join(raw_text))
    para.line = node.sourcepos[0][0]
    for converted in MarkDown(node):
        para.append(converted)
    return para
def text(node):
    """
    Convert a CommonMark text node into a docutils Text node.
    """
    literal_value = node.literal
    return nodes.Text(literal_value)
def hardbreak(node):
    """
    Render a hard line break (``<br />`` in HTML) as a newline Text node.
    """
    return nodes.Text('\n')
def softbreak(node):
    """
    Render a soft break (line ending or collapsible space) as a newline
    Text node.
    """
    return nodes.Text('\n')
def reference(node):
    """
    Convert a CommonMark link into a docutils reference.

    Alt text is not carried over — docutils offers no apparent way to
    express it on a reference.
    """
    ref = nodes.reference()
    ref['refuri'] = node.destination
    if node.title:
        ref['name'] = node.title
    for converted in MarkDown(node):
        ref += converted
    return ref
def emphasis(node):
    """
    Convert an italicised span into a docutils emphasis node.
    """
    em = nodes.emphasis()
    for converted in MarkDown(node):
        em += converted
    return em
def strong(node):
    """
    Convert a bolded span into a docutils strong node.
    """
    bold = nodes.strong()
    for converted in MarkDown(node):
        bold += converted
    return bold
def literal(node):
    """
    Convert inline code into a docutils literal node.

    If the node carries language info, try to syntax-highlight the text
    with docutils' Lexer; on any lexing failure fall back to plain,
    unhighlighted literal text.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for token in lexer:
                rendered.append(node.inline(classes=token[0], text=token[1]))
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; lexing failures still fall
        # back to plain text.
        pass
    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if len(rendered) > 0:
        o = nodes.literal(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal(text=node.literal, classes=classes)
    for n in MarkDown(node):
        o += n
    return o
def literal_block(node):
    """
    Convert a fenced/indented code block into a docutils literal_block.

    If the node carries language info, try to syntax-highlight the text
    with docutils' Lexer; on any lexing failure fall back to plain,
    unhighlighted literal text.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for token in lexer:
                rendered.append(node.inline(classes=token[0], text=token[1]))
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; lexing failures still fall
        # back to plain text.
        pass
    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if len(rendered) > 0:
        o = nodes.literal_block(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal_block(text=node.literal, classes=classes)
    o.line = node.sourcepos[0][0]
    for n in MarkDown(node):
        o += n
    return o
def raw(node):
    """
    Insert raw HTML (inline or block) into the docutils tree unchanged.
    """
    raw_node = nodes.raw(node.literal, node.literal, format='html')
    if node.sourcepos is not None:
        raw_node.line = node.sourcepos[0][0]
    for converted in MarkDown(node):
        raw_node += converted
    return raw_node
def transition(node):
    """
    Render a thematic break (``<hr>`` in HTML); it has no children.
    """
    return nodes.transition()
def title(node):
    """
    Build a docutils title node from a heading's text child.
    """
    heading_text = node.first_child.literal
    return nodes.title(heading_text, heading_text)
def section(node):
    """
    Build a reStructuredText section from a custom 'MDsection' node.

    The section id/name comes from the first child when it is a heading;
    otherwise both stay empty (every section still needs an id).
    """
    section_title = ''
    first = node.first_child
    if first is not None and first.t == u'heading':
        section_title = first.first_child.literal
    result = nodes.section(ids=[section_title], names=[section_title])
    for converted in MarkDown(node):
        result += converted
    return result
def block_quote(node):
    """
    Convert a CommonMark block quote into a docutils block_quote.
    """
    quote = nodes.block_quote()
    quote.line = node.sourcepos[0][0]
    for converted in MarkDown(node):
        quote += converted
    return quote
def image(node):
    """
    Convert an image element; the first child supplies the alt text.
    reStructuredText can't handle image titles.
    """
    img = nodes.image(uri=node.destination)
    if node.first_child is not None:
        img['alt'] = node.first_child.literal
    return img
def listItem(node):
    """
    Convert a single list item and its contents.
    """
    item = nodes.list_item()
    for converted in MarkDown(node):
        item += converted
    return item
def listNode(node):
    """
    Convert a list (bulleted or enumerated).

    For enumerated lists, the delimiter suffix is only rendered as '.'
    in HTML output.
    """
    if node.list_data['type'] == u'bullet':
        converted_list = nodes.bullet_list(
            bullet=node.list_data['bullet_char'])
    else:
        converted_list = nodes.enumerated_list(
            suffix=node.list_data['delimiter'],
            enumtype='arabic',
            start=node.list_data['start'])
    for converted in MarkDown(node):
        converted_list += converted
    return converted_list
def MarkDown(node):
    """
    Convert the children of a CommonMark node into a list of docutils
    nodes, dispatching on each child's node type.
    """
    # node-type -> converter function; replaces the original if/elif chain
    handlers = {
        'paragraph': paragraph,
        'text': text,
        'softbreak': softbreak,
        'linebreak': hardbreak,
        'link': reference,
        'heading': title,
        'emph': emphasis,
        'strong': strong,
        'code': literal,
        'code_block': literal_block,
        'html_inline': raw,
        'html_block': raw,
        'block_quote': block_quote,
        'thematic_break': transition,
        'image': image,
        'list': listNode,
        'item': listItem,
        'MDsection': section,
    }
    output = []
    cur = node.first_child
    while cur is not None:
        handler = handlers.get(cur.t)
        if handler is not None:
            output.append(handler(cur))
        else:
            print('Received unhandled type: {}. Full print of node:'.format(
                cur.t))
            cur.pretty()
        cur = cur.nxt
    return output
def finalizeSection(section):
    """
    Fix up the ``nxt`` and ``parent`` pointers of every child of a
    freshly assembled section node.
    """
    tail = section.last_child
    if tail is not None:
        tail.nxt = None
    node = section.first_child
    while node is not None:
        node.parent = section
        node = node.nxt
def nestSections(block, level=1):
    """
    Sections aren't handled by CommonMark at the moment.
    This function adds sections to a block of nodes.
    'title' nodes with an assigned level below 'level' will be put in a child section.
    If there are no child nodes with titles of level 'level' then nothing is done
    """
    cur = block.first_child
    if cur is not None:
        children = []
        # Do we need to do anything?  Scan for a heading at this level.
        nest = False
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                nest = True
                break
            cur = cur.nxt
        if not nest:
            return
        section = Node('MDsection', 0)
        section.parent = block
        # Second pass: split the child list at every level-N heading.
        cur = block.first_child
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                # Found a split point, flush the last section if needed
                if section.first_child is not None:
                    finalizeSection(section)
                    children.append(section)
                    section = Node('MDsection', 0)
            # capture nxt now: append_child below rewires cur's links
            nxt = cur.nxt
            # Avoid adding sections without titles at the start
            if section.first_child is None:
                if cur.t == 'heading' and cur.level == level:
                    section.append_child(cur)
                else:
                    children.append(cur)
            else:
                section.append_child(cur)
            cur = nxt
        # If there's only 1 child then don't bother
        if section.first_child is not None:
            finalizeSection(section)
            children.append(section)
        # Rebuild block's child list from `children`, recursing into each
        # new section for the next heading level.
        block.first_child = None
        block.last_child = None
        nextLevel = level + 1
        for child in children:
            # Handle nesting
            if child.t == 'MDsection':
                nestSections(child, level=nextLevel)
            # Append
            if block.first_child is None:
                block.first_child = child
            else:
                block.last_child.nxt = child
            child.parent = block
            child.nxt = None
            child.prev = block.last_child
            block.last_child = child
def parseMarkDownBlock(text):
    """
    Parse a block of Markdown text, returning a list of docutils nodes.

    NOTE(review): the original docstring carried a doctest calling the
    misspelled name ``parseMarkdownBlock`` with an empty expected
    output; it is kept out of doctest form here so it cannot produce a
    false failure.
    """
    block = Parser().parse(text)
    # CommonMark can't nest sections, so do it manually
    nestSections(block)
    return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | paragraph | python | def paragraph(node):
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o | Process a paragraph, which includes all content under it | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L47-L59 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
    """
    A convenience debugging helper: print the node structure returned by
    CommonMark, one node type per line, with nesting shown by leading
    spaces and any literal text after a tab.
    """
    txt = ''
    try:
        txt = node.literal
    # NOTE(review): bare except — also catches KeyboardInterrupt/SystemExit;
    # presumably only AttributeError is expected here.
    except:
        pass
    if txt is None or txt == '':
        print('{}{}'.format(space, node.t))
    else:
        print('{}{}\t{}'.format(space, node.t, txt))
    # recurse depth-first over the sibling chain of children
    cur = node.first_child
    if cur:
        while cur is not None:
            customWalker(cur, space + ' ')
            cur = cur.nxt
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
"""
A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils
"""
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o
def emphasis(node):
"""
An italicized section
"""
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o
def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o
def literal(node):
"""
Inline code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def title(node):
"""
A title node. It has no children
"""
return nodes.title(node.first_child.literal, node.first_child.literal)
def section(node):
"""
A section in reStructuredText, which needs a title (the first child)
This is a custom type
"""
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o
def block_quote(node):
"""
A block quote
"""
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def image(node):
"""
An image element
The first child is the alt text. reStructuredText can't handle titles
"""
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
"""
Correct the nxt and parent for each child
"""
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt
def nestSections(block, level=1):
"""
Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done
"""
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child
def parseMarkDownBlock(text):
"""
Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[]
"""
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | reference | python | def reference(node):
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o | A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L83-L93 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
    """
    Debugging convenience: print the CommonMark node tree rooted at ``node``.

    Each line shows the node type and, when present, a tab plus the node's
    ``literal`` text.  Children are indented by extending ``space``, which
    conveys nesting depth.  Usage is along the lines of::

        content = Parser().parse('Some big text block\\n========\\n\\nwith content\\n')
        customWalker(content)
    """
    # Not every node type carries a ``literal`` attribute; previously a bare
    # ``except`` swallowed the AttributeError -- getattr makes the same
    # fallback explicit without hiding unrelated errors.
    txt = getattr(node, 'literal', '')
    if txt is None or txt == '':
        print('{}{}'.format(space, node.t))
    else:
        print('{}{}\t{}'.format(space, node.t, txt))
    # Children form a singly linked list threaded through ``nxt``.
    cur = node.first_child
    if cur:
        while cur is not None:
            customWalker(cur, space + ' ')
            cur = cur.nxt
def paragraph(node):
    """
    Convert a paragraph node, including all content under it.

    Returns a docutils ``paragraph`` whose children are the converted
    CommonMark children of ``node``.
    """
    text = ''
    if node.string_content is not None:
        text = node.string_content
    # NOTE(review): ``' '.join(text)`` joins the *characters* of the string
    # with spaces ("ab" -> "a b"), which looks unintended -- confirm whether
    # ``''.join`` or plain ``text`` was meant.  It only affects the
    # rawsource argument, not the rendered children appended below.
    o = nodes.paragraph('', ' '.join(text))
    # Keep the source line for docutils error reporting.
    o.line = node.sourcepos[0][0]
    for n in MarkDown(node):
        o.append(n)
    return o
def text(node):
    """Convert a plain text node into a docutils ``Text`` node."""
    content = node.literal
    return nodes.Text(content)
def hardbreak(node):
    """A forced line break: ``<br />`` in html, a newline in plain text."""
    newline = '\n'
    return nodes.Text(newline)
def softbreak(node):
    """A soft break (source line ending), rendered as a newline."""
    newline = '\n'
    return nodes.Text(newline)
def emphasis(node):
    """Build a docutils ``emphasis`` (italics) node from an emph node."""
    emph = nodes.emphasis()
    for child in MarkDown(node):
        emph += child
    return emph
def strong(node):
    """Build a docutils ``strong`` (bold) node from a strong node."""
    bold = nodes.strong()
    for child in MarkDown(node):
        bold += child
    return bold
def literal(node):
    """
    Convert inline code.

    If the node carries an ``info`` string it is treated as a language name
    and the text is run through the docutils ``Lexer`` so each token becomes
    a classed ``inline`` child; otherwise (or when lexing fails) the raw
    text is emitted as a plain ``literal``.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for tok_classes, tok_text in lexer:
                # BUG FIX: was ``node.inline`` -- CommonMark nodes have no
                # such attribute, so this always raised and the bare except
                # silently disabled highlighting.  The docutils element
                # ``nodes.inline`` is what was intended.
                rendered.append(nodes.inline(classes=tok_classes, text=tok_text))
    except Exception:
        # Lexing is best-effort (e.g. pygments missing or unknown language);
        # fall back to unhighlighted output below.
        pass
    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if len(rendered) > 0:
        o = nodes.literal(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal(text=node.literal, classes=classes)
    for n in MarkDown(node):
        o += n
    return o
def literal_block(node):
    """
    Convert a fenced or indented code block.

    When the fence declares a language (``node.info``), the text is lexed
    so each token becomes a classed ``inline`` child of the
    ``literal_block``; otherwise (or when lexing fails) the raw text is
    emitted unhighlighted.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for tok_classes, tok_text in lexer:
                # BUG FIX: was ``node.inline`` -- CommonMark nodes have no
                # such attribute, so this always raised and the bare except
                # silently disabled syntax highlighting.  The docutils
                # element ``nodes.inline`` is what was intended.
                rendered.append(nodes.inline(classes=tok_classes, text=tok_text))
    except Exception:
        # Lexing is best-effort; fall back to unhighlighted output below.
        pass
    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if len(rendered) > 0:
        o = nodes.literal_block(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal_block(text=node.literal, classes=classes)
    # Keep the source line for docutils error reporting.
    o.line = node.sourcepos[0][0]
    for n in MarkDown(node):
        o += n
    return o
def raw(node):
    """Emit raw html (inline or block-level) unchanged."""
    markup = node.literal
    o = nodes.raw(markup, markup, format='html')
    # Inline html has no sourcepos; only blocks carry a source line.
    if node.sourcepos is not None:
        o.line = node.sourcepos[0][0]
    for child in MarkDown(node):
        o += child
    return o
def transition(node):
    """A horizontal rule (``<hr>`` in html); transition nodes have no children."""
    rule = nodes.transition()
    return rule
def title(node):
    """
    Convert a heading node; the heading text lives in the first child.

    Guards against empty headings (e.g. ``##`` with no text), whose
    missing first child previously raised ``AttributeError``.
    """
    child = node.first_child
    heading_text = child.literal if child is not None else ''
    return nodes.title(heading_text, heading_text)
def section(node):
    """
    Convert a custom 'MDsection' node into a reStructuredText section.

    Every section needs an id; it is taken from the heading that starts
    the section when one is present, otherwise it is empty.
    """
    name = ''
    first = node.first_child
    if first is not None:
        if first.t == u'heading':
            name = first.first_child.literal
    sect = nodes.section(ids=[name], names=[name])
    for child in MarkDown(node):
        sect += child
    return sect
def block_quote(node):
    """Convert a block quote, keeping the source line for error reporting."""
    quote = nodes.block_quote()
    quote.line = node.sourcepos[0][0]
    for child in MarkDown(node):
        quote += child
    return quote
def image(node):
    """
    Convert an image element.

    The first child holds the alt text; reStructuredText cannot express an
    image title, so the CommonMark title is dropped.
    """
    img = nodes.image(uri=node.destination)
    if node.first_child is not None:
        img['alt'] = node.first_child.literal
    return img
def listItem(node):
    """Convert one item of a (bullet or enumerated) list."""
    item = nodes.list_item()
    for child in MarkDown(node):
        item += child
    return item
def listNode(node):
    """
    Convert a list, bulleted or numbered.

    For numbered lists the delimiter suffix is only rendered as ``.`` in html.
    """
    data = node.list_data
    if data['type'] == u'bullet':
        lst = nodes.bullet_list(bullet=data['bullet_char'])
    else:
        lst = nodes.enumerated_list(suffix=data['delimiter'],
                                    enumtype='arabic',
                                    start=data['start'])
    for child in MarkDown(node):
        lst += child
    return lst
def MarkDown(node):
    """
    Convert the children of a CommonMark node into a list of docutils nodes.

    Each child is dispatched on its type ``t``; unrecognised types are
    reported on stdout (with a dump of the offending node) and skipped.
    """
    # Dispatch table instead of a long if/elif chain; both html variants
    # share the ``raw`` handler.
    handlers = {
        'paragraph': paragraph,
        'text': text,
        'softbreak': softbreak,
        'linebreak': hardbreak,
        'link': reference,
        'heading': title,
        'emph': emphasis,
        'strong': strong,
        'code': literal,
        'code_block': literal_block,
        'html_inline': raw,
        'html_block': raw,
        'block_quote': block_quote,
        'thematic_break': transition,
        'image': image,
        'list': listNode,
        'item': listItem,
        'MDsection': section,
    }
    output = []
    cur = node.first_child
    while cur is not None:
        handler = handlers.get(cur.t)
        if handler is not None:
            output.append(handler(cur))
        else:
            print('Received unhandled type: {}. Full print of node:'.format(cur.t))
            cur.pretty()
        cur = cur.nxt
    return output
def finalizeSection(section):
    """
    Repair child bookkeeping after nodes were moved into *section*:
    terminate the sibling chain at the last child and point every child's
    ``parent`` at the section.
    """
    tail = section.last_child
    if tail is not None:
        tail.nxt = None
    child = section.first_child
    while child is not None:
        child.parent = section
        child = child.nxt
def nestSections(block, level=1):
    """
    Group the children of *block* into 'MDsection' nodes, split at headings.

    Sections aren't handled by CommonMark at the moment, so this adds them
    after parsing: every heading of exactly *level* starts a new section
    that absorbs the following siblings, and the function recurses into
    each new section with ``level + 1`` so deeper headings nest further.
    Children that appear before the first matching heading stay directly
    under *block*.  If no child heading has level *level*, nothing is done.
    """
    cur = block.first_child
    if cur is not None:
        children = []
        # Do we need to do anything?  Scan for a heading at this level.
        nest = False
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                nest = True
                break
            cur = cur.nxt
        if not nest:
            return
        section = Node('MDsection', 0)
        section.parent = block
        # Second pass: partition the children into pre-heading nodes and
        # per-heading sections.
        cur = block.first_child
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                # Found a split point, flush the last section if needed
                if section.first_child is not None:
                    finalizeSection(section)
                    children.append(section)
                    section = Node('MDsection', 0)
            # Save the successor now: append_child rewires cur.nxt.
            nxt = cur.nxt
            # Avoid adding sections without titles at the start
            if section.first_child is None:
                if cur.t == 'heading' and cur.level == level:
                    section.append_child(cur)
                else:
                    # Before the first heading: keep the node at this level.
                    children.append(cur)
            else:
                section.append_child(cur)
            cur = nxt
        # Flush the trailing section, if it collected anything.
        if section.first_child is not None:
            finalizeSection(section)
            children.append(section)
        # Rebuild block's child list from scratch out of `children`.
        block.first_child = None
        block.last_child = None
        nextLevel = level + 1
        for child in children:
            # Handle nesting: recurse so deeper headings split sub-sections.
            if child.t == 'MDsection':
                nestSections(child, level=nextLevel)
            # Append `child` to block's doubly linked child list.
            if block.first_child is None:
                block.first_child = child
            else:
                block.last_child.nxt = child
            child.parent = block
            child.nxt = None
            child.prev = block.last_child
            block.last_child = child
def parseMarkDownBlock(text):
    """
    Parse a block of Markdown text and return a list of docutils nodes.

    The text is parsed with CommonMark and the flat result is
    post-processed by ``nestSections`` so headings introduce nested
    sections before conversion.

    (The previous docstring carried a doctest that misspelled this
    function's name and claimed an empty result; it was removed rather
    than left misleading.)
    """
    block = Parser().parse(text)
    # CommonMark can't nest sections, so do it manually before converting.
    nestSections(block)
    return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | emphasis | python | def emphasis(node):
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o | An italicized section | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L96-L103 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
    """
    Convert a hyperlink.  Note that alt text doesn't work, since there's
    no apparent way to express it in docutils.
    """
    link = nodes.reference()
    link['refuri'] = node.destination
    if node.title:
        link['name'] = node.title
    for child in MarkDown(node):
        link += child
    return link
def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o
def literal(node):
"""
Inline code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def title(node):
"""
A title node. It has no children
"""
return nodes.title(node.first_child.literal, node.first_child.literal)
def section(node):
"""
A section in reStructuredText, which needs a title (the first child)
This is a custom type
"""
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o
def block_quote(node):
"""
A block quote
"""
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def image(node):
"""
An image element
The first child is the alt text. reStructuredText can't handle titles
"""
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
"""
Correct the nxt and parent for each child
"""
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt
def nestSections(block, level=1):
"""
Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done
"""
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child
def parseMarkDownBlock(text):
"""
Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[]
"""
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | strong | python | def strong(node):
o = nodes.strong()
for n in MarkDown(node):
o += n
return o | A bolded section | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L106-L113 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
"""
A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils
"""
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o
def emphasis(node):
"""
An italicized section
"""
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o
def literal(node):
"""
Inline code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def title(node):
"""
A title node. It has no children
"""
return nodes.title(node.first_child.literal, node.first_child.literal)
def section(node):
"""
A section in reStructuredText, which needs a title (the first child)
This is a custom type
"""
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o
def block_quote(node):
"""
A block quote
"""
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def image(node):
"""
An image element
The first child is the alt text. reStructuredText can't handle titles
"""
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
"""
Correct the nxt and parent for each child
"""
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt
def nestSections(block, level=1):
"""
Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done
"""
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child
def parseMarkDownBlock(text):
"""
Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[]
"""
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | literal | python | def literal(node):
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o | Inline code | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L116-L141 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
"""
A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils
"""
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o
def emphasis(node):
"""
An italicized section
"""
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o
def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def title(node):
"""
A title node. It has no children
"""
return nodes.title(node.first_child.literal, node.first_child.literal)
def section(node):
"""
A section in reStructuredText, which needs a title (the first child)
This is a custom type
"""
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o
def block_quote(node):
"""
A block quote
"""
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def image(node):
"""
An image element
The first child is the alt text. reStructuredText can't handle titles
"""
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
"""
Correct the nxt and parent for each child
"""
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt
def nestSections(block, level=1):
"""
Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done
"""
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child
def parseMarkDownBlock(text):
"""
Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[]
"""
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | raw | python | def raw(node):
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o | Add some raw html (possibly as a block) | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L173-L182 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
    """
    A convenience function to ease debugging. It prints the node structure
    returned from CommonMark, one node per line, using leading spaces to
    convey nesting. Nodes with a non-empty ``literal`` also print that text
    after a tab.

    >>> content = Parser().parse('Some big text block\\n===================\\n\\nwith content\\n')
    >>> customWalker(content)
    document
        heading
            text Some big text block
        paragraph
            text with content
    """
    # ``getattr`` with a default replaces the old bare ``try/except: pass``,
    # which also silenced KeyboardInterrupt/SystemExit. A missing or ``None``
    # literal both fall through to the text-less branch, as before.
    txt = getattr(node, 'literal', None)
    if txt is None or txt == '':
        print('{}{}'.format(space, node.t))
    else:
        print('{}{}\t{}'.format(space, node.t, txt))
    cur = node.first_child
    if cur:
        while cur is not None:
            customWalker(cur, space + '    ')
            cur = cur.nxt
def paragraph(node):
    """
    Process a paragraph, which includes all content under it.

    Returns a docutils ``paragraph`` node positioned at the CommonMark
    node's first source line, with all converted children appended.
    """
    text = ''
    if node.string_content is not None:
        text = node.string_content
    # NOTE(review): ``' '.join(text)`` iterates the *characters* of the
    # string, producing "a b c" from "abc" -- this looks unintended.
    # Confirm whether ``string_content`` is ever non-empty here before
    # changing it; the real content arrives via MarkDown(node) below.
    o = nodes.paragraph('', ' '.join(text))
    o.line = node.sourcepos[0][0]  # first source line of the paragraph
    for n in MarkDown(node):
        o.append(n)
    return o
def text(node):
    """Convert a plain CommonMark text node into a docutils Text leaf."""
    content = node.literal
    return nodes.Text(content)
def hardbreak(node):
    """Render a hard line break (``<br />`` in HTML) as a newline Text node."""
    newline = '\n'
    return nodes.Text(newline)
def softbreak(node):
    """Render a soft break (line ending or space) as a newline Text node."""
    newline = '\n'
    return nodes.Text(newline)
def reference(node):
    """
    Build a docutils reference (hyperlink) from a CommonMark link node.

    Note that alt text doesn't work, since there's no apparent way to do
    that in docutils.
    """
    link = nodes.reference()
    link['refuri'] = node.destination
    if node.title:
        link['name'] = node.title
    for child in MarkDown(node):
        link += child
    return link
def emphasis(node):
    """Wrap the node's converted children in a docutils emphasis (italics)."""
    wrapper = nodes.emphasis()
    for child in MarkDown(node):
        wrapper += child
    return wrapper
def strong(node):
    """Wrap the node's converted children in a docutils strong (bold) node."""
    wrapper = nodes.strong()
    for child in MarkDown(node):
        wrapper += child
    return wrapper
def literal(node):
    """
    Inline code.

    If the node carries a language hint (``info``), try to syntax-highlight
    it with docutils' Lexer; otherwise (or if highlighting fails) fall back
    to a plain literal containing the raw text.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for token_classes, token_text in lexer:
                # Fixed: this used to call ``node.inline`` (an attribute the
                # CommonMark node does not have); the AttributeError was
                # swallowed by a bare ``except`` so highlighting never ran.
                rendered.append(nodes.inline(classes=token_classes, text=token_text))
    except Exception:
        # Highlighting is best-effort; discard any partial output and fall
        # back to the unhighlighted text below.
        rendered = []
    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if rendered:
        o = nodes.literal(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal(text=node.literal, classes=classes)
    for n in MarkDown(node):
        o += n
    return o
def literal_block(node):
    """
    A block of code.

    If the fence carries a language hint (``info``), try to syntax-highlight
    it with docutils' Lexer; otherwise (or if highlighting fails) emit a
    plain literal block with the raw text.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for token_classes, token_text in lexer:
                # Fixed: this used to call ``node.inline`` (an attribute the
                # CommonMark node does not have); the AttributeError was
                # swallowed by a bare ``except`` so highlighting never ran.
                rendered.append(nodes.inline(classes=token_classes, text=token_text))
    except Exception:
        # Highlighting is best-effort; discard any partial output and fall
        # back to the unhighlighted text below.
        rendered = []
    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if rendered:
        o = nodes.literal_block(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal_block(text=node.literal, classes=classes)
    o.line = node.sourcepos[0][0]  # first source line of the fence
    for n in MarkDown(node):
        o += n
    return o
def transition(node):
    """Render a thematic break (an ``<hr>`` in HTML); it has no children."""
    return nodes.transition()
def title(node):
    """Render a heading; its text is the literal of its first child."""
    heading_text = node.first_child.literal
    return nodes.title(heading_text, heading_text)
def section(node):
    """
    A section in reStructuredText, which needs a title (the first child).
    This is a custom node type (``MDsection``) produced by nestSections.
    """
    # Every section needs an id; use the heading text when there is one.
    heading = ''
    first = node.first_child
    if first is not None and first.t == u'heading':
        heading = first.first_child.literal
    result = nodes.section(ids=[heading], names=[heading])
    for child in MarkDown(node):
        result += child
    return result
def block_quote(node):
    """Render a block quote, converting and appending its children."""
    quote = nodes.block_quote()
    quote.line = node.sourcepos[0][0]
    for child in MarkDown(node):
        quote += child
    return quote
def image(node):
    """
    Render an image element.

    The first child (if any) supplies the alt text; reStructuredText
    cannot represent image titles.
    """
    img = nodes.image(uri=node.destination)
    if node.first_child is not None:
        img['alt'] = node.first_child.literal
    return img
def listItem(node):
    """Render a single list item with its converted children."""
    item = nodes.list_item()
    for child in MarkDown(node):
        item += child
    return item
def listNode(node):
    """
    Render a list, bulleted or enumerated.

    For numbered lists, the suffix is only rendered as ``.`` in html.
    """
    data = node.list_data
    if data['type'] == u'bullet':
        rendered = nodes.bullet_list(bullet=data['bullet_char'])
    else:
        rendered = nodes.enumerated_list(
            suffix=data['delimiter'], enumtype='arabic', start=data['start'])
    for child in MarkDown(node):
        rendered += child
    return rendered
def MarkDown(node):
    """
    Convert the children of a CommonMark node into docutils nodes.

    Returns a list of docutils nodes, one per recognized child; a child of
    unknown type is reported on stdout (with a full dump) and skipped.
    """
    # Dispatch table: CommonMark node type -> converter function.
    handlers = {
        'paragraph': paragraph,
        'text': text,
        'softbreak': softbreak,
        'linebreak': hardbreak,
        'link': reference,
        'heading': title,
        'emph': emphasis,
        'strong': strong,
        'code': literal,
        'code_block': literal_block,
        'html_inline': raw,
        'html_block': raw,
        'block_quote': block_quote,
        'thematic_break': transition,
        'image': image,
        'list': listNode,
        'item': listItem,
        'MDsection': section,
    }
    output = []
    child = node.first_child
    while child is not None:
        handler = handlers.get(child.t)
        if handler is not None:
            output.append(handler(child))
        else:
            print('Received unhandled type: {}. Full print of node:'.format(child.t))
            child.pretty()
        child = child.nxt
    return output
def finalizeSection(section):
    """
    Fix up bookkeeping links on a freshly assembled section.

    Terminates the sibling chain at ``last_child`` and points every child's
    ``parent`` back at the section.
    """
    tail = section.last_child
    if tail is not None:
        tail.nxt = None
    child = section.first_child
    while child is not None:
        child.parent = section
        child = child.nxt
def nestSections(block, level=1):
    """
    Sections aren't handled by CommonMark at the moment.
    This function adds sections to a block of nodes: runs of children
    following a level-``level`` heading are moved under a synthetic
    'MDsection' node, and the process recurses one level deeper inside
    each new section. If no child heading of exactly ``level`` exists,
    nothing is done.
    """
    cur = block.first_child
    if cur is not None:
        children = []
        # Do we need to do anything? Scan once for a heading at this level.
        nest = False
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                nest = True
                break
            cur = cur.nxt
        if not nest:
            return
        # 'MDsection' is a custom node type consumed by section()/MarkDown().
        section = Node('MDsection', 0)
        section.parent = block
        cur = block.first_child
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                # Found a split point, flush the last section if needed
                if section.first_child is not None:
                    finalizeSection(section)
                    children.append(section)
                    section = Node('MDsection', 0)
            # Capture the successor now: append_child rewires cur.nxt.
            nxt = cur.nxt
            # Avoid adding sections without titles at the start: nodes seen
            # before the first level-N heading stay direct children of block.
            if section.first_child is None:
                if cur.t == 'heading' and cur.level == level:
                    section.append_child(cur)
                else:
                    children.append(cur)
            else:
                section.append_child(cur)
            cur = nxt
        # Flush the trailing section, if it accumulated anything.
        if section.first_child is not None:
            finalizeSection(section)
            children.append(section)
        # Rebuild block's child chain from scratch out of `children`.
        block.first_child = None
        block.last_child = None
        nextLevel = level + 1
        for child in children:
            # Handle nesting: recurse into each new section one level deeper.
            if child.t == 'MDsection':
                nestSections(child, level=nextLevel)
            # Append `child` to the doubly-linked sibling list of `block`.
            if block.first_child is None:
                block.first_child = child
            else:
                block.last_child.nxt = child
            child.parent = block
            child.nxt = None
            child.prev = block.last_child
            block.last_child = child
def parseMarkDownBlock(text):
    """
    Parse a block of markdown text, returning a list of docutils nodes.

    >>> parseMarkDownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
    []
    """
    ast = Parser().parse(text)
    # CommonMark can't nest sections, so do it manually before converting.
    nestSections(ast)
    return MarkDown(ast)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | title | python | def title(node):
return nodes.title(node.first_child.literal, node.first_child.literal) | A title node. It has no children | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L192-L196 | null | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
"""
A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils
"""
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o
def emphasis(node):
"""
An italicized section
"""
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o
def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o
def literal(node):
"""
Inline code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def section(node):
"""
A section in reStructuredText, which needs a title (the first child)
This is a custom type
"""
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o
def block_quote(node):
"""
A block quote
"""
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def image(node):
"""
An image element
The first child is the alt text. reStructuredText can't handle titles
"""
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
"""
Correct the nxt and parent for each child
"""
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt
def nestSections(block, level=1):
"""
Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done
"""
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child
def parseMarkDownBlock(text):
"""
Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[]
"""
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | section | python | def section(node):
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o | A section in reStructuredText, which needs a title (the first child)
This is a custom type | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L199-L211 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
"""
A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils
"""
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o
def emphasis(node):
"""
An italicized section
"""
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o
def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o
def literal(node):
"""
Inline code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def title(node):
"""
A title node. It has no children
"""
return nodes.title(node.first_child.literal, node.first_child.literal)
def block_quote(node):
"""
A block quote
"""
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def image(node):
"""
An image element
The first child is the alt text. reStructuredText can't handle titles
"""
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
"""
Correct the nxt and parent for each child
"""
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt
def nestSections(block, level=1):
"""
Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done
"""
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child
def parseMarkDownBlock(text):
"""
Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[]
"""
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | block_quote | python | def block_quote(node):
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o | A block quote | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L214-L222 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
"""
A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils
"""
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o
def emphasis(node):
"""
An italicized section
"""
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o
def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o
def literal(node):
"""
Inline code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def title(node):
"""
A title node. It has no children
"""
return nodes.title(node.first_child.literal, node.first_child.literal)
def section(node):
"""
A section in reStructuredText, which needs a title (the first child)
This is a custom type
"""
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o
def image(node):
"""
An image element
The first child is the alt text. reStructuredText can't handle titles
"""
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
"""
Correct the nxt and parent for each child
"""
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt
def nestSections(block, level=1):
"""
Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done
"""
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child
def parseMarkDownBlock(text):
"""
Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[]
"""
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | image | python | def image(node):
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o | An image element
The first child is the alt text. reStructuredText can't handle titles | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L225-L234 | null | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
"""
A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils
"""
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o
def emphasis(node):
"""
An italicized section
"""
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o
def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o
def literal(node):
"""
Inline code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def title(node):
"""
A title node. It has no children
"""
return nodes.title(node.first_child.literal, node.first_child.literal)
def section(node):
"""
A section in reStructuredText, which needs a title (the first child)
This is a custom type
"""
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o
def block_quote(node):
"""
A block quote
"""
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def listItem(node):
"""
An item in a list
"""
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
"""
Correct the nxt and parent for each child
"""
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt
def nestSections(block, level=1):
"""
Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done
"""
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child
def parseMarkDownBlock(text):
"""
Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[]
"""
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | listItem | python | def listItem(node):
o = nodes.list_item()
for n in MarkDown(node):
o += n
return o | An item in a list | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L237-L244 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
"""
Text in a paragraph
"""
return nodes.Text(node.literal)
def hardbreak(node):
"""
A <br /> in html or "\n" in ascii
"""
return nodes.Text('\n')
def softbreak(node):
"""
A line ending or space.
"""
return nodes.Text('\n')
def reference(node):
"""
A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils
"""
o = nodes.reference()
o['refuri'] = node.destination
if node.title:
o['name'] = node.title
for n in MarkDown(node):
o += n
return o
def emphasis(node):
"""
An italicized section
"""
o = nodes.emphasis()
for n in MarkDown(node):
o += n
return o
def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n
return o
def literal(node):
"""
Inline code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal(text=node.literal, classes=classes)
for n in MarkDown(node):
o += n
return o
def literal_block(node):
"""
A block of code
"""
rendered = []
try:
if node.info is not None:
l = Lexer(node.literal, node.info, tokennames="long")
for _ in l:
rendered.append(node.inline(classes=_[0], text=_[1]))
except:
pass
classes = ['code']
if node.info is not None:
classes.append(node.info)
if len(rendered) > 0:
o = nodes.literal_block(classes=classes)
for element in rendered:
o += element
else:
o = nodes.literal_block(text=node.literal, classes=classes)
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def transition(node):
"""
An <hr> tag in html. This has no children
"""
return nodes.transition()
def title(node):
"""
A title node. It has no children
"""
return nodes.title(node.first_child.literal, node.first_child.literal)
def section(node):
"""
A section in reStructuredText, which needs a title (the first child)
This is a custom type
"""
title = '' # All sections need an id
if node.first_child is not None:
if node.first_child.t == u'heading':
title = node.first_child.first_child.literal
o = nodes.section(ids=[title], names=[title])
for n in MarkDown(node):
o += n
return o
def block_quote(node):
"""
A block quote
"""
o = nodes.block_quote()
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o
def image(node):
"""
An image element
The first child is the alt text. reStructuredText can't handle titles
"""
o = nodes.image(uri=node.destination)
if node.first_child is not None:
o['alt'] = node.first_child.literal
return o
def listNode(node):
"""
A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html
"""
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o
def MarkDown(node):
"""
Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
"""
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output
def finalizeSection(section):
"""
Correct the nxt and parent for each child
"""
cur = section.first_child
last = section.last_child
if last is not None:
last.nxt = None
while cur is not None:
cur.parent = section
cur = cur.nxt
def nestSections(block, level=1):
"""
Sections aren't handled by CommonMark at the moment.
This function adds sections to a block of nodes.
'title' nodes with an assigned level below 'level' will be put in a child section.
If there are no child nodes with titles of level 'level' then nothing is done
"""
cur = block.first_child
if cur is not None:
children = []
# Do we need to do anything?
nest = False
while cur is not None:
if cur.t == 'heading' and cur.level == level:
nest = True
break
cur = cur.nxt
if not nest:
return
section = Node('MDsection', 0)
section.parent = block
cur = block.first_child
while cur is not None:
if cur.t == 'heading' and cur.level == level:
# Found a split point, flush the last section if needed
if section.first_child is not None:
finalizeSection(section)
children.append(section)
section = Node('MDsection', 0)
nxt = cur.nxt
# Avoid adding sections without titles at the start
if section.first_child is None:
if cur.t == 'heading' and cur.level == level:
section.append_child(cur)
else:
children.append(cur)
else:
section.append_child(cur)
cur = nxt
# If there's only 1 child then don't bother
if section.first_child is not None:
finalizeSection(section)
children.append(section)
block.first_child = None
block.last_child = None
nextLevel = level + 1
for child in children:
# Handle nesting
if child.t == 'MDsection':
nestSections(child, level=nextLevel)
# Append
if block.first_child is None:
block.first_child = child
else:
block.last_child.nxt = child
child.parent = block
child.nxt = None
child.prev = block.last_child
block.last_child = child
def parseMarkDownBlock(text):
"""
Parses a block of text, returning a list of docutils nodes
>>> parseMarkdownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
[]
"""
block = Parser().parse(text)
# CommonMark can't nest sections, so do it manually
nestSections(block)
return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | listNode | python | def listNode(node):
if node.list_data['type'] == u'bullet':
o = nodes.bullet_list(bullet=node.list_data['bullet_char'])
else:
o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])
for n in MarkDown(node):
o += n
return o | A list (numbered or not)
For numbered lists, the suffix is only rendered as . in html | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L247-L258 | [
"def MarkDown(node):\n \"\"\"\n Returns a list of nodes, containing CommonMark nodes converted to docutils nodes\n \"\"\"\n cur = node.first_child\n\n # Go into each child, in turn\n output = []\n while cur is not None:\n t = cur.t\n if t == 'paragraph':\n output.append(paragraph(cur))\n elif t == 'text':\n output.append(text(cur))\n elif t == 'softbreak':\n output.append(softbreak(cur))\n elif t == 'linebreak':\n output.append(hardbreak(cur))\n elif t == 'link':\n output.append(reference(cur))\n elif t == 'heading':\n output.append(title(cur))\n elif t == 'emph':\n output.append(emphasis(cur))\n elif t == 'strong':\n output.append(strong(cur))\n elif t == 'code':\n output.append(literal(cur))\n elif t == 'code_block':\n output.append(literal_block(cur))\n elif t == 'html_inline' or t == 'html_block':\n output.append(raw(cur))\n elif t == 'block_quote':\n output.append(block_quote(cur))\n elif t == 'thematic_break':\n output.append(transition(cur))\n elif t == 'image':\n output.append(image(cur))\n elif t == 'list':\n output.append(listNode(cur))\n elif t == 'item':\n output.append(listItem(cur))\n elif t == 'MDsection':\n output.append(section(cur))\n else:\n print('Received unhandled type: {}. Full print of node:'.format(t))\n cur.pretty()\n\n cur = cur.nxt\n\n return output\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
"""
A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
The usage would be something like:
>>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
>>> customWalker(content)
document
heading
text Some big text block
paragraph
text with content
Spaces are used to convey nesting
"""
txt = ''
try:
txt = node.literal
except:
pass
if txt is None or txt == '':
print('{}{}'.format(space, node.t))
else:
print('{}{}\t{}'.format(space, node.t, txt))
cur = node.first_child
if cur:
while cur is not None:
customWalker(cur, space + ' ')
cur = cur.nxt
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
def text(node):
    """Convert a CommonMark text node into a docutils ``Text`` leaf."""
    content = node.literal
    return nodes.Text(content)
def hardbreak(node):
    """Render a hard line break (``<br />`` in HTML) as a literal newline."""
    newline = '\n'
    return nodes.Text(newline)
def softbreak(node):
    """Render a soft break (line ending or space in the source) as a newline."""
    newline = '\n'
    return nodes.Text(newline)
def reference(node):
    """
    Build a docutils hyperlink (``reference``) node.

    Alt text is not supported, since there is no apparent way to express it
    in docutils.
    """
    ref = nodes.reference()
    ref['refuri'] = node.destination
    if node.title:
        ref['name'] = node.title
    for child in MarkDown(node):
        ref += child
    return ref
def emphasis(node):
    """Italicized span; children are converted and appended."""
    em = nodes.emphasis()
    for child in MarkDown(node):
        em += child
    return em
def strong(node):
    """Bolded span; children are converted and appended."""
    bold = nodes.strong()
    for child in MarkDown(node):
        bold += child
    return bold
def literal(node):
    """
    Inline code.

    If ``node.info`` names a language, try to syntax-highlight the literal via
    the docutils Lexer; on any lexer failure fall back to plain text.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for token_classes, token_text in lexer:
                # BUG FIX: was ``node.inline`` -- CommonMark nodes have no
                # ``inline`` attribute, so this always raised and was silently
                # swallowed by the bare except, meaning highlighting never
                # happened. The docutils element is ``nodes.inline``.
                rendered.append(nodes.inline(classes=token_classes, text=token_text))
    except Exception:
        # Lexer can fail on unknown languages; discard any partial result and
        # fall back to an unhighlighted literal.
        rendered = []

    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if len(rendered) > 0:
        o = nodes.literal(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal(text=node.literal, classes=classes)

    for n in MarkDown(node):
        o += n
    return o
def literal_block(node):
    """
    A block of code.

    If ``node.info`` names a language, try to syntax-highlight the block via
    the docutils Lexer; on any lexer failure fall back to plain text.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for token_classes, token_text in lexer:
                # BUG FIX: was ``node.inline`` -- CommonMark nodes have no
                # ``inline`` attribute, so this always raised and was silently
                # swallowed by the bare except, meaning highlighting never
                # happened. The docutils element is ``nodes.inline``.
                rendered.append(nodes.inline(classes=token_classes, text=token_text))
    except Exception:
        # Lexer can fail on unknown languages; discard any partial result and
        # fall back to an unhighlighted block.
        rendered = []

    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if len(rendered) > 0:
        o = nodes.literal_block(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal_block(text=node.literal, classes=classes)

    # sourcepos is ((start_line, start_col), (end_line, end_col)).
    o.line = node.sourcepos[0][0]
    for n in MarkDown(node):
        o += n
    return o
def raw(node):
    """Wrap inline or block HTML in a docutils ``raw`` node (format html)."""
    html_node = nodes.raw(node.literal, node.literal, format='html')
    if node.sourcepos is not None:
        # sourcepos is ((start_line, start_col), (end_line, end_col)).
        html_node.line = node.sourcepos[0][0]
    for child in MarkDown(node):
        html_node += child
    return html_node
def transition(node):
    """Horizontal rule (``<hr>`` in HTML); a leaf node with no children."""
    return nodes.transition()
def title(node):
    """Heading node; its sole child carries the literal heading text."""
    heading_text = node.first_child.literal
    return nodes.title(heading_text, heading_text)
def section(node):
    """
    Custom 'MDsection' container: a reStructuredText section whose id/name
    is taken from its leading heading (empty string when there is none).
    """
    section_id = ''  # every docutils section needs an id
    first = node.first_child
    if first is not None and first.t == u'heading':
        section_id = first.first_child.literal
    result = nodes.section(ids=[section_id], names=[section_id])
    for child in MarkDown(node):
        result += child
    return result
def block_quote(node):
    """Block quote; children are converted and appended."""
    quote = nodes.block_quote()
    # sourcepos is ((start_line, start_col), (end_line, end_col)).
    quote.line = node.sourcepos[0][0]
    for child in MarkDown(node):
        quote += child
    return quote
def image(node):
    """
    Image element; the first child (if any) supplies the alt text.
    reStructuredText cannot express image titles, so the title is dropped.
    """
    img = nodes.image(uri=node.destination)
    alt_source = node.first_child
    if alt_source is not None:
        img['alt'] = alt_source.literal
    return img
def listItem(node):
    """One ``list_item`` inside a bullet or enumerated list."""
    item = nodes.list_item()
    for child in MarkDown(node):
        item += child
    return item
def MarkDown(node):
    """
    Returns a list of nodes, containing CommonMark nodes converted to docutils nodes
    """
    # Map each CommonMark node type to its converter. Built inside the call so
    # every name resolves to the current module-level definition.
    handlers = {
        'paragraph': paragraph,
        'text': text,
        'softbreak': softbreak,
        'linebreak': hardbreak,
        'link': reference,
        'heading': title,
        'emph': emphasis,
        'strong': strong,
        'code': literal,
        'code_block': literal_block,
        'html_inline': raw,
        'html_block': raw,
        'block_quote': block_quote,
        'thematic_break': transition,
        'image': image,
        'list': listNode,
        'item': listItem,
        'MDsection': section,
    }
    output = []
    cur = node.first_child
    while cur is not None:
        handler = handlers.get(cur.t)
        if handler is not None:
            output.append(handler(cur))
        else:
            print('Received unhandled type: {}. Full print of node:'.format(cur.t))
            cur.pretty()
        cur = cur.nxt
    return output
def finalizeSection(section):
    """
    Correct the nxt and parent pointers for each child of *section*:
    the last child's ``nxt`` becomes None and every child's ``parent``
    is set to *section*.
    """
    tail = section.last_child
    if tail is not None:
        tail.nxt = None
    child = section.first_child
    while child is not None:
        child.parent = section
        child = child.nxt
def nestSections(block, level=1):
    """
    Sections aren't handled by CommonMark at the moment.
    This function adds sections to a block of nodes.
    'title' nodes with an assigned level below 'level' will be put in a child section.
    If there are no child nodes with titles of level 'level' then nothing is done

    Works in place: *block*'s child list is rebuilt so that every heading of
    exactly *level* starts a new 'MDsection' node, then recurses one level
    deeper into each new section.
    """
    cur = block.first_child
    if cur is not None:
        children = []
        # Do we need to do anything?
        nest = False
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                nest = True
                break
            cur = cur.nxt
        if not nest:
            return
        section = Node('MDsection', 0)
        section.parent = block
        cur = block.first_child
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                # Found a split point, flush the last section if needed
                if section.first_child is not None:
                    finalizeSection(section)
                    children.append(section)
                    section = Node('MDsection', 0)
            # append_child rewires cur.nxt, so remember the successor first.
            nxt = cur.nxt
            # Avoid adding sections without titles at the start
            if section.first_child is None:
                if cur.t == 'heading' and cur.level == level:
                    section.append_child(cur)
                else:
                    # Leading nodes before the first heading stay at this level.
                    children.append(cur)
            else:
                section.append_child(cur)
            cur = nxt
        # If there's only 1 child then don't bother
        if section.first_child is not None:
            finalizeSection(section)
            children.append(section)
        # Rebuild block's child list from scratch out of `children`.
        block.first_child = None
        block.last_child = None
        nextLevel = level + 1
        for child in children:
            # Handle nesting
            if child.t == 'MDsection':
                nestSections(child, level=nextLevel)
            # Append
            if block.first_child is None:
                block.first_child = child
            else:
                block.last_child.nxt = child
            child.parent = block
            child.nxt = None
            child.prev = block.last_child
            block.last_child = child
def parseMarkDownBlock(text):
    """
    Parses a block of text, returning a list of docutils nodes

    >>> parseMarkDownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
    []

    NOTE(review): the doctest above looks stale -- the original misspelled the
    function name ('parseMarkdownBlock') and the expected output appears wrong
    for this input; verify before enabling doctests.
    """
    block = Parser().parse(text)
    # CommonMark can't nest sections, so do it manually
    nestSections(block)
    return MarkDown(block)
|
ribozz/sphinx-argparse | sphinxarg/markdown.py | MarkDown | python | def MarkDown(node):
cur = node.first_child
# Go into each child, in turn
output = []
while cur is not None:
t = cur.t
if t == 'paragraph':
output.append(paragraph(cur))
elif t == 'text':
output.append(text(cur))
elif t == 'softbreak':
output.append(softbreak(cur))
elif t == 'linebreak':
output.append(hardbreak(cur))
elif t == 'link':
output.append(reference(cur))
elif t == 'heading':
output.append(title(cur))
elif t == 'emph':
output.append(emphasis(cur))
elif t == 'strong':
output.append(strong(cur))
elif t == 'code':
output.append(literal(cur))
elif t == 'code_block':
output.append(literal_block(cur))
elif t == 'html_inline' or t == 'html_block':
output.append(raw(cur))
elif t == 'block_quote':
output.append(block_quote(cur))
elif t == 'thematic_break':
output.append(transition(cur))
elif t == 'image':
output.append(image(cur))
elif t == 'list':
output.append(listNode(cur))
elif t == 'item':
output.append(listItem(cur))
elif t == 'MDsection':
output.append(section(cur))
else:
print('Received unhandled type: {}. Full print of node:'.format(t))
cur.pretty()
cur = cur.nxt
return output | Returns a list of nodes, containing CommonMark nodes converted to docutils nodes | train | https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L261-L311 | [
"def title(node):\n \"\"\"\n A title node. It has no children\n \"\"\"\n return nodes.title(node.first_child.literal, node.first_child.literal)\n",
"def text(node):\n \"\"\"\n Text in a paragraph\n \"\"\"\n return nodes.Text(node.literal)\n",
"def section(node):\n \"\"\"\n A section in reStructuredText, which needs a title (the first child)\n This is a custom type\n \"\"\"\n title = '' # All sections need an id\n if node.first_child is not None:\n if node.first_child.t == u'heading':\n title = node.first_child.first_child.literal\n o = nodes.section(ids=[title], names=[title])\n for n in MarkDown(node):\n o += n\n return o\n",
"def raw(node):\n \"\"\"\n Add some raw html (possibly as a block)\n \"\"\"\n o = nodes.raw(node.literal, node.literal, format='html')\n if node.sourcepos is not None:\n o.line = node.sourcepos[0][0]\n for n in MarkDown(node):\n o += n\n return o\n",
"def reference(node):\n \"\"\"\n A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils\n \"\"\"\n o = nodes.reference()\n o['refuri'] = node.destination\n if node.title:\n o['name'] = node.title\n for n in MarkDown(node):\n o += n\n return o\n",
"def image(node):\n \"\"\"\n An image element\n\n The first child is the alt text. reStructuredText can't handle titles\n \"\"\"\n o = nodes.image(uri=node.destination)\n if node.first_child is not None:\n o['alt'] = node.first_child.literal\n return o\n",
"def literal_block(node):\n \"\"\"\n A block of code\n \"\"\"\n rendered = []\n try:\n if node.info is not None:\n l = Lexer(node.literal, node.info, tokennames=\"long\")\n for _ in l:\n rendered.append(node.inline(classes=_[0], text=_[1]))\n except:\n pass\n\n classes = ['code']\n if node.info is not None:\n classes.append(node.info)\n if len(rendered) > 0:\n o = nodes.literal_block(classes=classes)\n for element in rendered:\n o += element\n else:\n o = nodes.literal_block(text=node.literal, classes=classes)\n\n o.line = node.sourcepos[0][0]\n for n in MarkDown(node):\n o += n\n return o\n",
"def paragraph(node):\n \"\"\"\n Process a paragraph, which includes all content under it\n \"\"\"\n text = ''\n if node.string_content is not None:\n text = node.string_content\n o = nodes.paragraph('', ' '.join(text))\n o.line = node.sourcepos[0][0]\n for n in MarkDown(node):\n o.append(n)\n\n return o\n",
"def strong(node):\n \"\"\"\n A bolded section\n \"\"\"\n o = nodes.strong()\n for n in MarkDown(node):\n o += n\n return o\n",
"def literal(node):\n \"\"\"\n Inline code\n \"\"\"\n rendered = []\n try:\n if node.info is not None:\n l = Lexer(node.literal, node.info, tokennames=\"long\")\n for _ in l:\n rendered.append(node.inline(classes=_[0], text=_[1]))\n except:\n pass\n\n classes = ['code']\n if node.info is not None:\n classes.append(node.info)\n if len(rendered) > 0:\n o = nodes.literal(classes=classes)\n for element in rendered:\n o += element\n else:\n o = nodes.literal(text=node.literal, classes=classes)\n\n for n in MarkDown(node):\n o += n\n return o\n",
"def hardbreak(node):\n \"\"\"\n A <br /> in html or \"\\n\" in ascii\n \"\"\"\n return nodes.Text('\\n')\n",
"def softbreak(node):\n \"\"\"\n A line ending or space.\n \"\"\"\n return nodes.Text('\\n')\n",
"def emphasis(node):\n \"\"\"\n An italicized section\n \"\"\"\n o = nodes.emphasis()\n for n in MarkDown(node):\n o += n\n return o\n",
"def transition(node):\n \"\"\"\n An <hr> tag in html. This has no children\n \"\"\"\n return nodes.transition()\n",
"def block_quote(node):\n \"\"\"\n A block quote\n \"\"\"\n o = nodes.block_quote()\n o.line = node.sourcepos[0][0]\n for n in MarkDown(node):\n o += n\n return o\n",
"def listItem(node):\n \"\"\"\n An item in a list\n \"\"\"\n o = nodes.list_item()\n for n in MarkDown(node):\n o += n\n return o\n",
"def listNode(node):\n \"\"\"\n A list (numbered or not)\n For numbered lists, the suffix is only rendered as . in html\n \"\"\"\n if node.list_data['type'] == u'bullet':\n o = nodes.bullet_list(bullet=node.list_data['bullet_char'])\n else:\n o = nodes.enumerated_list(suffix=node.list_data['delimiter'], enumtype='arabic', start=node.list_data['start'])\n for n in MarkDown(node):\n o += n\n return o\n"
] | try:
from commonmark import Parser
except ImportError:
from CommonMark import Parser # >= 0.5.6
try:
from commonmark.node import Node
except ImportError:
from CommonMark.node import Node
from docutils import nodes
from docutils.utils.code_analyzer import Lexer
def customWalker(node, space=''):
    """
    A convenience function to ease debugging. It will print the node structure that's returned from CommonMark
    The usage would be something like:
    >>> content = Parser().parse('Some big text block\n===================\n\nwith content\n')
    >>> customWalker(content)
    document
     heading
      text Some big text block
     paragraph
      text with content
    Spaces are used to convey nesting
    """
    txt = ''
    try:
        txt = node.literal
    except AttributeError:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit): only some node types carry ``literal``.
        pass
    if txt is None or txt == '':
        print('{}{}'.format(space, node.t))
    else:
        print('{}{}\t{}'.format(space, node.t, txt))
    # Recurse into the children, one indent level deeper.
    cur = node.first_child
    if cur:
        while cur is not None:
            customWalker(cur, space + ' ')
            cur = cur.nxt
def paragraph(node):
    """
    Process a paragraph, which includes all content under it

    Returns a docutils ``paragraph`` node whose children are the converted
    CommonMark children of *node*.
    """
    # NOTE(review): ``text`` is a str, so ``' '.join(text)`` interleaves a
    # space between every character of the raw source. Presumably only the
    # converted children matter for rendering -- confirm before changing.
    text = ''
    if node.string_content is not None:
        text = node.string_content
    o = nodes.paragraph('', ' '.join(text))
    # sourcepos is ((start_line, start_col), (end_line, end_col)); keep the
    # start line for error reporting.
    o.line = node.sourcepos[0][0]
    for n in MarkDown(node):
        o.append(n)
    return o
def text(node):
    """Convert a CommonMark text node into a docutils ``Text`` leaf."""
    content = node.literal
    return nodes.Text(content)
def hardbreak(node):
    """Render a hard line break (``<br />`` in HTML) as a literal newline."""
    newline = '\n'
    return nodes.Text(newline)
def softbreak(node):
    """Render a soft break (line ending or space in the source) as a newline."""
    newline = '\n'
    return nodes.Text(newline)
def reference(node):
    """
    Build a docutils hyperlink (``reference``) node.

    Alt text is not supported, since there is no apparent way to express it
    in docutils.
    """
    ref = nodes.reference()
    ref['refuri'] = node.destination
    if node.title:
        ref['name'] = node.title
    for child in MarkDown(node):
        ref += child
    return ref
def emphasis(node):
    """Italicized span; children are converted and appended."""
    em = nodes.emphasis()
    for child in MarkDown(node):
        em += child
    return em
def strong(node):
    """Bolded span; children are converted and appended."""
    bold = nodes.strong()
    for child in MarkDown(node):
        bold += child
    return bold
def literal(node):
    """
    Inline code.

    If ``node.info`` names a language, try to syntax-highlight the literal via
    the docutils Lexer; on any lexer failure fall back to plain text.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for token_classes, token_text in lexer:
                # BUG FIX: was ``node.inline`` -- CommonMark nodes have no
                # ``inline`` attribute, so this always raised and was silently
                # swallowed by the bare except, meaning highlighting never
                # happened. The docutils element is ``nodes.inline``.
                rendered.append(nodes.inline(classes=token_classes, text=token_text))
    except Exception:
        # Lexer can fail on unknown languages; discard any partial result and
        # fall back to an unhighlighted literal.
        rendered = []

    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if len(rendered) > 0:
        o = nodes.literal(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal(text=node.literal, classes=classes)

    for n in MarkDown(node):
        o += n
    return o
def literal_block(node):
    """
    A block of code.

    If ``node.info`` names a language, try to syntax-highlight the block via
    the docutils Lexer; on any lexer failure fall back to plain text.
    """
    rendered = []
    try:
        if node.info is not None:
            lexer = Lexer(node.literal, node.info, tokennames="long")
            for token_classes, token_text in lexer:
                # BUG FIX: was ``node.inline`` -- CommonMark nodes have no
                # ``inline`` attribute, so this always raised and was silently
                # swallowed by the bare except, meaning highlighting never
                # happened. The docutils element is ``nodes.inline``.
                rendered.append(nodes.inline(classes=token_classes, text=token_text))
    except Exception:
        # Lexer can fail on unknown languages; discard any partial result and
        # fall back to an unhighlighted block.
        rendered = []

    classes = ['code']
    if node.info is not None:
        classes.append(node.info)
    if len(rendered) > 0:
        o = nodes.literal_block(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal_block(text=node.literal, classes=classes)

    # sourcepos is ((start_line, start_col), (end_line, end_col)).
    o.line = node.sourcepos[0][0]
    for n in MarkDown(node):
        o += n
    return o
def raw(node):
    """Wrap inline or block HTML in a docutils ``raw`` node (format html)."""
    html_node = nodes.raw(node.literal, node.literal, format='html')
    if node.sourcepos is not None:
        # sourcepos is ((start_line, start_col), (end_line, end_col)).
        html_node.line = node.sourcepos[0][0]
    for child in MarkDown(node):
        html_node += child
    return html_node
def transition(node):
    """Horizontal rule (``<hr>`` in HTML); a leaf node with no children."""
    return nodes.transition()
def title(node):
    """Heading node; its sole child carries the literal heading text."""
    heading_text = node.first_child.literal
    return nodes.title(heading_text, heading_text)
def section(node):
    """
    Custom 'MDsection' container: a reStructuredText section whose id/name
    is taken from its leading heading (empty string when there is none).
    """
    section_id = ''  # every docutils section needs an id
    first = node.first_child
    if first is not None and first.t == u'heading':
        section_id = first.first_child.literal
    result = nodes.section(ids=[section_id], names=[section_id])
    for child in MarkDown(node):
        result += child
    return result
def block_quote(node):
    """Block quote; children are converted and appended."""
    quote = nodes.block_quote()
    # sourcepos is ((start_line, start_col), (end_line, end_col)).
    quote.line = node.sourcepos[0][0]
    for child in MarkDown(node):
        quote += child
    return quote
def image(node):
    """
    Image element; the first child (if any) supplies the alt text.
    reStructuredText cannot express image titles, so the title is dropped.
    """
    img = nodes.image(uri=node.destination)
    alt_source = node.first_child
    if alt_source is not None:
        img['alt'] = alt_source.literal
    return img
def listItem(node):
    """One ``list_item`` inside a bullet or enumerated list."""
    item = nodes.list_item()
    for child in MarkDown(node):
        item += child
    return item
def listNode(node):
    """
    A list, bulleted or enumerated. For numbered lists the delimiter suffix
    is only rendered as '.' in HTML output.
    """
    data = node.list_data
    if data['type'] == u'bullet':
        result = nodes.bullet_list(bullet=data['bullet_char'])
    else:
        result = nodes.enumerated_list(
            suffix=data['delimiter'], enumtype='arabic', start=data['start'])
    for child in MarkDown(node):
        result += child
    return result
def finalizeSection(section):
    """
    Correct the nxt and parent pointers for each child of *section*:
    the last child's ``nxt`` becomes None and every child's ``parent``
    is set to *section*.
    """
    tail = section.last_child
    if tail is not None:
        tail.nxt = None
    child = section.first_child
    while child is not None:
        child.parent = section
        child = child.nxt
def nestSections(block, level=1):
    """
    Sections aren't handled by CommonMark at the moment.
    This function adds sections to a block of nodes.
    'title' nodes with an assigned level below 'level' will be put in a child section.
    If there are no child nodes with titles of level 'level' then nothing is done

    Works in place: *block*'s child list is rebuilt so that every heading of
    exactly *level* starts a new 'MDsection' node, then recurses one level
    deeper into each new section.
    """
    cur = block.first_child
    if cur is not None:
        children = []
        # Do we need to do anything?
        nest = False
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                nest = True
                break
            cur = cur.nxt
        if not nest:
            return
        section = Node('MDsection', 0)
        section.parent = block
        cur = block.first_child
        while cur is not None:
            if cur.t == 'heading' and cur.level == level:
                # Found a split point, flush the last section if needed
                if section.first_child is not None:
                    finalizeSection(section)
                    children.append(section)
                    section = Node('MDsection', 0)
            # append_child rewires cur.nxt, so remember the successor first.
            nxt = cur.nxt
            # Avoid adding sections without titles at the start
            if section.first_child is None:
                if cur.t == 'heading' and cur.level == level:
                    section.append_child(cur)
                else:
                    # Leading nodes before the first heading stay at this level.
                    children.append(cur)
            else:
                section.append_child(cur)
            cur = nxt
        # If there's only 1 child then don't bother
        if section.first_child is not None:
            finalizeSection(section)
            children.append(section)
        # Rebuild block's child list from scratch out of `children`.
        block.first_child = None
        block.last_child = None
        nextLevel = level + 1
        for child in children:
            # Handle nesting
            if child.t == 'MDsection':
                nestSections(child, level=nextLevel)
            # Append
            if block.first_child is None:
                block.first_child = child
            else:
                block.last_child.nxt = child
            child.parent = block
            child.nxt = None
            child.prev = block.last_child
            block.last_child = child
def parseMarkDownBlock(text):
    """
    Parses a block of text, returning a list of docutils nodes

    >>> parseMarkDownBlock("Some\n====\n\nblock of text\n\nHeader\n======\n\nblah\n")
    []

    NOTE(review): the doctest above looks stale -- the original misspelled the
    function name ('parseMarkdownBlock') and the expected output appears wrong
    for this input; verify before enabling doctests.
    """
    block = Parser().parse(text)
    # CommonMark can't nest sections, so do it manually
    nestSections(block)
    return MarkDown(block)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.