repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | _sum | def _sum(X, xmask=None, xconst=None, Y=None, ymask=None, yconst=None, symmetric=False, remove_mean=False,
weights=None):
r""" Computes the column sums and centered column sums.
If symmetric = False, the sums will be determined as
.. math:
sx &=& \frac{1}{2} \sum_t x_t
sy &=& \frac{1}{2} \sum_t y_t
If symmetric, the sums will be determined as
.. math:
sx = sy = \frac{1}{2T} \sum_t x_t + y_t
Returns
-------
w : float
statistical weight of sx, sy
sx : ndarray
effective row sum of X (including symmetrization if requested)
sx_raw_centered : ndarray
centered raw row sum of X
optional returns (only if Y is given):
sy : ndarray
effective row sum of X (including symmetrization if requested)
sy_raw_centered : ndarray
centered raw row sum of Y
"""
T = X.shape[0]
# Check if weights are given:
if weights is not None:
X = weights[:, None] * X
if Y is not None:
Y = weights[:, None] * Y
# compute raw sums on variable data
sx_raw = X.sum(axis=0) # this is the mean before subtracting it.
sy_raw = 0
if Y is not None:
sy_raw = Y.sum(axis=0)
# expand raw sums to full data
if xmask is not None:
if weights is not None:
sx_raw = _sum_sparse(sx_raw, xmask, xconst, weights.sum())
else:
sx_raw = _sum_sparse(sx_raw, xmask, xconst, T)
if ymask is not None:
if weights is not None:
sy_raw = _sum_sparse(sy_raw, ymask, yconst, weights.sum())
else:
sy_raw = _sum_sparse(sy_raw, ymask, yconst, T)
# compute effective sums and centered sums
if Y is not None and symmetric:
sx = sx_raw + sy_raw
sy = sx
if weights is not None:
w = 2*np.sum(weights)
else:
w = 2 * T
else:
sx = sx_raw
sy = sy_raw
if weights is not None:
w = np.sum(weights)
else:
w = T
sx_raw_centered = sx_raw.copy()
if Y is not None:
sy_raw_centered = sy_raw.copy()
# center mean.
if remove_mean:
if Y is not None and symmetric:
sx_raw_centered -= 0.5 * sx
sy_raw_centered -= 0.5 * sy
else:
sx_raw_centered = np.zeros(sx.size)
if Y is not None:
sy_raw_centered = np.zeros(sy.size)
# return
if Y is not None:
return w, sx, sx_raw_centered, sy, sy_raw_centered
else:
return w, sx, sx_raw_centered | python | def _sum(X, xmask=None, xconst=None, Y=None, ymask=None, yconst=None, symmetric=False, remove_mean=False,
weights=None):
r""" Computes the column sums and centered column sums.
If symmetric = False, the sums will be determined as
.. math:
sx &=& \frac{1}{2} \sum_t x_t
sy &=& \frac{1}{2} \sum_t y_t
If symmetric, the sums will be determined as
.. math:
sx = sy = \frac{1}{2T} \sum_t x_t + y_t
Returns
-------
w : float
statistical weight of sx, sy
sx : ndarray
effective row sum of X (including symmetrization if requested)
sx_raw_centered : ndarray
centered raw row sum of X
optional returns (only if Y is given):
sy : ndarray
effective row sum of X (including symmetrization if requested)
sy_raw_centered : ndarray
centered raw row sum of Y
"""
T = X.shape[0]
# Check if weights are given:
if weights is not None:
X = weights[:, None] * X
if Y is not None:
Y = weights[:, None] * Y
# compute raw sums on variable data
sx_raw = X.sum(axis=0) # this is the mean before subtracting it.
sy_raw = 0
if Y is not None:
sy_raw = Y.sum(axis=0)
# expand raw sums to full data
if xmask is not None:
if weights is not None:
sx_raw = _sum_sparse(sx_raw, xmask, xconst, weights.sum())
else:
sx_raw = _sum_sparse(sx_raw, xmask, xconst, T)
if ymask is not None:
if weights is not None:
sy_raw = _sum_sparse(sy_raw, ymask, yconst, weights.sum())
else:
sy_raw = _sum_sparse(sy_raw, ymask, yconst, T)
# compute effective sums and centered sums
if Y is not None and symmetric:
sx = sx_raw + sy_raw
sy = sx
if weights is not None:
w = 2*np.sum(weights)
else:
w = 2 * T
else:
sx = sx_raw
sy = sy_raw
if weights is not None:
w = np.sum(weights)
else:
w = T
sx_raw_centered = sx_raw.copy()
if Y is not None:
sy_raw_centered = sy_raw.copy()
# center mean.
if remove_mean:
if Y is not None and symmetric:
sx_raw_centered -= 0.5 * sx
sy_raw_centered -= 0.5 * sy
else:
sx_raw_centered = np.zeros(sx.size)
if Y is not None:
sy_raw_centered = np.zeros(sy.size)
# return
if Y is not None:
return w, sx, sx_raw_centered, sy, sy_raw_centered
else:
return w, sx, sx_raw_centered | [
"def",
"_sum",
"(",
"X",
",",
"xmask",
"=",
"None",
",",
"xconst",
"=",
"None",
",",
"Y",
"=",
"None",
",",
"ymask",
"=",
"None",
",",
"yconst",
"=",
"None",
",",
"symmetric",
"=",
"False",
",",
"remove_mean",
"=",
"False",
",",
"weights",
"=",
"... | r""" Computes the column sums and centered column sums.
If symmetric = False, the sums will be determined as
.. math:
sx &=& \frac{1}{2} \sum_t x_t
sy &=& \frac{1}{2} \sum_t y_t
If symmetric, the sums will be determined as
.. math:
sx = sy = \frac{1}{2T} \sum_t x_t + y_t
Returns
-------
w : float
statistical weight of sx, sy
sx : ndarray
effective row sum of X (including symmetrization if requested)
sx_raw_centered : ndarray
centered raw row sum of X
optional returns (only if Y is given):
sy : ndarray
effective row sum of X (including symmetrization if requested)
sy_raw_centered : ndarray
centered raw row sum of Y | [
"r",
"Computes",
"the",
"column",
"sums",
"and",
"centered",
"column",
"sums",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L234-L323 | train | 204,200 |
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | _center | def _center(X, w, s, mask=None, const=None, inplace=True):
""" Centers the data.
Parameters
----------
w : float
statistical weight of s
inplace : bool
center in place
Returns
-------
sx : ndarray
uncentered row sum of X
sx_centered : ndarray
row sum of X after centering
optional returns (only if Y is given):
sy_raw : ndarray
uncentered row sum of Y
sy_centered : ndarray
row sum of Y after centering
"""
xmean = s / float(w)
if mask is None:
X = np.subtract(X, xmean, out=X if inplace else None)
else:
X = np.subtract(X, xmean[mask], out=X if inplace else None)
const = np.subtract(const, xmean[~mask], const if inplace else None)
return X, const | python | def _center(X, w, s, mask=None, const=None, inplace=True):
""" Centers the data.
Parameters
----------
w : float
statistical weight of s
inplace : bool
center in place
Returns
-------
sx : ndarray
uncentered row sum of X
sx_centered : ndarray
row sum of X after centering
optional returns (only if Y is given):
sy_raw : ndarray
uncentered row sum of Y
sy_centered : ndarray
row sum of Y after centering
"""
xmean = s / float(w)
if mask is None:
X = np.subtract(X, xmean, out=X if inplace else None)
else:
X = np.subtract(X, xmean[mask], out=X if inplace else None)
const = np.subtract(const, xmean[~mask], const if inplace else None)
return X, const | [
"def",
"_center",
"(",
"X",
",",
"w",
",",
"s",
",",
"mask",
"=",
"None",
",",
"const",
"=",
"None",
",",
"inplace",
"=",
"True",
")",
":",
"xmean",
"=",
"s",
"/",
"float",
"(",
"w",
")",
"if",
"mask",
"is",
"None",
":",
"X",
"=",
"np",
"."... | Centers the data.
Parameters
----------
w : float
statistical weight of s
inplace : bool
center in place
Returns
-------
sx : ndarray
uncentered row sum of X
sx_centered : ndarray
row sum of X after centering
optional returns (only if Y is given):
sy_raw : ndarray
uncentered row sum of Y
sy_centered : ndarray
row sum of Y after centering | [
"Centers",
"the",
"data",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L326-L358 | train | 204,201 |
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | _filter_variable_indices | def _filter_variable_indices(mask, column_selection):
""" Returns column indices restricted to the variable columns as determined by the given mask.
Parameters
----------
mask : ndarray(N, dtype=bool)
Array indicating the variable columns.
column_selection : ndarray(k, dtype=int)
Column indices to be filtered and mapped.
Returns
-------
ix : ndarray(l, dtype=int)
Column indices restricted to the variable columns, mapped to the correct index range.
"""
a = np.where(mask)[0]
b = column_selection[np.in1d(column_selection, a)]
return np.searchsorted(a, b) | python | def _filter_variable_indices(mask, column_selection):
""" Returns column indices restricted to the variable columns as determined by the given mask.
Parameters
----------
mask : ndarray(N, dtype=bool)
Array indicating the variable columns.
column_selection : ndarray(k, dtype=int)
Column indices to be filtered and mapped.
Returns
-------
ix : ndarray(l, dtype=int)
Column indices restricted to the variable columns, mapped to the correct index range.
"""
a = np.where(mask)[0]
b = column_selection[np.in1d(column_selection, a)]
return np.searchsorted(a, b) | [
"def",
"_filter_variable_indices",
"(",
"mask",
",",
"column_selection",
")",
":",
"a",
"=",
"np",
".",
"where",
"(",
"mask",
")",
"[",
"0",
"]",
"b",
"=",
"column_selection",
"[",
"np",
".",
"in1d",
"(",
"column_selection",
",",
"a",
")",
"]",
"return... | Returns column indices restricted to the variable columns as determined by the given mask.
Parameters
----------
mask : ndarray(N, dtype=bool)
Array indicating the variable columns.
column_selection : ndarray(k, dtype=int)
Column indices to be filtered and mapped.
Returns
-------
ix : ndarray(l, dtype=int)
Column indices restricted to the variable columns, mapped to the correct index range. | [
"Returns",
"column",
"indices",
"restricted",
"to",
"the",
"variable",
"columns",
"as",
"determined",
"by",
"the",
"given",
"mask",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L361-L379 | train | 204,202 |
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | _M2_dense | def _M2_dense(X, Y, weights=None, diag_only=False):
""" 2nd moment matrix using dense matrix computations.
This function is encapsulated such that we can make easy modifications of the basic algorithms
"""
if weights is not None:
if diag_only:
return np.sum(weights[:, None] * X * Y, axis=0)
else:
return np.dot((weights[:, None] * X).T, Y)
else:
if diag_only:
return np.sum(X * Y, axis=0)
else:
return np.dot(X.T, Y) | python | def _M2_dense(X, Y, weights=None, diag_only=False):
""" 2nd moment matrix using dense matrix computations.
This function is encapsulated such that we can make easy modifications of the basic algorithms
"""
if weights is not None:
if diag_only:
return np.sum(weights[:, None] * X * Y, axis=0)
else:
return np.dot((weights[:, None] * X).T, Y)
else:
if diag_only:
return np.sum(X * Y, axis=0)
else:
return np.dot(X.T, Y) | [
"def",
"_M2_dense",
"(",
"X",
",",
"Y",
",",
"weights",
"=",
"None",
",",
"diag_only",
"=",
"False",
")",
":",
"if",
"weights",
"is",
"not",
"None",
":",
"if",
"diag_only",
":",
"return",
"np",
".",
"sum",
"(",
"weights",
"[",
":",
",",
"None",
"... | 2nd moment matrix using dense matrix computations.
This function is encapsulated such that we can make easy modifications of the basic algorithms | [
"2nd",
"moment",
"matrix",
"using",
"dense",
"matrix",
"computations",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L386-L401 | train | 204,203 |
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | _M2_const | def _M2_const(Xvar, mask_X, xvarsum, xconst, Yvar, mask_Y, yvarsum, yconst, weights=None):
r""" Computes the unnormalized covariance matrix between X and Y, exploiting constant input columns
Computes the unnormalized covariance matrix :math:`C = X^\top Y`
(for symmetric=False) or :math:`C = \frac{1}{2} (X^\top Y + Y^\top X)`
(for symmetric=True). Suppose the data matrices can be column-permuted
to have the form
.. math:
X &=& (X_{\mathrm{var}}, X_{\mathrm{const}})
Y &=& (Y_{\mathrm{var}}, Y_{\mathrm{const}})
with rows:
.. math:
x_t &=& (x_{\mathrm{var},t}, x_{\mathrm{const}})
y_t &=& (y_{\mathrm{var},t}, y_{\mathrm{const}})
where :math:`x_{\mathrm{const}},\:y_{\mathrm{const}}` are constant vectors.
The resulting matrix has the general form:
.. math:
C &=& [X_{\mathrm{var}}^\top Y_{\mathrm{var}} x_{sum} y_{\mathrm{const}}^\top ]
& & [x_{\mathrm{const}}^\top y_{sum}^\top x_{sum} x_{sum}^\top ]
where :math:`x_{sum} = \sum_t x_{\mathrm{var},t}` and
:math:`y_{sum} = \sum_t y_{\mathrm{var},t}`.
Parameters
----------
Xvar : ndarray (T, m)
Part of the data matrix X with :math:`m \le M` variable columns.
mask_X : ndarray (M)
Boolean array of size M of the full columns. False for constant column,
True for variable column in X.
xvarsum : ndarray (m)
Column sum of variable part of data matrix X
xconst : ndarray (M-m)
Values of the constant part of data matrix X
Yvar : ndarray (T, n)
Part of the data matrix Y with :math:`n \le N` variable columns.
mask_Y : ndarray (N)
Boolean array of size N of the full columns. False for constant column,
True for variable column in Y.
yvarsum : ndarray (n)
Column sum of variable part of data matrix Y
yconst : ndarray (N-n)
Values of the constant part of data matrix Y
weights : None or ndarray (N)
weights for all time steps.
Returns
-------
C : ndarray (M, N)
Unnormalized covariance matrix.
"""
C = np.zeros((len(mask_X), len(mask_Y)))
# Block 11
C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
# other blocks
xsum_is_0 = _is_zero(xvarsum)
ysum_is_0 = _is_zero(yvarsum)
xconst_is_0 = _is_zero(xconst)
yconst_is_0 = _is_zero(yconst)
# TODO: maybe we don't need the checking here, if we do the decision in the higher-level function M2
# TODO: if not zero, we could still exploit the zeros in const and compute (and write!) this outer product
# TODO: only to a sub-matrix
# Block 12 and 21
if weights is not None:
wsum = np.sum(weights)
xvarsum = np.sum(weights[:, None] * Xvar, axis=0)
yvarsum = np.sum(weights[:, None] * Yvar, axis=0)
else:
wsum = Xvar.shape[0]
if not (xsum_is_0 or yconst_is_0) or not (ysum_is_0 or xconst_is_0):
C[np.ix_(mask_X, ~mask_Y)] = np.outer(xvarsum, yconst)
C[np.ix_(~mask_X, mask_Y)] = np.outer(xconst, yvarsum)
# Block 22
if not (xconst_is_0 or yconst_is_0):
C[np.ix_(~mask_X, ~mask_Y)] = np.outer(wsum*xconst, yconst)
return C | python | def _M2_const(Xvar, mask_X, xvarsum, xconst, Yvar, mask_Y, yvarsum, yconst, weights=None):
r""" Computes the unnormalized covariance matrix between X and Y, exploiting constant input columns
Computes the unnormalized covariance matrix :math:`C = X^\top Y`
(for symmetric=False) or :math:`C = \frac{1}{2} (X^\top Y + Y^\top X)`
(for symmetric=True). Suppose the data matrices can be column-permuted
to have the form
.. math:
X &=& (X_{\mathrm{var}}, X_{\mathrm{const}})
Y &=& (Y_{\mathrm{var}}, Y_{\mathrm{const}})
with rows:
.. math:
x_t &=& (x_{\mathrm{var},t}, x_{\mathrm{const}})
y_t &=& (y_{\mathrm{var},t}, y_{\mathrm{const}})
where :math:`x_{\mathrm{const}},\:y_{\mathrm{const}}` are constant vectors.
The resulting matrix has the general form:
.. math:
C &=& [X_{\mathrm{var}}^\top Y_{\mathrm{var}} x_{sum} y_{\mathrm{const}}^\top ]
& & [x_{\mathrm{const}}^\top y_{sum}^\top x_{sum} x_{sum}^\top ]
where :math:`x_{sum} = \sum_t x_{\mathrm{var},t}` and
:math:`y_{sum} = \sum_t y_{\mathrm{var},t}`.
Parameters
----------
Xvar : ndarray (T, m)
Part of the data matrix X with :math:`m \le M` variable columns.
mask_X : ndarray (M)
Boolean array of size M of the full columns. False for constant column,
True for variable column in X.
xvarsum : ndarray (m)
Column sum of variable part of data matrix X
xconst : ndarray (M-m)
Values of the constant part of data matrix X
Yvar : ndarray (T, n)
Part of the data matrix Y with :math:`n \le N` variable columns.
mask_Y : ndarray (N)
Boolean array of size N of the full columns. False for constant column,
True for variable column in Y.
yvarsum : ndarray (n)
Column sum of variable part of data matrix Y
yconst : ndarray (N-n)
Values of the constant part of data matrix Y
weights : None or ndarray (N)
weights for all time steps.
Returns
-------
C : ndarray (M, N)
Unnormalized covariance matrix.
"""
C = np.zeros((len(mask_X), len(mask_Y)))
# Block 11
C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
# other blocks
xsum_is_0 = _is_zero(xvarsum)
ysum_is_0 = _is_zero(yvarsum)
xconst_is_0 = _is_zero(xconst)
yconst_is_0 = _is_zero(yconst)
# TODO: maybe we don't need the checking here, if we do the decision in the higher-level function M2
# TODO: if not zero, we could still exploit the zeros in const and compute (and write!) this outer product
# TODO: only to a sub-matrix
# Block 12 and 21
if weights is not None:
wsum = np.sum(weights)
xvarsum = np.sum(weights[:, None] * Xvar, axis=0)
yvarsum = np.sum(weights[:, None] * Yvar, axis=0)
else:
wsum = Xvar.shape[0]
if not (xsum_is_0 or yconst_is_0) or not (ysum_is_0 or xconst_is_0):
C[np.ix_(mask_X, ~mask_Y)] = np.outer(xvarsum, yconst)
C[np.ix_(~mask_X, mask_Y)] = np.outer(xconst, yvarsum)
# Block 22
if not (xconst_is_0 or yconst_is_0):
C[np.ix_(~mask_X, ~mask_Y)] = np.outer(wsum*xconst, yconst)
return C | [
"def",
"_M2_const",
"(",
"Xvar",
",",
"mask_X",
",",
"xvarsum",
",",
"xconst",
",",
"Yvar",
",",
"mask_Y",
",",
"yvarsum",
",",
"yconst",
",",
"weights",
"=",
"None",
")",
":",
"C",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"mask_X",
")",
",... | r""" Computes the unnormalized covariance matrix between X and Y, exploiting constant input columns
Computes the unnormalized covariance matrix :math:`C = X^\top Y`
(for symmetric=False) or :math:`C = \frac{1}{2} (X^\top Y + Y^\top X)`
(for symmetric=True). Suppose the data matrices can be column-permuted
to have the form
.. math:
X &=& (X_{\mathrm{var}}, X_{\mathrm{const}})
Y &=& (Y_{\mathrm{var}}, Y_{\mathrm{const}})
with rows:
.. math:
x_t &=& (x_{\mathrm{var},t}, x_{\mathrm{const}})
y_t &=& (y_{\mathrm{var},t}, y_{\mathrm{const}})
where :math:`x_{\mathrm{const}},\:y_{\mathrm{const}}` are constant vectors.
The resulting matrix has the general form:
.. math:
C &=& [X_{\mathrm{var}}^\top Y_{\mathrm{var}} x_{sum} y_{\mathrm{const}}^\top ]
& & [x_{\mathrm{const}}^\top y_{sum}^\top x_{sum} x_{sum}^\top ]
where :math:`x_{sum} = \sum_t x_{\mathrm{var},t}` and
:math:`y_{sum} = \sum_t y_{\mathrm{var},t}`.
Parameters
----------
Xvar : ndarray (T, m)
Part of the data matrix X with :math:`m \le M` variable columns.
mask_X : ndarray (M)
Boolean array of size M of the full columns. False for constant column,
True for variable column in X.
xvarsum : ndarray (m)
Column sum of variable part of data matrix X
xconst : ndarray (M-m)
Values of the constant part of data matrix X
Yvar : ndarray (T, n)
Part of the data matrix Y with :math:`n \le N` variable columns.
mask_Y : ndarray (N)
Boolean array of size N of the full columns. False for constant column,
True for variable column in Y.
yvarsum : ndarray (n)
Column sum of variable part of data matrix Y
yconst : ndarray (N-n)
Values of the constant part of data matrix Y
weights : None or ndarray (N)
weights for all time steps.
Returns
-------
C : ndarray (M, N)
Unnormalized covariance matrix. | [
"r",
"Computes",
"the",
"unnormalized",
"covariance",
"matrix",
"between",
"X",
"and",
"Y",
"exploiting",
"constant",
"input",
"columns"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L404-L485 | train | 204,204 |
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | _M2_sparse | def _M2_sparse(Xvar, mask_X, Yvar, mask_Y, weights=None):
""" 2nd moment matrix exploiting zero input columns """
C = np.zeros((len(mask_X), len(mask_Y)))
C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
return C | python | def _M2_sparse(Xvar, mask_X, Yvar, mask_Y, weights=None):
""" 2nd moment matrix exploiting zero input columns """
C = np.zeros((len(mask_X), len(mask_Y)))
C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
return C | [
"def",
"_M2_sparse",
"(",
"Xvar",
",",
"mask_X",
",",
"Yvar",
",",
"mask_Y",
",",
"weights",
"=",
"None",
")",
":",
"C",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"mask_X",
")",
",",
"len",
"(",
"mask_Y",
")",
")",
")",
"C",
"[",
"np",
"... | 2nd moment matrix exploiting zero input columns | [
"2nd",
"moment",
"matrix",
"exploiting",
"zero",
"input",
"columns"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L488-L492 | train | 204,205 |
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | _M2_sparse_sym | def _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=None, column_selection=None):
""" 2nd self-symmetric moment matrix exploiting zero input columns
Computes X'X + Y'Y and X'Y + Y'X
"""
assert len(mask_X) == len(mask_Y), 'X and Y need to have equal sizes for symmetrization'
if column_selection is None:
mask_Xk = mask_X
mask_Yk = mask_Y
Xvark = Xvar
Yvark = Yvar
else:
mask_Xk = mask_X[column_selection]
mask_Yk = mask_Y[column_selection]
Xvark = Xvar[:, _filter_variable_indices(mask_X, column_selection)]
Yvark = Yvar[:, _filter_variable_indices(mask_Y, column_selection)]
Cxxyy = np.zeros((len(mask_X), len(mask_Yk)))
Cxxyy[np.ix_(mask_X, mask_Xk)] = _M2_dense(Xvar, Xvark, weights=weights)
Cxxyy[np.ix_(mask_Y, mask_Yk)] += _M2_dense(Yvar, Yvark, weights=weights)
Cxyyx = np.zeros((len(mask_X), len(mask_Yk)))
Cxy = _M2_dense(Xvar, Yvark, weights=weights)
Cyx = _M2_dense(Yvar, Xvark, weights=weights)
Cxyyx[np.ix_(mask_X, mask_Yk)] = Cxy
Cxyyx[np.ix_(mask_Y, mask_Xk)] += Cyx
return Cxxyy, Cxyyx | python | def _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=None, column_selection=None):
""" 2nd self-symmetric moment matrix exploiting zero input columns
Computes X'X + Y'Y and X'Y + Y'X
"""
assert len(mask_X) == len(mask_Y), 'X and Y need to have equal sizes for symmetrization'
if column_selection is None:
mask_Xk = mask_X
mask_Yk = mask_Y
Xvark = Xvar
Yvark = Yvar
else:
mask_Xk = mask_X[column_selection]
mask_Yk = mask_Y[column_selection]
Xvark = Xvar[:, _filter_variable_indices(mask_X, column_selection)]
Yvark = Yvar[:, _filter_variable_indices(mask_Y, column_selection)]
Cxxyy = np.zeros((len(mask_X), len(mask_Yk)))
Cxxyy[np.ix_(mask_X, mask_Xk)] = _M2_dense(Xvar, Xvark, weights=weights)
Cxxyy[np.ix_(mask_Y, mask_Yk)] += _M2_dense(Yvar, Yvark, weights=weights)
Cxyyx = np.zeros((len(mask_X), len(mask_Yk)))
Cxy = _M2_dense(Xvar, Yvark, weights=weights)
Cyx = _M2_dense(Yvar, Xvark, weights=weights)
Cxyyx[np.ix_(mask_X, mask_Yk)] = Cxy
Cxyyx[np.ix_(mask_Y, mask_Xk)] += Cyx
return Cxxyy, Cxyyx | [
"def",
"_M2_sparse_sym",
"(",
"Xvar",
",",
"mask_X",
",",
"Yvar",
",",
"mask_Y",
",",
"weights",
"=",
"None",
",",
"column_selection",
"=",
"None",
")",
":",
"assert",
"len",
"(",
"mask_X",
")",
"==",
"len",
"(",
"mask_Y",
")",
",",
"'X and Y need to hav... | 2nd self-symmetric moment matrix exploiting zero input columns
Computes X'X + Y'Y and X'Y + Y'X | [
"2nd",
"self",
"-",
"symmetric",
"moment",
"matrix",
"exploiting",
"zero",
"input",
"columns"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L495-L524 | train | 204,206 |
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | _M2_symmetric | def _M2_symmetric(Xvar, Yvar, mask_X=None, mask_Y=None, xsum=0, xconst=0, ysum=0, yconst=0, weights=None,
column_selection=None, diag_only=False):
""" symmetric second moment matrices. Decide if we need dense, sparse, const"""
if mask_X is None and mask_Y is None:
if column_selection is None:
Xvark = Xvar
Yvark = Yvar
else:
Xvark = Xvar[:, column_selection]
Yvark = Yvar[:, column_selection]
Cxxyy = _M2_dense(Xvar, Xvark, weights=weights, diag_only=diag_only) \
+ _M2_dense(Yvar, Yvark, weights=weights, diag_only=diag_only)
Cxy = _M2_dense(Xvar, Yvark, weights=weights, diag_only=diag_only)
Cyx = _M2_dense(Yvar, Xvark, weights=weights, diag_only=diag_only)
Cxyyx = Cxy + Cyx
else:
# Check if one of the masks is not None, modify it and also adjust the constant columns:
if mask_X is None:
mask_X = np.ones(Xvar.shape[1], dtype=np.bool)
xconst = np.ones(0, dtype=float)
if mask_Y is None:
mask_Y = np.ones(Yvar.shape[1], dtype=np.bool)
yconst = np.ones(0, dtype=float)
if _is_zero(xsum) and _is_zero(ysum) or _is_zero(xconst) and _is_zero(yconst):
Cxxyy, Cxyyx = _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=weights, column_selection=column_selection)
else:
xvarsum = xsum[mask_X] # to variable part
yvarsum = ysum[mask_Y] # to variable part
if column_selection is None:
Xvark = Xvar
mask_Xk = mask_X
xkvarsum = xvarsum
xkconst = xconst
Yvark = Yvar
mask_Yk = mask_Y
ykvarsum = yvarsum
ykconst = yconst
else:
Xvark = Xvar[:, _filter_variable_indices(mask_X, column_selection)]
mask_Xk = mask_X[column_selection]
xksum = xsum[column_selection]
xkvarsum = xksum[mask_Xk]
xkconst = xconst[_filter_variable_indices(~mask_X, column_selection)]
Yvark = Yvar[:, _filter_variable_indices(mask_Y, column_selection)]
mask_Yk = mask_Y[column_selection]
yksum = ysum[column_selection]
ykvarsum = yksum[mask_Yk]
ykconst = yconst[_filter_variable_indices(~mask_Y, column_selection)]
Cxxyy = _M2_const(Xvar, mask_X, xvarsum, xconst, Xvark, mask_Xk, xkvarsum, xkconst, weights=weights) \
+ _M2_const(Yvar, mask_Y, yvarsum, yconst, Yvark, mask_Yk, ykvarsum, ykconst, weights=weights)
Cxy = _M2_const(Xvar, mask_X, xvarsum, xconst, Yvark, mask_Yk, ykvarsum, ykconst, weights=weights)
Cyx = _M2_const(Yvar, mask_Y, yvarsum, yconst, Xvark, mask_Xk, xkvarsum, xkconst, weights=weights)
Cxyyx = Cxy + Cyx
return Cxxyy, Cxyyx | python | def _M2_symmetric(Xvar, Yvar, mask_X=None, mask_Y=None, xsum=0, xconst=0, ysum=0, yconst=0, weights=None,
column_selection=None, diag_only=False):
""" symmetric second moment matrices. Decide if we need dense, sparse, const"""
if mask_X is None and mask_Y is None:
if column_selection is None:
Xvark = Xvar
Yvark = Yvar
else:
Xvark = Xvar[:, column_selection]
Yvark = Yvar[:, column_selection]
Cxxyy = _M2_dense(Xvar, Xvark, weights=weights, diag_only=diag_only) \
+ _M2_dense(Yvar, Yvark, weights=weights, diag_only=diag_only)
Cxy = _M2_dense(Xvar, Yvark, weights=weights, diag_only=diag_only)
Cyx = _M2_dense(Yvar, Xvark, weights=weights, diag_only=diag_only)
Cxyyx = Cxy + Cyx
else:
# Check if one of the masks is not None, modify it and also adjust the constant columns:
if mask_X is None:
mask_X = np.ones(Xvar.shape[1], dtype=np.bool)
xconst = np.ones(0, dtype=float)
if mask_Y is None:
mask_Y = np.ones(Yvar.shape[1], dtype=np.bool)
yconst = np.ones(0, dtype=float)
if _is_zero(xsum) and _is_zero(ysum) or _is_zero(xconst) and _is_zero(yconst):
Cxxyy, Cxyyx = _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=weights, column_selection=column_selection)
else:
xvarsum = xsum[mask_X] # to variable part
yvarsum = ysum[mask_Y] # to variable part
if column_selection is None:
Xvark = Xvar
mask_Xk = mask_X
xkvarsum = xvarsum
xkconst = xconst
Yvark = Yvar
mask_Yk = mask_Y
ykvarsum = yvarsum
ykconst = yconst
else:
Xvark = Xvar[:, _filter_variable_indices(mask_X, column_selection)]
mask_Xk = mask_X[column_selection]
xksum = xsum[column_selection]
xkvarsum = xksum[mask_Xk]
xkconst = xconst[_filter_variable_indices(~mask_X, column_selection)]
Yvark = Yvar[:, _filter_variable_indices(mask_Y, column_selection)]
mask_Yk = mask_Y[column_selection]
yksum = ysum[column_selection]
ykvarsum = yksum[mask_Yk]
ykconst = yconst[_filter_variable_indices(~mask_Y, column_selection)]
Cxxyy = _M2_const(Xvar, mask_X, xvarsum, xconst, Xvark, mask_Xk, xkvarsum, xkconst, weights=weights) \
+ _M2_const(Yvar, mask_Y, yvarsum, yconst, Yvark, mask_Yk, ykvarsum, ykconst, weights=weights)
Cxy = _M2_const(Xvar, mask_X, xvarsum, xconst, Yvark, mask_Yk, ykvarsum, ykconst, weights=weights)
Cyx = _M2_const(Yvar, mask_Y, yvarsum, yconst, Xvark, mask_Xk, xkvarsum, xkconst, weights=weights)
Cxyyx = Cxy + Cyx
return Cxxyy, Cxyyx | [
"def",
"_M2_symmetric",
"(",
"Xvar",
",",
"Yvar",
",",
"mask_X",
"=",
"None",
",",
"mask_Y",
"=",
"None",
",",
"xsum",
"=",
"0",
",",
"xconst",
"=",
"0",
",",
"ysum",
"=",
"0",
",",
"yconst",
"=",
"0",
",",
"weights",
"=",
"None",
",",
"column_se... | symmetric second moment matrices. Decide if we need dense, sparse, const | [
"symmetric",
"second",
"moment",
"matrices",
".",
"Decide",
"if",
"we",
"need",
"dense",
"sparse",
"const"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L545-L598 | train | 204,207 |
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | moments_XX | def moments_XX(X, remove_mean=False, modify_data=False, weights=None, sparse_mode='auto', sparse_tol=0.0,
column_selection=None, diag_only=False):
r""" Computes the first two unnormalized moments of X
Computes :math:`s = \sum_t x_t` and :math:`C = X^\top X` while exploiting
zero or constant columns in the data matrix.
Parameters
----------
X : ndarray (T, M)
Data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
weights: None or ndarray(T, )
weights assigned to each trajectory point. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
column_selection: ndarray(k, dtype=int) or None
Indices of those columns that are to be computed. If None, all columns are computed.
diag_only: bool
If True, the computation is restricted to the diagonal entries (autocorrelations) only.
Returns
-------
w : float
statistical weight
s : ndarray (M)
sum
C : ndarray (M, M)
unnormalized covariance matrix
"""
# Check consistency of inputs:
if weights is not None:
assert X.shape[0] == weights.shape[0], 'X and weights_x must have equal length'
# diag_only is only implemented for dense mode
if diag_only and sparse_mode is not 'dense':
if sparse_mode is 'sparse':
import warnings
warnings.warn('Computing diagonal entries only is not implemented for sparse mode. Switching to dense mode.')
sparse_mode = 'dense'
# sparsify
X0, mask_X, xconst = _sparsify(X, remove_mean=remove_mean, modify_data=modify_data,
sparse_mode=sparse_mode, sparse_tol=sparse_tol)
is_sparse = mask_X is not None
# copy / convert
# TODO: do we need to copy xconst?
X0, xconst = _copy_convert(X0, const=xconst, remove_mean=remove_mean,
copy=is_sparse or (remove_mean and not modify_data))
# sum / center
w, sx, sx0_centered = _sum(X0, xmask=mask_X, xconst=xconst, symmetric=False, remove_mean=remove_mean,
weights=weights)
if remove_mean:
_center(X0, w, sx, mask=mask_X, const=xconst, inplace=True) # fast in-place centering
# TODO: we could make a second const check here. If after summation not enough zeros have appeared in the
# TODO: consts, we switch back to dense treatment here.
# compute covariance matrix
if column_selection is not None:
if is_sparse:
Xk = X[:, column_selection]
mask_Xk = mask_X[column_selection]
X0k = Xk[:, mask_Xk]
xksum = sx0_centered[column_selection]
xkconst = Xk[0, ~mask_Xk]
X0k, xkconst = _copy_convert(X0k, const=xkconst, remove_mean=remove_mean,
copy=True)
C = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_Xk, xsum=sx0_centered, xconst=xconst, ysum=xksum, yconst=xkconst,
weights=weights)
else:
X0k = X0[:, column_selection]
C = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_X, xsum=sx0_centered, xconst=xconst,
ysum=sx0_centered[column_selection], yconst=xconst, weights=weights)
else:
C = _M2(X0, X0, mask_X=mask_X, mask_Y=mask_X, xsum=sx0_centered, xconst=xconst, ysum=sx0_centered, yconst=xconst,
weights=weights, diag_only=diag_only)
return w, sx, C | python | def moments_XX(X, remove_mean=False, modify_data=False, weights=None, sparse_mode='auto', sparse_tol=0.0,
column_selection=None, diag_only=False):
r""" Computes the first two unnormalized moments of X
Computes :math:`s = \sum_t x_t` and :math:`C = X^\top X` while exploiting
zero or constant columns in the data matrix.
Parameters
----------
X : ndarray (T, M)
Data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
weights: None or ndarray(T, )
weights assigned to each trajectory point. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
column_selection: ndarray(k, dtype=int) or None
Indices of those columns that are to be computed. If None, all columns are computed.
diag_only: bool
If True, the computation is restricted to the diagonal entries (autocorrelations) only.
Returns
-------
w : float
statistical weight
s : ndarray (M)
sum
C : ndarray (M, M)
unnormalized covariance matrix
"""
# Check consistency of inputs:
if weights is not None:
assert X.shape[0] == weights.shape[0], 'X and weights_x must have equal length'
# diag_only is only implemented for dense mode
if diag_only and sparse_mode is not 'dense':
if sparse_mode is 'sparse':
import warnings
warnings.warn('Computing diagonal entries only is not implemented for sparse mode. Switching to dense mode.')
sparse_mode = 'dense'
# sparsify
X0, mask_X, xconst = _sparsify(X, remove_mean=remove_mean, modify_data=modify_data,
sparse_mode=sparse_mode, sparse_tol=sparse_tol)
is_sparse = mask_X is not None
# copy / convert
# TODO: do we need to copy xconst?
X0, xconst = _copy_convert(X0, const=xconst, remove_mean=remove_mean,
copy=is_sparse or (remove_mean and not modify_data))
# sum / center
w, sx, sx0_centered = _sum(X0, xmask=mask_X, xconst=xconst, symmetric=False, remove_mean=remove_mean,
weights=weights)
if remove_mean:
_center(X0, w, sx, mask=mask_X, const=xconst, inplace=True) # fast in-place centering
# TODO: we could make a second const check here. If after summation not enough zeros have appeared in the
# TODO: consts, we switch back to dense treatment here.
# compute covariance matrix
if column_selection is not None:
if is_sparse:
Xk = X[:, column_selection]
mask_Xk = mask_X[column_selection]
X0k = Xk[:, mask_Xk]
xksum = sx0_centered[column_selection]
xkconst = Xk[0, ~mask_Xk]
X0k, xkconst = _copy_convert(X0k, const=xkconst, remove_mean=remove_mean,
copy=True)
C = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_Xk, xsum=sx0_centered, xconst=xconst, ysum=xksum, yconst=xkconst,
weights=weights)
else:
X0k = X0[:, column_selection]
C = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_X, xsum=sx0_centered, xconst=xconst,
ysum=sx0_centered[column_selection], yconst=xconst, weights=weights)
else:
C = _M2(X0, X0, mask_X=mask_X, mask_Y=mask_X, xsum=sx0_centered, xconst=xconst, ysum=sx0_centered, yconst=xconst,
weights=weights, diag_only=diag_only)
return w, sx, C | [
"def",
"moments_XX",
"(",
"X",
",",
"remove_mean",
"=",
"False",
",",
"modify_data",
"=",
"False",
",",
"weights",
"=",
"None",
",",
"sparse_mode",
"=",
"'auto'",
",",
"sparse_tol",
"=",
"0.0",
",",
"column_selection",
"=",
"None",
",",
"diag_only",
"=",
... | r""" Computes the first two unnormalized moments of X
Computes :math:`s = \sum_t x_t` and :math:`C = X^\top X` while exploiting
zero or constant columns in the data matrix.
Parameters
----------
X : ndarray (T, M)
Data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
weights: None or ndarray(T, )
weights assigned to each trajectory point. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
column_selection: ndarray(k, dtype=int) or None
Indices of those columns that are to be computed. If None, all columns are computed.
diag_only: bool
If True, the computation is restricted to the diagonal entries (autocorrelations) only.
Returns
-------
w : float
statistical weight
s : ndarray (M)
sum
C : ndarray (M, M)
unnormalized covariance matrix | [
"r",
"Computes",
"the",
"first",
"two",
"unnormalized",
"moments",
"of",
"X"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L606-L696 | train | 204,208 |
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | covar | def covar(X, remove_mean=False, modify_data=False, weights=None, sparse_mode='auto', sparse_tol=0.0):
""" Computes the covariance matrix of X
Computes
.. math:
C_XX &=& X^\top X
while exploiting zero or constant columns in the data matrix.
WARNING: Directly use moments_XX if you can. This function does an additional
constant-matrix multiplication and does not return the mean.
Parameters
----------
X : ndarray (T, M)
Data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
weights : None or ndarray(T, )
weights assigned to each trajectory point of X. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
Returns
-------
C_XX : ndarray (M, M)
Covariance matrix of X
See also
--------
moments_XX
"""
w, s, M = moments_XX(X, remove_mean=remove_mean, weights=weights, modify_data=modify_data,
sparse_mode=sparse_mode, sparse_tol=sparse_tol)
return M / float(w) | python | def covar(X, remove_mean=False, modify_data=False, weights=None, sparse_mode='auto', sparse_tol=0.0):
""" Computes the covariance matrix of X
Computes
.. math:
C_XX &=& X^\top X
while exploiting zero or constant columns in the data matrix.
WARNING: Directly use moments_XX if you can. This function does an additional
constant-matrix multiplication and does not return the mean.
Parameters
----------
X : ndarray (T, M)
Data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
weights : None or ndarray(T, )
weights assigned to each trajectory point of X. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
Returns
-------
C_XX : ndarray (M, M)
Covariance matrix of X
See also
--------
moments_XX
"""
w, s, M = moments_XX(X, remove_mean=remove_mean, weights=weights, modify_data=modify_data,
sparse_mode=sparse_mode, sparse_tol=sparse_tol)
return M / float(w) | [
"def",
"covar",
"(",
"X",
",",
"remove_mean",
"=",
"False",
",",
"modify_data",
"=",
"False",
",",
"weights",
"=",
"None",
",",
"sparse_mode",
"=",
"'auto'",
",",
"sparse_tol",
"=",
"0.0",
")",
":",
"w",
",",
"s",
",",
"M",
"=",
"moments_XX",
"(",
... | Computes the covariance matrix of X
Computes
.. math:
C_XX &=& X^\top X
while exploiting zero or constant columns in the data matrix.
WARNING: Directly use moments_XX if you can. This function does an additional
constant-matrix multiplication and does not return the mean.
Parameters
----------
X : ndarray (T, M)
Data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
weights : None or ndarray(T, )
weights assigned to each trajectory point of X. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
Returns
-------
C_XX : ndarray (M, M)
Covariance matrix of X
See also
--------
moments_XX | [
"Computes",
"the",
"covariance",
"matrix",
"of",
"X"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L976-L1026 | train | 204,209 |
markovmodel/PyEMMA | pyemma/_ext/variational/estimators/moments.py | covars | def covars(X, Y, remove_mean=False, modify_data=False, symmetrize=False, weights=None, sparse_mode='auto',
sparse_tol=0.0):
""" Computes the covariance and cross-covariance matrix of X and Y
If symmetrize is False, computes
.. math:
C_XX &=& X^\top X
C_XY &=& X^\top Y
If symmetrize is True, computes
.. math:
C_XX &=& \frac{1}{2} (X^\top X + Y^\top Y)
C_XY &=& \frac{1}{2} (X^\top Y + Y^\top X)
while exploiting zero or constant columns in the data matrix.
WARNING: Directly use moments_XXXY if you can. This function does an additional
constant-matrix multiplication and does not return the mean.
Parameters
----------
X : ndarray (T, M)
Data matrix
Y : ndarray (T, N)
Second data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
symmetrize : bool
Computes symmetrized means and moments (see above)
weights : None or ndarray(T, )
weights assigned to each trajectory point of X. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
Returns
-------
C_XX : ndarray (M, M)
Covariance matrix of X
C_XY : ndarray (M, N)
Covariance matrix of XY
See also
--------
moments_XXXY
"""
w, sx, sy, Mxx, Mxy = moments_XXXY(X, Y, remove_mean=remove_mean, modify_data=modify_data, weights=weights,
symmetrize=symmetrize, sparse_mode=sparse_mode, sparse_tol=sparse_tol)
return Mxx / float(w), Mxy / float(w) | python | def covars(X, Y, remove_mean=False, modify_data=False, symmetrize=False, weights=None, sparse_mode='auto',
sparse_tol=0.0):
""" Computes the covariance and cross-covariance matrix of X and Y
If symmetrize is False, computes
.. math:
C_XX &=& X^\top X
C_XY &=& X^\top Y
If symmetrize is True, computes
.. math:
C_XX &=& \frac{1}{2} (X^\top X + Y^\top Y)
C_XY &=& \frac{1}{2} (X^\top Y + Y^\top X)
while exploiting zero or constant columns in the data matrix.
WARNING: Directly use moments_XXXY if you can. This function does an additional
constant-matrix multiplication and does not return the mean.
Parameters
----------
X : ndarray (T, M)
Data matrix
Y : ndarray (T, N)
Second data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
symmetrize : bool
Computes symmetrized means and moments (see above)
weights : None or ndarray(T, )
weights assigned to each trajectory point of X. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
Returns
-------
C_XX : ndarray (M, M)
Covariance matrix of X
C_XY : ndarray (M, N)
Covariance matrix of XY
See also
--------
moments_XXXY
"""
w, sx, sy, Mxx, Mxy = moments_XXXY(X, Y, remove_mean=remove_mean, modify_data=modify_data, weights=weights,
symmetrize=symmetrize, sparse_mode=sparse_mode, sparse_tol=sparse_tol)
return Mxx / float(w), Mxy / float(w) | [
"def",
"covars",
"(",
"X",
",",
"Y",
",",
"remove_mean",
"=",
"False",
",",
"modify_data",
"=",
"False",
",",
"symmetrize",
"=",
"False",
",",
"weights",
"=",
"None",
",",
"sparse_mode",
"=",
"'auto'",
",",
"sparse_tol",
"=",
"0.0",
")",
":",
"w",
",... | Computes the covariance and cross-covariance matrix of X and Y
If symmetrize is False, computes
.. math:
C_XX &=& X^\top X
C_XY &=& X^\top Y
If symmetrize is True, computes
.. math:
C_XX &=& \frac{1}{2} (X^\top X + Y^\top Y)
C_XY &=& \frac{1}{2} (X^\top Y + Y^\top X)
while exploiting zero or constant columns in the data matrix.
WARNING: Directly use moments_XXXY if you can. This function does an additional
constant-matrix multiplication and does not return the mean.
Parameters
----------
X : ndarray (T, M)
Data matrix
Y : ndarray (T, N)
Second data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
symmetrize : bool
Computes symmetrized means and moments (see above)
weights : None or ndarray(T, )
weights assigned to each trajectory point of X. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
Returns
-------
C_XX : ndarray (M, M)
Covariance matrix of X
C_XY : ndarray (M, N)
Covariance matrix of XY
See also
--------
moments_XXXY | [
"Computes",
"the",
"covariance",
"and",
"cross",
"-",
"covariance",
"matrix",
"of",
"X",
"and",
"Y"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/moments.py#L1029-L1093 | train | 204,210 |
markovmodel/PyEMMA | pyemma/util/reflection.py | getargspec_no_self | def getargspec_no_self(func):
"""inspect.getargspec replacement using inspect.signature.
inspect.getargspec is deprecated in python 3. This is a replacement
based on the (new in python 3.3) `inspect.signature`.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
argspec : ArgSpec(args, varargs, varkw, defaults)
This is similar to the result of inspect.getargspec(func) under
python 2.x.
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not* included in argspec.args.
This is done for consistency between inspect.getargspec() under
python 2.x, and inspect.signature() under python 3.x.
"""
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = [
p.default for p in sig.parameters.values()
if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
p.default is not p.empty)
] or None
if args[0] == 'self':
args.pop(0)
return ArgSpec(args, varargs, varkw, defaults) | python | def getargspec_no_self(func):
"""inspect.getargspec replacement using inspect.signature.
inspect.getargspec is deprecated in python 3. This is a replacement
based on the (new in python 3.3) `inspect.signature`.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
argspec : ArgSpec(args, varargs, varkw, defaults)
This is similar to the result of inspect.getargspec(func) under
python 2.x.
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not* included in argspec.args.
This is done for consistency between inspect.getargspec() under
python 2.x, and inspect.signature() under python 3.x.
"""
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = [
p.default for p in sig.parameters.values()
if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
p.default is not p.empty)
] or None
if args[0] == 'self':
args.pop(0)
return ArgSpec(args, varargs, varkw, defaults) | [
"def",
"getargspec_no_self",
"(",
"func",
")",
":",
"sig",
"=",
"inspect",
".",
"signature",
"(",
"func",
")",
"args",
"=",
"[",
"p",
".",
"name",
"for",
"p",
"in",
"sig",
".",
"parameters",
".",
"values",
"(",
")",
"if",
"p",
".",
"kind",
"==",
... | inspect.getargspec replacement using inspect.signature.
inspect.getargspec is deprecated in python 3. This is a replacement
based on the (new in python 3.3) `inspect.signature`.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
argspec : ArgSpec(args, varargs, varkw, defaults)
This is similar to the result of inspect.getargspec(func) under
python 2.x.
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not* included in argspec.args.
This is done for consistency between inspect.getargspec() under
python 2.x, and inspect.signature() under python 3.x. | [
"inspect",
".",
"getargspec",
"replacement",
"using",
"inspect",
".",
"signature",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/util/reflection.py#L43-L88 | train | 204,211 |
markovmodel/PyEMMA | pyemma/msm/estimators/_dtraj_stats.py | blocksplit_dtrajs | def blocksplit_dtrajs(dtrajs, lag=1, sliding=True, shift=None):
""" Splits the discrete trajectories into approximately uncorrelated fragments
Will split trajectories into fragments of lengths lag or longer. These fragments
are overlapping in order to conserve the transition counts at given lag.
If sliding=True, the resulting trajectories will lead to exactly the same count
matrix as when counted from dtrajs. If sliding=False (sampling at lag), the
count matrices are only equal when also setting shift=0.
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories
lag : int
Lag time at which counting will be done. If sh
sliding : bool
True for splitting trajectories for sliding count, False if lag-sampling will be applied
shift : None or int
Start of first full tau-window. If None, shift will be randomly generated
"""
dtrajs_new = []
for dtraj in dtrajs:
if len(dtraj) <= lag:
continue
if shift is None:
s = np.random.randint(min(lag, dtraj.size-lag))
else:
s = shift
if sliding:
if s > 0:
dtrajs_new.append(dtraj[0:lag+s])
for t0 in range(s, dtraj.size-lag, lag):
dtrajs_new.append(dtraj[t0:t0+2*lag])
else:
for t0 in range(s, dtraj.size-lag, lag):
dtrajs_new.append(dtraj[t0:t0+lag+1])
return dtrajs_new | python | def blocksplit_dtrajs(dtrajs, lag=1, sliding=True, shift=None):
""" Splits the discrete trajectories into approximately uncorrelated fragments
Will split trajectories into fragments of lengths lag or longer. These fragments
are overlapping in order to conserve the transition counts at given lag.
If sliding=True, the resulting trajectories will lead to exactly the same count
matrix as when counted from dtrajs. If sliding=False (sampling at lag), the
count matrices are only equal when also setting shift=0.
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories
lag : int
Lag time at which counting will be done. If sh
sliding : bool
True for splitting trajectories for sliding count, False if lag-sampling will be applied
shift : None or int
Start of first full tau-window. If None, shift will be randomly generated
"""
dtrajs_new = []
for dtraj in dtrajs:
if len(dtraj) <= lag:
continue
if shift is None:
s = np.random.randint(min(lag, dtraj.size-lag))
else:
s = shift
if sliding:
if s > 0:
dtrajs_new.append(dtraj[0:lag+s])
for t0 in range(s, dtraj.size-lag, lag):
dtrajs_new.append(dtraj[t0:t0+2*lag])
else:
for t0 in range(s, dtraj.size-lag, lag):
dtrajs_new.append(dtraj[t0:t0+lag+1])
return dtrajs_new | [
"def",
"blocksplit_dtrajs",
"(",
"dtrajs",
",",
"lag",
"=",
"1",
",",
"sliding",
"=",
"True",
",",
"shift",
"=",
"None",
")",
":",
"dtrajs_new",
"=",
"[",
"]",
"for",
"dtraj",
"in",
"dtrajs",
":",
"if",
"len",
"(",
"dtraj",
")",
"<=",
"lag",
":",
... | Splits the discrete trajectories into approximately uncorrelated fragments
Will split trajectories into fragments of lengths lag or longer. These fragments
are overlapping in order to conserve the transition counts at given lag.
If sliding=True, the resulting trajectories will lead to exactly the same count
matrix as when counted from dtrajs. If sliding=False (sampling at lag), the
count matrices are only equal when also setting shift=0.
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories
lag : int
Lag time at which counting will be done. If sh
sliding : bool
True for splitting trajectories for sliding count, False if lag-sampling will be applied
shift : None or int
Start of first full tau-window. If None, shift will be randomly generated | [
"Splits",
"the",
"discrete",
"trajectories",
"into",
"approximately",
"uncorrelated",
"fragments"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/msm/estimators/_dtraj_stats.py#L32-L69 | train | 204,212 |
markovmodel/PyEMMA | pyemma/msm/estimators/_dtraj_stats.py | cvsplit_dtrajs | def cvsplit_dtrajs(dtrajs):
""" Splits the trajectories into a training and test set with approximately equal number of trajectories
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories
"""
if len(dtrajs) == 1:
raise ValueError('Only have a single trajectory. Cannot be split into train and test set')
I0 = np.random.choice(len(dtrajs), int(len(dtrajs)/2), replace=False)
I1 = np.array(list(set(list(np.arange(len(dtrajs)))) - set(list(I0))))
dtrajs_train = [dtrajs[i] for i in I0]
dtrajs_test = [dtrajs[i] for i in I1]
return dtrajs_train, dtrajs_test | python | def cvsplit_dtrajs(dtrajs):
""" Splits the trajectories into a training and test set with approximately equal number of trajectories
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories
"""
if len(dtrajs) == 1:
raise ValueError('Only have a single trajectory. Cannot be split into train and test set')
I0 = np.random.choice(len(dtrajs), int(len(dtrajs)/2), replace=False)
I1 = np.array(list(set(list(np.arange(len(dtrajs)))) - set(list(I0))))
dtrajs_train = [dtrajs[i] for i in I0]
dtrajs_test = [dtrajs[i] for i in I1]
return dtrajs_train, dtrajs_test | [
"def",
"cvsplit_dtrajs",
"(",
"dtrajs",
")",
":",
"if",
"len",
"(",
"dtrajs",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"'Only have a single trajectory. Cannot be split into train and test set'",
")",
"I0",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"l... | Splits the trajectories into a training and test set with approximately equal number of trajectories
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories | [
"Splits",
"the",
"trajectories",
"into",
"a",
"training",
"and",
"test",
"set",
"with",
"approximately",
"equal",
"number",
"of",
"trajectories"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/msm/estimators/_dtraj_stats.py#L73-L88 | train | 204,213 |
markovmodel/PyEMMA | pyemma/util/contexts.py | numpy_random_seed | def numpy_random_seed(seed=42):
""" sets the random seed of numpy within the context.
Example
-------
>>> import numpy as np
>>> with numpy_random_seed(seed=0):
... np.random.randint(1000)
684
"""
old_state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(old_state) | python | def numpy_random_seed(seed=42):
""" sets the random seed of numpy within the context.
Example
-------
>>> import numpy as np
>>> with numpy_random_seed(seed=0):
... np.random.randint(1000)
684
"""
old_state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(old_state) | [
"def",
"numpy_random_seed",
"(",
"seed",
"=",
"42",
")",
":",
"old_state",
"=",
"np",
".",
"random",
".",
"get_state",
"(",
")",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"try",
":",
"yield",
"finally",
":",
"np",
".",
"random",
".",
"set... | sets the random seed of numpy within the context.
Example
-------
>>> import numpy as np
>>> with numpy_random_seed(seed=0):
... np.random.randint(1000)
684 | [
"sets",
"the",
"random",
"seed",
"of",
"numpy",
"within",
"the",
"context",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/util/contexts.py#L32-L47 | train | 204,214 |
markovmodel/PyEMMA | pyemma/util/contexts.py | random_seed | def random_seed(seed=42):
""" sets the random seed of Python within the context.
Example
-------
>>> import random
>>> with random_seed(seed=0):
... random.randint(0, 1000) # doctest: +SKIP
864
"""
old_state = random.getstate()
random.seed(seed)
try:
yield
finally:
random.setstate(old_state) | python | def random_seed(seed=42):
""" sets the random seed of Python within the context.
Example
-------
>>> import random
>>> with random_seed(seed=0):
... random.randint(0, 1000) # doctest: +SKIP
864
"""
old_state = random.getstate()
random.seed(seed)
try:
yield
finally:
random.setstate(old_state) | [
"def",
"random_seed",
"(",
"seed",
"=",
"42",
")",
":",
"old_state",
"=",
"random",
".",
"getstate",
"(",
")",
"random",
".",
"seed",
"(",
"seed",
")",
"try",
":",
"yield",
"finally",
":",
"random",
".",
"setstate",
"(",
"old_state",
")"
] | sets the random seed of Python within the context.
Example
-------
>>> import random
>>> with random_seed(seed=0):
... random.randint(0, 1000) # doctest: +SKIP
864 | [
"sets",
"the",
"random",
"seed",
"of",
"Python",
"within",
"the",
"context",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/util/contexts.py#L51-L66 | train | 204,215 |
markovmodel/PyEMMA | pyemma/util/contexts.py | settings | def settings(**kwargs):
""" apply given PyEMMA config values temporarily within the given context."""
from pyemma import config
old_settings = {}
try:
# remember old setting, set new one. May raise ValueError, if invalid setting is given.
for k, v in kwargs.items():
old_settings[k] = getattr(config, k)
setattr(config, k, v)
yield
finally:
# restore old settings
for k, v in old_settings.items():
setattr(config, k, v) | python | def settings(**kwargs):
""" apply given PyEMMA config values temporarily within the given context."""
from pyemma import config
old_settings = {}
try:
# remember old setting, set new one. May raise ValueError, if invalid setting is given.
for k, v in kwargs.items():
old_settings[k] = getattr(config, k)
setattr(config, k, v)
yield
finally:
# restore old settings
for k, v in old_settings.items():
setattr(config, k, v) | [
"def",
"settings",
"(",
"*",
"*",
"kwargs",
")",
":",
"from",
"pyemma",
"import",
"config",
"old_settings",
"=",
"{",
"}",
"try",
":",
"# remember old setting, set new one. May raise ValueError, if invalid setting is given.",
"for",
"k",
",",
"v",
"in",
"kwargs",
".... | apply given PyEMMA config values temporarily within the given context. | [
"apply",
"given",
"PyEMMA",
"config",
"values",
"temporarily",
"within",
"the",
"given",
"context",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/util/contexts.py#L70-L84 | train | 204,216 |
markovmodel/PyEMMA | pyemma/plots/plots2d.py | get_histogram | def get_histogram(
xall, yall, nbins=100,
weights=None, avoid_zero_count=False):
"""Compute a two-dimensional histogram.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
nbins : int, optional, default=100
Number of histogram bins used in each dimension.
weights : ndarray(T), optional, default=None
Sample weights; by default all samples have the same weight.
avoid_zero_count : bool, optional, default=True
Avoid zero counts by lifting all histogram elements to the
minimum value before computing the free energy. If False,
zero histogram counts would yield infinity in the free energy.
Returns
-------
x : ndarray(nbins, nbins)
The bins' x-coordinates in meshgrid format.
y : ndarray(nbins, nbins)
The bins' y-coordinates in meshgrid format.
z : ndarray(nbins, nbins)
Histogram counts in meshgrid format.
"""
z, xedge, yedge = _np.histogram2d(
xall, yall, bins=nbins, weights=weights)
x = 0.5 * (xedge[:-1] + xedge[1:])
y = 0.5 * (yedge[:-1] + yedge[1:])
if avoid_zero_count:
z = _np.maximum(z, _np.min(z[z.nonzero()]))
return x, y, z.T | python | def get_histogram(
xall, yall, nbins=100,
weights=None, avoid_zero_count=False):
"""Compute a two-dimensional histogram.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
nbins : int, optional, default=100
Number of histogram bins used in each dimension.
weights : ndarray(T), optional, default=None
Sample weights; by default all samples have the same weight.
avoid_zero_count : bool, optional, default=True
Avoid zero counts by lifting all histogram elements to the
minimum value before computing the free energy. If False,
zero histogram counts would yield infinity in the free energy.
Returns
-------
x : ndarray(nbins, nbins)
The bins' x-coordinates in meshgrid format.
y : ndarray(nbins, nbins)
The bins' y-coordinates in meshgrid format.
z : ndarray(nbins, nbins)
Histogram counts in meshgrid format.
"""
z, xedge, yedge = _np.histogram2d(
xall, yall, bins=nbins, weights=weights)
x = 0.5 * (xedge[:-1] + xedge[1:])
y = 0.5 * (yedge[:-1] + yedge[1:])
if avoid_zero_count:
z = _np.maximum(z, _np.min(z[z.nonzero()]))
return x, y, z.T | [
"def",
"get_histogram",
"(",
"xall",
",",
"yall",
",",
"nbins",
"=",
"100",
",",
"weights",
"=",
"None",
",",
"avoid_zero_count",
"=",
"False",
")",
":",
"z",
",",
"xedge",
",",
"yedge",
"=",
"_np",
".",
"histogram2d",
"(",
"xall",
",",
"yall",
",",
... | Compute a two-dimensional histogram.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
nbins : int, optional, default=100
Number of histogram bins used in each dimension.
weights : ndarray(T), optional, default=None
Sample weights; by default all samples have the same weight.
avoid_zero_count : bool, optional, default=True
Avoid zero counts by lifting all histogram elements to the
minimum value before computing the free energy. If False,
zero histogram counts would yield infinity in the free energy.
Returns
-------
x : ndarray(nbins, nbins)
The bins' x-coordinates in meshgrid format.
y : ndarray(nbins, nbins)
The bins' y-coordinates in meshgrid format.
z : ndarray(nbins, nbins)
Histogram counts in meshgrid format. | [
"Compute",
"a",
"two",
"-",
"dimensional",
"histogram",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L118-L154 | train | 204,217 |
markovmodel/PyEMMA | pyemma/plots/plots2d.py | get_grid_data | def get_grid_data(xall, yall, zall, nbins=100, method='nearest'):
"""Interpolate unstructured two-dimensional data.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
zall : ndarray(T)
Sample z-coordinates.
nbins : int, optional, default=100
Number of histogram bins used in x/y-dimensions.
method : str, optional, default='nearest'
Assignment method; scipy.interpolate.griddata supports the
methods 'nearest', 'linear', and 'cubic'.
Returns
-------
x : ndarray(nbins, nbins)
The bins' x-coordinates in meshgrid format.
y : ndarray(nbins, nbins)
The bins' y-coordinates in meshgrid format.
z : ndarray(nbins, nbins)
Interpolated z-data in meshgrid format.
"""
from scipy.interpolate import griddata
x, y = _np.meshgrid(
_np.linspace(xall.min(), xall.max(), nbins),
_np.linspace(yall.min(), yall.max(), nbins),
indexing='ij')
z = griddata(
_np.hstack([xall[:,None], yall[:,None]]),
zall, (x, y), method=method)
return x, y, z | python | def get_grid_data(xall, yall, zall, nbins=100, method='nearest'):
"""Interpolate unstructured two-dimensional data.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
zall : ndarray(T)
Sample z-coordinates.
nbins : int, optional, default=100
Number of histogram bins used in x/y-dimensions.
method : str, optional, default='nearest'
Assignment method; scipy.interpolate.griddata supports the
methods 'nearest', 'linear', and 'cubic'.
Returns
-------
x : ndarray(nbins, nbins)
The bins' x-coordinates in meshgrid format.
y : ndarray(nbins, nbins)
The bins' y-coordinates in meshgrid format.
z : ndarray(nbins, nbins)
Interpolated z-data in meshgrid format.
"""
from scipy.interpolate import griddata
x, y = _np.meshgrid(
_np.linspace(xall.min(), xall.max(), nbins),
_np.linspace(yall.min(), yall.max(), nbins),
indexing='ij')
z = griddata(
_np.hstack([xall[:,None], yall[:,None]]),
zall, (x, y), method=method)
return x, y, z | [
"def",
"get_grid_data",
"(",
"xall",
",",
"yall",
",",
"zall",
",",
"nbins",
"=",
"100",
",",
"method",
"=",
"'nearest'",
")",
":",
"from",
"scipy",
".",
"interpolate",
"import",
"griddata",
"x",
",",
"y",
"=",
"_np",
".",
"meshgrid",
"(",
"_np",
"."... | Interpolate unstructured two-dimensional data.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
zall : ndarray(T)
Sample z-coordinates.
nbins : int, optional, default=100
Number of histogram bins used in x/y-dimensions.
method : str, optional, default='nearest'
Assignment method; scipy.interpolate.griddata supports the
methods 'nearest', 'linear', and 'cubic'.
Returns
-------
x : ndarray(nbins, nbins)
The bins' x-coordinates in meshgrid format.
y : ndarray(nbins, nbins)
The bins' y-coordinates in meshgrid format.
z : ndarray(nbins, nbins)
Interpolated z-data in meshgrid format. | [
"Interpolate",
"unstructured",
"two",
"-",
"dimensional",
"data",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L157-L192 | train | 204,218 |
markovmodel/PyEMMA | pyemma/plots/plots2d.py | _to_free_energy | def _to_free_energy(z, minener_zero=False):
"""Compute free energies from histogram counts.
Parameters
----------
z : ndarray(T)
Histogram counts.
minener_zero : boolean, optional, default=False
Shifts the energy minimum to zero.
Returns
-------
free_energy : ndarray(T)
The free energy values in units of kT.
"""
pi = _to_density(z)
free_energy = _np.inf * _np.ones(shape=z.shape)
nonzero = pi.nonzero()
free_energy[nonzero] = -_np.log(pi[nonzero])
if minener_zero:
free_energy[nonzero] -= _np.min(free_energy[nonzero])
return free_energy | python | def _to_free_energy(z, minener_zero=False):
"""Compute free energies from histogram counts.
Parameters
----------
z : ndarray(T)
Histogram counts.
minener_zero : boolean, optional, default=False
Shifts the energy minimum to zero.
Returns
-------
free_energy : ndarray(T)
The free energy values in units of kT.
"""
pi = _to_density(z)
free_energy = _np.inf * _np.ones(shape=z.shape)
nonzero = pi.nonzero()
free_energy[nonzero] = -_np.log(pi[nonzero])
if minener_zero:
free_energy[nonzero] -= _np.min(free_energy[nonzero])
return free_energy | [
"def",
"_to_free_energy",
"(",
"z",
",",
"minener_zero",
"=",
"False",
")",
":",
"pi",
"=",
"_to_density",
"(",
"z",
")",
"free_energy",
"=",
"_np",
".",
"inf",
"*",
"_np",
".",
"ones",
"(",
"shape",
"=",
"z",
".",
"shape",
")",
"nonzero",
"=",
"pi... | Compute free energies from histogram counts.
Parameters
----------
z : ndarray(T)
Histogram counts.
minener_zero : boolean, optional, default=False
Shifts the energy minimum to zero.
Returns
-------
free_energy : ndarray(T)
The free energy values in units of kT. | [
"Compute",
"free",
"energies",
"from",
"histogram",
"counts",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L207-L229 | train | 204,219 |
markovmodel/PyEMMA | pyemma/plots/plots2d.py | _prune_kwargs | def _prune_kwargs(kwargs):
"""Remove non-allowed keys from a kwargs dictionary.
Parameters
----------
kwargs : dict
Named parameters to prune.
"""
allowed_keys = [
'corner_mask', 'alpha', 'locator', 'extend', 'xunits',
'yunits', 'antialiased', 'nchunk', 'hatches', 'zorder']
ignored = [key for key in kwargs.keys() if key not in allowed_keys]
for key in ignored:
_warn(
'{}={} is not an allowed optional parameter and will'
' be ignored'.format(key, kwargs[key]))
kwargs.pop(key, None)
return kwargs | python | def _prune_kwargs(kwargs):
"""Remove non-allowed keys from a kwargs dictionary.
Parameters
----------
kwargs : dict
Named parameters to prune.
"""
allowed_keys = [
'corner_mask', 'alpha', 'locator', 'extend', 'xunits',
'yunits', 'antialiased', 'nchunk', 'hatches', 'zorder']
ignored = [key for key in kwargs.keys() if key not in allowed_keys]
for key in ignored:
_warn(
'{}={} is not an allowed optional parameter and will'
' be ignored'.format(key, kwargs[key]))
kwargs.pop(key, None)
return kwargs | [
"def",
"_prune_kwargs",
"(",
"kwargs",
")",
":",
"allowed_keys",
"=",
"[",
"'corner_mask'",
",",
"'alpha'",
",",
"'locator'",
",",
"'extend'",
",",
"'xunits'",
",",
"'yunits'",
",",
"'antialiased'",
",",
"'nchunk'",
",",
"'hatches'",
",",
"'zorder'",
"]",
"i... | Remove non-allowed keys from a kwargs dictionary.
Parameters
----------
kwargs : dict
Named parameters to prune. | [
"Remove",
"non",
"-",
"allowed",
"keys",
"from",
"a",
"kwargs",
"dictionary",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L232-L250 | train | 204,220 |
markovmodel/PyEMMA | pyemma/plots/plots2d.py | plot_map | def plot_map(
x, y, z, ax=None, cmap=None,
ncontours=100, vmin=None, vmax=None, levels=None,
cbar=True, cax=None, cbar_label=None,
cbar_orientation='vertical', norm=None,
**kwargs):
"""Plot a two-dimensional map from data on a grid.
Parameters
----------
x : ndarray(T)
Binned x-coordinates.
y : ndarray(T)
Binned y-coordinates.
z : ndarray(T)
Binned z-coordinates.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
cmap : matplotlib colormap, optional, default=None
The color map to use.
ncontours : int, optional, default=100
Number of contour levels.
vmin : float, optional, default=None
Lowest z-value to be plotted.
vmax : float, optional, default=None
Highest z-value to be plotted.
levels : iterable of float, optional, default=None
Contour levels to plot.
cbar : boolean, optional, default=True
Plot a color bar.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
cbar_label : str, optional, default=None
Colorbar label string; use None to suppress it.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
norm : matplotlib norm, optional, default=None
Use a norm when coloring the contour plot.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'.
"""
import matplotlib.pyplot as _plt
if ax is None:
fig, ax = _plt.subplots()
else:
fig = ax.get_figure()
mappable = ax.contourf(
x, y, z, ncontours, norm=norm,
vmin=vmin, vmax=vmax, cmap=cmap,
levels=levels, **_prune_kwargs(kwargs))
misc = dict(mappable=mappable)
if cbar_orientation not in ('horizontal', 'vertical'):
raise ValueError(
'cbar_orientation must be "horizontal" or "vertical"')
if cbar:
if cax is None:
cbar_ = fig.colorbar(
mappable, ax=ax, orientation=cbar_orientation)
else:
cbar_ = fig.colorbar(
mappable, cax=cax, orientation=cbar_orientation)
if cbar_label is not None:
cbar_.set_label(cbar_label)
misc.update(cbar=cbar_)
return fig, ax, misc | python | def plot_map(
x, y, z, ax=None, cmap=None,
ncontours=100, vmin=None, vmax=None, levels=None,
cbar=True, cax=None, cbar_label=None,
cbar_orientation='vertical', norm=None,
**kwargs):
"""Plot a two-dimensional map from data on a grid.
Parameters
----------
x : ndarray(T)
Binned x-coordinates.
y : ndarray(T)
Binned y-coordinates.
z : ndarray(T)
Binned z-coordinates.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
cmap : matplotlib colormap, optional, default=None
The color map to use.
ncontours : int, optional, default=100
Number of contour levels.
vmin : float, optional, default=None
Lowest z-value to be plotted.
vmax : float, optional, default=None
Highest z-value to be plotted.
levels : iterable of float, optional, default=None
Contour levels to plot.
cbar : boolean, optional, default=True
Plot a color bar.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
cbar_label : str, optional, default=None
Colorbar label string; use None to suppress it.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
norm : matplotlib norm, optional, default=None
Use a norm when coloring the contour plot.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'.
"""
import matplotlib.pyplot as _plt
if ax is None:
fig, ax = _plt.subplots()
else:
fig = ax.get_figure()
mappable = ax.contourf(
x, y, z, ncontours, norm=norm,
vmin=vmin, vmax=vmax, cmap=cmap,
levels=levels, **_prune_kwargs(kwargs))
misc = dict(mappable=mappable)
if cbar_orientation not in ('horizontal', 'vertical'):
raise ValueError(
'cbar_orientation must be "horizontal" or "vertical"')
if cbar:
if cax is None:
cbar_ = fig.colorbar(
mappable, ax=ax, orientation=cbar_orientation)
else:
cbar_ = fig.colorbar(
mappable, cax=cax, orientation=cbar_orientation)
if cbar_label is not None:
cbar_.set_label(cbar_label)
misc.update(cbar=cbar_)
return fig, ax, misc | [
"def",
"plot_map",
"(",
"x",
",",
"y",
",",
"z",
",",
"ax",
"=",
"None",
",",
"cmap",
"=",
"None",
",",
"ncontours",
"=",
"100",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
",",
"levels",
"=",
"None",
",",
"cbar",
"=",
"True",
",",
"c... | Plot a two-dimensional map from data on a grid.
Parameters
----------
x : ndarray(T)
Binned x-coordinates.
y : ndarray(T)
Binned y-coordinates.
z : ndarray(T)
Binned z-coordinates.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
cmap : matplotlib colormap, optional, default=None
The color map to use.
ncontours : int, optional, default=100
Number of contour levels.
vmin : float, optional, default=None
Lowest z-value to be plotted.
vmax : float, optional, default=None
Highest z-value to be plotted.
levels : iterable of float, optional, default=None
Contour levels to plot.
cbar : boolean, optional, default=True
Plot a color bar.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
cbar_label : str, optional, default=None
Colorbar label string; use None to suppress it.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
norm : matplotlib norm, optional, default=None
Use a norm when coloring the contour plot.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'. | [
"Plot",
"a",
"two",
"-",
"dimensional",
"map",
"from",
"data",
"on",
"a",
"grid",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L253-L377 | train | 204,221 |
markovmodel/PyEMMA | pyemma/plots/plots2d.py | plot_density | def plot_density(
xall, yall, ax=None, cmap=None,
ncontours=100, vmin=None, vmax=None, levels=None,
cbar=True, cax=None, cbar_label='sample density',
cbar_orientation='vertical', logscale=False, nbins=100,
weights=None, avoid_zero_count=False, **kwargs):
"""Plot a two-dimensional density map using a histogram of
scattered data.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
cmap : matplotlib colormap, optional, default=None
The color map to use.
ncontours : int, optional, default=100
Number of contour levels.
vmin : float, optional, default=None
Lowest z-value to be plotted.
vmax : float, optional, default=None
Highest z-value to be plotted.
levels : iterable of float, optional, default=None
Contour levels to plot.
cbar : boolean, optional, default=True
Plot a color bar.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
cbar_label : str, optional, default='sample density'
Colorbar label string; use None to suppress it.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
logscale : boolean, optional, default=False
Plot the z-values in logscale.
nbins : int, optional, default=100
Number of histogram bins used in each dimension.
weights : ndarray(T), optional, default=None
Sample weights; by default all samples have the same weight.
avoid_zero_count : bool, optional, default=True
Avoid zero counts by lifting all histogram elements to the
minimum value before computing the free energy. If False,
zero histogram counts would yield infinity in the free energy.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'.
"""
x, y, z = get_histogram(
xall, yall, nbins=nbins, weights=weights,
avoid_zero_count=avoid_zero_count)
pi = _to_density(z)
pi = _np.ma.masked_where(pi <= 0, pi)
if logscale:
from matplotlib.colors import LogNorm
norm = LogNorm(vmin=vmin, vmax=vmax)
if levels is None:
levels = _np.logspace(
_np.floor(_np.log10(pi.min())),
_np.ceil(_np.log10(pi.max())),
ncontours + 1)
else:
norm = None
fig, ax, misc = plot_map(
x, y, pi, ax=ax, cmap=cmap,
ncontours=ncontours, vmin=vmin, vmax=vmax, levels=levels,
cbar=cbar, cax=cax, cbar_label=cbar_label,
cbar_orientation=cbar_orientation, norm=norm,
**kwargs)
if cbar and logscale:
from matplotlib.ticker import LogLocator
misc['cbar'].set_ticks(LogLocator(base=10.0, subs=range(10)))
return fig, ax, misc | python | def plot_density(
xall, yall, ax=None, cmap=None,
ncontours=100, vmin=None, vmax=None, levels=None,
cbar=True, cax=None, cbar_label='sample density',
cbar_orientation='vertical', logscale=False, nbins=100,
weights=None, avoid_zero_count=False, **kwargs):
"""Plot a two-dimensional density map using a histogram of
scattered data.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
cmap : matplotlib colormap, optional, default=None
The color map to use.
ncontours : int, optional, default=100
Number of contour levels.
vmin : float, optional, default=None
Lowest z-value to be plotted.
vmax : float, optional, default=None
Highest z-value to be plotted.
levels : iterable of float, optional, default=None
Contour levels to plot.
cbar : boolean, optional, default=True
Plot a color bar.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
cbar_label : str, optional, default='sample density'
Colorbar label string; use None to suppress it.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
logscale : boolean, optional, default=False
Plot the z-values in logscale.
nbins : int, optional, default=100
Number of histogram bins used in each dimension.
weights : ndarray(T), optional, default=None
Sample weights; by default all samples have the same weight.
avoid_zero_count : bool, optional, default=True
Avoid zero counts by lifting all histogram elements to the
minimum value before computing the free energy. If False,
zero histogram counts would yield infinity in the free energy.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'.
"""
x, y, z = get_histogram(
xall, yall, nbins=nbins, weights=weights,
avoid_zero_count=avoid_zero_count)
pi = _to_density(z)
pi = _np.ma.masked_where(pi <= 0, pi)
if logscale:
from matplotlib.colors import LogNorm
norm = LogNorm(vmin=vmin, vmax=vmax)
if levels is None:
levels = _np.logspace(
_np.floor(_np.log10(pi.min())),
_np.ceil(_np.log10(pi.max())),
ncontours + 1)
else:
norm = None
fig, ax, misc = plot_map(
x, y, pi, ax=ax, cmap=cmap,
ncontours=ncontours, vmin=vmin, vmax=vmax, levels=levels,
cbar=cbar, cax=cax, cbar_label=cbar_label,
cbar_orientation=cbar_orientation, norm=norm,
**kwargs)
if cbar and logscale:
from matplotlib.ticker import LogLocator
misc['cbar'].set_ticks(LogLocator(base=10.0, subs=range(10)))
return fig, ax, misc | [
"def",
"plot_density",
"(",
"xall",
",",
"yall",
",",
"ax",
"=",
"None",
",",
"cmap",
"=",
"None",
",",
"ncontours",
"=",
"100",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
",",
"levels",
"=",
"None",
",",
"cbar",
"=",
"True",
",",
"cax",... | Plot a two-dimensional density map using a histogram of
scattered data.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
cmap : matplotlib colormap, optional, default=None
The color map to use.
ncontours : int, optional, default=100
Number of contour levels.
vmin : float, optional, default=None
Lowest z-value to be plotted.
vmax : float, optional, default=None
Highest z-value to be plotted.
levels : iterable of float, optional, default=None
Contour levels to plot.
cbar : boolean, optional, default=True
Plot a color bar.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
cbar_label : str, optional, default='sample density'
Colorbar label string; use None to suppress it.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
logscale : boolean, optional, default=False
Plot the z-values in logscale.
nbins : int, optional, default=100
Number of histogram bins used in each dimension.
weights : ndarray(T), optional, default=None
Sample weights; by default all samples have the same weight.
avoid_zero_count : bool, optional, default=True
Avoid zero counts by lifting all histogram elements to the
minimum value before computing the free energy. If False,
zero histogram counts would yield infinity in the free energy.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'. | [
"Plot",
"a",
"two",
"-",
"dimensional",
"density",
"map",
"using",
"a",
"histogram",
"of",
"scattered",
"data",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L387-L519 | train | 204,222 |
markovmodel/PyEMMA | pyemma/plots/plots2d.py | plot_free_energy | def plot_free_energy(
xall, yall, weights=None, ax=None, nbins=100, ncontours=100,
offset=-1, avoid_zero_count=False, minener_zero=True, kT=1.0,
vmin=None, vmax=None, cmap='nipy_spectral', cbar=True,
cbar_label='free energy / kT', cax=None, levels=None,
legacy=True, ncountours=None, cbar_orientation='vertical',
**kwargs):
"""Plot a two-dimensional free energy map using a histogram of
scattered data.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
weights : ndarray(T), optional, default=None
Sample weights; by default all samples have the same weight.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
Number of contour levels.
nbins : int, optional, default=100
Number of histogram bins used in each dimension.
ncontours : int, optional, default=100
Number of contour levels.
offset : float, optional, default=-1
Deprecated and ineffective; raises a ValueError
outside legacy mode.
avoid_zero_count : bool, optional, default=False
Avoid zero counts by lifting all histogram elements to the
minimum value before computing the free energy. If False,
zero histogram counts would yield infinity in the free energy.
minener_zero : boolean, optional, default=True
Shifts the energy minimum to zero.
kT : float, optional, default=1.0
The value of kT in the desired energy unit. By default,
energies are computed in kT (setting 1.0). If you want to
measure the energy in kJ/mol at 298 K, use kT=2.479 and
change the cbar_label accordingly.
vmin : float, optional, default=None
Lowest free energy value to be plotted.
(default=0.0 in legacy mode)
vmax : float, optional, default=None
Highest free energy value to be plotted.
cmap : matplotlib colormap, optional, default='nipy_spectral'
The color map to use.
cbar : boolean, optional, default=True
Plot a color bar.
cbar_label : str, optional, default='free energy / kT'
Colorbar label string; use None to suppress it.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
levels : iterable of float, optional, default=None
Contour levels to plot.
legacy : boolean, optional, default=True
Switch to use the function in legacy mode (deprecated).
ncountours : int, optional, default=None
Legacy parameter (typo) for number of contour levels.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'.
"""
if legacy:
_warn(
'Legacy mode is deprecated is will be removed in the'
' next major release. Until then use legacy=False',
DeprecationWarning)
cmap = _get_cmap(cmap)
if offset != -1:
_warn(
'Parameter offset is deprecated and will be ignored',
DeprecationWarning)
if ncountours is not None:
_warn(
'Parameter ncountours is deprecated;'
' use ncontours instead',
DeprecationWarning)
ncontours = ncountours
if vmin is None:
vmin = 0.0
else:
if offset != -1:
raise ValueError(
'Parameter offset is not allowed outside legacy mode')
if ncountours is not None:
raise ValueError(
'Parameter ncountours is not allowed outside'
' legacy mode; use ncontours instead')
x, y, z = get_histogram(
xall, yall, nbins=nbins, weights=weights,
avoid_zero_count=avoid_zero_count)
f = _to_free_energy(z, minener_zero=minener_zero) * kT
fig, ax, misc = plot_map(
x, y, f, ax=ax, cmap=cmap,
ncontours=ncontours, vmin=vmin, vmax=vmax, levels=levels,
cbar=cbar, cax=cax, cbar_label=cbar_label,
cbar_orientation=cbar_orientation, norm=None,
**kwargs)
if legacy:
return fig, ax
return fig, ax, misc | python | def plot_free_energy(
xall, yall, weights=None, ax=None, nbins=100, ncontours=100,
offset=-1, avoid_zero_count=False, minener_zero=True, kT=1.0,
vmin=None, vmax=None, cmap='nipy_spectral', cbar=True,
cbar_label='free energy / kT', cax=None, levels=None,
legacy=True, ncountours=None, cbar_orientation='vertical',
**kwargs):
"""Plot a two-dimensional free energy map using a histogram of
scattered data.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
weights : ndarray(T), optional, default=None
Sample weights; by default all samples have the same weight.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
Number of contour levels.
nbins : int, optional, default=100
Number of histogram bins used in each dimension.
ncontours : int, optional, default=100
Number of contour levels.
offset : float, optional, default=-1
Deprecated and ineffective; raises a ValueError
outside legacy mode.
avoid_zero_count : bool, optional, default=False
Avoid zero counts by lifting all histogram elements to the
minimum value before computing the free energy. If False,
zero histogram counts would yield infinity in the free energy.
minener_zero : boolean, optional, default=True
Shifts the energy minimum to zero.
kT : float, optional, default=1.0
The value of kT in the desired energy unit. By default,
energies are computed in kT (setting 1.0). If you want to
measure the energy in kJ/mol at 298 K, use kT=2.479 and
change the cbar_label accordingly.
vmin : float, optional, default=None
Lowest free energy value to be plotted.
(default=0.0 in legacy mode)
vmax : float, optional, default=None
Highest free energy value to be plotted.
cmap : matplotlib colormap, optional, default='nipy_spectral'
The color map to use.
cbar : boolean, optional, default=True
Plot a color bar.
cbar_label : str, optional, default='free energy / kT'
Colorbar label string; use None to suppress it.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
levels : iterable of float, optional, default=None
Contour levels to plot.
legacy : boolean, optional, default=True
Switch to use the function in legacy mode (deprecated).
ncountours : int, optional, default=None
Legacy parameter (typo) for number of contour levels.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'.
"""
if legacy:
_warn(
'Legacy mode is deprecated is will be removed in the'
' next major release. Until then use legacy=False',
DeprecationWarning)
cmap = _get_cmap(cmap)
if offset != -1:
_warn(
'Parameter offset is deprecated and will be ignored',
DeprecationWarning)
if ncountours is not None:
_warn(
'Parameter ncountours is deprecated;'
' use ncontours instead',
DeprecationWarning)
ncontours = ncountours
if vmin is None:
vmin = 0.0
else:
if offset != -1:
raise ValueError(
'Parameter offset is not allowed outside legacy mode')
if ncountours is not None:
raise ValueError(
'Parameter ncountours is not allowed outside'
' legacy mode; use ncontours instead')
x, y, z = get_histogram(
xall, yall, nbins=nbins, weights=weights,
avoid_zero_count=avoid_zero_count)
f = _to_free_energy(z, minener_zero=minener_zero) * kT
fig, ax, misc = plot_map(
x, y, f, ax=ax, cmap=cmap,
ncontours=ncontours, vmin=vmin, vmax=vmax, levels=levels,
cbar=cbar, cax=cax, cbar_label=cbar_label,
cbar_orientation=cbar_orientation, norm=None,
**kwargs)
if legacy:
return fig, ax
return fig, ax, misc | [
"def",
"plot_free_energy",
"(",
"xall",
",",
"yall",
",",
"weights",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"nbins",
"=",
"100",
",",
"ncontours",
"=",
"100",
",",
"offset",
"=",
"-",
"1",
",",
"avoid_zero_count",
"=",
"False",
",",
"minener_zero",
... | Plot a two-dimensional free energy map using a histogram of
scattered data.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
weights : ndarray(T), optional, default=None
Sample weights; by default all samples have the same weight.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
Number of contour levels.
nbins : int, optional, default=100
Number of histogram bins used in each dimension.
ncontours : int, optional, default=100
Number of contour levels.
offset : float, optional, default=-1
Deprecated and ineffective; raises a ValueError
outside legacy mode.
avoid_zero_count : bool, optional, default=False
Avoid zero counts by lifting all histogram elements to the
minimum value before computing the free energy. If False,
zero histogram counts would yield infinity in the free energy.
minener_zero : boolean, optional, default=True
Shifts the energy minimum to zero.
kT : float, optional, default=1.0
The value of kT in the desired energy unit. By default,
energies are computed in kT (setting 1.0). If you want to
measure the energy in kJ/mol at 298 K, use kT=2.479 and
change the cbar_label accordingly.
vmin : float, optional, default=None
Lowest free energy value to be plotted.
(default=0.0 in legacy mode)
vmax : float, optional, default=None
Highest free energy value to be plotted.
cmap : matplotlib colormap, optional, default='nipy_spectral'
The color map to use.
cbar : boolean, optional, default=True
Plot a color bar.
cbar_label : str, optional, default='free energy / kT'
Colorbar label string; use None to suppress it.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
levels : iterable of float, optional, default=None
Contour levels to plot.
legacy : boolean, optional, default=True
Switch to use the function in legacy mode (deprecated).
ncountours : int, optional, default=None
Legacy parameter (typo) for number of contour levels.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'. | [
"Plot",
"a",
"two",
"-",
"dimensional",
"free",
"energy",
"map",
"using",
"a",
"histogram",
"of",
"scattered",
"data",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L522-L683 | train | 204,223 |
markovmodel/PyEMMA | pyemma/plots/plots2d.py | plot_contour | def plot_contour(
xall, yall, zall, ax=None, cmap=None,
ncontours=100, vmin=None, vmax=None, levels=None,
cbar=True, cax=None, cbar_label=None,
cbar_orientation='vertical', norm=None, nbins=100,
method='nearest', mask=False, **kwargs):
"""Plot a two-dimensional contour map by interpolating
scattered data on a grid.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
zall : ndarray(T)
Sample z-coordinates.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
cmap : matplotlib colormap, optional, default=None
The color map to use.
ncontours : int, optional, default=100
Number of contour levels.
vmin : float, optional, default=None
Lowest z-value to be plotted.
vmax : float, optional, default=None
Highest z-value to be plotted.
levels : iterable of float, optional, default=None
Contour levels to plot; use legacy style calculation
if 'legacy'.
cbar : boolean, optional, default=True
Plot a color bar.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
cbar_label : str, optional, default=None
Colorbar label string; use None to suppress it.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
norm : matplotlib norm, optional, default=None
Use a norm when coloring the contour plot.
nbins : int, optional, default=100
Number of grid points used in each dimension.
method : str, optional, default='nearest'
Assignment method; scipy.interpolate.griddata supports the
methods 'nearest', 'linear', and 'cubic'.
mask : boolean, optional, default=False
Hide unsampled areas is True.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'.
"""
x, y, z = get_grid_data(
xall, yall, zall, nbins=nbins, method=method)
if vmin is None:
vmin = _np.min(zall[zall > -_np.inf])
if vmax is None:
vmax = _np.max(zall[zall < _np.inf])
if levels == 'legacy':
eps = (vmax - vmin) / float(ncontours)
levels = _np.linspace(vmin - eps, vmax + eps)
if mask:
_, _, counts = get_histogram(
xall, yall, nbins=nbins, weights=None,
avoid_zero_count=None)
z = _np.ma.masked_where(counts.T <= 0, z)
return plot_map(
x, y, z, ax=ax, cmap=cmap,
ncontours=ncontours, vmin=vmin, vmax=vmax, levels=levels,
cbar=cbar, cax=cax, cbar_label=cbar_label,
cbar_orientation=cbar_orientation, norm=norm,
def plot_contour(
        xall, yall, zall, ax=None, cmap=None,
        ncontours=100, vmin=None, vmax=None, levels=None,
        cbar=True, cax=None, cbar_label=None,
        cbar_orientation='vertical', norm=None, nbins=100,
        method='nearest', mask=False, **kwargs):
    """Plot a two-dimensional contour map by interpolating
    scattered data on a grid.

    Parameters
    ----------
    xall : ndarray(T)
        Sample x-coordinates.
    yall : ndarray(T)
        Sample y-coordinates.
    zall : ndarray(T)
        Sample z-coordinates.
    ax : matplotlib.Axes object, optional, default=None
        The ax to plot to; if ax=None, a new ax (and fig) is created.
    cmap : matplotlib colormap, optional, default=None
        The color map to use.
    ncontours : int, optional, default=100
        Number of contour levels.
    vmin : float, optional, default=None
        Lowest z-value to be plotted.
    vmax : float, optional, default=None
        Highest z-value to be plotted.
    levels : iterable of float or 'legacy', optional, default=None
        Contour levels to plot; use legacy style calculation
        if 'legacy'.
    cbar : boolean, optional, default=True
        Plot a color bar.
    cax : matplotlib.Axes object, optional, default=None
        Plot the colorbar into a custom axes object instead of
        stealing space from ax.
    cbar_label : str, optional, default=None
        Colorbar label string; use None to suppress it.
    cbar_orientation : str, optional, default='vertical'
        Colorbar orientation; choose 'vertical' or 'horizontal'.
    norm : matplotlib norm, optional, default=None
        Use a norm when coloring the contour plot.
    nbins : int, optional, default=100
        Number of grid points used in each dimension.
    method : str, optional, default='nearest'
        Assignment method; scipy.interpolate.griddata supports the
        methods 'nearest', 'linear', and 'cubic'.
    mask : boolean, optional, default=False
        Hide unsampled areas if True.

    Optional parameters for contourf (**kwargs)
    -------------------------------------------
    Any remaining keyword arguments (e.g. corner_mask, alpha, locator,
    extend, xunits/yunits, antialiased, nchunk, hatches, zorder) are
    forwarded to matplotlib's contourf; see the
    matplotlib.axes.Axes.contourf documentation for details.

    Returns
    -------
    fig : matplotlib.Figure object
        The figure in which the used ax resides.
    ax : matplotlib.Axes object
        The ax in which the map was plotted.
    misc : dict
        Contains a matplotlib.contour.QuadContourSet 'mappable' and,
        if requested, a matplotlib.Colorbar object 'cbar'.
    """
    x, y, z = get_grid_data(
        xall, yall, zall, nbins=nbins, method=method)
    # derive missing color limits from the finite samples only
    if vmin is None:
        vmin = _np.min(zall[zall > -_np.inf])
    if vmax is None:
        vmax = _np.max(zall[zall < _np.inf])
    # Guard the string comparison: levels may be a list/ndarray of floats,
    # for which `levels == 'legacy'` would be an elementwise comparison
    # and its truth value ambiguous.
    if isinstance(levels, str) and levels == 'legacy':
        # legacy behavior: pad the range by one contour step and let
        # numpy.linspace use its default number of samples
        eps = (vmax - vmin) / float(ncontours)
        levels = _np.linspace(vmin - eps, vmax + eps)
    if mask:
        _, _, counts = get_histogram(
            xall, yall, nbins=nbins, weights=None,
            avoid_zero_count=None)
        # hide grid cells that contain no samples
        z = _np.ma.masked_where(counts.T <= 0, z)
    return plot_map(
        x, y, z, ax=ax, cmap=cmap,
        ncontours=ncontours, vmin=vmin, vmax=vmax, levels=levels,
        cbar=cbar, cax=cax, cbar_label=cbar_label,
        cbar_orientation=cbar_orientation, norm=norm,
        **kwargs)
"def",
"plot_contour",
"(",
"xall",
",",
"yall",
",",
"zall",
",",
"ax",
"=",
"None",
",",
"cmap",
"=",
"None",
",",
"ncontours",
"=",
"100",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
",",
"levels",
"=",
"None",
",",
"cbar",
"=",
"True"... | Plot a two-dimensional contour map by interpolating
scattered data on a grid.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
zall : ndarray(T)
Sample z-coordinates.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
cmap : matplotlib colormap, optional, default=None
The color map to use.
ncontours : int, optional, default=100
Number of contour levels.
vmin : float, optional, default=None
Lowest z-value to be plotted.
vmax : float, optional, default=None
Highest z-value to be plotted.
levels : iterable of float, optional, default=None
Contour levels to plot; use legacy style calculation
if 'legacy'.
cbar : boolean, optional, default=True
Plot a color bar.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
cbar_label : str, optional, default=None
Colorbar label string; use None to suppress it.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
norm : matplotlib norm, optional, default=None
Use a norm when coloring the contour plot.
nbins : int, optional, default=100
Number of grid points used in each dimension.
method : str, optional, default='nearest'
Assignment method; scipy.interpolate.griddata supports the
methods 'nearest', 'linear', and 'cubic'.
mask : boolean, optional, default=False
Hide unsampled areas if True.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'. | [
"Plot",
"a",
"two",
"-",
"dimensional",
"contour",
"map",
"by",
"interpolating",
"scattered",
"data",
"on",
"a",
"grid",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L686-L815 | train | 204,224 |
markovmodel/PyEMMA | pyemma/plots/plots2d.py | plot_state_map | def plot_state_map(
xall, yall, states, ax=None, ncontours=100, cmap=None,
cbar=True, cax=None, cbar_label='state',
cbar_orientation='vertical', nbins=100, mask=True,
**kwargs):
"""Plot a two-dimensional contour map of states by interpolating
labels of scattered data on a grid.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
states : ndarray(T)
Sample state labels.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
cmap : matplotlib colormap, optional, default=None
The color map to use.
ncontours : int, optional, default=100
Number of contour levels.
cbar : boolean, optional, default=True
Plot a color bar.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
cbar_label : str, optional, default='state'
Colorbar label string; use None to suppress it.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
nbins : int, optional, default=100
Number of grid points used in each dimension.
mask : boolean, optional, default=True
Hide unsampled areas if True.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'.
Notes
-----
Please note that this plot is an approximative visualization:
the underlying matplotlib.countourf function smoothes transitions
between different values and, thus, coloring at state boundaries
might be imprecise.
"""
from matplotlib.cm import get_cmap
nstates = int(_np.max(states) + 1)
cmap_ = get_cmap(cmap, nstates)
fig, ax, misc = plot_contour(
xall, yall, states, ax=ax, cmap=cmap_,
ncontours=ncontours, vmin=None, vmax=None, levels=None,
cbar=cbar, cax=cax, cbar_label=cbar_label,
cbar_orientation=cbar_orientation, norm=None, nbins=nbins,
method='nearest', mask=mask, **kwargs)
if cbar:
cmin, cmax = misc['mappable'].get_clim()
f = (cmax - cmin) / float(nstates)
n = _np.arange(nstates)
misc['cbar'].set_ticks((n + 0.5) * f)
misc['cbar'].set_ticklabels(n)
def plot_state_map(
        xall, yall, states, ax=None, ncontours=100, cmap=None,
        cbar=True, cax=None, cbar_label='state',
        cbar_orientation='vertical', nbins=100, mask=True,
        **kwargs):
    """Plot a two-dimensional contour map of states by interpolating
    labels of scattered data on a grid.

    Parameters
    ----------
    xall : ndarray(T)
        Sample x-coordinates.
    yall : ndarray(T)
        Sample y-coordinates.
    states : ndarray(T)
        Sample state labels.
    ax : matplotlib.Axes object, optional, default=None
        The ax to plot to; if ax=None, a new ax (and fig) is created.
    cmap : matplotlib colormap, optional, default=None
        The color map to use.
    ncontours : int, optional, default=100
        Number of contour levels.
    cbar : boolean, optional, default=True
        Plot a color bar.
    cax : matplotlib.Axes object, optional, default=None
        Plot the colorbar into a custom axes object instead of
        stealing space from ax.
    cbar_label : str, optional, default='state'
        Colorbar label string; use None to suppress it.
    cbar_orientation : str, optional, default='vertical'
        Colorbar orientation; choose 'vertical' or 'horizontal'.
    nbins : int, optional, default=100
        Number of grid points used in each dimension.
    mask : boolean, optional, default=True
        Hide unsampled areas if True.

    Optional parameters for contourf (**kwargs)
    -------------------------------------------
    Any remaining keyword arguments (e.g. corner_mask, alpha, locator,
    extend, xunits/yunits, antialiased, nchunk, hatches, zorder) are
    forwarded to matplotlib's contourf; see the
    matplotlib.axes.Axes.contourf documentation for details.

    Returns
    -------
    fig : matplotlib.Figure object
        The figure in which the used ax resides.
    ax : matplotlib.Axes object
        The ax in which the map was plotted.
    misc : dict
        Contains a matplotlib.contour.QuadContourSet 'mappable' and,
        if requested, a matplotlib.Colorbar object 'cbar'.

    Notes
    -----
    Please note that this plot is an approximative visualization:
    the underlying matplotlib.countourf function smoothes transitions
    between different values and, thus, coloring at state boundaries
    might be imprecise.
    """
    from matplotlib.cm import get_cmap
    # one discrete color per state; labels are assumed to be 0..nstates-1
    nstates = int(_np.max(states) + 1)
    cmap_ = get_cmap(cmap, nstates)
    fig, ax, misc = plot_contour(
        xall, yall, states, ax=ax, cmap=cmap_,
        ncontours=ncontours, vmin=None, vmax=None, levels=None,
        cbar=cbar, cax=cax, cbar_label=cbar_label,
        cbar_orientation=cbar_orientation, norm=None, nbins=nbins,
        method='nearest', mask=mask, **kwargs)
    if cbar:
        # place one tick in the middle of each of the nstates color bands
        cmin, cmax = misc['mappable'].get_clim()
        f = (cmax - cmin) / float(nstates)
        n = _np.arange(nstates)
        misc['cbar'].set_ticks((n + 0.5) * f)
        misc['cbar'].set_ticklabels(n)
    return fig, ax, misc
"def",
"plot_state_map",
"(",
"xall",
",",
"yall",
",",
"states",
",",
"ax",
"=",
"None",
",",
"ncontours",
"=",
"100",
",",
"cmap",
"=",
"None",
",",
"cbar",
"=",
"True",
",",
"cax",
"=",
"None",
",",
"cbar_label",
"=",
"'state'",
",",
"cbar_orienta... | Plot a two-dimensional contour map of states by interpolating
labels of scattered data on a grid.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
states : ndarray(T)
Sample state labels.
ax : matplotlib.Axes object, optional, default=None
The ax to plot to; if ax=None, a new ax (and fig) is created.
cmap : matplotlib colormap, optional, default=None
The color map to use.
ncontours : int, optional, default=100
Number of contour levels.
cbar : boolean, optional, default=True
Plot a color bar.
cax : matplotlib.Axes object, optional, default=None
Plot the colorbar into a custom axes object instead of
stealing space from ax.
cbar_label : str, optional, default='state'
Colorbar label string; use None to suppress it.
cbar_orientation : str, optional, default='vertical'
Colorbar orientation; choose 'vertical' or 'horizontal'.
nbins : int, optional, default=100
Number of grid points used in each dimension.
mask : boolean, optional, default=True
Hide unsampled areas if True.
Optional parameters for contourf (**kwargs)
-------------------------------------------
corner_mask : boolean, optional
Enable/disable corner masking, which only has an effect if
z is a masked array. If False, any quad touching a masked
point is masked out. If True, only the triangular corners
of quads nearest those points are always masked out, other
triangular corners comprising three unmasked points are
contoured as usual.
Defaults to rcParams['contour.corner_mask'], which
defaults to True.
alpha : float
The alpha blending value.
locator : [ None | ticker.Locator subclass ]
If locator is None, the default MaxNLocator is used. The
locator is used to determine the contour levels if they are
not given explicitly via the levels argument.
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ]
Unless this is ‘neither’, contour levels are automatically
added to one or both ends of the range so that all data are
included. These added ranges are then mapped to the special
colormap values which default to the ends of the
colormap range, but can be set via
matplotlib.colors.Colormap.set_under() and
matplotlib.colors.Colormap.set_over() methods.
xunits, yunits : [ None | registered units ]
Override axis units by specifying an instance of a
matplotlib.units.ConversionInterface.
antialiased : boolean, optional
Enable antialiasing, overriding the defaults. For filled
contours, the default is True. For line contours, it is
taken from rcParams[‘lines.antialiased’].
nchunk : [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive
integer to divide the domain into subdomains of nchunk by
nchunk quads. Chunking reduces the maximum length of polygons
generated by the contouring algorithm which reduces the
rendering workload passed on to the backend and also requires
slightly less RAM. It can however introduce rendering
artifacts at chunk boundaries depending on the backend, the
antialiased flag and value of alpha.
hatches :
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour. Hatching
is supported in the PostScript, PDF, SVG and Agg backends
only.
zorder : float
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
The ax in which the map was plotted.
misc : dict
Contains a matplotlib.contour.QuadContourSet 'mappable' and,
if requested, a matplotlib.Colorbar object 'cbar'.
Notes
-----
Please note that this plot is an approximative visualization:
the underlying matplotlib.countourf function smoothes transitions
between different values and, thus, coloring at state boundaries
might be imprecise. | [
"Plot",
"a",
"two",
"-",
"dimensional",
"contour",
"map",
"of",
"states",
"by",
"interpolating",
"labels",
"of",
"scattered",
"data",
"on",
"a",
"grid",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L818-L937 | train | 204,225 |
markovmodel/PyEMMA | pyemma/coordinates/data/util/reader_utils.py | single_traj_from_n_files | def single_traj_from_n_files(file_list, top):
""" Creates a single trajectory object from a list of files
"""
traj = None
for ff in file_list:
if traj is None:
traj = md.load(ff, top=top)
else:
traj = traj.join(md.load(ff, top=top))
def single_traj_from_n_files(file_list, top):
    """Concatenate several trajectory files into one trajectory object.

    Parameters
    ----------
    file_list : list of str
        Names of the trajectory files to load, in the desired order.
    top : topology
        Topology used to interpret each trajectory file.

    Returns
    -------
    mdtraj.Trajectory or None
        The joined trajectory; None when file_list is empty.
    """
    joined = None
    for filename in file_list:
        piece = md.load(filename, top=top)
        joined = piece if joined is None else joined.join(piece)
    return joined
"def",
"single_traj_from_n_files",
"(",
"file_list",
",",
"top",
")",
":",
"traj",
"=",
"None",
"for",
"ff",
"in",
"file_list",
":",
"if",
"traj",
"is",
"None",
":",
"traj",
"=",
"md",
".",
"load",
"(",
"ff",
",",
"top",
"=",
"top",
")",
"else",
":... | Creates a single trajectory object from a list of files | [
"Creates",
"a",
"single",
"trajectory",
"object",
"from",
"a",
"list",
"of",
"files"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/util/reader_utils.py#L120-L131 | train | 204,226 |
markovmodel/PyEMMA | pyemma/coordinates/pipelines.py | Pipeline.add_element | def add_element(self, e):
r""" Appends a pipeline stage.
Appends the given element to the end of the current chain.
"""
if not isinstance(e, Iterable):
raise TypeError("given element {} is not iterable in terms of "
"PyEMMAs coordinate pipeline.".format(e))
# only if we have more than one element
if not e.is_reader and len(self._chain) >= 1:
data_producer = self._chain[-1]
# avoid calling the setter of StreamingTransformer.data_producer, since this
# triggers a re-parametrization even on readers (where it makes not sense)
e._data_producer = data_producer
e.chunksize = self.chunksize
def add_element(self, e):
    r"""Append a pipeline stage.

    The given element is attached to the end of the current
    processing chain.
    """
    if not isinstance(e, Iterable):
        raise TypeError("given element {} is not iterable in terms of "
                        "PyEMMAs coordinate pipeline.".format(e))
    chain = self._chain
    # wire the new stage to the current tail -- but only when there is a
    # tail and the new stage is not itself a reader
    if chain and not e.is_reader:
        # assign the private attribute directly: the data_producer setter
        # would trigger a re-parametrization, which makes no sense here
        e._data_producer = chain[-1]
    e.chunksize = self.chunksize
    chain.append(e)
"def",
"add_element",
"(",
"self",
",",
"e",
")",
":",
"if",
"not",
"isinstance",
"(",
"e",
",",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"\"given element {} is not iterable in terms of \"",
"\"PyEMMAs coordinate pipeline.\"",
".",
"format",
"(",
"e",
")",... | r""" Appends a pipeline stage.
Appends the given element to the end of the current chain. | [
"r",
"Appends",
"a",
"pipeline",
"stage",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/pipelines.py#L74-L90 | train | 204,227 |
markovmodel/PyEMMA | pyemma/coordinates/pipelines.py | Pipeline.set_element | def set_element(self, index, e):
r""" Replaces a pipeline stage.
Replace an element in chain and return replaced element.
"""
if index > len(self._chain):
raise IndexError("tried to access element %i, but chain has only %i"
" elements" % (index, len(self._chain)))
if type(index) is not int:
raise ValueError(
"index is not a integer but '%s'" % str(type(index)))
# if e is already in chain, we're finished
if self._chain[index] is e:
return
# remove current index and its data producer
replaced = self._chain.pop(index)
if not replaced.is_reader:
replaced.data_producer = None
self._chain.insert(index, e)
if index == 0:
e.data_producer = e
else:
# rewire data_producers
e.data_producer = self._chain[index - 1]
# if e has a successive element, need to set data_producer
try:
successor = self._chain[index + 1]
successor.data_producer = e
except IndexError:
pass
# set data_producer for predecessor of e
# self._chain[max(0, index - 1)].data_producer = self._chain[index]
# since data producer of element after insertion changed, reset its status
# TODO: make parameterized a property?
self._chain[index]._estimated = False
def set_element(self, index, e):
    r"""Replace a pipeline stage.

    Replace the element at position *index* in the chain by *e*, rewire
    the data producers of the affected neighbours and return the
    replaced element.

    Parameters
    ----------
    index : int
        Position of the element to replace.
    e : pipeline element
        The new element.

    Returns
    -------
    replaced : pipeline element or None
        The element that was replaced, or None when *e* already sits at
        the given position.

    Raises
    ------
    ValueError
        If *index* is not an integer.
    IndexError
        If *index* is out of range.
    """
    # Check the type first: comparing a non-int against an int below
    # would raise an unhelpful TypeError on Python 3 instead of the
    # intended ValueError.
    if type(index) is not int:
        raise ValueError(
            "index is not a integer but '%s'" % str(type(index)))
    # Use >= here: index == len(self._chain) is out of range as well and
    # would otherwise die inside list.pop with a generic message.
    if index >= len(self._chain):
        raise IndexError("tried to access element %i, but chain has only %i"
                         " elements" % (index, len(self._chain)))

    # if e is already in chain, we're finished
    if self._chain[index] is e:
        return

    # remove current index and its data producer
    replaced = self._chain.pop(index)
    if not replaced.is_reader:
        replaced.data_producer = None

    self._chain.insert(index, e)

    if index == 0:
        # the head of the chain produces its own data
        e.data_producer = e
    else:
        # rewire data_producers
        e.data_producer = self._chain[index - 1]

    # if e has a successive element, need to set its data_producer
    try:
        successor = self._chain[index + 1]
        successor.data_producer = e
    except IndexError:
        pass

    # the data producer of the inserted element changed, so its previous
    # estimation result (if any) is stale
    self._chain[index]._estimated = False

    return replaced
"def",
"set_element",
"(",
"self",
",",
"index",
",",
"e",
")",
":",
"if",
"index",
">",
"len",
"(",
"self",
".",
"_chain",
")",
":",
"raise",
"IndexError",
"(",
"\"tried to access element %i, but chain has only %i\"",
"\" elements\"",
"%",
"(",
"index",
",",
... | r""" Replaces a pipeline stage.
Replace an element in chain and return replaced element. | [
"r",
"Replaces",
"a",
"pipeline",
"stage",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/pipelines.py#L92-L135 | train | 204,228 |
markovmodel/PyEMMA | pyemma/coordinates/pipelines.py | Pipeline.parametrize | def parametrize(self):
r"""
Reads all data and discretizes it into discrete trajectories.
"""
for element in self._chain:
if not element.is_reader and not element._estimated:
element.estimate(element.data_producer, stride=self.param_stride, chunksize=self.chunksize)
def parametrize(self):
    r"""Read all data and discretize it into discrete trajectories."""
    for stage in self._chain:
        # readers need no estimation; skip stages that are already done
        if stage.is_reader or stage._estimated:
            continue
        stage.estimate(stage.data_producer, stride=self.param_stride,
                       chunksize=self.chunksize)
    self._estimated = True
"def",
"parametrize",
"(",
"self",
")",
":",
"for",
"element",
"in",
"self",
".",
"_chain",
":",
"if",
"not",
"element",
".",
"is_reader",
"and",
"not",
"element",
".",
"_estimated",
":",
"element",
".",
"estimate",
"(",
"element",
".",
"data_producer",
... | r"""
Reads all data and discretizes it into discrete trajectories. | [
"r",
"Reads",
"all",
"data",
"and",
"discretizes",
"it",
"into",
"discrete",
"trajectories",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/pipelines.py#L138-L146 | train | 204,229 |
markovmodel/PyEMMA | pyemma/coordinates/pipelines.py | Pipeline._is_estimated | def _is_estimated(self):
r"""
Iterates through the pipeline elements and checks if every element is parametrized.
"""
result = self._estimated
for el in self._chain:
if not el.is_reader:
result &= el._estimated
return result | python | def _is_estimated(self):
r"""
Iterates through the pipeline elements and checks if every element is parametrized.
"""
result = self._estimated
for el in self._chain:
if not el.is_reader:
result &= el._estimated
return result | [
"def",
"_is_estimated",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"_estimated",
"for",
"el",
"in",
"self",
".",
"_chain",
":",
"if",
"not",
"el",
".",
"is_reader",
":",
"result",
"&=",
"el",
".",
"_estimated",
"return",
"result"
] | r"""
Iterates through the pipeline elements and checks if every element is parametrized. | [
"r",
"Iterates",
"through",
"the",
"pipeline",
"elements",
"and",
"checks",
"if",
"every",
"element",
"is",
"parametrized",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/pipelines.py#L148-L156 | train | 204,230 |
markovmodel/PyEMMA | pyemma/coordinates/pipelines.py | Discretizer.dtrajs | def dtrajs(self):
""" get discrete trajectories """
if not self._estimated:
self.logger.info("not yet parametrized, running now.")
self.parametrize()
def dtrajs(self):
    """ get discrete trajectories """
    if self._estimated:
        return self._chain[-1].dtrajs
    # lazily parametrize the pipeline before handing out results
    self.logger.info("not yet parametrized, running now.")
    self.parametrize()
    return self._chain[-1].dtrajs
"def",
"dtrajs",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_estimated",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"not yet parametrized, running now.\"",
")",
"self",
".",
"parametrize",
"(",
")",
"return",
"self",
".",
"_chain",
"[",
"-",
... | get discrete trajectories | [
"get",
"discrete",
"trajectories"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/pipelines.py#L216-L221 | train | 204,231 |
markovmodel/PyEMMA | pyemma/coordinates/pipelines.py | Discretizer.save_dtrajs | def save_dtrajs(self, prefix='', output_dir='.',
output_format='ascii', extension='.dtraj'):
r"""Saves calculated discrete trajectories. Filenames are taken from
given reader. If data comes from memory dtrajs are written to a default
filename.
Parameters
----------
prefix : str
prepend prefix to filenames.
output_dir : str (optional)
save files to this directory. Defaults to current working directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
file extension to append (eg. '.itraj')
"""
clustering = self._chain[-1]
reader = self._chain[0]
from pyemma.coordinates.clustering.interface import AbstractClustering
assert isinstance(clustering, AbstractClustering)
trajfiles = None
if isinstance(reader, FeatureReader):
trajfiles = reader.filenames
clustering.save_dtrajs(
trajfiles, prefix, output_dir, output_format, extension) | python | def save_dtrajs(self, prefix='', output_dir='.',
output_format='ascii', extension='.dtraj'):
r"""Saves calculated discrete trajectories. Filenames are taken from
given reader. If data comes from memory dtrajs are written to a default
filename.
Parameters
----------
prefix : str
prepend prefix to filenames.
output_dir : str (optional)
save files to this directory. Defaults to current working directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
file extension to append (eg. '.itraj')
"""
clustering = self._chain[-1]
reader = self._chain[0]
from pyemma.coordinates.clustering.interface import AbstractClustering
assert isinstance(clustering, AbstractClustering)
trajfiles = None
if isinstance(reader, FeatureReader):
trajfiles = reader.filenames
clustering.save_dtrajs(
trajfiles, prefix, output_dir, output_format, extension) | [
"def",
"save_dtrajs",
"(",
"self",
",",
"prefix",
"=",
"''",
",",
"output_dir",
"=",
"'.'",
",",
"output_format",
"=",
"'ascii'",
",",
"extension",
"=",
"'.dtraj'",
")",
":",
"clustering",
"=",
"self",
".",
"_chain",
"[",
"-",
"1",
"]",
"reader",
"=",
... | r"""Saves calculated discrete trajectories. Filenames are taken from
given reader. If data comes from memory dtrajs are written to a default
filename.
Parameters
----------
prefix : str
prepend prefix to filenames.
output_dir : str (optional)
save files to this directory. Defaults to current working directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
file extension to append (eg. '.itraj') | [
"r",
"Saves",
"calculated",
"discrete",
"trajectories",
".",
"Filenames",
"are",
"taken",
"from",
"given",
"reader",
".",
"If",
"data",
"comes",
"from",
"memory",
"dtrajs",
"are",
"written",
"to",
"a",
"default",
"filename",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/pipelines.py#L223-L254 | train | 204,232 |
markovmodel/PyEMMA | pyemma/_base/serialization/serialization.py | SerializableMixIn.save | def save(self, file_name, model_name='default', overwrite=False, save_streaming_chain=False):
r""" saves the current state of this object to given file and name.
Parameters
-----------
file_name: str
path to desired output file
model_name: str, default='default'
creates a group named 'model_name' in the given file, which will contain all of the data.
If the name already exists, and overwrite is False (default) will raise a RuntimeError.
overwrite: bool, default=False
Should overwrite existing model names?
save_streaming_chain : boolean, default=False
if True, the data_producer(s) of this object will also be saved in the given file.
Examples
--------
>>> import pyemma, numpy as np
>>> from pyemma.util.contexts import named_temporary_file
>>> m = pyemma.msm.MSM(P=np.array([[0.1, 0.9], [0.9, 0.1]]))
>>> with named_temporary_file() as file: # doctest: +SKIP
... m.save(file, 'simple') # doctest: +SKIP
... inst_restored = pyemma.load(file, 'simple') # doctest: +SKIP
>>> np.testing.assert_equal(m.P, inst_restored.P) # doctest: +SKIP
"""
from pyemma._base.serialization.h5file import H5File
try:
with H5File(file_name=file_name, mode='a') as f:
f.add_serializable(model_name, obj=self, overwrite=overwrite, save_streaming_chain=save_streaming_chain)
except Exception as e:
msg = ('During saving the object {obj}") '
'the following error occurred: {error}'.format(obj=self, error=e))
if isinstance(self, Loggable):
self.logger.exception(msg)
else:
logger.exception(msg)
raise | python | def save(self, file_name, model_name='default', overwrite=False, save_streaming_chain=False):
r""" saves the current state of this object to given file and name.
Parameters
-----------
file_name: str
path to desired output file
model_name: str, default='default'
creates a group named 'model_name' in the given file, which will contain all of the data.
If the name already exists, and overwrite is False (default) will raise a RuntimeError.
overwrite: bool, default=False
Should overwrite existing model names?
save_streaming_chain : boolean, default=False
if True, the data_producer(s) of this object will also be saved in the given file.
Examples
--------
>>> import pyemma, numpy as np
>>> from pyemma.util.contexts import named_temporary_file
>>> m = pyemma.msm.MSM(P=np.array([[0.1, 0.9], [0.9, 0.1]]))
>>> with named_temporary_file() as file: # doctest: +SKIP
... m.save(file, 'simple') # doctest: +SKIP
... inst_restored = pyemma.load(file, 'simple') # doctest: +SKIP
>>> np.testing.assert_equal(m.P, inst_restored.P) # doctest: +SKIP
"""
from pyemma._base.serialization.h5file import H5File
try:
with H5File(file_name=file_name, mode='a') as f:
f.add_serializable(model_name, obj=self, overwrite=overwrite, save_streaming_chain=save_streaming_chain)
except Exception as e:
msg = ('During saving the object {obj}") '
'the following error occurred: {error}'.format(obj=self, error=e))
if isinstance(self, Loggable):
self.logger.exception(msg)
else:
logger.exception(msg)
raise | [
"def",
"save",
"(",
"self",
",",
"file_name",
",",
"model_name",
"=",
"'default'",
",",
"overwrite",
"=",
"False",
",",
"save_streaming_chain",
"=",
"False",
")",
":",
"from",
"pyemma",
".",
"_base",
".",
"serialization",
".",
"h5file",
"import",
"H5File",
... | r""" saves the current state of this object to given file and name.
Parameters
-----------
file_name: str
path to desired output file
model_name: str, default='default'
creates a group named 'model_name' in the given file, which will contain all of the data.
If the name already exists, and overwrite is False (default) will raise a RuntimeError.
overwrite: bool, default=False
Should overwrite existing model names?
save_streaming_chain : boolean, default=False
if True, the data_producer(s) of this object will also be saved in the given file.
Examples
--------
>>> import pyemma, numpy as np
>>> from pyemma.util.contexts import named_temporary_file
>>> m = pyemma.msm.MSM(P=np.array([[0.1, 0.9], [0.9, 0.1]]))
>>> with named_temporary_file() as file: # doctest: +SKIP
... m.save(file, 'simple') # doctest: +SKIP
... inst_restored = pyemma.load(file, 'simple') # doctest: +SKIP
>>> np.testing.assert_equal(m.P, inst_restored.P) # doctest: +SKIP | [
"r",
"saves",
"the",
"current",
"state",
"of",
"this",
"object",
"to",
"given",
"file",
"and",
"name",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_base/serialization/serialization.py#L212-L249 | train | 204,233 |
markovmodel/PyEMMA | pyemma/_base/serialization/serialization.py | SerializableMixIn.load | def load(cls, file_name, model_name='default'):
""" Loads a previously saved PyEMMA object from disk.
Parameters
----------
file_name : str or file like object (has to provide read method).
The file like object tried to be read for a serialized object.
model_name: str, default='default'
if multiple models are contained in the file, these can be accessed by
their name. Use :func:`pyemma.list_models` to get a representation of all stored models.
Returns
-------
obj : the de-serialized object
"""
from .h5file import H5File
with H5File(file_name, model_name=model_name, mode='r') as f:
return f.model | python | def load(cls, file_name, model_name='default'):
""" Loads a previously saved PyEMMA object from disk.
Parameters
----------
file_name : str or file like object (has to provide read method).
The file like object tried to be read for a serialized object.
model_name: str, default='default'
if multiple models are contained in the file, these can be accessed by
their name. Use :func:`pyemma.list_models` to get a representation of all stored models.
Returns
-------
obj : the de-serialized object
"""
from .h5file import H5File
with H5File(file_name, model_name=model_name, mode='r') as f:
return f.model | [
"def",
"load",
"(",
"cls",
",",
"file_name",
",",
"model_name",
"=",
"'default'",
")",
":",
"from",
".",
"h5file",
"import",
"H5File",
"with",
"H5File",
"(",
"file_name",
",",
"model_name",
"=",
"model_name",
",",
"mode",
"=",
"'r'",
")",
"as",
"f",
":... | Loads a previously saved PyEMMA object from disk.
Parameters
----------
file_name : str or file like object (has to provide read method).
The file like object tried to be read for a serialized object.
model_name: str, default='default'
if multiple models are contained in the file, these can be accessed by
their name. Use :func:`pyemma.list_models` to get a representation of all stored models.
Returns
-------
obj : the de-serialized object | [
"Loads",
"a",
"previously",
"saved",
"PyEMMA",
"object",
"from",
"disk",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_base/serialization/serialization.py#L252-L269 | train | 204,234 |
markovmodel/PyEMMA | pyemma/_base/serialization/serialization.py | SerializableMixIn._get_version_for_class_from_state | def _get_version_for_class_from_state(state, klass):
""" retrieves the version of the current klass from the state mapping from old locations to new ones. """
# klass may have been renamed, so we have to look this up in the class rename registry.
names = [_importable_name(klass)]
# lookup old names, handled by current klass.
from .util import class_rename_registry
names.extend(class_rename_registry.old_handled_by(klass))
for n in names:
try:
return state['class_tree_versions'][n]
except KeyError:
continue
# if we did not find a suitable version number return infinity.
if _debug:
logger.debug('unable to obtain a __serialize_version for class %s', klass)
return float('inf') | python | def _get_version_for_class_from_state(state, klass):
""" retrieves the version of the current klass from the state mapping from old locations to new ones. """
# klass may have been renamed, so we have to look this up in the class rename registry.
names = [_importable_name(klass)]
# lookup old names, handled by current klass.
from .util import class_rename_registry
names.extend(class_rename_registry.old_handled_by(klass))
for n in names:
try:
return state['class_tree_versions'][n]
except KeyError:
continue
# if we did not find a suitable version number return infinity.
if _debug:
logger.debug('unable to obtain a __serialize_version for class %s', klass)
return float('inf') | [
"def",
"_get_version_for_class_from_state",
"(",
"state",
",",
"klass",
")",
":",
"# klass may have been renamed, so we have to look this up in the class rename registry.",
"names",
"=",
"[",
"_importable_name",
"(",
"klass",
")",
"]",
"# lookup old names, handled by current klass.... | retrieves the version of the current klass from the state mapping from old locations to new ones. | [
"retrieves",
"the",
"version",
"of",
"the",
"current",
"klass",
"from",
"the",
"state",
"mapping",
"from",
"old",
"locations",
"to",
"new",
"ones",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_base/serialization/serialization.py#L336-L351 | train | 204,235 |
markovmodel/PyEMMA | pyemma/coordinates/estimation/covariance.py | LaggedCovariance.partial_fit | def partial_fit(self, X):
""" incrementally update the estimates
Parameters
----------
X: array, list of arrays, PyEMMA reader
input data.
"""
from pyemma.coordinates import source
self._estimate(source(X), partial_fit=True)
self._estimated = True
return self | python | def partial_fit(self, X):
""" incrementally update the estimates
Parameters
----------
X: array, list of arrays, PyEMMA reader
input data.
"""
from pyemma.coordinates import source
self._estimate(source(X), partial_fit=True)
self._estimated = True
return self | [
"def",
"partial_fit",
"(",
"self",
",",
"X",
")",
":",
"from",
"pyemma",
".",
"coordinates",
"import",
"source",
"self",
".",
"_estimate",
"(",
"source",
"(",
"X",
")",
",",
"partial_fit",
"=",
"True",
")",
"self",
".",
"_estimated",
"=",
"True",
"retu... | incrementally update the estimates
Parameters
----------
X: array, list of arrays, PyEMMA reader
input data. | [
"incrementally",
"update",
"the",
"estimates"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/estimation/covariance.py#L248-L261 | train | 204,236 |
markovmodel/PyEMMA | pyemma/coordinates/estimation/covariance.py | LaggedCovariance.C00_ | def C00_(self):
""" Instantaneous covariance matrix """
self._check_estimated()
return self._rc.cov_XX(bessel=self.bessel) | python | def C00_(self):
""" Instantaneous covariance matrix """
self._check_estimated()
return self._rc.cov_XX(bessel=self.bessel) | [
"def",
"C00_",
"(",
"self",
")",
":",
"self",
".",
"_check_estimated",
"(",
")",
"return",
"self",
".",
"_rc",
".",
"cov_XX",
"(",
"bessel",
"=",
"self",
".",
"bessel",
")"
] | Instantaneous covariance matrix | [
"Instantaneous",
"covariance",
"matrix"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/estimation/covariance.py#L280-L283 | train | 204,237 |
markovmodel/PyEMMA | pyemma/coordinates/estimation/covariance.py | LaggedCovariance.C0t_ | def C0t_(self):
""" Time-lagged covariance matrix """
self._check_estimated()
return self._rc.cov_XY(bessel=self.bessel) | python | def C0t_(self):
""" Time-lagged covariance matrix """
self._check_estimated()
return self._rc.cov_XY(bessel=self.bessel) | [
"def",
"C0t_",
"(",
"self",
")",
":",
"self",
".",
"_check_estimated",
"(",
")",
"return",
"self",
".",
"_rc",
".",
"cov_XY",
"(",
"bessel",
"=",
"self",
".",
"bessel",
")"
] | Time-lagged covariance matrix | [
"Time",
"-",
"lagged",
"covariance",
"matrix"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/estimation/covariance.py#L292-L295 | train | 204,238 |
markovmodel/PyEMMA | pyemma/coordinates/estimation/covariance.py | LaggedCovariance.Ctt_ | def Ctt_(self):
""" Covariance matrix of the time shifted data"""
self._check_estimated()
return self._rc.cov_YY(bessel=self.bessel) | python | def Ctt_(self):
""" Covariance matrix of the time shifted data"""
self._check_estimated()
return self._rc.cov_YY(bessel=self.bessel) | [
"def",
"Ctt_",
"(",
"self",
")",
":",
"self",
".",
"_check_estimated",
"(",
")",
"return",
"self",
".",
"_rc",
".",
"cov_YY",
"(",
"bessel",
"=",
"self",
".",
"bessel",
")"
] | Covariance matrix of the time shifted data | [
"Covariance",
"matrix",
"of",
"the",
"time",
"shifted",
"data"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/estimation/covariance.py#L298-L301 | train | 204,239 |
markovmodel/PyEMMA | pyemma/thermo/api.py | tram | def tram(
ttrajs, dtrajs, bias, lag, unbiased_state=None,
count_mode='sliding', connectivity='post_hoc_RE',
maxiter=10000, maxerr=1.0E-15, save_convergence_info=0, dt_traj='1 step',
connectivity_factor=1.0, nn=None, direct_space=False, N_dtram_accelerations=0, callback=None,
init='mbar', init_maxiter=10000, init_maxerr=1e-8, equilibrium=None, overcounting_factor=1.0):
r"""
Transition-based reweighting analysis method
Parameters
----------
ttrajs : numpy.ndarray(T), or list of numpy.ndarray(T_i)
A single discrete trajectory or a list of discrete trajectories. The integers are
indexes in 0,...,num_therm_states-1 enumerating the thermodynamic states the trajectory is
in at any time.
dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
bias : numpy.ndarray(T, num_therm_states), or list of numpy.ndarray(T_i, num_therm_states)
A single reduced bias energy trajectory or a list of reduced bias energy trajectories.
For every simulation frame seen in trajectory i and time step t, btrajs[i][t, k] is the
reduced bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at
the k'th umbrella/Hamiltonian/temperature)
lag : int or list of int, optional, default=1
Integer lag time at which transitions are counted. Providing a list of lag times will
trigger one estimation per lag time.
unbiased_state : int, optional, default=None
Index of the unbiased thermodynamic state or None if there is no unbiased data available.
maxiter : int, optional, default=10000
The maximum number of dTRAM iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1e-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual loglikelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
connectivity : str, optional, default='post_hoc_RE'
One of 'post_hoc_RE', 'BAR_variance', 'reversible_pathways' or
'summed_count_matrix'. Defines what should be considered a connected set
in the joint (product) space of conformations and thermodynamic ensembles.
* 'reversible_pathways' : requires that every state in the connected set
can be reached by following a pathway of reversible transitions. A
reversible transition between two Markov states (within the same
thermodynamic state k) is a pair of Markov states that belong to the
same strongly connected component of the count matrix (from
thermodynamic state k). A pathway of reversible transitions is a list of
reversible transitions [(i_1, i_2), (i_2, i_3),..., (i_(N-2), i_(N-1)),
(i_(N-1), i_N)]. The thermodynamic state where the reversible
transitions happen, is ignored in constructing the reversible pathways.
This is equivalent to assuming that two ensembles overlap at some Markov
state whenever there exist frames from both ensembles in that Markov
state.
* 'post_hoc_RE' : similar to 'reversible_pathways' but with a more strict
requirement for the overlap between thermodynamic states. It is required
that every state in the connected set can be reached by following a
pathway of reversible transitions or jumping between overlapping
thermodynamic states while staying in the same Markov state. A reversible
transition between two Markov states (within the same thermodynamic
state k) is a pair of Markov states that belong to the same strongly
connected component of the count matrix (from thermodynamic state k).
Two thermodynamic states k and l are defined to overlap at Markov state
n if a replica exchange simulation [2]_ restricted to state n would show
at least one transition from k to l or one transition from from l to k.
The expected number of replica exchanges is estimated from the
simulation data. The minimal number required of replica exchanges
per Markov state can be increased by decreasing `connectivity_factor`.
* 'BAR_variance' : like 'post_hoc_RE' but with a different condition to
define the thermodynamic overlap based on the variance of the BAR
estimator [3]_. Two thermodynamic states k and l are defined to overlap
at Markov state n if the variance of the free energy difference Delta
f_{kl} computed with BAR (and restricted to conformations form Markov
state n) is less or equal than one. The minimally required variance
can be controlled with `connectivity_factor`.
* 'summed_count_matrix' : all thermodynamic states are assumed to overlap.
The connected set is then computed by summing the count matrices over
all thermodynamic states and taking it's largest strongly connected set.
Not recommended!
For more details see :func:`pyemma.thermo.extensions.cset.compute_csets_TRAM`.
connectivity_factor : float, optional, default=1.0
Only needed if connectivity='post_hoc_RE' or 'BAR_variance'. Values
greater than 1.0 weaken the connectivity conditions. For 'post_hoc_RE'
this multiplies the number of hypothetically observed transitions. For
'BAR_variance' this scales the threshold for the minimal allowed variance
of free energy differences.
direct_space : bool, optional, default=False
Whether to perform the self-consistent iteration with Boltzmann factors
(direct space) or free energies (log-space). When analyzing data from
multi-temperature simulations, direct-space is not recommended.
N_dtram_accelerations : int, optional, default=0
Convergence of TRAM can be speeded up by interleaving the updates
in the self-consistent iteration with a dTRAM-like update step.
N_dtram_accelerations says how many times the dTRAM-like update
step should be applied in every iteration of the TRAM equations.
Currently this is only effective if direct_space=True.
init : str, optional, default=None
Use a specific initialization for self-consistent iteration:
| None: use a hard-coded guess for free energies and Lagrangian multipliers
| 'wham': perform a short WHAM estimate to initialize the free energies
init_maxiter : int, optional, default=10000
The maximum number of self-consistent iterations during the initialization.
init_maxerr : float, optional, default=1.0E-8
Convergence criterion for the initialization.
Returns
-------
A :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof
A multi-ensemble Markov state model (for each given lag time) which consists of stationary
and kinetic quantities at all temperatures/thermodynamic states.
Example
-------
**Umbrella sampling**: Suppose we simulate in K umbrellas, centered at
positions :math:`y_0,...,y_{K-1}` with bias energies
.. math::
b_k(x) = \frac{c_k}{2 \textrm{kT}} \cdot (x - y_k)^2
Suppose we have one simulation of length T in each umbrella, and they are ordered from 0 to K-1.
We have discretized the x-coordinate into 100 bins.
Then dtrajs and ttrajs should each be a list of :math:`K` arrays.
dtrajs would look for example like this::
[ (0, 0, 0, 0, 1, 1, 1, 0, 0, 0, ...), (0, 1, 0, 1, 0, 1, 1, 0, 0, 1, ...), ... ]
where each array has length T, and is the sequence of bins (in the range 0 to 99) visited along
the trajectory. ttrajs would look like this::
[ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...), (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...), ... ]
Because trajectory 1 stays in umbrella 1 (index 0), trajectory 2 stays in umbrella 2 (index 1),
and so forth.
The bias would be a list of :math:`T \times K` arrays which specify each frame's bias energy in
all thermodynamic states:
[ ((0, 1.7, 2.3, 6.1, ...), ...), ((0, 2.4, 3.1, 9,5, ...), ...), ... ]
Let us try the above example:
>>> from pyemma.thermo import tram
>>> import numpy as np
>>> ttrajs = [np.array([0,0,0,0,0,0,0]), np.array([1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1]), np.array([0,1,0,1,0,1,1])]
>>> bias = [np.array([[1,0],[1,0],[0,0],[0,0],[0,0],[0,0],[0,0]],dtype=np.float64), np.array([[1,0],[0,0],[0,0],[1,0],[0,0],[1,0],[1,0]],dtype=np.float64)]
>>> tram_obj = tram(ttrajs, dtrajs, bias, 1)
>>> tram_obj.log_likelihood() # doctest: +ELLIPSIS
-29.111...
>>> tram_obj.count_matrices # doctest: +SKIP
array([[[1 1]
[0 4]]
[[0 3]
[2 1]]], dtype=int32)
>>> tram_obj.stationary_distribution # doctest: +ELLIPSIS
array([ 0.38... 0.61...])
See :class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation.
.. autoclass:: pyemma.thermo.models.memm.MEMM
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.thermo.models.memm.MEMM
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.thermo.models.memm.MEMM
:attributes:
References
----------
.. [1] Wu, H. et al 2016
Multiensemble Markov models of molecular thermodynamics and kinetics
Proc. Natl. Acad. Sci. USA 113 E3221--E3230
.. [2]_ Hukushima et al, Exchange Monte Carlo method and application to spin
glass simulations, J. Phys. Soc. Jan. 65, 1604 (1996)
.. [3]_ Shirts and Chodera, Statistically optimal analysis of samples
from multiple equilibrium states, J. Chem. Phys. 129, 124105 (2008)
"""
# prepare trajectories
ttrajs = _types.ensure_dtraj_list(ttrajs)
dtrajs = _types.ensure_dtraj_list(dtrajs)
if len(ttrajs) != len(dtrajs):
raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (
len(dtrajs), len(ttrajs)))
if len(ttrajs) != len(bias):
raise ValueError("Unmatching number of ttraj/bias elements: %d!=%d" % (
len(ttrajs), len(bias)))
for ttraj, dtraj, btraj in zip(ttrajs, dtrajs, bias):
if len(ttraj) != len(dtraj):
raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (
len(ttraj), len(dtraj)))
if len(ttraj) != btraj.shape[0]:
raise ValueError("Unmatching number of data points in ttraj/bias trajectory: %d!=%d" % (
len(ttraj), len(btraj)))
# check lag time(s)
lags = _np.asarray(lag, dtype=_np.intc).reshape((-1,)).tolist()
# build TRAM and run estimation
from pyemma.thermo import TRAM as _TRAM
tram_estimators = []
from pyemma._base.progress import ProgressReporter
pg = ProgressReporter()
pg.register(amount_of_work=len(lags), description='Estimating TRAM for lags')
with pg.context():
for lag in lags:
t = _TRAM(
lag, count_mode=count_mode, connectivity=connectivity,
maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info,
dt_traj=dt_traj, connectivity_factor=connectivity_factor, nn=nn,
direct_space=direct_space, N_dtram_accelerations=N_dtram_accelerations,
callback=callback, init=init, init_maxiter=init_maxiter, init_maxerr=init_maxerr,
equilibrium=equilibrium, overcounting_factor=overcounting_factor).estimate((ttrajs, dtrajs, bias))
tram_estimators.append(t)
pg.update(1)
_assign_unbiased_state_label(tram_estimators, unbiased_state)
# return
if len(tram_estimators) == 1:
return tram_estimators[0]
return tram_estimators | python | def tram(
ttrajs, dtrajs, bias, lag, unbiased_state=None,
count_mode='sliding', connectivity='post_hoc_RE',
maxiter=10000, maxerr=1.0E-15, save_convergence_info=0, dt_traj='1 step',
connectivity_factor=1.0, nn=None, direct_space=False, N_dtram_accelerations=0, callback=None,
init='mbar', init_maxiter=10000, init_maxerr=1e-8, equilibrium=None, overcounting_factor=1.0):
r"""
Transition-based reweighting analysis method
Parameters
----------
ttrajs : numpy.ndarray(T), or list of numpy.ndarray(T_i)
A single discrete trajectory or a list of discrete trajectories. The integers are
indexes in 0,...,num_therm_states-1 enumerating the thermodynamic states the trajectory is
in at any time.
dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
bias : numpy.ndarray(T, num_therm_states), or list of numpy.ndarray(T_i, num_therm_states)
A single reduced bias energy trajectory or a list of reduced bias energy trajectories.
For every simulation frame seen in trajectory i and time step t, btrajs[i][t, k] is the
reduced bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at
the k'th umbrella/Hamiltonian/temperature)
lag : int or list of int, optional, default=1
Integer lag time at which transitions are counted. Providing a list of lag times will
trigger one estimation per lag time.
unbiased_state : int, optional, default=None
Index of the unbiased thermodynamic state or None if there is no unbiased data available.
maxiter : int, optional, default=10000
The maximum number of dTRAM iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1e-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual loglikelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
connectivity : str, optional, default='post_hoc_RE'
One of 'post_hoc_RE', 'BAR_variance', 'reversible_pathways' or
'summed_count_matrix'. Defines what should be considered a connected set
in the joint (product) space of conformations and thermodynamic ensembles.
* 'reversible_pathways' : requires that every state in the connected set
can be reached by following a pathway of reversible transitions. A
reversible transition between two Markov states (within the same
thermodynamic state k) is a pair of Markov states that belong to the
same strongly connected component of the count matrix (from
thermodynamic state k). A pathway of reversible transitions is a list of
reversible transitions [(i_1, i_2), (i_2, i_3),..., (i_(N-2), i_(N-1)),
(i_(N-1), i_N)]. The thermodynamic state where the reversible
transitions happen, is ignored in constructing the reversible pathways.
This is equivalent to assuming that two ensembles overlap at some Markov
state whenever there exist frames from both ensembles in that Markov
state.
* 'post_hoc_RE' : similar to 'reversible_pathways' but with a more strict
requirement for the overlap between thermodynamic states. It is required
that every state in the connected set can be reached by following a
pathway of reversible transitions or jumping between overlapping
thermodynamic states while staying in the same Markov state. A reversible
transition between two Markov states (within the same thermodynamic
state k) is a pair of Markov states that belong to the same strongly
connected component of the count matrix (from thermodynamic state k).
Two thermodynamic states k and l are defined to overlap at Markov state
n if a replica exchange simulation [2]_ restricted to state n would show
at least one transition from k to l or one transition from from l to k.
The expected number of replica exchanges is estimated from the
simulation data. The minimal number required of replica exchanges
per Markov state can be increased by decreasing `connectivity_factor`.
* 'BAR_variance' : like 'post_hoc_RE' but with a different condition to
define the thermodynamic overlap based on the variance of the BAR
estimator [3]_. Two thermodynamic states k and l are defined to overlap
at Markov state n if the variance of the free energy difference Delta
f_{kl} computed with BAR (and restricted to conformations form Markov
state n) is less or equal than one. The minimally required variance
can be controlled with `connectivity_factor`.
* 'summed_count_matrix' : all thermodynamic states are assumed to overlap.
The connected set is then computed by summing the count matrices over
all thermodynamic states and taking it's largest strongly connected set.
Not recommended!
For more details see :func:`pyemma.thermo.extensions.cset.compute_csets_TRAM`.
connectivity_factor : float, optional, default=1.0
Only needed if connectivity='post_hoc_RE' or 'BAR_variance'. Values
greater than 1.0 weaken the connectivity conditions. For 'post_hoc_RE'
this multiplies the number of hypothetically observed transitions. For
'BAR_variance' this scales the threshold for the minimal allowed variance
of free energy differences.
direct_space : bool, optional, default=False
Whether to perform the self-consistent iteration with Boltzmann factors
(direct space) or free energies (log-space). When analyzing data from
multi-temperature simulations, direct-space is not recommended.
N_dtram_accelerations : int, optional, default=0
Convergence of TRAM can be speeded up by interleaving the updates
in the self-consistent iteration with a dTRAM-like update step.
N_dtram_accelerations says how many times the dTRAM-like update
step should be applied in every iteration of the TRAM equations.
Currently this is only effective if direct_space=True.
init : str, optional, default=None
Use a specific initialization for self-consistent iteration:
| None: use a hard-coded guess for free energies and Lagrangian multipliers
| 'wham': perform a short WHAM estimate to initialize the free energies
init_maxiter : int, optional, default=10000
The maximum number of self-consistent iterations during the initialization.
init_maxerr : float, optional, default=1.0E-8
Convergence criterion for the initialization.
Returns
-------
A :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof
A multi-ensemble Markov state model (for each given lag time) which consists of stationary
and kinetic quantities at all temperatures/thermodynamic states.
Example
-------
**Umbrella sampling**: Suppose we simulate in K umbrellas, centered at
positions :math:`y_0,...,y_{K-1}` with bias energies
.. math::
b_k(x) = \frac{c_k}{2 \textrm{kT}} \cdot (x - y_k)^2
Suppose we have one simulation of length T in each umbrella, and they are ordered from 0 to K-1.
We have discretized the x-coordinate into 100 bins.
Then dtrajs and ttrajs should each be a list of :math:`K` arrays.
dtrajs would look for example like this::
[ (0, 0, 0, 0, 1, 1, 1, 0, 0, 0, ...), (0, 1, 0, 1, 0, 1, 1, 0, 0, 1, ...), ... ]
where each array has length T, and is the sequence of bins (in the range 0 to 99) visited along
the trajectory. ttrajs would look like this::
[ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...), (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...), ... ]
Because trajectory 1 stays in umbrella 1 (index 0), trajectory 2 stays in umbrella 2 (index 1),
and so forth.
The bias would be a list of :math:`T \times K` arrays which specify each frame's bias energy in
all thermodynamic states:
[ ((0, 1.7, 2.3, 6.1, ...), ...), ((0, 2.4, 3.1, 9.5, ...), ...), ... ]
Let us try the above example:
>>> from pyemma.thermo import tram
>>> import numpy as np
>>> ttrajs = [np.array([0,0,0,0,0,0,0]), np.array([1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1]), np.array([0,1,0,1,0,1,1])]
>>> bias = [np.array([[1,0],[1,0],[0,0],[0,0],[0,0],[0,0],[0,0]],dtype=np.float64), np.array([[1,0],[0,0],[0,0],[1,0],[0,0],[1,0],[1,0]],dtype=np.float64)]
>>> tram_obj = tram(ttrajs, dtrajs, bias, 1)
>>> tram_obj.log_likelihood() # doctest: +ELLIPSIS
-29.111...
>>> tram_obj.count_matrices # doctest: +SKIP
array([[[1 1]
[0 4]]
[[0 3]
[2 1]]], dtype=int32)
>>> tram_obj.stationary_distribution # doctest: +ELLIPSIS
array([ 0.38... 0.61...])
See :class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation.
.. autoclass:: pyemma.thermo.models.memm.MEMM
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.thermo.models.memm.MEMM
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.thermo.models.memm.MEMM
:attributes:
References
----------
.. [1] Wu, H. et al 2016
Multiensemble Markov models of molecular thermodynamics and kinetics
Proc. Natl. Acad. Sci. USA 113 E3221--E3230
.. [2] Hukushima et al, Exchange Monte Carlo method and application to spin
glass simulations, J. Phys. Soc. Jpn. 65, 1604 (1996)
.. [3] Shirts and Chodera, Statistically optimal analysis of samples
from multiple equilibrium states, J. Chem. Phys. 129, 124105 (2008)
"""
# prepare trajectories
ttrajs = _types.ensure_dtraj_list(ttrajs)
dtrajs = _types.ensure_dtraj_list(dtrajs)
if len(ttrajs) != len(dtrajs):
raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (
len(dtrajs), len(ttrajs)))
if len(ttrajs) != len(bias):
raise ValueError("Unmatching number of ttraj/bias elements: %d!=%d" % (
len(ttrajs), len(bias)))
for ttraj, dtraj, btraj in zip(ttrajs, dtrajs, bias):
if len(ttraj) != len(dtraj):
raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (
len(ttraj), len(dtraj)))
if len(ttraj) != btraj.shape[0]:
raise ValueError("Unmatching number of data points in ttraj/bias trajectory: %d!=%d" % (
len(ttraj), len(btraj)))
# check lag time(s)
lags = _np.asarray(lag, dtype=_np.intc).reshape((-1,)).tolist()
# build TRAM and run estimation
from pyemma.thermo import TRAM as _TRAM
tram_estimators = []
from pyemma._base.progress import ProgressReporter
pg = ProgressReporter()
pg.register(amount_of_work=len(lags), description='Estimating TRAM for lags')
with pg.context():
for lag in lags:
t = _TRAM(
lag, count_mode=count_mode, connectivity=connectivity,
maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info,
dt_traj=dt_traj, connectivity_factor=connectivity_factor, nn=nn,
direct_space=direct_space, N_dtram_accelerations=N_dtram_accelerations,
callback=callback, init=init, init_maxiter=init_maxiter, init_maxerr=init_maxerr,
equilibrium=equilibrium, overcounting_factor=overcounting_factor).estimate((ttrajs, dtrajs, bias))
tram_estimators.append(t)
pg.update(1)
_assign_unbiased_state_label(tram_estimators, unbiased_state)
# return
if len(tram_estimators) == 1:
return tram_estimators[0]
return tram_estimators | [
"def",
"tram",
"(",
"ttrajs",
",",
"dtrajs",
",",
"bias",
",",
"lag",
",",
"unbiased_state",
"=",
"None",
",",
"count_mode",
"=",
"'sliding'",
",",
"connectivity",
"=",
"'post_hoc_RE'",
",",
"maxiter",
"=",
"10000",
",",
"maxerr",
"=",
"1.0E-15",
",",
"s... | r"""
Transition-based reweighting analysis method
Parameters
----------
ttrajs : numpy.ndarray(T), or list of numpy.ndarray(T_i)
A single discrete trajectory or a list of discrete trajectories. The integers are
indexes in 0,...,num_therm_states-1 enumerating the thermodynamic states the trajectory is
in at any time.
dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
bias : numpy.ndarray(T, num_therm_states), or list of numpy.ndarray(T_i, num_therm_states)
A single reduced bias energy trajectory or a list of reduced bias energy trajectories.
For every simulation frame seen in trajectory i and time step t, btrajs[i][t, k] is the
reduced bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at
the k'th umbrella/Hamiltonian/temperature)
lag : int or list of int, optional, default=1
Integer lag time at which transitions are counted. Providing a list of lag times will
trigger one estimation per lag time.
unbiased_state : int, optional, default=None
Index of the unbiased thermodynamic state or None if there is no unbiased data available.
maxiter : int, optional, default=10000
The maximum number of dTRAM iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1e-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual loglikelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
connectivity : str, optional, default='post_hoc_RE'
One of 'post_hoc_RE', 'BAR_variance', 'reversible_pathways' or
'summed_count_matrix'. Defines what should be considered a connected set
in the joint (product) space of conformations and thermodynamic ensembles.
* 'reversible_pathways' : requires that every state in the connected set
can be reached by following a pathway of reversible transitions. A
reversible transition between two Markov states (within the same
thermodynamic state k) is a pair of Markov states that belong to the
same strongly connected component of the count matrix (from
thermodynamic state k). A pathway of reversible transitions is a list of
reversible transitions [(i_1, i_2), (i_2, i_3),..., (i_(N-2), i_(N-1)),
(i_(N-1), i_N)]. The thermodynamic state where the reversible
transitions happen, is ignored in constructing the reversible pathways.
This is equivalent to assuming that two ensembles overlap at some Markov
state whenever there exist frames from both ensembles in that Markov
state.
* 'post_hoc_RE' : similar to 'reversible_pathways' but with a more strict
requirement for the overlap between thermodynamic states. It is required
that every state in the connected set can be reached by following a
pathway of reversible transitions or jumping between overlapping
thermodynamic states while staying in the same Markov state. A reversible
transition between two Markov states (within the same thermodynamic
state k) is a pair of Markov states that belong to the same strongly
connected component of the count matrix (from thermodynamic state k).
Two thermodynamic states k and l are defined to overlap at Markov state
n if a replica exchange simulation [2]_ restricted to state n would show
at least one transition from k to l or one transition from l to k.
The expected number of replica exchanges is estimated from the
simulation data. The minimally required number of replica exchanges
per Markov state can be increased by decreasing `connectivity_factor`.
* 'BAR_variance' : like 'post_hoc_RE' but with a different condition to
define the thermodynamic overlap based on the variance of the BAR
estimator [3]_. Two thermodynamic states k and l are defined to overlap
at Markov state n if the variance of the free energy difference Delta
f_{kl} computed with BAR (and restricted to conformations form Markov
state n) is less or equal than one. The minimally required variance
can be controlled with `connectivity_factor`.
* 'summed_count_matrix' : all thermodynamic states are assumed to overlap.
The connected set is then computed by summing the count matrices over
all thermodynamic states and taking its largest strongly connected set.
Not recommended!
For more details see :func:`pyemma.thermo.extensions.cset.compute_csets_TRAM`.
connectivity_factor : float, optional, default=1.0
Only needed if connectivity='post_hoc_RE' or 'BAR_variance'. Values
greater than 1.0 weaken the connectivity conditions. For 'post_hoc_RE'
this multiplies the number of hypothetically observed transitions. For
'BAR_variance' this scales the threshold for the minimal allowed variance
of free energy differences.
direct_space : bool, optional, default=False
Whether to perform the self-consistent iteration with Boltzmann factors
(direct space) or free energies (log-space). When analyzing data from
multi-temperature simulations, direct-space is not recommended.
N_dtram_accelerations : int, optional, default=0
Convergence of TRAM can be speeded up by interleaving the updates
in the self-consistent iteration with a dTRAM-like update step.
N_dtram_accelerations says how many times the dTRAM-like update
step should be applied in every iteration of the TRAM equations.
Currently this is only effective if direct_space=True.
init : str, optional, default=None
Use a specific initialization for self-consistent iteration:
| None: use a hard-coded guess for free energies and Lagrangian multipliers
| 'wham': perform a short WHAM estimate to initialize the free energies
init_maxiter : int, optional, default=10000
The maximum number of self-consistent iterations during the initialization.
init_maxerr : float, optional, default=1.0E-8
Convergence criterion for the initialization.
Returns
-------
A :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof
A multi-ensemble Markov state model (for each given lag time) which consists of stationary
and kinetic quantities at all temperatures/thermodynamic states.
Example
-------
**Umbrella sampling**: Suppose we simulate in K umbrellas, centered at
positions :math:`y_0,...,y_{K-1}` with bias energies
.. math::
b_k(x) = \frac{c_k}{2 \textrm{kT}} \cdot (x - y_k)^2
Suppose we have one simulation of length T in each umbrella, and they are ordered from 0 to K-1.
We have discretized the x-coordinate into 100 bins.
Then dtrajs and ttrajs should each be a list of :math:`K` arrays.
dtrajs would look for example like this::
[ (0, 0, 0, 0, 1, 1, 1, 0, 0, 0, ...), (0, 1, 0, 1, 0, 1, 1, 0, 0, 1, ...), ... ]
where each array has length T, and is the sequence of bins (in the range 0 to 99) visited along
the trajectory. ttrajs would look like this::
[ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...), (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...), ... ]
Because trajectory 1 stays in umbrella 1 (index 0), trajectory 2 stays in umbrella 2 (index 1),
and so forth.
The bias would be a list of :math:`T \times K` arrays which specify each frame's bias energy in
all thermodynamic states:
[ ((0, 1.7, 2.3, 6.1, ...), ...), ((0, 2.4, 3.1, 9.5, ...), ...), ... ]
Let us try the above example:
>>> from pyemma.thermo import tram
>>> import numpy as np
>>> ttrajs = [np.array([0,0,0,0,0,0,0]), np.array([1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1]), np.array([0,1,0,1,0,1,1])]
>>> bias = [np.array([[1,0],[1,0],[0,0],[0,0],[0,0],[0,0],[0,0]],dtype=np.float64), np.array([[1,0],[0,0],[0,0],[1,0],[0,0],[1,0],[1,0]],dtype=np.float64)]
>>> tram_obj = tram(ttrajs, dtrajs, bias, 1)
>>> tram_obj.log_likelihood() # doctest: +ELLIPSIS
-29.111...
>>> tram_obj.count_matrices # doctest: +SKIP
array([[[1 1]
[0 4]]
[[0 3]
[2 1]]], dtype=int32)
>>> tram_obj.stationary_distribution # doctest: +ELLIPSIS
array([ 0.38... 0.61...])
See :class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation.
.. autoclass:: pyemma.thermo.models.memm.MEMM
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.thermo.models.memm.MEMM
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.thermo.models.memm.MEMM
:attributes:
References
----------
.. [1] Wu, H. et al 2016
Multiensemble Markov models of molecular thermodynamics and kinetics
Proc. Natl. Acad. Sci. USA 113 E3221--E3230
.. [2] Hukushima et al, Exchange Monte Carlo method and application to spin
glass simulations, J. Phys. Soc. Jpn. 65, 1604 (1996)
.. [3] Shirts and Chodera, Statistically optimal analysis of samples
from multiple equilibrium states, J. Chem. Phys. 129, 124105 (2008) | [
"r",
"Transition",
"-",
"based",
"reweighting",
"analysis",
"method"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/thermo/api.py#L464-L702 | train | 204,240 |
def dtram(
        ttrajs, dtrajs, bias, lag, unbiased_state=None,
        count_mode='sliding', connectivity='reversible_pathways',
        maxiter=10000, maxerr=1.0E-15, save_convergence_info=0, dt_traj='1 step',
        init=None, init_maxiter=10000, init_maxerr=1.0E-8):
    r"""Discrete transition-based reweighting analysis method (dTRAM).

    Parameters
    ----------
    ttrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
        Trajectory of thermodynamic-state indices (0,...,num_therm_states-1)
        visited at each time step, or a list of such trajectories.
    dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
        Trajectory of Markov-state (bin) indices (0,...,num_conf_states-1)
        visited at each time step, or a list of such trajectories.
    bias : numpy.ndarray(shape=(num_therm_states, num_conf_states))
        bias[j, i] is the bias energy (in units of kT) of discrete state i
        in thermodynamic state j.
    lag : int or list of int
        Lag time(s) at which transitions are counted; a list triggers one
        estimation per lag time.
    unbiased_state : int, optional, default=None
        Index of the unbiased thermodynamic state, or None if no unbiased
        data is available.
    count_mode : str, optional, default='sliding'
        How count matrices are obtained from the discrete trajectories;
        currently only 'sliding' is supported.
    connectivity : str, optional, default='reversible_pathways'
        One of 'reversible_pathways', 'summed_count_matrix' or None; defines
        the connected set in the joint space of conformations and ensembles.
        See :func:`pyemma.thermo.extensions.cset.compute_csets_dTRAM`.
    maxiter : int, optional, default=10000
        Maximum number of dTRAM iterations before the estimator exits
        unsuccessfully.
    maxerr : float, optional, default=1e-15
        Convergence criterion based on the maximal free energy change in a
        self-consistent iteration step.
    save_convergence_info : int, optional, default=0
        Store the actual increment and loglikelihood every
        save_convergence_info iteration steps; 0 means no storage.
    dt_traj : str, optional, default='1 step'
        Physical time corresponding to one trajectory step (e.g. '10 ps'),
        used by analysis/plotting tools for display.
    init : str, optional, default=None
        None for a hard-coded initial guess, or 'wham' to initialize the
        free energies with a short WHAM estimate.
    init_maxiter : int, optional, default=10000
        Maximum number of self-consistent iterations during initialization.
    init_maxerr : float, optional, default=1.0E-8
        Convergence criterion for the initialization.

    Returns
    -------
    A :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object, or a list of
    such objects (one per given lag time), containing stationary and kinetic
    quantities at all thermodynamic states.

    References
    ----------
    .. [1] Wu, H. et al 2014
        Statistically optimal analysis of state-discretized trajectory data
        from multiple thermodynamic states, J. Chem. Phys. 141, 214106
    """
    # Normalize inputs to lists of integer trajectories and validate that
    # thermodynamic-state and configuration-state trajectories line up.
    ttrajs = _types.ensure_dtraj_list(ttrajs)
    dtrajs = _types.ensure_dtraj_list(dtrajs)
    if len(ttrajs) != len(dtrajs):
        raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (
            len(dtrajs), len(ttrajs)))
    for ttraj, dtraj in zip(ttrajs, dtrajs):
        if len(ttraj) != len(dtraj):
            raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (
                len(ttraj), len(dtraj)))
    # Accept a scalar lag or any array-like of lags; always work on a flat list.
    lags = _np.asarray(lag, dtype=_np.intc).reshape((-1,)).tolist()
    # Run one DTRAM estimation per requested lag time, reporting progress.
    from pyemma.thermo import DTRAM
    from pyemma._base.progress import ProgressReporter
    pg = ProgressReporter()
    pg.register(len(lags), description='Estimating DTRAM for lags')
    dtram_estimators = []
    with pg.context():
        for current_lag in lags:
            estimator = DTRAM(
                bias, current_lag,
                count_mode=count_mode, connectivity=connectivity,
                maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info,
                dt_traj=dt_traj, init=init, init_maxiter=init_maxiter,
                init_maxerr=init_maxerr).estimate((ttrajs, dtrajs))
            dtram_estimators.append(estimator)
            pg.update(1)
    # Tag the unbiased ensemble (if any) on every estimated model.
    _assign_unbiased_state_label(dtram_estimators, unbiased_state)
    # Unwrap single-element results for convenience.
    return dtram_estimators[0] if len(dtram_estimators) == 1 else dtram_estimators
def dtram(
        ttrajs, dtrajs, bias, lag, unbiased_state=None,
        count_mode='sliding', connectivity='reversible_pathways',
        maxiter=10000, maxerr=1.0E-15, save_convergence_info=0, dt_traj='1 step',
        init=None, init_maxiter=10000, init_maxerr=1.0E-8):
    r"""Estimate multi-ensemble Markov models with the dTRAM method.

    Parameters
    ----------
    ttrajs, dtrajs : numpy.ndarray(T) of int, or lists thereof
        Thermodynamic-state and Markov-state index trajectories; element t of
        ttrajs[i] is the thermodynamic state and element t of dtrajs[i] the
        Markov state (bin) visited at time t of trajectory i.
    bias : numpy.ndarray(shape=(num_therm_states, num_conf_states))
        bias[j, i] is the reduced bias energy (units of kT) of Markov state i
        in thermodynamic state j.
    lag : int or list of int
        Lag time(s) for transition counting; a list yields one model per lag.
    unbiased_state : int, optional, default=None
        Index of the unbiased thermodynamic state (None if not present).
    count_mode : str, optional, default='sliding'
        Count-matrix mode; only 'sliding' is currently supported.
    connectivity : str, optional, default='reversible_pathways'
        Connected-set criterion: 'reversible_pathways', 'summed_count_matrix'
        or None (everything connected; for debugging). See
        :func:`pyemma.thermo.extensions.cset.compute_csets_dTRAM`.
    maxiter : int, optional, default=10000
        Maximum number of dTRAM iterations.
    maxerr : float, optional, default=1e-15
        Convergence threshold on the maximal free energy change per iteration.
    save_convergence_info : int, optional, default=0
        Storage interval for increments/loglikelihoods; 0 means no storage.
    dt_traj : str, optional, default='1 step'
        Physical time per trajectory step (e.g. '10 ps'), used for display.
    init : str, optional, default=None
        None for the built-in initial guess, 'wham' for a short WHAM warm-up.
    init_maxiter : int, optional, default=10000
        Iteration cap for the initialization phase.
    init_maxerr : float, optional, default=1.0E-8
        Convergence threshold for the initialization phase.

    Returns
    -------
    A :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object for a scalar lag,
    or a list of such objects for a list of lags.

    References
    ----------
    .. [1] Wu, H. et al 2014, J. Chem. Phys. 141, 214106
    """
    ttrajs = _types.ensure_dtraj_list(ttrajs)
    dtrajs = _types.ensure_dtraj_list(dtrajs)
    # Both trajectory lists must pair up one-to-one and frame-by-frame.
    if len(ttrajs) != len(dtrajs):
        raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (
            len(dtrajs), len(ttrajs)))
    for ttraj, dtraj in zip(ttrajs, dtrajs):
        if len(ttraj) != len(dtraj):
            raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (
                len(ttraj), len(dtraj)))
    # Flatten scalar or array-like lag input into a plain list of C ints.
    lags = _np.asarray(lag, dtype=_np.intc).reshape((-1,)).tolist()
    from pyemma.thermo import DTRAM
    from pyemma._base.progress import ProgressReporter
    pg = ProgressReporter()
    pg.register(len(lags), description='Estimating DTRAM for lags')
    dtram_estimators = []
    with pg.context():
        # One independent DTRAM estimation per lag time.
        for _tau in lags:
            dtram_estimators.append(
                DTRAM(
                    bias, _tau,
                    count_mode=count_mode, connectivity=connectivity,
                    maxiter=maxiter, maxerr=maxerr,
                    save_convergence_info=save_convergence_info,
                    dt_traj=dt_traj, init=init, init_maxiter=init_maxiter,
                    init_maxerr=init_maxerr).estimate((ttrajs, dtrajs)))
            pg.update(1)
    _assign_unbiased_state_label(dtram_estimators, unbiased_state)
    # A single lag returns the bare model instead of a one-element list.
    if len(dtram_estimators) == 1:
        return dtram_estimators[0]
    return dtram_estimators
"def",
"dtram",
"(",
"ttrajs",
",",
"dtrajs",
",",
"bias",
",",
"lag",
",",
"unbiased_state",
"=",
"None",
",",
"count_mode",
"=",
"'sliding'",
",",
"connectivity",
"=",
"'reversible_pathways'",
",",
"maxiter",
"=",
"10000",
",",
"maxerr",
"=",
"1.0E-15",
... | r"""
Discrete transition-based reweighting analysis method
Parameters
----------
ttrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are
indexes in 0,...,num_therm_states-1 enumerating the thermodynamic states the trajectory is
in at any time.
dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
bias : numpy.ndarray(shape=(num_therm_states, num_conf_states)) object
bias_energies_full[j, i] is the bias energy in units of kT for each discrete state i
at thermodynamic state j.
lag : int or list of int, optional, default=1
Integer lag time at which transitions are counted. Providing a list of lag times will
trigger one estimation per lag time.
unbiased_state : int, optional, default=None
Index of the unbiased thermodynamic state or None if there is no unbiased data available.
count_mode : str, optional, default='sliding'
Mode to obtain count matrices from discrete trajectories. Should be one of:
* 'sliding' : a trajectory of length T will have :math:`T-\tau` counts at time indexes
.. math::
(0 \rightarrow \tau), (1 \rightarrow \tau+1), ..., (T-\tau-1 \rightarrow T-1)
* 'sample' : a trajectory of length T will have :math:`T/\tau` counts at time indexes
.. math::
(0 \rightarrow \tau), (\tau \rightarrow 2 \tau), ..., ((T/\tau-1) \tau \rightarrow T)
Currently only 'sliding' is supported.
connectivity : str, optional, default='reversible_pathways'
One of 'reversible_pathways', 'summed_count_matrix' or None.
Defines what should be considered a connected set in the joint (product)
space of conformations and thermodynamic ensembles.
* 'reversible_pathways' : requires that every state in the connected set
can be reached by following a pathway of reversible transitions. A
reversible transition between two Markov states (within the same
thermodynamic state k) is a pair of Markov states that belong to the
same strongly connected component of the count matrix (from
thermodynamic state k). A pathway of reversible transitions is a list of
reversible transitions [(i_1, i_2), (i_2, i_3),..., (i_(N-2), i_(N-1)),
(i_(N-1), i_N)]. The thermodynamic state where the reversible
transitions happen, is ignored in constructing the reversible pathways.
This is equivalent to assuming that two ensembles overlap at some Markov
state whenever there exist frames from both ensembles in that Markov
state.
* 'summed_count_matrix' : all thermodynamic states are assumed to overlap.
The connected set is then computed by summing the count matrices over
all thermodynamic states and taking it's largest strongly connected set.
Not recommended!
* None : assume that everything is connected. For debugging.
For more details see :func:`pyemma.thermo.extensions.cset.compute_csets_dTRAM`.
maxiter : int, optional, default=10000
The maximum number of dTRAM iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1e-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual loglikelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
init : str, optional, default=None
Use a specific initialization for self-consistent iteration:
| None: use a hard-coded guess for free energies and Lagrangian multipliers
| 'wham': perform a short WHAM estimate to initialize the free energies
init_maxiter : int, optional, default=10000
The maximum number of self-consistent iterations during the initialization.
init_maxerr : float, optional, default=1.0E-8
Convergence criterion for the initialization.
Returns
-------
A :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof
A multi-ensemble Markov state model (for each given lag time) which consists of stationary
and kinetic quantities at all temperatures/thermodynamic states.
Example
-------
**Umbrella sampling**: Suppose we simulate in K umbrellas, centered at
positions :math:`y_0,...,y_{K-1}` with bias energies
.. math::
b_k(x) = \frac{c_k}{2 \textrm{kT}} \cdot (x - y_k)^2
Suppose we have one simulation of length T in each umbrella, and they are ordered from 0 to K-1.
We have discretized the x-coordinate into 100 bins.
Then dtrajs and ttrajs should each be a list of :math:`K` arrays.
dtrajs would look for example like this::
[ (0, 0, 0, 0, 1, 1, 1, 0, 0, 0, ...), (0, 1, 0, 1, 0, 1, 1, 0, 0, 1, ...), ... ]
where each array has length T, and is the sequence of bins (in the range 0 to 99) visited along
the trajectory. ttrajs would look like this::
[ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...), (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...), ... ]
Because trajectory 1 stays in umbrella 1 (index 0), trajectory 2 stays in umbrella 2 (index 1),
and so forth. bias is a :math:`K \times n` matrix with all reduced bias energies evaluated at
all centers:
.. math::
\left(\begin{array}{cccc}
b_0(y_0) & b_0(y_1) & ... & b_0(y_{n-1}) \\
b_1(y_0) & b_1(y_1) & ... & b_1(y_{n-1}) \\
... \\
b_{K-1}(y_0) & b_{K-1}(y_1) & ... & b_{K-1}(y_{n-1})
\end{array}\right)
Let us try the above example:
>>> from pyemma.thermo import dtram
>>> import numpy as np
>>> ttrajs = [np.array([0,0,0,0,0,0,0,0,0,0]), np.array([1,1,1,1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1,0,0,0]), np.array([0,1,0,1,0,1,1,0,0,1])]
>>> bias = np.array([[0.0, 0.0], [0.5, 1.0]])
>>> dtram_obj = dtram(ttrajs, dtrajs, bias, 1)
>>> dtram_obj.log_likelihood() # doctest: +ELLIPSIS
-9.805...
>>> dtram_obj.count_matrices # doctest: +SKIP
array([[[5, 1],
[1, 2]],
[[1, 4],
[3, 1]]], dtype=int32)
>>> dtram_obj.stationary_distribution # doctest: +ELLIPSIS
array([ 0.38..., 0.61...])
See :class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation.
.. autoclass:: pyemma.thermo.models.memm.MEMM
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.thermo.models.memm.MEMM
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.thermo.models.memm.MEMM
:attributes:
References
----------
.. [1] Wu, H. et al 2014
Statistically optimal analysis of state-discretized trajectory data from multiple thermodynamic states
J. Chem. Phys. 141, 214106 | [
"r",
"Discrete",
"transition",
"-",
"based",
"reweighting",
"analysis",
"method"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/thermo/api.py#L704-L907 | train | 204,241 |
markovmodel/PyEMMA | pyemma/thermo/api.py | wham | def wham(
ttrajs, dtrajs, bias,
maxiter=100000, maxerr=1.0E-15, save_convergence_info=0, dt_traj='1 step'):
r"""
Weighted histogram analysis method
Parameters
----------
ttrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are
indexes in 0,...,num_therm_states-1 enumerating the thermodynamic states the trajectory is
in at any time.
dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
bias : numpy.ndarray(shape=(num_therm_states, num_conf_states)) object
bias_energies_full[j, i] is the bias energy in units of kT for each discrete state i
at thermodynamic state j.
maxiter : int, optional, default=10000
The maximum number of dTRAM iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1e-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual loglikelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
Returns
-------
A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` object
A stationary model which consists of thermodynamic quantities at all
temperatures/thermodynamic states.
Example
-------
**Umbrella sampling**: Suppose we simulate in K umbrellas, centered at
positions :math:`y_0,...,y_{K-1}` with bias energies
.. math::
b_k(x) = \frac{c_k}{2 \textrm{kT}} \cdot (x - y_k)^2
Suppose we have one simulation of length T in each umbrella, and they are ordered from 0 to K-1.
We have discretized the x-coordinate into 100 bins.
Then dtrajs and ttrajs should each be a list of :math:`K` arrays.
dtrajs would look for example like this::
[ (0, 0, 0, 0, 1, 1, 1, 0, 0, 0, ...), (0, 1, 0, 1, 0, 1, 1, 0, 0, 1, ...), ... ]
where each array has length T, and is the sequence of bins (in the range 0 to 99) visited along
the trajectory. ttrajs would look like this::
[ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...), (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...), ... ]
Because trajectory 1 stays in umbrella 1 (index 0), trajectory 2 stays in umbrella 2 (index 1),
and so forth. bias is a :math:`K \times n` matrix with all reduced bias energies evaluated at
all centers:
.. math::
\left(\begin{array}{cccc}
b_0(y_0) & b_0(y_1) & ... & b_0(y_{n-1}) \\
b_1(y_0) & b_1(y_1) & ... & b_1(y_{n-1}) \\
... \\
b_{K-1}(y_0) & b_{K-1}(y_1) & ... & b_{K-1}(y_{n-1})
\end{array}\right)
Let us try the above example:
>>> from pyemma.thermo import wham
>>> import numpy as np
>>> ttrajs = [np.array([0,0,0,0,0,0,0,0,0,0]), np.array([1,1,1,1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1,0,0,0]), np.array([0,1,0,1,0,1,1,0,0,1])]
>>> bias = np.array([[0.0, 0.0], [0.5, 1.0]])
>>> wham_obj = wham(ttrajs, dtrajs, bias)
>>> wham_obj.log_likelihood() # doctest: +ELLIPSIS
-6.6...
>>> wham_obj.state_counts # doctest: +SKIP
array([[7, 3],
[5, 5]])
>>> wham_obj.stationary_distribution # doctest: +ELLIPSIS +REPORT_NDIFF
array([ 0.5..., 0.4...])
See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` for a full
documentation.
.. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:attributes:
References
----------
.. [1] Ferrenberg, A.M. and Swensen, R.H. 1988.
New Monte Carlo Technique for Studying Phase Transitions.
Phys. Rev. Lett. 23, 2635--2638
.. [2] Kumar, S. et al 1992.
The Weighted Histogram Analysis Method for Free-Energy Calculations on Biomolecules. I. The Method.
J. Comp. Chem. 13, 1011--1021
"""
# check trajectories
ttrajs = _types.ensure_dtraj_list(ttrajs)
dtrajs = _types.ensure_dtraj_list(dtrajs)
if len(ttrajs) != len(dtrajs):
raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (
len(dtrajs), len(ttrajs)) )
for ttraj, dtraj in zip(ttrajs, dtrajs):
if len(ttraj) != len(dtraj):
raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (
len(ttraj), len(dtraj)))
# build WHAM
from pyemma.thermo import WHAM
wham_estimator = WHAM(
bias,
maxiter=maxiter, maxerr=maxerr,
save_convergence_info=save_convergence_info, dt_traj=dt_traj)
# run estimation
return wham_estimator.estimate((ttrajs, dtrajs)) | python | def wham(
ttrajs, dtrajs, bias,
maxiter=100000, maxerr=1.0E-15, save_convergence_info=0, dt_traj='1 step'):
r"""
Weighted histogram analysis method
Parameters
----------
ttrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are
indexes in 0,...,num_therm_states-1 enumerating the thermodynamic states the trajectory is
in at any time.
dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
bias : numpy.ndarray(shape=(num_therm_states, num_conf_states)) object
bias_energies_full[j, i] is the bias energy in units of kT for each discrete state i
at thermodynamic state j.
maxiter : int, optional, default=10000
The maximum number of dTRAM iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1e-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual loglikelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
Returns
-------
A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` object
A stationary model which consists of thermodynamic quantities at all
temperatures/thermodynamic states.
Example
-------
**Umbrella sampling**: Suppose we simulate in K umbrellas, centered at
positions :math:`y_0,...,y_{K-1}` with bias energies
.. math::
b_k(x) = \frac{c_k}{2 \textrm{kT}} \cdot (x - y_k)^2
Suppose we have one simulation of length T in each umbrella, and they are ordered from 0 to K-1.
We have discretized the x-coordinate into 100 bins.
Then dtrajs and ttrajs should each be a list of :math:`K` arrays.
dtrajs would look for example like this::
[ (0, 0, 0, 0, 1, 1, 1, 0, 0, 0, ...), (0, 1, 0, 1, 0, 1, 1, 0, 0, 1, ...), ... ]
where each array has length T, and is the sequence of bins (in the range 0 to 99) visited along
the trajectory. ttrajs would look like this::
[ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...), (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...), ... ]
Because trajectory 1 stays in umbrella 1 (index 0), trajectory 2 stays in umbrella 2 (index 1),
and so forth. bias is a :math:`K \times n` matrix with all reduced bias energies evaluated at
all centers:
.. math::
\left(\begin{array}{cccc}
b_0(y_0) & b_0(y_1) & ... & b_0(y_{n-1}) \\
b_1(y_0) & b_1(y_1) & ... & b_1(y_{n-1}) \\
... \\
b_{K-1}(y_0) & b_{K-1}(y_1) & ... & b_{K-1}(y_{n-1})
\end{array}\right)
Let us try the above example:
>>> from pyemma.thermo import wham
>>> import numpy as np
>>> ttrajs = [np.array([0,0,0,0,0,0,0,0,0,0]), np.array([1,1,1,1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1,0,0,0]), np.array([0,1,0,1,0,1,1,0,0,1])]
>>> bias = np.array([[0.0, 0.0], [0.5, 1.0]])
>>> wham_obj = wham(ttrajs, dtrajs, bias)
>>> wham_obj.log_likelihood() # doctest: +ELLIPSIS
-6.6...
>>> wham_obj.state_counts # doctest: +SKIP
array([[7, 3],
[5, 5]])
>>> wham_obj.stationary_distribution # doctest: +ELLIPSIS +REPORT_NDIFF
array([ 0.5..., 0.4...])
See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` for a full
documentation.
.. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:attributes:
References
----------
.. [1] Ferrenberg, A.M. and Swensen, R.H. 1988.
New Monte Carlo Technique for Studying Phase Transitions.
Phys. Rev. Lett. 23, 2635--2638
.. [2] Kumar, S. et al 1992.
The Weighted Histogram Analysis Method for Free-Energy Calculations on Biomolecules. I. The Method.
J. Comp. Chem. 13, 1011--1021
"""
# check trajectories
ttrajs = _types.ensure_dtraj_list(ttrajs)
dtrajs = _types.ensure_dtraj_list(dtrajs)
if len(ttrajs) != len(dtrajs):
raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (
len(dtrajs), len(ttrajs)) )
for ttraj, dtraj in zip(ttrajs, dtrajs):
if len(ttraj) != len(dtraj):
raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (
len(ttraj), len(dtraj)))
# build WHAM
from pyemma.thermo import WHAM
wham_estimator = WHAM(
bias,
maxiter=maxiter, maxerr=maxerr,
save_convergence_info=save_convergence_info, dt_traj=dt_traj)
# run estimation
return wham_estimator.estimate((ttrajs, dtrajs)) | [
"def",
"wham",
"(",
"ttrajs",
",",
"dtrajs",
",",
"bias",
",",
"maxiter",
"=",
"100000",
",",
"maxerr",
"=",
"1.0E-15",
",",
"save_convergence_info",
"=",
"0",
",",
"dt_traj",
"=",
"'1 step'",
")",
":",
"# check trajectories",
"ttrajs",
"=",
"_types",
".",... | r"""
Weighted histogram analysis method
Parameters
----------
ttrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are
indexes in 0,...,num_therm_states-1 enumerating the thermodynamic states the trajectory is
in at any time.
dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
bias : numpy.ndarray(shape=(num_therm_states, num_conf_states)) object
bias_energies_full[j, i] is the bias energy in units of kT for each discrete state i
at thermodynamic state j.
maxiter : int, optional, default=10000
The maximum number of dTRAM iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1e-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual loglikelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
Returns
-------
A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` object
A stationary model which consists of thermodynamic quantities at all
temperatures/thermodynamic states.
Example
-------
**Umbrella sampling**: Suppose we simulate in K umbrellas, centered at
positions :math:`y_0,...,y_{K-1}` with bias energies
.. math::
b_k(x) = \frac{c_k}{2 \textrm{kT}} \cdot (x - y_k)^2
Suppose we have one simulation of length T in each umbrella, and they are ordered from 0 to K-1.
We have discretized the x-coordinate into 100 bins.
Then dtrajs and ttrajs should each be a list of :math:`K` arrays.
dtrajs would look for example like this::
[ (0, 0, 0, 0, 1, 1, 1, 0, 0, 0, ...), (0, 1, 0, 1, 0, 1, 1, 0, 0, 1, ...), ... ]
where each array has length T, and is the sequence of bins (in the range 0 to 99) visited along
the trajectory. ttrajs would look like this::
[ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...), (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...), ... ]
Because trajectory 1 stays in umbrella 1 (index 0), trajectory 2 stays in umbrella 2 (index 1),
and so forth. bias is a :math:`K \times n` matrix with all reduced bias energies evaluated at
all centers:
.. math::
\left(\begin{array}{cccc}
b_0(y_0) & b_0(y_1) & ... & b_0(y_{n-1}) \\
b_1(y_0) & b_1(y_1) & ... & b_1(y_{n-1}) \\
... \\
b_{K-1}(y_0) & b_{K-1}(y_1) & ... & b_{K-1}(y_{n-1})
\end{array}\right)
Let us try the above example:
>>> from pyemma.thermo import wham
>>> import numpy as np
>>> ttrajs = [np.array([0,0,0,0,0,0,0,0,0,0]), np.array([1,1,1,1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1,0,0,0]), np.array([0,1,0,1,0,1,1,0,0,1])]
>>> bias = np.array([[0.0, 0.0], [0.5, 1.0]])
>>> wham_obj = wham(ttrajs, dtrajs, bias)
>>> wham_obj.log_likelihood() # doctest: +ELLIPSIS
-6.6...
>>> wham_obj.state_counts # doctest: +SKIP
array([[7, 3],
[5, 5]])
>>> wham_obj.stationary_distribution # doctest: +ELLIPSIS +REPORT_NDIFF
array([ 0.5..., 0.4...])
See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` for a full
documentation.
.. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:attributes:
References
----------
.. [1] Ferrenberg, A.M. and Swensen, R.H. 1988.
New Monte Carlo Technique for Studying Phase Transitions.
Phys. Rev. Lett. 23, 2635--2638
.. [2] Kumar, S. et al 1992.
The Weighted Histogram Analysis Method for Free-Energy Calculations on Biomolecules. I. The Method.
J. Comp. Chem. 13, 1011--1021 | [
"r",
"Weighted",
"histogram",
"analysis",
"method"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/thermo/api.py#L909-L1050 | train | 204,242 |
markovmodel/PyEMMA | pyemma/thermo/api.py | mbar | def mbar(
ttrajs, dtrajs, bias,
maxiter=100000, maxerr=1.0E-15, save_convergence_info=0,
dt_traj='1 step', direct_space=False):
r"""
Multi-state Bennet acceptance ratio
Parameters
----------
ttrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are
indexes in 0,...,num_therm_states-1 enumerating the thermodynamic states the trajectory is
in at any time.
dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
bias : numpy.ndarray(T, num_therm_states), or list of numpy.ndarray(T_i, num_therm_states)
A single reduced bias energy trajectory or a list of reduced bias energy trajectories.
For every simulation frame seen in trajectory i and time step t, btrajs[i][t, k] is the
reduced bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at
the k'th umbrella/Hamiltonian/temperature)
maxiter : int, optional, default=10000
The maximum number of dTRAM iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1e-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual loglikelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
direct_space : bool, optional, default=False
Whether to perform the self-consitent iteration with Boltzmann factors
(direct space) or free energies (log-space). When analyzing data from
multi-temperature simulations, direct-space is not recommended.
Returns
-------
A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` object
A stationary model which consists of thermodynamic quantities at all
temperatures/thermodynamic states.
Example
-------
**Umbrella sampling**: Suppose we simulate in K umbrellas, centered at
positions :math:`y_0,...,y_{K-1}` with bias energies
.. math::
b_k(x) = \frac{c_k}{2 \textrm{kT}} \cdot (x - y_k)^2
Suppose we have one simulation of length T in each umbrella, and they are ordered from 0 to K-1.
We have discretized the x-coordinate into 100 bins.
Then dtrajs and ttrajs should each be a list of :math:`K` arrays.
dtrajs would look for example like this::
[ (0, 0, 0, 0, 1, 1, 1, 0, 0, 0, ...), (0, 1, 0, 1, 0, 1, 1, 0, 0, 1, ...), ... ]
where each array has length T, and is the sequence of bins (in the range 0 to 99) visited along
the trajectory. ttrajs would look like this::
[ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...), (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...), ... ]
Because trajectory 1 stays in umbrella 1 (index 0), trajectory 2 stays in umbrella 2 (index 1),
and so forth.
The bias would be a list of :math:`T \times K` arrays which specify each frame's bias energy in
all thermodynamic states:
[ ((0, 1.7, 2.3, 6.1, ...), ...), ((0, 2.4, 3.1, 9,5, ...), ...), ... ]
Let us try the above example:
>>> from pyemma.thermo import mbar
>>> import numpy as np
>>> ttrajs = [np.array([0,0,0,0,0,0,0]), np.array([1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1]), np.array([0,1,0,1,0,1,1])]
>>> bias = [np.array([[1,0],[1,0],[0,0],[0,0],[0,0],[0,0],[0,0]],dtype=np.float64), np.array([[1,0],[0,0],[0,0],[1,0],[0,0],[1,0],[1,0]],dtype=np.float64)]
>>> mbar_obj = mbar(ttrajs, dtrajs, bias, maxiter=1000000, maxerr=1.0E-14)
>>> mbar_obj.stationary_distribution # doctest: +ELLIPSIS
array([ 0.5... 0.5...])
See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` for a full
documentation.
.. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:attributes:
References
----------
.. [1] Shirts, M.R. and Chodera, J.D. 2008
Statistically optimal analysis of samples from multiple equilibrium states
J. Chem. Phys. 129, 124105
"""
# check trajectories
ttrajs = _types.ensure_dtraj_list(ttrajs)
dtrajs = _types.ensure_dtraj_list(dtrajs)
if len(ttrajs) != len(dtrajs):
raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (
len(dtrajs), len(ttrajs)))
if len(ttrajs) != len(bias):
raise ValueError("Unmatching number of ttraj/bias elements: %d!=%d" % (
len(ttrajs), len(bias)))
for ttraj, dtraj, btraj in zip(ttrajs, dtrajs, bias):
if len(ttraj) != len(dtraj):
raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (
len(ttraj), len(dtraj)))
if len(ttraj) != btraj.shape[0]:
raise ValueError("Unmatching number of data points in ttraj/bias trajectory: %d!=%d" % (
len(ttraj), len(btraj)))
# build MBAR
from pyemma.thermo import MBAR
mbar_estimator = MBAR(
maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info,
dt_traj=dt_traj, direct_space=direct_space)
# run estimation
return mbar_estimator.estimate((ttrajs, dtrajs, bias)) | python | def mbar(
ttrajs, dtrajs, bias,
maxiter=100000, maxerr=1.0E-15, save_convergence_info=0,
dt_traj='1 step', direct_space=False):
r"""
Multi-state Bennet acceptance ratio
Parameters
----------
ttrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are
indexes in 0,...,num_therm_states-1 enumerating the thermodynamic states the trajectory is
in at any time.
dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
bias : numpy.ndarray(T, num_therm_states), or list of numpy.ndarray(T_i, num_therm_states)
A single reduced bias energy trajectory or a list of reduced bias energy trajectories.
For every simulation frame seen in trajectory i and time step t, btrajs[i][t, k] is the
reduced bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at
the k'th umbrella/Hamiltonian/temperature)
maxiter : int, optional, default=10000
The maximum number of dTRAM iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1e-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual loglikelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
direct_space : bool, optional, default=False
Whether to perform the self-consitent iteration with Boltzmann factors
(direct space) or free energies (log-space). When analyzing data from
multi-temperature simulations, direct-space is not recommended.
Returns
-------
A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` object
A stationary model which consists of thermodynamic quantities at all
temperatures/thermodynamic states.
Example
-------
**Umbrella sampling**: Suppose we simulate in K umbrellas, centered at
positions :math:`y_0,...,y_{K-1}` with bias energies
.. math::
b_k(x) = \frac{c_k}{2 \textrm{kT}} \cdot (x - y_k)^2
Suppose we have one simulation of length T in each umbrella, and they are ordered from 0 to K-1.
We have discretized the x-coordinate into 100 bins.
Then dtrajs and ttrajs should each be a list of :math:`K` arrays.
dtrajs would look for example like this::
[ (0, 0, 0, 0, 1, 1, 1, 0, 0, 0, ...), (0, 1, 0, 1, 0, 1, 1, 0, 0, 1, ...), ... ]
where each array has length T, and is the sequence of bins (in the range 0 to 99) visited along
the trajectory. ttrajs would look like this::
[ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...), (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...), ... ]
Because trajectory 1 stays in umbrella 1 (index 0), trajectory 2 stays in umbrella 2 (index 1),
and so forth.
The bias would be a list of :math:`T \times K` arrays which specify each frame's bias energy in
all thermodynamic states:
[ ((0, 1.7, 2.3, 6.1, ...), ...), ((0, 2.4, 3.1, 9,5, ...), ...), ... ]
Let us try the above example:
>>> from pyemma.thermo import mbar
>>> import numpy as np
>>> ttrajs = [np.array([0,0,0,0,0,0,0]), np.array([1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1]), np.array([0,1,0,1,0,1,1])]
>>> bias = [np.array([[1,0],[1,0],[0,0],[0,0],[0,0],[0,0],[0,0]],dtype=np.float64), np.array([[1,0],[0,0],[0,0],[1,0],[0,0],[1,0],[1,0]],dtype=np.float64)]
>>> mbar_obj = mbar(ttrajs, dtrajs, bias, maxiter=1000000, maxerr=1.0E-14)
>>> mbar_obj.stationary_distribution # doctest: +ELLIPSIS
array([ 0.5... 0.5...])
See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` for a full
documentation.
.. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:attributes:
References
----------
.. [1] Shirts, M.R. and Chodera, J.D. 2008
Statistically optimal analysis of samples from multiple equilibrium states
J. Chem. Phys. 129, 124105
"""
# check trajectories
ttrajs = _types.ensure_dtraj_list(ttrajs)
dtrajs = _types.ensure_dtraj_list(dtrajs)
if len(ttrajs) != len(dtrajs):
raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (
len(dtrajs), len(ttrajs)))
if len(ttrajs) != len(bias):
raise ValueError("Unmatching number of ttraj/bias elements: %d!=%d" % (
len(ttrajs), len(bias)))
for ttraj, dtraj, btraj in zip(ttrajs, dtrajs, bias):
if len(ttraj) != len(dtraj):
raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (
len(ttraj), len(dtraj)))
if len(ttraj) != btraj.shape[0]:
raise ValueError("Unmatching number of data points in ttraj/bias trajectory: %d!=%d" % (
len(ttraj), len(btraj)))
# build MBAR
from pyemma.thermo import MBAR
mbar_estimator = MBAR(
maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info,
dt_traj=dt_traj, direct_space=direct_space)
# run estimation
return mbar_estimator.estimate((ttrajs, dtrajs, bias)) | [
"def",
"mbar",
"(",
"ttrajs",
",",
"dtrajs",
",",
"bias",
",",
"maxiter",
"=",
"100000",
",",
"maxerr",
"=",
"1.0E-15",
",",
"save_convergence_info",
"=",
"0",
",",
"dt_traj",
"=",
"'1 step'",
",",
"direct_space",
"=",
"False",
")",
":",
"# check trajector... | r"""
Multi-state Bennet acceptance ratio
Parameters
----------
ttrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are
indexes in 0,...,num_therm_states-1 enumerating the thermodynamic states the trajectory is
in at any time.
dtrajs : numpy.ndarray(T) of int, or list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
bias : numpy.ndarray(T, num_therm_states), or list of numpy.ndarray(T_i, num_therm_states)
A single reduced bias energy trajectory or a list of reduced bias energy trajectories.
For every simulation frame seen in trajectory i and time step t, btrajs[i][t, k] is the
reduced bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at
the k'th umbrella/Hamiltonian/temperature)
maxiter : int, optional, default=10000
The maximum number of dTRAM iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1e-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual loglikelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
direct_space : bool, optional, default=False
Whether to perform the self-consitent iteration with Boltzmann factors
(direct space) or free energies (log-space). When analyzing data from
multi-temperature simulations, direct-space is not recommended.
Returns
-------
A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` object
A stationary model which consists of thermodynamic quantities at all
temperatures/thermodynamic states.
Example
-------
**Umbrella sampling**: Suppose we simulate in K umbrellas, centered at
positions :math:`y_0,...,y_{K-1}` with bias energies
.. math::
b_k(x) = \frac{c_k}{2 \textrm{kT}} \cdot (x - y_k)^2
Suppose we have one simulation of length T in each umbrella, and they are ordered from 0 to K-1.
We have discretized the x-coordinate into 100 bins.
Then dtrajs and ttrajs should each be a list of :math:`K` arrays.
dtrajs would look for example like this::
[ (0, 0, 0, 0, 1, 1, 1, 0, 0, 0, ...), (0, 1, 0, 1, 0, 1, 1, 0, 0, 1, ...), ... ]
where each array has length T, and is the sequence of bins (in the range 0 to 99) visited along
the trajectory. ttrajs would look like this::
[ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...), (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...), ... ]
Because trajectory 1 stays in umbrella 1 (index 0), trajectory 2 stays in umbrella 2 (index 1),
and so forth.
The bias would be a list of :math:`T \times K` arrays which specify each frame's bias energy in
all thermodynamic states:
[ ((0, 1.7, 2.3, 6.1, ...), ...), ((0, 2.4, 3.1, 9,5, ...), ...), ... ]
Let us try the above example:
>>> from pyemma.thermo import mbar
>>> import numpy as np
>>> ttrajs = [np.array([0,0,0,0,0,0,0]), np.array([1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1]), np.array([0,1,0,1,0,1,1])]
>>> bias = [np.array([[1,0],[1,0],[0,0],[0,0],[0,0],[0,0],[0,0]],dtype=np.float64), np.array([[1,0],[0,0],[0,0],[1,0],[0,0],[1,0],[1,0]],dtype=np.float64)]
>>> mbar_obj = mbar(ttrajs, dtrajs, bias, maxiter=1000000, maxerr=1.0E-14)
>>> mbar_obj.stationary_distribution # doctest: +ELLIPSIS
array([ 0.5... 0.5...])
See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` for a full
documentation.
.. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel
:attributes:
References
----------
.. [1] Shirts, M.R. and Chodera, J.D. 2008
Statistically optimal analysis of samples from multiple equilibrium states
J. Chem. Phys. 129, 124105 | [
"r",
"Multi",
"-",
"state",
"Bennet",
"acceptance",
"ratio"
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/thermo/api.py#L1052-L1193 | train | 204,243 |
markovmodel/PyEMMA | pyemma/coordinates/data/_base/iterable.py | Iterable.default_chunksize | def default_chunksize(self):
""" How much data will be processed at once, in case no chunksize has been provided.
Notes
-----
This variable respects your setting for maximum memory in pyemma.config.default_chunksize
"""
if self._default_chunksize is None:
try:
# TODO: if dimension is not yet fixed (eg tica var cutoff, use dim of data_producer.
self.dimension()
self.output_type()
except:
self._default_chunksize = Iterable._FALLBACK_CHUNKSIZE
else:
self._default_chunksize = Iterable._compute_default_cs(self.dimension(),
self.output_type().itemsize, self.logger)
return self._default_chunksize | python | def default_chunksize(self):
""" How much data will be processed at once, in case no chunksize has been provided.
Notes
-----
This variable respects your setting for maximum memory in pyemma.config.default_chunksize
"""
if self._default_chunksize is None:
try:
# TODO: if dimension is not yet fixed (eg tica var cutoff, use dim of data_producer.
self.dimension()
self.output_type()
except:
self._default_chunksize = Iterable._FALLBACK_CHUNKSIZE
else:
self._default_chunksize = Iterable._compute_default_cs(self.dimension(),
self.output_type().itemsize, self.logger)
return self._default_chunksize | [
"def",
"default_chunksize",
"(",
"self",
")",
":",
"if",
"self",
".",
"_default_chunksize",
"is",
"None",
":",
"try",
":",
"# TODO: if dimension is not yet fixed (eg tica var cutoff, use dim of data_producer.",
"self",
".",
"dimension",
"(",
")",
"self",
".",
"output_ty... | How much data will be processed at once, in case no chunksize has been provided.
Notes
-----
This variable respects your setting for maximum memory in pyemma.config.default_chunksize | [
"How",
"much",
"data",
"will",
"be",
"processed",
"at",
"once",
"in",
"case",
"no",
"chunksize",
"has",
"been",
"provided",
"."
] | 5c3124398217de05ba5ce9c8fb01519222481ab8 | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/_base/iterable.py#L64-L81 | train | 204,244 |
larsmans/seqlearn | seqlearn/hmm.py | MultinomialHMM.fit | def fit(self, X, y, lengths):
"""Fit HMM model to data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Feature matrix of individual samples.
y : array-like, shape (n_samples,)
Target labels.
lengths : array-like of integers, shape (n_sequences,)
Lengths of the individual sequences in X, y. The sum of these
should be n_samples.
Notes
-----
Make sure the training set (X) is one-hot encoded; if more than one
feature in X is on, the emission probabilities will be multiplied.
Returns
-------
self : MultinomialHMM
"""
alpha = self.alpha
if alpha <= 0:
raise ValueError("alpha should be >0, got {0!r}".format(alpha))
X = atleast2d_or_csr(X)
classes, y = np.unique(y, return_inverse=True)
lengths = np.asarray(lengths)
Y = y.reshape(-1, 1) == np.arange(len(classes))
end = np.cumsum(lengths)
start = end - lengths
init_prob = np.log(Y[start].sum(axis=0) + alpha)
init_prob -= logsumexp(init_prob)
final_prob = np.log(Y[start].sum(axis=0) + alpha)
final_prob -= logsumexp(final_prob)
feature_prob = np.log(safe_sparse_dot(Y.T, X) + alpha)
feature_prob -= logsumexp(feature_prob, axis=0)
trans_prob = np.log(count_trans(y, len(classes)) + alpha)
trans_prob -= logsumexp(trans_prob, axis=0)
self.coef_ = feature_prob
self.intercept_init_ = init_prob
self.intercept_final_ = final_prob
self.intercept_trans_ = trans_prob
self.classes_ = classes
return self | python | def fit(self, X, y, lengths):
"""Fit HMM model to data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Feature matrix of individual samples.
y : array-like, shape (n_samples,)
Target labels.
lengths : array-like of integers, shape (n_sequences,)
Lengths of the individual sequences in X, y. The sum of these
should be n_samples.
Notes
-----
Make sure the training set (X) is one-hot encoded; if more than one
feature in X is on, the emission probabilities will be multiplied.
Returns
-------
self : MultinomialHMM
"""
alpha = self.alpha
if alpha <= 0:
raise ValueError("alpha should be >0, got {0!r}".format(alpha))
X = atleast2d_or_csr(X)
classes, y = np.unique(y, return_inverse=True)
lengths = np.asarray(lengths)
Y = y.reshape(-1, 1) == np.arange(len(classes))
end = np.cumsum(lengths)
start = end - lengths
init_prob = np.log(Y[start].sum(axis=0) + alpha)
init_prob -= logsumexp(init_prob)
final_prob = np.log(Y[start].sum(axis=0) + alpha)
final_prob -= logsumexp(final_prob)
feature_prob = np.log(safe_sparse_dot(Y.T, X) + alpha)
feature_prob -= logsumexp(feature_prob, axis=0)
trans_prob = np.log(count_trans(y, len(classes)) + alpha)
trans_prob -= logsumexp(trans_prob, axis=0)
self.coef_ = feature_prob
self.intercept_init_ = init_prob
self.intercept_final_ = final_prob
self.intercept_trans_ = trans_prob
self.classes_ = classes
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"lengths",
")",
":",
"alpha",
"=",
"self",
".",
"alpha",
"if",
"alpha",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"alpha should be >0, got {0!r}\"",
".",
"format",
"(",
"alpha",
")",
")",
"X",
"... | Fit HMM model to data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Feature matrix of individual samples.
y : array-like, shape (n_samples,)
Target labels.
lengths : array-like of integers, shape (n_sequences,)
Lengths of the individual sequences in X, y. The sum of these
should be n_samples.
Notes
-----
Make sure the training set (X) is one-hot encoded; if more than one
feature in X is on, the emission probabilities will be multiplied.
Returns
-------
self : MultinomialHMM | [
"Fit",
"HMM",
"model",
"to",
"data",
"."
] | 32d4bfaebdd877733f180ea6072e8fc1266bc559 | https://github.com/larsmans/seqlearn/blob/32d4bfaebdd877733f180ea6072e8fc1266bc559/seqlearn/hmm.py#L28-L83 | train | 204,245 |
larsmans/seqlearn | doc/hash-tree.py | _lstree | def _lstree(files, dirs):
"""Make git ls-tree like output."""
for f, sha1 in files:
yield "100644 blob {}\t{}\0".format(sha1, f)
for d, sha1 in dirs:
yield "040000 tree {}\t{}\0".format(sha1, d) | python | def _lstree(files, dirs):
"""Make git ls-tree like output."""
for f, sha1 in files:
yield "100644 blob {}\t{}\0".format(sha1, f)
for d, sha1 in dirs:
yield "040000 tree {}\t{}\0".format(sha1, d) | [
"def",
"_lstree",
"(",
"files",
",",
"dirs",
")",
":",
"for",
"f",
",",
"sha1",
"in",
"files",
":",
"yield",
"\"100644 blob {}\\t{}\\0\"",
".",
"format",
"(",
"sha1",
",",
"f",
")",
"for",
"d",
",",
"sha1",
"in",
"dirs",
":",
"yield",
"\"040000 tree {}... | Make git ls-tree like output. | [
"Make",
"git",
"ls",
"-",
"tree",
"like",
"output",
"."
] | 32d4bfaebdd877733f180ea6072e8fc1266bc559 | https://github.com/larsmans/seqlearn/blob/32d4bfaebdd877733f180ea6072e8fc1266bc559/doc/hash-tree.py#L21-L27 | train | 204,246 |
larsmans/seqlearn | doc/hash-tree.py | hash_dir | def hash_dir(path):
"""Write directory at path to Git index, return its SHA1 as a string."""
dir_hash = {}
for root, dirs, files in os.walk(path, topdown=False):
f_hash = ((f, hash_file(join(root, f))) for f in files)
d_hash = ((d, dir_hash[join(root, d)]) for d in dirs)
# split+join normalizes paths on Windows (note the imports)
dir_hash[join(*split(root))] = _mktree(f_hash, d_hash)
return dir_hash[path] | python | def hash_dir(path):
"""Write directory at path to Git index, return its SHA1 as a string."""
dir_hash = {}
for root, dirs, files in os.walk(path, topdown=False):
f_hash = ((f, hash_file(join(root, f))) for f in files)
d_hash = ((d, dir_hash[join(root, d)]) for d in dirs)
# split+join normalizes paths on Windows (note the imports)
dir_hash[join(*split(root))] = _mktree(f_hash, d_hash)
return dir_hash[path] | [
"def",
"hash_dir",
"(",
"path",
")",
":",
"dir_hash",
"=",
"{",
"}",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
",",
"topdown",
"=",
"False",
")",
":",
"f_hash",
"=",
"(",
"(",
"f",
",",
"hash_file",
"(",
"j... | Write directory at path to Git index, return its SHA1 as a string. | [
"Write",
"directory",
"at",
"path",
"to",
"Git",
"index",
"return",
"its",
"SHA1",
"as",
"a",
"string",
"."
] | 32d4bfaebdd877733f180ea6072e8fc1266bc559 | https://github.com/larsmans/seqlearn/blob/32d4bfaebdd877733f180ea6072e8fc1266bc559/doc/hash-tree.py#L35-L45 | train | 204,247 |
larsmans/seqlearn | examples/conll.py | features | def features(sentence, i):
"""Features for i'th token in sentence.
Currently baseline named-entity recognition features, but these can
easily be changed to do POS tagging or chunking.
"""
word = sentence[i]
yield "word:{}" + word.lower()
if word[0].isupper():
yield "CAP"
if i > 0:
yield "word-1:{}" + sentence[i - 1].lower()
if i > 1:
yield "word-2:{}" + sentence[i - 2].lower()
if i + 1 < len(sentence):
yield "word+1:{}" + sentence[i + 1].lower()
if i + 2 < len(sentence):
yield "word+2:{}" + sentence[i + 2].lower() | python | def features(sentence, i):
"""Features for i'th token in sentence.
Currently baseline named-entity recognition features, but these can
easily be changed to do POS tagging or chunking.
"""
word = sentence[i]
yield "word:{}" + word.lower()
if word[0].isupper():
yield "CAP"
if i > 0:
yield "word-1:{}" + sentence[i - 1].lower()
if i > 1:
yield "word-2:{}" + sentence[i - 2].lower()
if i + 1 < len(sentence):
yield "word+1:{}" + sentence[i + 1].lower()
if i + 2 < len(sentence):
yield "word+2:{}" + sentence[i + 2].lower() | [
"def",
"features",
"(",
"sentence",
",",
"i",
")",
":",
"word",
"=",
"sentence",
"[",
"i",
"]",
"yield",
"\"word:{}\"",
"+",
"word",
".",
"lower",
"(",
")",
"if",
"word",
"[",
"0",
"]",
".",
"isupper",
"(",
")",
":",
"yield",
"\"CAP\"",
"if",
"i"... | Features for i'th token in sentence.
Currently baseline named-entity recognition features, but these can
easily be changed to do POS tagging or chunking. | [
"Features",
"for",
"i",
"th",
"token",
"in",
"sentence",
"."
] | 32d4bfaebdd877733f180ea6072e8fc1266bc559 | https://github.com/larsmans/seqlearn/blob/32d4bfaebdd877733f180ea6072e8fc1266bc559/examples/conll.py#L18-L39 | train | 204,248 |
larsmans/seqlearn | seqlearn/evaluation.py | whole_sequence_accuracy | def whole_sequence_accuracy(y_true, y_pred, lengths):
"""Average accuracy measured on whole sequences.
Returns the fraction of sequences in y_true that occur in y_pred without a
single error.
"""
lengths = np.asarray(lengths)
end = np.cumsum(lengths)
start = end - lengths
bounds = np.vstack([start, end]).T
errors = sum(1. for i, j in bounds
if np.any(y_true[i:j] != y_pred[i:j]))
return 1 - errors / len(lengths) | python | def whole_sequence_accuracy(y_true, y_pred, lengths):
"""Average accuracy measured on whole sequences.
Returns the fraction of sequences in y_true that occur in y_pred without a
single error.
"""
lengths = np.asarray(lengths)
end = np.cumsum(lengths)
start = end - lengths
bounds = np.vstack([start, end]).T
errors = sum(1. for i, j in bounds
if np.any(y_true[i:j] != y_pred[i:j]))
return 1 - errors / len(lengths) | [
"def",
"whole_sequence_accuracy",
"(",
"y_true",
",",
"y_pred",
",",
"lengths",
")",
":",
"lengths",
"=",
"np",
".",
"asarray",
"(",
"lengths",
")",
"end",
"=",
"np",
".",
"cumsum",
"(",
"lengths",
")",
"start",
"=",
"end",
"-",
"lengths",
"bounds",
"=... | Average accuracy measured on whole sequences.
Returns the fraction of sequences in y_true that occur in y_pred without a
single error. | [
"Average",
"accuracy",
"measured",
"on",
"whole",
"sequences",
"."
] | 32d4bfaebdd877733f180ea6072e8fc1266bc559 | https://github.com/larsmans/seqlearn/blob/32d4bfaebdd877733f180ea6072e8fc1266bc559/seqlearn/evaluation.py#L75-L88 | train | 204,249 |
larsmans/seqlearn | seqlearn/datasets.py | load_conll | def load_conll(f, features, n_features=(2 ** 16), split=False):
"""Load CoNLL file, extract features on the tokens and vectorize them.
The ConLL file format is a line-oriented text format that describes
sequences in a space-separated format, separating the sequences with
blank lines. Typically, the last space-separated part is a label.
Since the tab-separated parts are usually tokens (and maybe things like
part-of-speech tags) rather than feature vectors, a function must be
supplied that does the actual feature extraction. This function has access
to the entire sequence, so that it can extract context features.
A ``sklearn.feature_extraction.FeatureHasher`` (the "hashing trick")
is used to map symbolic input feature names to columns, so this function
dos not remember the actual input feature names.
Parameters
----------
f : {string, file-like}
Input file.
features : callable
Feature extraction function. Must take a list of tokens l that
represent a single sequence and an index i into this list, and must
return an iterator over strings that represent the features of l[i].
n_features : integer, optional
Number of columns in the output.
split : boolean, default=False
Whether to split lines on whitespace beyond what is needed to parse
out the labels. This is useful for CoNLL files that have extra columns
containing information like part of speech tags.
Returns
-------
X : scipy.sparse matrix, shape (n_samples, n_features)
Samples (feature vectors), as a single sparse matrix.
y : np.ndarray, dtype np.string, shape n_samples
Per-sample labels.
lengths : np.ndarray, dtype np.int32, shape n_sequences
Lengths of sequences within (X, y). The sum of these is equal to
n_samples.
"""
fh = FeatureHasher(n_features=n_features, input_type="string")
labels = []
lengths = []
with _open(f) as f:
raw_X = _conll_sequences(f, features, labels, lengths, split)
X = fh.transform(raw_X)
return X, np.asarray(labels), np.asarray(lengths, dtype=np.int32) | python | def load_conll(f, features, n_features=(2 ** 16), split=False):
"""Load CoNLL file, extract features on the tokens and vectorize them.
The ConLL file format is a line-oriented text format that describes
sequences in a space-separated format, separating the sequences with
blank lines. Typically, the last space-separated part is a label.
Since the tab-separated parts are usually tokens (and maybe things like
part-of-speech tags) rather than feature vectors, a function must be
supplied that does the actual feature extraction. This function has access
to the entire sequence, so that it can extract context features.
A ``sklearn.feature_extraction.FeatureHasher`` (the "hashing trick")
is used to map symbolic input feature names to columns, so this function
dos not remember the actual input feature names.
Parameters
----------
f : {string, file-like}
Input file.
features : callable
Feature extraction function. Must take a list of tokens l that
represent a single sequence and an index i into this list, and must
return an iterator over strings that represent the features of l[i].
n_features : integer, optional
Number of columns in the output.
split : boolean, default=False
Whether to split lines on whitespace beyond what is needed to parse
out the labels. This is useful for CoNLL files that have extra columns
containing information like part of speech tags.
Returns
-------
X : scipy.sparse matrix, shape (n_samples, n_features)
Samples (feature vectors), as a single sparse matrix.
y : np.ndarray, dtype np.string, shape n_samples
Per-sample labels.
lengths : np.ndarray, dtype np.int32, shape n_sequences
Lengths of sequences within (X, y). The sum of these is equal to
n_samples.
"""
fh = FeatureHasher(n_features=n_features, input_type="string")
labels = []
lengths = []
with _open(f) as f:
raw_X = _conll_sequences(f, features, labels, lengths, split)
X = fh.transform(raw_X)
return X, np.asarray(labels), np.asarray(lengths, dtype=np.int32) | [
"def",
"load_conll",
"(",
"f",
",",
"features",
",",
"n_features",
"=",
"(",
"2",
"**",
"16",
")",
",",
"split",
"=",
"False",
")",
":",
"fh",
"=",
"FeatureHasher",
"(",
"n_features",
"=",
"n_features",
",",
"input_type",
"=",
"\"string\"",
")",
"label... | Load CoNLL file, extract features on the tokens and vectorize them.
The ConLL file format is a line-oriented text format that describes
sequences in a space-separated format, separating the sequences with
blank lines. Typically, the last space-separated part is a label.
Since the tab-separated parts are usually tokens (and maybe things like
part-of-speech tags) rather than feature vectors, a function must be
supplied that does the actual feature extraction. This function has access
to the entire sequence, so that it can extract context features.
A ``sklearn.feature_extraction.FeatureHasher`` (the "hashing trick")
is used to map symbolic input feature names to columns, so this function
dos not remember the actual input feature names.
Parameters
----------
f : {string, file-like}
Input file.
features : callable
Feature extraction function. Must take a list of tokens l that
represent a single sequence and an index i into this list, and must
return an iterator over strings that represent the features of l[i].
n_features : integer, optional
Number of columns in the output.
split : boolean, default=False
Whether to split lines on whitespace beyond what is needed to parse
out the labels. This is useful for CoNLL files that have extra columns
containing information like part of speech tags.
Returns
-------
X : scipy.sparse matrix, shape (n_samples, n_features)
Samples (feature vectors), as a single sparse matrix.
y : np.ndarray, dtype np.string, shape n_samples
Per-sample labels.
lengths : np.ndarray, dtype np.int32, shape n_sequences
Lengths of sequences within (X, y). The sum of these is equal to
n_samples. | [
"Load",
"CoNLL",
"file",
"extract",
"features",
"on",
"the",
"tokens",
"and",
"vectorize",
"them",
"."
] | 32d4bfaebdd877733f180ea6072e8fc1266bc559 | https://github.com/larsmans/seqlearn/blob/32d4bfaebdd877733f180ea6072e8fc1266bc559/seqlearn/datasets.py#L11-L60 | train | 204,250 |
larsmans/seqlearn | seqlearn/_utils/__init__.py | atleast2d_or_csr | def atleast2d_or_csr(X, dtype=None, order=None, copy=False):
"""Like numpy.atleast_2d, but converts sparse matrices to CSR format
Also, converts np.matrix to np.ndarray.
"""
return _atleast2d_or_sparse(X, dtype, order, copy, sp.csr_matrix,
"tocsr", sp.isspmatrix_csr) | python | def atleast2d_or_csr(X, dtype=None, order=None, copy=False):
"""Like numpy.atleast_2d, but converts sparse matrices to CSR format
Also, converts np.matrix to np.ndarray.
"""
return _atleast2d_or_sparse(X, dtype, order, copy, sp.csr_matrix,
"tocsr", sp.isspmatrix_csr) | [
"def",
"atleast2d_or_csr",
"(",
"X",
",",
"dtype",
"=",
"None",
",",
"order",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"return",
"_atleast2d_or_sparse",
"(",
"X",
",",
"dtype",
",",
"order",
",",
"copy",
",",
"sp",
".",
"csr_matrix",
",",
"\... | Like numpy.atleast_2d, but converts sparse matrices to CSR format
Also, converts np.matrix to np.ndarray. | [
"Like",
"numpy",
".",
"atleast_2d",
"but",
"converts",
"sparse",
"matrices",
"to",
"CSR",
"format"
] | 32d4bfaebdd877733f180ea6072e8fc1266bc559 | https://github.com/larsmans/seqlearn/blob/32d4bfaebdd877733f180ea6072e8fc1266bc559/seqlearn/_utils/__init__.py#L70-L76 | train | 204,251 |
larsmans/seqlearn | seqlearn/_utils/__init__.py | validate_lengths | def validate_lengths(n_samples, lengths):
"""Validate lengths array against n_samples.
Parameters
----------
n_samples : integer
Total number of samples.
lengths : array-like of integers, shape (n_sequences,), optional
Lengths of individual sequences in the input.
Returns
-------
start : array of integers, shape (n_sequences,)
Start indices of sequences.
end : array of integers, shape (n_sequences,)
One-past-the-end indices of sequences.
"""
if lengths is None:
lengths = [n_samples]
lengths = np.asarray(lengths, dtype=np.int32)
if lengths.sum() > n_samples:
msg = "More than {0:d} samples in lengths array {1!s}"
raise ValueError(msg.format(n_samples, lengths))
end = np.cumsum(lengths)
start = end - lengths
return start, end | python | def validate_lengths(n_samples, lengths):
"""Validate lengths array against n_samples.
Parameters
----------
n_samples : integer
Total number of samples.
lengths : array-like of integers, shape (n_sequences,), optional
Lengths of individual sequences in the input.
Returns
-------
start : array of integers, shape (n_sequences,)
Start indices of sequences.
end : array of integers, shape (n_sequences,)
One-past-the-end indices of sequences.
"""
if lengths is None:
lengths = [n_samples]
lengths = np.asarray(lengths, dtype=np.int32)
if lengths.sum() > n_samples:
msg = "More than {0:d} samples in lengths array {1!s}"
raise ValueError(msg.format(n_samples, lengths))
end = np.cumsum(lengths)
start = end - lengths
return start, end | [
"def",
"validate_lengths",
"(",
"n_samples",
",",
"lengths",
")",
":",
"if",
"lengths",
"is",
"None",
":",
"lengths",
"=",
"[",
"n_samples",
"]",
"lengths",
"=",
"np",
".",
"asarray",
"(",
"lengths",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"if",
... | Validate lengths array against n_samples.
Parameters
----------
n_samples : integer
Total number of samples.
lengths : array-like of integers, shape (n_sequences,), optional
Lengths of individual sequences in the input.
Returns
-------
start : array of integers, shape (n_sequences,)
Start indices of sequences.
end : array of integers, shape (n_sequences,)
One-past-the-end indices of sequences. | [
"Validate",
"lengths",
"array",
"against",
"n_samples",
"."
] | 32d4bfaebdd877733f180ea6072e8fc1266bc559 | https://github.com/larsmans/seqlearn/blob/32d4bfaebdd877733f180ea6072e8fc1266bc559/seqlearn/_utils/__init__.py#L79-L108 | train | 204,252 |
larsmans/seqlearn | seqlearn/_utils/transmatrix.py | make_trans_matrix | def make_trans_matrix(y, n_classes, dtype=np.float64):
"""Make a sparse transition matrix for y.
Takes a label sequence y and returns an indicator matrix with n_classes²
columns of the label transitions in y: M[i, j, k] means y[i-1] == j and
y[i] == k. The first row will be empty.
"""
indices = np.empty(len(y), dtype=np.int32)
for i in six.moves.xrange(len(y) - 1):
indices[i] = y[i] * i + y[i + 1]
indptr = np.arange(len(y) + 1)
indptr[-1] = indptr[-2]
return csr_matrix((np.ones(len(y), dtype=dtype), indices, indptr),
shape=(len(y), n_classes ** 2)) | python | def make_trans_matrix(y, n_classes, dtype=np.float64):
"""Make a sparse transition matrix for y.
Takes a label sequence y and returns an indicator matrix with n_classes²
columns of the label transitions in y: M[i, j, k] means y[i-1] == j and
y[i] == k. The first row will be empty.
"""
indices = np.empty(len(y), dtype=np.int32)
for i in six.moves.xrange(len(y) - 1):
indices[i] = y[i] * i + y[i + 1]
indptr = np.arange(len(y) + 1)
indptr[-1] = indptr[-2]
return csr_matrix((np.ones(len(y), dtype=dtype), indices, indptr),
shape=(len(y), n_classes ** 2)) | [
"def",
"make_trans_matrix",
"(",
"y",
",",
"n_classes",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
":",
"indices",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"y",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"i",
"in",
"six",
".",
... | Make a sparse transition matrix for y.
Takes a label sequence y and returns an indicator matrix with n_classes²
columns of the label transitions in y: M[i, j, k] means y[i-1] == j and
y[i] == k. The first row will be empty. | [
"Make",
"a",
"sparse",
"transition",
"matrix",
"for",
"y",
"."
] | 32d4bfaebdd877733f180ea6072e8fc1266bc559 | https://github.com/larsmans/seqlearn/blob/32d4bfaebdd877733f180ea6072e8fc1266bc559/seqlearn/_utils/transmatrix.py#L9-L25 | train | 204,253 |
CircleUp/aresponses | aresponses/main.py | ResponsesMockServer.passthrough | async def passthrough(self, request):
"""Make non-mocked network request"""
connector = TCPConnector()
connector._resolve_host = partial(self._old_resolver_mock, connector)
new_is_ssl = ClientRequest.is_ssl
ClientRequest.is_ssl = self._old_is_ssl
try:
original_request = request.clone(scheme="https" if request.headers["AResponsesIsSSL"] else "http")
headers = {k: v for k, v in request.headers.items() if k != "AResponsesIsSSL"}
async with ClientSession(connector=connector) as session:
async with getattr(session, request.method.lower())(original_request.url, headers=headers, data=(await request.read())) as r:
headers = {k: v for k, v in r.headers.items() if k.lower() == "content-type"}
text = await r.text()
response = self.Response(text=text, status=r.status, headers=headers)
return response
finally:
ClientRequest.is_ssl = new_is_ssl | python | async def passthrough(self, request):
"""Make non-mocked network request"""
connector = TCPConnector()
connector._resolve_host = partial(self._old_resolver_mock, connector)
new_is_ssl = ClientRequest.is_ssl
ClientRequest.is_ssl = self._old_is_ssl
try:
original_request = request.clone(scheme="https" if request.headers["AResponsesIsSSL"] else "http")
headers = {k: v for k, v in request.headers.items() if k != "AResponsesIsSSL"}
async with ClientSession(connector=connector) as session:
async with getattr(session, request.method.lower())(original_request.url, headers=headers, data=(await request.read())) as r:
headers = {k: v for k, v in r.headers.items() if k.lower() == "content-type"}
text = await r.text()
response = self.Response(text=text, status=r.status, headers=headers)
return response
finally:
ClientRequest.is_ssl = new_is_ssl | [
"async",
"def",
"passthrough",
"(",
"self",
",",
"request",
")",
":",
"connector",
"=",
"TCPConnector",
"(",
")",
"connector",
".",
"_resolve_host",
"=",
"partial",
"(",
"self",
".",
"_old_resolver_mock",
",",
"connector",
")",
"new_is_ssl",
"=",
"ClientReques... | Make non-mocked network request | [
"Make",
"non",
"-",
"mocked",
"network",
"request"
] | 41813371d7073b5f2fc0d8a9773e3b98ef8186c8 | https://github.com/CircleUp/aresponses/blob/41813371d7073b5f2fc0d8a9773e3b98ef8186c8/aresponses/main.py#L110-L129 | train | 204,254 |
MahjongRepository/mahjong | mahjong/tile.py | TilesConverter.to_one_line_string | def to_one_line_string(tiles):
"""
Convert 136 tiles array to the one line string
Example of output 123s123p123m33z
"""
tiles = sorted(tiles)
man = [t for t in tiles if t < 36]
pin = [t for t in tiles if 36 <= t < 72]
pin = [t - 36 for t in pin]
sou = [t for t in tiles if 72 <= t < 108]
sou = [t - 72 for t in sou]
honors = [t for t in tiles if t >= 108]
honors = [t - 108 for t in honors]
sou = sou and ''.join([str((i // 4) + 1) for i in sou]) + 's' or ''
pin = pin and ''.join([str((i // 4) + 1) for i in pin]) + 'p' or ''
man = man and ''.join([str((i // 4) + 1) for i in man]) + 'm' or ''
honors = honors and ''.join([str((i // 4) + 1) for i in honors]) + 'z' or ''
return man + pin + sou + honors | python | def to_one_line_string(tiles):
"""
Convert 136 tiles array to the one line string
Example of output 123s123p123m33z
"""
tiles = sorted(tiles)
man = [t for t in tiles if t < 36]
pin = [t for t in tiles if 36 <= t < 72]
pin = [t - 36 for t in pin]
sou = [t for t in tiles if 72 <= t < 108]
sou = [t - 72 for t in sou]
honors = [t for t in tiles if t >= 108]
honors = [t - 108 for t in honors]
sou = sou and ''.join([str((i // 4) + 1) for i in sou]) + 's' or ''
pin = pin and ''.join([str((i // 4) + 1) for i in pin]) + 'p' or ''
man = man and ''.join([str((i // 4) + 1) for i in man]) + 'm' or ''
honors = honors and ''.join([str((i // 4) + 1) for i in honors]) + 'z' or ''
return man + pin + sou + honors | [
"def",
"to_one_line_string",
"(",
"tiles",
")",
":",
"tiles",
"=",
"sorted",
"(",
"tiles",
")",
"man",
"=",
"[",
"t",
"for",
"t",
"in",
"tiles",
"if",
"t",
"<",
"36",
"]",
"pin",
"=",
"[",
"t",
"for",
"t",
"in",
"tiles",
"if",
"36",
"<=",
"t",
... | Convert 136 tiles array to the one line string
Example of output 123s123p123m33z | [
"Convert",
"136",
"tiles",
"array",
"to",
"the",
"one",
"line",
"string",
"Example",
"of",
"output",
"123s123p123m33z"
] | a269cc4649b545f965a2bb0c2a6e704492567f13 | https://github.com/MahjongRepository/mahjong/blob/a269cc4649b545f965a2bb0c2a6e704492567f13/mahjong/tile.py#L17-L40 | train | 204,255 |
MahjongRepository/mahjong | mahjong/tile.py | TilesConverter.to_136_array | def to_136_array(tiles):
"""
Convert 34 array to the 136 tiles array
"""
temp = []
results = []
for x in range(0, 34):
if tiles[x]:
temp_value = [x * 4] * tiles[x]
for tile in temp_value:
if tile in results:
count_of_tiles = len([x for x in temp if x == tile])
new_tile = tile + count_of_tiles
results.append(new_tile)
temp.append(tile)
else:
results.append(tile)
temp.append(tile)
return results | python | def to_136_array(tiles):
"""
Convert 34 array to the 136 tiles array
"""
temp = []
results = []
for x in range(0, 34):
if tiles[x]:
temp_value = [x * 4] * tiles[x]
for tile in temp_value:
if tile in results:
count_of_tiles = len([x for x in temp if x == tile])
new_tile = tile + count_of_tiles
results.append(new_tile)
temp.append(tile)
else:
results.append(tile)
temp.append(tile)
return results | [
"def",
"to_136_array",
"(",
"tiles",
")",
":",
"temp",
"=",
"[",
"]",
"results",
"=",
"[",
"]",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"34",
")",
":",
"if",
"tiles",
"[",
"x",
"]",
":",
"temp_value",
"=",
"[",
"x",
"*",
"4",
"]",
"*",
"t... | Convert 34 array to the 136 tiles array | [
"Convert",
"34",
"array",
"to",
"the",
"136",
"tiles",
"array"
] | a269cc4649b545f965a2bb0c2a6e704492567f13 | https://github.com/MahjongRepository/mahjong/blob/a269cc4649b545f965a2bb0c2a6e704492567f13/mahjong/tile.py#L54-L73 | train | 204,256 |
MahjongRepository/mahjong | mahjong/tile.py | TilesConverter.string_to_136_array | def string_to_136_array(sou=None, pin=None, man=None, honors=None, has_aka_dora=False):
"""
Method to convert one line string tiles format to the 136 array.
You can pass r instead of 5 for it to become a red five from
that suit. To prevent old usage without red,
has_aka_dora has to be True for this to do that.
We need it to increase readability of our tests
"""
def _split_string(string, offset, red=None):
data = []
temp = []
if not string:
return []
for i in string:
if i == 'r' and has_aka_dora:
temp.append(red)
data.append(red)
else:
tile = offset + (int(i) - 1) * 4
if tile == red and has_aka_dora:
# prevent non reds to become red
tile += 1
if tile in data:
count_of_tiles = len([x for x in temp if x == tile])
new_tile = tile + count_of_tiles
data.append(new_tile)
temp.append(tile)
else:
data.append(tile)
temp.append(tile)
return data
results = _split_string(man, 0, FIVE_RED_MAN)
results += _split_string(pin, 36, FIVE_RED_PIN)
results += _split_string(sou, 72, FIVE_RED_SOU)
results += _split_string(honors, 108)
return results | python | def string_to_136_array(sou=None, pin=None, man=None, honors=None, has_aka_dora=False):
"""
Method to convert one line string tiles format to the 136 array.
You can pass r instead of 5 for it to become a red five from
that suit. To prevent old usage without red,
has_aka_dora has to be True for this to do that.
We need it to increase readability of our tests
"""
def _split_string(string, offset, red=None):
data = []
temp = []
if not string:
return []
for i in string:
if i == 'r' and has_aka_dora:
temp.append(red)
data.append(red)
else:
tile = offset + (int(i) - 1) * 4
if tile == red and has_aka_dora:
# prevent non reds to become red
tile += 1
if tile in data:
count_of_tiles = len([x for x in temp if x == tile])
new_tile = tile + count_of_tiles
data.append(new_tile)
temp.append(tile)
else:
data.append(tile)
temp.append(tile)
return data
results = _split_string(man, 0, FIVE_RED_MAN)
results += _split_string(pin, 36, FIVE_RED_PIN)
results += _split_string(sou, 72, FIVE_RED_SOU)
results += _split_string(honors, 108)
return results | [
"def",
"string_to_136_array",
"(",
"sou",
"=",
"None",
",",
"pin",
"=",
"None",
",",
"man",
"=",
"None",
",",
"honors",
"=",
"None",
",",
"has_aka_dora",
"=",
"False",
")",
":",
"def",
"_split_string",
"(",
"string",
",",
"offset",
",",
"red",
"=",
"... | Method to convert one line string tiles format to the 136 array.
You can pass r instead of 5 for it to become a red five from
that suit. To prevent old usage without red,
has_aka_dora has to be True for this to do that.
We need it to increase readability of our tests | [
"Method",
"to",
"convert",
"one",
"line",
"string",
"tiles",
"format",
"to",
"the",
"136",
"array",
".",
"You",
"can",
"pass",
"r",
"instead",
"of",
"5",
"for",
"it",
"to",
"become",
"a",
"red",
"five",
"from",
"that",
"suit",
".",
"To",
"prevent",
"... | a269cc4649b545f965a2bb0c2a6e704492567f13 | https://github.com/MahjongRepository/mahjong/blob/a269cc4649b545f965a2bb0c2a6e704492567f13/mahjong/tile.py#L76-L117 | train | 204,257 |
MahjongRepository/mahjong | mahjong/tile.py | TilesConverter.string_to_34_array | def string_to_34_array(sou=None, pin=None, man=None, honors=None):
"""
Method to convert one line string tiles format to the 34 array
We need it to increase readability of our tests
"""
results = TilesConverter.string_to_136_array(sou, pin, man, honors)
results = TilesConverter.to_34_array(results)
return results | python | def string_to_34_array(sou=None, pin=None, man=None, honors=None):
"""
Method to convert one line string tiles format to the 34 array
We need it to increase readability of our tests
"""
results = TilesConverter.string_to_136_array(sou, pin, man, honors)
results = TilesConverter.to_34_array(results)
return results | [
"def",
"string_to_34_array",
"(",
"sou",
"=",
"None",
",",
"pin",
"=",
"None",
",",
"man",
"=",
"None",
",",
"honors",
"=",
"None",
")",
":",
"results",
"=",
"TilesConverter",
".",
"string_to_136_array",
"(",
"sou",
",",
"pin",
",",
"man",
",",
"honors... | Method to convert one line string tiles format to the 34 array
We need it to increase readability of our tests | [
"Method",
"to",
"convert",
"one",
"line",
"string",
"tiles",
"format",
"to",
"the",
"34",
"array",
"We",
"need",
"it",
"to",
"increase",
"readability",
"of",
"our",
"tests"
] | a269cc4649b545f965a2bb0c2a6e704492567f13 | https://github.com/MahjongRepository/mahjong/blob/a269cc4649b545f965a2bb0c2a6e704492567f13/mahjong/tile.py#L120-L127 | train | 204,258 |
MahjongRepository/mahjong | mahjong/tile.py | TilesConverter.find_34_tile_in_136_array | def find_34_tile_in_136_array(tile34, tiles):
"""
Our shanten calculator will operate with 34 tiles format,
after calculations we need to find calculated 34 tile
in player's 136 tiles.
For example we had 0 tile from 34 array
in 136 array it can be present as 0, 1, 2, 3
"""
if tile34 is None or tile34 > 33:
return None
tile = tile34 * 4
possible_tiles = [tile] + [tile + i for i in range(1, 4)]
found_tile = None
for possible_tile in possible_tiles:
if possible_tile in tiles:
found_tile = possible_tile
break
return found_tile | python | def find_34_tile_in_136_array(tile34, tiles):
"""
Our shanten calculator will operate with 34 tiles format,
after calculations we need to find calculated 34 tile
in player's 136 tiles.
For example we had 0 tile from 34 array
in 136 array it can be present as 0, 1, 2, 3
"""
if tile34 is None or tile34 > 33:
return None
tile = tile34 * 4
possible_tiles = [tile] + [tile + i for i in range(1, 4)]
found_tile = None
for possible_tile in possible_tiles:
if possible_tile in tiles:
found_tile = possible_tile
break
return found_tile | [
"def",
"find_34_tile_in_136_array",
"(",
"tile34",
",",
"tiles",
")",
":",
"if",
"tile34",
"is",
"None",
"or",
"tile34",
">",
"33",
":",
"return",
"None",
"tile",
"=",
"tile34",
"*",
"4",
"possible_tiles",
"=",
"[",
"tile",
"]",
"+",
"[",
"tile",
"+",
... | Our shanten calculator will operate with 34 tiles format,
after calculations we need to find calculated 34 tile
in player's 136 tiles.
For example we had 0 tile from 34 array
in 136 array it can be present as 0, 1, 2, 3 | [
"Our",
"shanten",
"calculator",
"will",
"operate",
"with",
"34",
"tiles",
"format",
"after",
"calculations",
"we",
"need",
"to",
"find",
"calculated",
"34",
"tile",
"in",
"player",
"s",
"136",
"tiles",
"."
] | a269cc4649b545f965a2bb0c2a6e704492567f13 | https://github.com/MahjongRepository/mahjong/blob/a269cc4649b545f965a2bb0c2a6e704492567f13/mahjong/tile.py#L130-L152 | train | 204,259 |
bsolomon1124/pyfinance | pyfinance/options.py | BSM.value | def value(self):
"""Compute option value according to BSM model."""
return self._sign[1] * self.S0 * norm.cdf(
self._sign[1] * self.d1, 0.0, 1.0
) - self._sign[1] * self.K * np.exp(-self.r * self.T) * norm.cdf(
self._sign[1] * self.d2, 0.0, 1.0
) | python | def value(self):
"""Compute option value according to BSM model."""
return self._sign[1] * self.S0 * norm.cdf(
self._sign[1] * self.d1, 0.0, 1.0
) - self._sign[1] * self.K * np.exp(-self.r * self.T) * norm.cdf(
self._sign[1] * self.d2, 0.0, 1.0
) | [
"def",
"value",
"(",
"self",
")",
":",
"return",
"self",
".",
"_sign",
"[",
"1",
"]",
"*",
"self",
".",
"S0",
"*",
"norm",
".",
"cdf",
"(",
"self",
".",
"_sign",
"[",
"1",
"]",
"*",
"self",
".",
"d1",
",",
"0.0",
",",
"1.0",
")",
"-",
"self... | Compute option value according to BSM model. | [
"Compute",
"option",
"value",
"according",
"to",
"BSM",
"model",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/options.py#L172-L178 | train | 204,260 |
bsolomon1124/pyfinance | pyfinance/options.py | OpStrat.add_option | def add_option(self, K=None, price=None, St=None, kind="call", pos="long"):
"""Add an option to the object's `options` container."""
kinds = {
"call": Call,
"Call": Call,
"c": Call,
"C": Call,
"put": Put,
"Put": Put,
"p": Put,
"P": Put,
}
St = self.St if St is None else St
option = kinds[kind](St=St, K=K, price=price, pos=pos)
self.options.append(option) | python | def add_option(self, K=None, price=None, St=None, kind="call", pos="long"):
"""Add an option to the object's `options` container."""
kinds = {
"call": Call,
"Call": Call,
"c": Call,
"C": Call,
"put": Put,
"Put": Put,
"p": Put,
"P": Put,
}
St = self.St if St is None else St
option = kinds[kind](St=St, K=K, price=price, pos=pos)
self.options.append(option) | [
"def",
"add_option",
"(",
"self",
",",
"K",
"=",
"None",
",",
"price",
"=",
"None",
",",
"St",
"=",
"None",
",",
"kind",
"=",
"\"call\"",
",",
"pos",
"=",
"\"long\"",
")",
":",
"kinds",
"=",
"{",
"\"call\"",
":",
"Call",
",",
"\"Call\"",
":",
"Ca... | Add an option to the object's `options` container. | [
"Add",
"an",
"option",
"to",
"the",
"object",
"s",
"options",
"container",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/options.py#L335-L349 | train | 204,261 |
bsolomon1124/pyfinance | pyfinance/ols.py | _rolling_lstsq | def _rolling_lstsq(x, y):
"""Finds solution for the rolling case. Matrix formulation."""
if x.ndim == 2:
# Treat everything as 3d and avoid AxisError on .swapaxes(1, 2) below
# This means an original input of:
# array([0., 1., 2., 3., 4., 5., 6.])
# becomes:
# array([[[0.],
# [1.],
# [2.],
# [3.]],
#
# [[1.],
# [2.],
# ...
x = x[:, :, None]
elif x.ndim <= 1:
raise np.AxisError("x should have ndmi >= 2")
return np.squeeze(
np.matmul(
np.linalg.inv(np.matmul(x.swapaxes(1, 2), x)),
np.matmul(x.swapaxes(1, 2), np.atleast_3d(y)),
)
) | python | def _rolling_lstsq(x, y):
"""Finds solution for the rolling case. Matrix formulation."""
if x.ndim == 2:
# Treat everything as 3d and avoid AxisError on .swapaxes(1, 2) below
# This means an original input of:
# array([0., 1., 2., 3., 4., 5., 6.])
# becomes:
# array([[[0.],
# [1.],
# [2.],
# [3.]],
#
# [[1.],
# [2.],
# ...
x = x[:, :, None]
elif x.ndim <= 1:
raise np.AxisError("x should have ndmi >= 2")
return np.squeeze(
np.matmul(
np.linalg.inv(np.matmul(x.swapaxes(1, 2), x)),
np.matmul(x.swapaxes(1, 2), np.atleast_3d(y)),
)
) | [
"def",
"_rolling_lstsq",
"(",
"x",
",",
"y",
")",
":",
"if",
"x",
".",
"ndim",
"==",
"2",
":",
"# Treat everything as 3d and avoid AxisError on .swapaxes(1, 2) below\r",
"# This means an original input of:\r",
"# array([0., 1., 2., 3., 4., 5., 6.])\r",
"# becomes:\r",
"# ar... | Finds solution for the rolling case. Matrix formulation. | [
"Finds",
"solution",
"for",
"the",
"rolling",
"case",
".",
"Matrix",
"formulation",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/ols.py#L18-L41 | train | 204,262 |
bsolomon1124/pyfinance | pyfinance/ols.py | _confirm_constant | def _confirm_constant(a):
"""Confirm `a` has volumn vector of 1s."""
a = np.asanyarray(a)
return np.isclose(a, 1.0).all(axis=0).any() | python | def _confirm_constant(a):
"""Confirm `a` has volumn vector of 1s."""
a = np.asanyarray(a)
return np.isclose(a, 1.0).all(axis=0).any() | [
"def",
"_confirm_constant",
"(",
"a",
")",
":",
"a",
"=",
"np",
".",
"asanyarray",
"(",
"a",
")",
"return",
"np",
".",
"isclose",
"(",
"a",
",",
"1.0",
")",
".",
"all",
"(",
"axis",
"=",
"0",
")",
".",
"any",
"(",
")"
] | Confirm `a` has volumn vector of 1s. | [
"Confirm",
"a",
"has",
"volumn",
"vector",
"of",
"1s",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/ols.py#L44-L47 | train | 204,263 |
bsolomon1124/pyfinance | pyfinance/ols.py | OLS._pvalues_all | def _pvalues_all(self):
"""Two-tailed p values for t-stats of all parameters."""
return 2.0 * (1.0 - scs.t.cdf(np.abs(self._tstat_all), self.df_err)) | python | def _pvalues_all(self):
"""Two-tailed p values for t-stats of all parameters."""
return 2.0 * (1.0 - scs.t.cdf(np.abs(self._tstat_all), self.df_err)) | [
"def",
"_pvalues_all",
"(",
"self",
")",
":",
"return",
"2.0",
"*",
"(",
"1.0",
"-",
"scs",
".",
"t",
".",
"cdf",
"(",
"np",
".",
"abs",
"(",
"self",
".",
"_tstat_all",
")",
",",
"self",
".",
"df_err",
")",
")"
] | Two-tailed p values for t-stats of all parameters. | [
"Two",
"-",
"tailed",
"p",
"values",
"for",
"t",
"-",
"stats",
"of",
"all",
"parameters",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/ols.py#L249-L251 | train | 204,264 |
bsolomon1124/pyfinance | pyfinance/general.py | BestFitDist.best | def best(self):
"""The resulting best-fit distribution, its parameters, and SSE."""
return pd.Series(
{
"name": self.best_dist.name,
"params": self.best_param,
"sse": self.best_sse,
}
) | python | def best(self):
"""The resulting best-fit distribution, its parameters, and SSE."""
return pd.Series(
{
"name": self.best_dist.name,
"params": self.best_param,
"sse": self.best_sse,
}
) | [
"def",
"best",
"(",
"self",
")",
":",
"return",
"pd",
".",
"Series",
"(",
"{",
"\"name\"",
":",
"self",
".",
"best_dist",
".",
"name",
",",
"\"params\"",
":",
"self",
".",
"best_param",
",",
"\"sse\"",
":",
"self",
".",
"best_sse",
",",
"}",
")"
] | The resulting best-fit distribution, its parameters, and SSE. | [
"The",
"resulting",
"best",
"-",
"fit",
"distribution",
"its",
"parameters",
"and",
"SSE",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/general.py#L277-L285 | train | 204,265 |
bsolomon1124/pyfinance | pyfinance/general.py | BestFitDist.all | def all(self, by="name", ascending=True):
"""All tested distributions, their parameters, and SSEs."""
res = pd.DataFrame(
{
"name": self.distributions,
"params": self.params,
"sse": self.sses,
}
)[["name", "sse", "params"]]
res.sort_values(by=by, ascending=ascending, inplace=True)
return res | python | def all(self, by="name", ascending=True):
"""All tested distributions, their parameters, and SSEs."""
res = pd.DataFrame(
{
"name": self.distributions,
"params": self.params,
"sse": self.sses,
}
)[["name", "sse", "params"]]
res.sort_values(by=by, ascending=ascending, inplace=True)
return res | [
"def",
"all",
"(",
"self",
",",
"by",
"=",
"\"name\"",
",",
"ascending",
"=",
"True",
")",
":",
"res",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"\"name\"",
":",
"self",
".",
"distributions",
",",
"\"params\"",
":",
"self",
".",
"params",
",",
"\"sse\""... | All tested distributions, their parameters, and SSEs. | [
"All",
"tested",
"distributions",
"their",
"parameters",
"and",
"SSEs",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/general.py#L287-L298 | train | 204,266 |
bsolomon1124/pyfinance | pyfinance/general.py | BestFitDist.plot | def plot(self):
"""Plot the empirical histogram versus best-fit distribution's PDF."""
plt.plot(self.bin_edges, self.hist, self.bin_edges, self.best_pdf) | python | def plot(self):
"""Plot the empirical histogram versus best-fit distribution's PDF."""
plt.plot(self.bin_edges, self.hist, self.bin_edges, self.best_pdf) | [
"def",
"plot",
"(",
"self",
")",
":",
"plt",
".",
"plot",
"(",
"self",
".",
"bin_edges",
",",
"self",
".",
"hist",
",",
"self",
".",
"bin_edges",
",",
"self",
".",
"best_pdf",
")"
] | Plot the empirical histogram versus best-fit distribution's PDF. | [
"Plot",
"the",
"empirical",
"histogram",
"versus",
"best",
"-",
"fit",
"distribution",
"s",
"PDF",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/general.py#L300-L302 | train | 204,267 |
bsolomon1124/pyfinance | pyfinance/general.py | PCA.eigen_table | def eigen_table(self):
"""Eigenvalues, expl. variance, and cumulative expl. variance."""
idx = ["Eigenvalue", "Variability (%)", "Cumulative (%)"]
table = pd.DataFrame(
np.array(
[self.eigenvalues, self.inertia, self.cumulative_inertia]
),
columns=["F%s" % i for i in range(1, self.keep + 1)],
index=idx,
)
return table | python | def eigen_table(self):
"""Eigenvalues, expl. variance, and cumulative expl. variance."""
idx = ["Eigenvalue", "Variability (%)", "Cumulative (%)"]
table = pd.DataFrame(
np.array(
[self.eigenvalues, self.inertia, self.cumulative_inertia]
),
columns=["F%s" % i for i in range(1, self.keep + 1)],
index=idx,
)
return table | [
"def",
"eigen_table",
"(",
"self",
")",
":",
"idx",
"=",
"[",
"\"Eigenvalue\"",
",",
"\"Variability (%)\"",
",",
"\"Cumulative (%)\"",
"]",
"table",
"=",
"pd",
".",
"DataFrame",
"(",
"np",
".",
"array",
"(",
"[",
"self",
".",
"eigenvalues",
",",
"self",
... | Eigenvalues, expl. variance, and cumulative expl. variance. | [
"Eigenvalues",
"expl",
".",
"variance",
"and",
"cumulative",
"expl",
".",
"variance",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/general.py#L702-L713 | train | 204,268 |
bsolomon1124/pyfinance | pyfinance/general.py | TEOpt.optimize | def optimize(self):
"""Analogous to `sklearn`'s fit. Returns `self` to enable chaining."""
def te(weights, r, proxies):
"""Helper func. `pyfinance.tracking_error` doesn't work here."""
if isinstance(weights, list):
weights = np.array(weights)
proxy = np.sum(proxies * weights, axis=1)
te = np.std(proxy - r) # not anlzd...
return te
ew = utils.equal_weights(n=self.n, sumto=self.sumto)
bnds = tuple((0, 1) for x in range(self.n))
cons = {"type": "eq", "fun": lambda x: np.sum(x) - self.sumto}
xs = []
funs = []
for i, j in zip(self._r, self._proxies):
opt = sco.minimize(
te,
x0=ew,
args=(i, j),
method="SLSQP",
bounds=bnds,
constraints=cons,
)
x, fun = opt["x"], opt["fun"]
xs.append(x)
funs.append(fun)
self._xs = np.array(xs)
self._funs = np.array(funs)
return self | python | def optimize(self):
"""Analogous to `sklearn`'s fit. Returns `self` to enable chaining."""
def te(weights, r, proxies):
"""Helper func. `pyfinance.tracking_error` doesn't work here."""
if isinstance(weights, list):
weights = np.array(weights)
proxy = np.sum(proxies * weights, axis=1)
te = np.std(proxy - r) # not anlzd...
return te
ew = utils.equal_weights(n=self.n, sumto=self.sumto)
bnds = tuple((0, 1) for x in range(self.n))
cons = {"type": "eq", "fun": lambda x: np.sum(x) - self.sumto}
xs = []
funs = []
for i, j in zip(self._r, self._proxies):
opt = sco.minimize(
te,
x0=ew,
args=(i, j),
method="SLSQP",
bounds=bnds,
constraints=cons,
)
x, fun = opt["x"], opt["fun"]
xs.append(x)
funs.append(fun)
self._xs = np.array(xs)
self._funs = np.array(funs)
return self | [
"def",
"optimize",
"(",
"self",
")",
":",
"def",
"te",
"(",
"weights",
",",
"r",
",",
"proxies",
")",
":",
"\"\"\"Helper func. `pyfinance.tracking_error` doesn't work here.\"\"\"",
"if",
"isinstance",
"(",
"weights",
",",
"list",
")",
":",
"weights",
"=",
"np",... | Analogous to `sklearn`'s fit. Returns `self` to enable chaining. | [
"Analogous",
"to",
"sklearn",
"s",
"fit",
".",
"Returns",
"self",
"to",
"enable",
"chaining",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/general.py#L952-L984 | train | 204,269 |
bsolomon1124/pyfinance | pyfinance/general.py | TEOpt.replicate | def replicate(self):
"""Forward-month returns of the replicating portfolio."""
return np.sum(
self.proxies[self.window :] * self._xs[:-1], axis=1
).reindex(self.r.index) | python | def replicate(self):
"""Forward-month returns of the replicating portfolio."""
return np.sum(
self.proxies[self.window :] * self._xs[:-1], axis=1
).reindex(self.r.index) | [
"def",
"replicate",
"(",
"self",
")",
":",
"return",
"np",
".",
"sum",
"(",
"self",
".",
"proxies",
"[",
"self",
".",
"window",
":",
"]",
"*",
"self",
".",
"_xs",
"[",
":",
"-",
"1",
"]",
",",
"axis",
"=",
"1",
")",
".",
"reindex",
"(",
"self... | Forward-month returns of the replicating portfolio. | [
"Forward",
"-",
"month",
"returns",
"of",
"the",
"replicating",
"portfolio",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/general.py#L995-L999 | train | 204,270 |
bsolomon1124/pyfinance | pyfinance/returns.py | _try_to_squeeze | def _try_to_squeeze(obj, raise_=False):
"""Attempt to squeeze to 1d Series.
Parameters
----------
obj : {pd.Series, pd.DataFrame}
raise_ : bool, default False
"""
if isinstance(obj, pd.Series):
return obj
elif isinstance(obj, pd.DataFrame) and obj.shape[-1] == 1:
return obj.squeeze()
else:
if raise_:
raise ValueError("Input cannot be squeezed.")
return obj | python | def _try_to_squeeze(obj, raise_=False):
"""Attempt to squeeze to 1d Series.
Parameters
----------
obj : {pd.Series, pd.DataFrame}
raise_ : bool, default False
"""
if isinstance(obj, pd.Series):
return obj
elif isinstance(obj, pd.DataFrame) and obj.shape[-1] == 1:
return obj.squeeze()
else:
if raise_:
raise ValueError("Input cannot be squeezed.")
return obj | [
"def",
"_try_to_squeeze",
"(",
"obj",
",",
"raise_",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"pd",
".",
"Series",
")",
":",
"return",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"pd",
".",
"DataFrame",
")",
"and",
"obj",
".",
... | Attempt to squeeze to 1d Series.
Parameters
----------
obj : {pd.Series, pd.DataFrame}
raise_ : bool, default False | [
"Attempt",
"to",
"squeeze",
"to",
"1d",
"Series",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L1233-L1249 | train | 204,271 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.anlzd_stdev | def anlzd_stdev(self, ddof=0, freq=None, **kwargs):
"""Annualized standard deviation with `ddof` degrees of freedom.
Parameters
----------
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
A frequency string used to create an annualization factor.
If None, `self.freq` will be used. If that is also None,
a frequency will be inferred. If none can be inferred,
an exception is raised.
It may be any frequency string or anchored offset string
recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
'BQS-APR'.
**kwargs
Passed to pd.Series.std().
TODO: freq
Returns
-------
float
"""
if freq is None:
freq = self._try_get_freq()
if freq is None:
raise FrequencyError(msg)
return nanstd(self, ddof=ddof) * freq ** 0.5 | python | def anlzd_stdev(self, ddof=0, freq=None, **kwargs):
"""Annualized standard deviation with `ddof` degrees of freedom.
Parameters
----------
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
A frequency string used to create an annualization factor.
If None, `self.freq` will be used. If that is also None,
a frequency will be inferred. If none can be inferred,
an exception is raised.
It may be any frequency string or anchored offset string
recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
'BQS-APR'.
**kwargs
Passed to pd.Series.std().
TODO: freq
Returns
-------
float
"""
if freq is None:
freq = self._try_get_freq()
if freq is None:
raise FrequencyError(msg)
return nanstd(self, ddof=ddof) * freq ** 0.5 | [
"def",
"anlzd_stdev",
"(",
"self",
",",
"ddof",
"=",
"0",
",",
"freq",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"self",
".",
"_try_get_freq",
"(",
")",
"if",
"freq",
"is",
"None",
":",
"raise",... | Annualized standard deviation with `ddof` degrees of freedom.
Parameters
----------
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
A frequency string used to create an annualization factor.
If None, `self.freq` will be used. If that is also None,
a frequency will be inferred. If none can be inferred,
an exception is raised.
It may be any frequency string or anchored offset string
recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
'BQS-APR'.
**kwargs
Passed to pd.Series.std().
TODO: freq
Returns
-------
float | [
"Annualized",
"standard",
"deviation",
"with",
"ddof",
"degrees",
"of",
"freedom",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L143-L172 | train | 204,272 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.batting_avg | def batting_avg(self, benchmark):
"""Percentage of periods when `self` outperformed `benchmark`.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
Returns
-------
float
"""
diff = self.excess_ret(benchmark)
return np.count_nonzero(diff > 0.0) / diff.count() | python | def batting_avg(self, benchmark):
"""Percentage of periods when `self` outperformed `benchmark`.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
Returns
-------
float
"""
diff = self.excess_ret(benchmark)
return np.count_nonzero(diff > 0.0) / diff.count() | [
"def",
"batting_avg",
"(",
"self",
",",
"benchmark",
")",
":",
"diff",
"=",
"self",
".",
"excess_ret",
"(",
"benchmark",
")",
"return",
"np",
".",
"count_nonzero",
"(",
"diff",
">",
"0.0",
")",
"/",
"diff",
".",
"count",
"(",
")"
] | Percentage of periods when `self` outperformed `benchmark`.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
Returns
-------
float | [
"Percentage",
"of",
"periods",
"when",
"self",
"outperformed",
"benchmark",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L174-L188 | train | 204,273 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.beta_adj | def beta_adj(self, benchmark, adj_factor=2 / 3, **kwargs):
"""Adjusted beta.
Beta that is adjusted to reflect the tendency of beta to
be mean reverting.
[Source: CFA Institute]
Formula:
adj_factor * raw_beta + (1 - adj_factor)
Parameters
----------
benchmark : {pd.Series, TSeries, pd.DataFrame, np.ndarray}
The benchmark securitie(s) to which `self` is compared.
Returns
-------
float or np.ndarray
If `benchmark` is 1d, returns a scalar.
If `benchmark` is 2d, returns a 1d ndarray.
Reference
---------
.. _Blume, Marshall. "Betas and Their Regression Tendencies."
http://www.stat.ucla.edu/~nchristo/statistics417/blume_betas.pdf
"""
beta = self.beta(benchmark=benchmark, **kwargs)
return adj_factor * beta + (1 - adj_factor) | python | def beta_adj(self, benchmark, adj_factor=2 / 3, **kwargs):
"""Adjusted beta.
Beta that is adjusted to reflect the tendency of beta to
be mean reverting.
[Source: CFA Institute]
Formula:
adj_factor * raw_beta + (1 - adj_factor)
Parameters
----------
benchmark : {pd.Series, TSeries, pd.DataFrame, np.ndarray}
The benchmark securitie(s) to which `self` is compared.
Returns
-------
float or np.ndarray
If `benchmark` is 1d, returns a scalar.
If `benchmark` is 2d, returns a 1d ndarray.
Reference
---------
.. _Blume, Marshall. "Betas and Their Regression Tendencies."
http://www.stat.ucla.edu/~nchristo/statistics417/blume_betas.pdf
"""
beta = self.beta(benchmark=benchmark, **kwargs)
return adj_factor * beta + (1 - adj_factor) | [
"def",
"beta_adj",
"(",
"self",
",",
"benchmark",
",",
"adj_factor",
"=",
"2",
"/",
"3",
",",
"*",
"*",
"kwargs",
")",
":",
"beta",
"=",
"self",
".",
"beta",
"(",
"benchmark",
"=",
"benchmark",
",",
"*",
"*",
"kwargs",
")",
"return",
"adj_factor",
... | Adjusted beta.
Beta that is adjusted to reflect the tendency of beta to
be mean reverting.
[Source: CFA Institute]
Formula:
adj_factor * raw_beta + (1 - adj_factor)
Parameters
----------
benchmark : {pd.Series, TSeries, pd.DataFrame, np.ndarray}
The benchmark securitie(s) to which `self` is compared.
Returns
-------
float or np.ndarray
If `benchmark` is 1d, returns a scalar.
If `benchmark` is 2d, returns a 1d ndarray.
Reference
---------
.. _Blume, Marshall. "Betas and Their Regression Tendencies."
http://www.stat.ucla.edu/~nchristo/statistics417/blume_betas.pdf | [
"Adjusted",
"beta",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L213-L241 | train | 204,274 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.down_capture | def down_capture(self, benchmark, threshold=0.0, compare_op="lt"):
"""Downside capture ratio.
Measures the performance of `self` relative to benchmark
conditioned on periods where `benchmark` is lt or le to
`threshold`.
Downside capture ratios are calculated by taking the fund's
monthly return during the periods of negative benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is lt/le `threshold`.
compare_op : {'lt', 'le'}
Comparison operator used to compare to `threshold`.
'lt' is less-than; 'le' is less-than-or-equal.
Returns
-------
float
Note
----
This metric uses geometric, not arithmetic, mean return.
"""
slf, bm = self.downmarket_filter(
benchmark=benchmark,
threshold=threshold,
compare_op=compare_op,
include_benchmark=True,
)
return slf.geomean() / bm.geomean() | python | def down_capture(self, benchmark, threshold=0.0, compare_op="lt"):
"""Downside capture ratio.
Measures the performance of `self` relative to benchmark
conditioned on periods where `benchmark` is lt or le to
`threshold`.
Downside capture ratios are calculated by taking the fund's
monthly return during the periods of negative benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is lt/le `threshold`.
compare_op : {'lt', 'le'}
Comparison operator used to compare to `threshold`.
'lt' is less-than; 'le' is less-than-or-equal.
Returns
-------
float
Note
----
This metric uses geometric, not arithmetic, mean return.
"""
slf, bm = self.downmarket_filter(
benchmark=benchmark,
threshold=threshold,
compare_op=compare_op,
include_benchmark=True,
)
return slf.geomean() / bm.geomean() | [
"def",
"down_capture",
"(",
"self",
",",
"benchmark",
",",
"threshold",
"=",
"0.0",
",",
"compare_op",
"=",
"\"lt\"",
")",
":",
"slf",
",",
"bm",
"=",
"self",
".",
"downmarket_filter",
"(",
"benchmark",
"=",
"benchmark",
",",
"threshold",
"=",
"threshold",... | Downside capture ratio.
Measures the performance of `self` relative to benchmark
conditioned on periods where `benchmark` is lt or le to
`threshold`.
Downside capture ratios are calculated by taking the fund's
monthly return during the periods of negative benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is lt/le `threshold`.
compare_op : {'lt', 'le'}
Comparison operator used to compare to `threshold`.
'lt' is less-than; 'le' is less-than-or-equal.
Returns
-------
float
Note
----
This metric uses geometric, not arithmetic, mean return. | [
"Downside",
"capture",
"ratio",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L315-L354 | train | 204,275 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.downmarket_filter | def downmarket_filter(
self,
benchmark,
threshold=0.0,
compare_op="lt",
include_benchmark=False,
):
"""Drop elementwise samples where `benchmark` > `threshold`.
Filters `self` (and optionally, `benchmark`) to periods
where `benchmark` < `threshold`. (Or <= `threshold`.)
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.0
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is lt/le `threshold`.
compare_op : {'lt', 'le'}
Comparison operator used to compare to `threshold`.
'lt' is less-than; 'le' is less-than-or-equal.
include_benchmark : bool, default False
If True, return tuple of (`self`, `benchmark`) both
filtered. If False, return only `self` filtered.
Returns
-------
TSeries or tuple of TSeries
TSeries if `include_benchmark=False`, otherwise, tuple.
"""
return self._mkt_filter(
benchmark=benchmark,
threshold=threshold,
compare_op=compare_op,
include_benchmark=include_benchmark,
) | python | def downmarket_filter(
self,
benchmark,
threshold=0.0,
compare_op="lt",
include_benchmark=False,
):
"""Drop elementwise samples where `benchmark` > `threshold`.
Filters `self` (and optionally, `benchmark`) to periods
where `benchmark` < `threshold`. (Or <= `threshold`.)
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.0
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is lt/le `threshold`.
compare_op : {'lt', 'le'}
Comparison operator used to compare to `threshold`.
'lt' is less-than; 'le' is less-than-or-equal.
include_benchmark : bool, default False
If True, return tuple of (`self`, `benchmark`) both
filtered. If False, return only `self` filtered.
Returns
-------
TSeries or tuple of TSeries
TSeries if `include_benchmark=False`, otherwise, tuple.
"""
return self._mkt_filter(
benchmark=benchmark,
threshold=threshold,
compare_op=compare_op,
include_benchmark=include_benchmark,
) | [
"def",
"downmarket_filter",
"(",
"self",
",",
"benchmark",
",",
"threshold",
"=",
"0.0",
",",
"compare_op",
"=",
"\"lt\"",
",",
"include_benchmark",
"=",
"False",
",",
")",
":",
"return",
"self",
".",
"_mkt_filter",
"(",
"benchmark",
"=",
"benchmark",
",",
... | Drop elementwise samples where `benchmark` > `threshold`.
Filters `self` (and optionally, `benchmark`) to periods
where `benchmark` < `threshold`. (Or <= `threshold`.)
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.0
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is lt/le `threshold`.
compare_op : {'lt', 'le'}
Comparison operator used to compare to `threshold`.
'lt' is less-than; 'le' is less-than-or-equal.
include_benchmark : bool, default False
If True, return tuple of (`self`, `benchmark`) both
filtered. If False, return only `self` filtered.
Returns
-------
TSeries or tuple of TSeries
TSeries if `include_benchmark=False`, otherwise, tuple. | [
"Drop",
"elementwise",
"samples",
"where",
"benchmark",
">",
"threshold",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L356-L394 | train | 204,276 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.drawdown_end | def drawdown_end(self, return_date=False):
"""The date of the drawdown trough.
Date at which the drawdown was most negative.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
datetime.date or pandas._libs.tslib.Timestamp
"""
end = self.drawdown_idx().idxmin()
if return_date:
return end.date()
return end | python | def drawdown_end(self, return_date=False):
"""The date of the drawdown trough.
Date at which the drawdown was most negative.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
datetime.date or pandas._libs.tslib.Timestamp
"""
end = self.drawdown_idx().idxmin()
if return_date:
return end.date()
return end | [
"def",
"drawdown_end",
"(",
"self",
",",
"return_date",
"=",
"False",
")",
":",
"end",
"=",
"self",
".",
"drawdown_idx",
"(",
")",
".",
"idxmin",
"(",
")",
"if",
"return_date",
":",
"return",
"end",
".",
"date",
"(",
")",
"return",
"end"
] | The date of the drawdown trough.
Date at which the drawdown was most negative.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
datetime.date or pandas._libs.tslib.Timestamp | [
"The",
"date",
"of",
"the",
"drawdown",
"trough",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L396-L415 | train | 204,277 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.drawdown_idx | def drawdown_idx(self):
"""Drawdown index; TSeries of drawdown from running HWM.
Returns
-------
TSeries
"""
ri = self.ret_idx()
return ri / np.maximum(ri.cummax(), 1.0) - 1.0 | python | def drawdown_idx(self):
"""Drawdown index; TSeries of drawdown from running HWM.
Returns
-------
TSeries
"""
ri = self.ret_idx()
return ri / np.maximum(ri.cummax(), 1.0) - 1.0 | [
"def",
"drawdown_idx",
"(",
"self",
")",
":",
"ri",
"=",
"self",
".",
"ret_idx",
"(",
")",
"return",
"ri",
"/",
"np",
".",
"maximum",
"(",
"ri",
".",
"cummax",
"(",
")",
",",
"1.0",
")",
"-",
"1.0"
] | Drawdown index; TSeries of drawdown from running HWM.
Returns
-------
TSeries | [
"Drawdown",
"index",
";",
"TSeries",
"of",
"drawdown",
"from",
"running",
"HWM",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L417-L426 | train | 204,278 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.drawdown_length | def drawdown_length(self, return_int=False):
"""Length of drawdown in days.
This is the duration from peak to trough.
Parameters
----------
return_int : bool, default False
If True, return the number of days as an int.
If False, return a Pandas Timedelta object.
Returns
-------
int or pandas._libs.tslib.Timedelta
"""
td = self.drawdown_end() - self.drawdown_start()
if return_int:
return td.days
return td | python | def drawdown_length(self, return_int=False):
"""Length of drawdown in days.
This is the duration from peak to trough.
Parameters
----------
return_int : bool, default False
If True, return the number of days as an int.
If False, return a Pandas Timedelta object.
Returns
-------
int or pandas._libs.tslib.Timedelta
"""
td = self.drawdown_end() - self.drawdown_start()
if return_int:
return td.days
return td | [
"def",
"drawdown_length",
"(",
"self",
",",
"return_int",
"=",
"False",
")",
":",
"td",
"=",
"self",
".",
"drawdown_end",
"(",
")",
"-",
"self",
".",
"drawdown_start",
"(",
")",
"if",
"return_int",
":",
"return",
"td",
".",
"days",
"return",
"td"
] | Length of drawdown in days.
This is the duration from peak to trough.
Parameters
----------
return_int : bool, default False
If True, return the number of days as an int.
If False, return a Pandas Timedelta object.
Returns
-------
int or pandas._libs.tslib.Timedelta | [
"Length",
"of",
"drawdown",
"in",
"days",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L428-L447 | train | 204,279 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.drawdown_recov | def drawdown_recov(self, return_int=False):
"""Length of drawdown recovery in days.
This is the duration from trough to recovery date.
Parameters
----------
return_int : bool, default False
If True, return the number of days as an int.
If False, return a Pandas Timedelta object.
Returns
-------
int or pandas._libs.tslib.Timedelta
"""
td = self.recov_date() - self.drawdown_end()
if return_int:
return td.days
return td | python | def drawdown_recov(self, return_int=False):
"""Length of drawdown recovery in days.
This is the duration from trough to recovery date.
Parameters
----------
return_int : bool, default False
If True, return the number of days as an int.
If False, return a Pandas Timedelta object.
Returns
-------
int or pandas._libs.tslib.Timedelta
"""
td = self.recov_date() - self.drawdown_end()
if return_int:
return td.days
return td | [
"def",
"drawdown_recov",
"(",
"self",
",",
"return_int",
"=",
"False",
")",
":",
"td",
"=",
"self",
".",
"recov_date",
"(",
")",
"-",
"self",
".",
"drawdown_end",
"(",
")",
"if",
"return_int",
":",
"return",
"td",
".",
"days",
"return",
"td"
] | Length of drawdown recovery in days.
This is the duration from trough to recovery date.
Parameters
----------
return_int : bool, default False
If True, return the number of days as an int.
If False, return a Pandas Timedelta object.
Returns
-------
int or pandas._libs.tslib.Timedelta | [
"Length",
"of",
"drawdown",
"recovery",
"in",
"days",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L449-L468 | train | 204,280 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.drawdown_start | def drawdown_start(self, return_date=False):
"""The date of the peak at which most severe drawdown began.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
datetime.date or pandas._libs.tslib.Timestamp
"""
# Thank you @cᴏʟᴅsᴘᴇᴇᴅ
# https://stackoverflow.com/a/47892766/7954504
dd = self.drawdown_idx()
mask = nancumsum(dd == nanmin(dd.min)).astype(bool)
start = dd.mask(mask)[::-1].idxmax()
if return_date:
return start.date()
return start | python | def drawdown_start(self, return_date=False):
"""The date of the peak at which most severe drawdown began.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
datetime.date or pandas._libs.tslib.Timestamp
"""
# Thank you @cᴏʟᴅsᴘᴇᴇᴅ
# https://stackoverflow.com/a/47892766/7954504
dd = self.drawdown_idx()
mask = nancumsum(dd == nanmin(dd.min)).astype(bool)
start = dd.mask(mask)[::-1].idxmax()
if return_date:
return start.date()
return start | [
"def",
"drawdown_start",
"(",
"self",
",",
"return_date",
"=",
"False",
")",
":",
"# Thank you @cᴏʟᴅsᴘᴇᴇᴅ",
"# https://stackoverflow.com/a/47892766/7954504",
"dd",
"=",
"self",
".",
"drawdown_idx",
"(",
")",
"mask",
"=",
"nancumsum",
"(",
"dd",
"==",
"nanmin",
"("... | The date of the peak at which most severe drawdown began.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
datetime.date or pandas._libs.tslib.Timestamp | [
"The",
"date",
"of",
"the",
"peak",
"at",
"which",
"most",
"severe",
"drawdown",
"began",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L470-L491 | train | 204,281 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.excess_drawdown_idx | def excess_drawdown_idx(self, benchmark, method="caer"):
"""Excess drawdown index; TSeries of excess drawdowns.
There are several ways of computing this metric. For highly
volatile returns, the `method` specified will have a
non-negligible effect on the result.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
method : {'caer' (0), 'cger' (1), 'ecr' (2), 'ecrr' (3)}
Indicates the methodology used.
"""
# TODO: plot these (compared) in docs.
if isinstance(method, (int, float)):
method = ["caer", "cger", "ecr", "ecrr"][method]
method = method.lower()
if method == "caer":
er = self.excess_ret(benchmark=benchmark, method="arithmetic")
return er.drawdown_idx()
elif method == "cger":
er = self.excess_ret(benchmark=benchmark, method="geometric")
return er.drawdown_idx()
elif method == "ecr":
er = self.ret_idx() - benchmark.ret_idx() + 1
if er.isnull().any():
return er / er.cummax() - 1.0
else:
return er / np.maximum.accumulate(er) - 1.0
elif method == "ecrr":
# Credit to: SO @piRSquared
# https://stackoverflow.com/a/36848867/7954504
p = self.ret_idx().values
b = benchmark.ret_idx().values
er = p - b
if er.isnull().any():
# The slower route but NaN-friendly.
cam = self.expanding(min_periods=1).apply(lambda x: x.argmax())
else:
cam = utils.cumargmax(er)
p0 = p[cam]
b0 = b[cam]
return (p * b0 - b * p0) / (p0 * b0)
else:
raise ValueError(
"`method` must be one of"
" ('caer', 'cger', 'ecr', 'ecrr'),"
" case-insensitive, or"
" an integer mapping to these methods"
" (1 thru 4)."
) | python | def excess_drawdown_idx(self, benchmark, method="caer"):
"""Excess drawdown index; TSeries of excess drawdowns.
There are several ways of computing this metric. For highly
volatile returns, the `method` specified will have a
non-negligible effect on the result.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
method : {'caer' (0), 'cger' (1), 'ecr' (2), 'ecrr' (3)}
Indicates the methodology used.
"""
# TODO: plot these (compared) in docs.
if isinstance(method, (int, float)):
method = ["caer", "cger", "ecr", "ecrr"][method]
method = method.lower()
if method == "caer":
er = self.excess_ret(benchmark=benchmark, method="arithmetic")
return er.drawdown_idx()
elif method == "cger":
er = self.excess_ret(benchmark=benchmark, method="geometric")
return er.drawdown_idx()
elif method == "ecr":
er = self.ret_idx() - benchmark.ret_idx() + 1
if er.isnull().any():
return er / er.cummax() - 1.0
else:
return er / np.maximum.accumulate(er) - 1.0
elif method == "ecrr":
# Credit to: SO @piRSquared
# https://stackoverflow.com/a/36848867/7954504
p = self.ret_idx().values
b = benchmark.ret_idx().values
er = p - b
if er.isnull().any():
# The slower route but NaN-friendly.
cam = self.expanding(min_periods=1).apply(lambda x: x.argmax())
else:
cam = utils.cumargmax(er)
p0 = p[cam]
b0 = b[cam]
return (p * b0 - b * p0) / (p0 * b0)
else:
raise ValueError(
"`method` must be one of"
" ('caer', 'cger', 'ecr', 'ecrr'),"
" case-insensitive, or"
" an integer mapping to these methods"
" (1 thru 4)."
) | [
"def",
"excess_drawdown_idx",
"(",
"self",
",",
"benchmark",
",",
"method",
"=",
"\"caer\"",
")",
":",
"# TODO: plot these (compared) in docs.",
"if",
"isinstance",
"(",
"method",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"method",
"=",
"[",
"\"caer\"",
... | Excess drawdown index; TSeries of excess drawdowns.
There are several ways of computing this metric. For highly
volatile returns, the `method` specified will have a
non-negligible effect on the result.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
method : {'caer' (0), 'cger' (1), 'ecr' (2), 'ecrr' (3)}
Indicates the methodology used. | [
"Excess",
"drawdown",
"index",
";",
"TSeries",
"of",
"excess",
"drawdowns",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L493-L550 | train | 204,282 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.excess_ret | def excess_ret(self, benchmark, method="arithmetic"):
"""Excess return.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
method : {{'arith', 'arithmetic'}, {'geo', 'geometric'}}
The methodology used. An arithmetic excess return is a
straightforward subtraction. A geometric excess return
is the ratio of return-relatives of `self` to `benchmark`,
minus one.
Also known as: active return.
Reference
---------
.. _Essex River Analytics - A Case for Arithmetic Attribution
http://www.northinfo.com/documents/563.pdf
.. _Bacon, Carl. Excess Returns - Arithmetic or Geometric?
https://www.cfapubs.org/doi/full/10.2469/dig.v33.n1.1235
"""
if method.startswith("arith"):
return self - _try_to_squeeze(benchmark)
elif method.startswith("geo"):
# Geometric excess return,
# (1 + `self`) / (1 + `benchmark`) - 1.
return (
self.ret_rels() / _try_to_squeeze(benchmark).ret_rels() - 1.0
) | python | def excess_ret(self, benchmark, method="arithmetic"):
"""Excess return.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
method : {{'arith', 'arithmetic'}, {'geo', 'geometric'}}
The methodology used. An arithmetic excess return is a
straightforward subtraction. A geometric excess return
is the ratio of return-relatives of `self` to `benchmark`,
minus one.
Also known as: active return.
Reference
---------
.. _Essex River Analytics - A Case for Arithmetic Attribution
http://www.northinfo.com/documents/563.pdf
.. _Bacon, Carl. Excess Returns - Arithmetic or Geometric?
https://www.cfapubs.org/doi/full/10.2469/dig.v33.n1.1235
"""
if method.startswith("arith"):
return self - _try_to_squeeze(benchmark)
elif method.startswith("geo"):
# Geometric excess return,
# (1 + `self`) / (1 + `benchmark`) - 1.
return (
self.ret_rels() / _try_to_squeeze(benchmark).ret_rels() - 1.0
) | [
"def",
"excess_ret",
"(",
"self",
",",
"benchmark",
",",
"method",
"=",
"\"arithmetic\"",
")",
":",
"if",
"method",
".",
"startswith",
"(",
"\"arith\"",
")",
":",
"return",
"self",
"-",
"_try_to_squeeze",
"(",
"benchmark",
")",
"elif",
"method",
".",
"star... | Excess return.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
method : {{'arith', 'arithmetic'}, {'geo', 'geometric'}}
The methodology used. An arithmetic excess return is a
straightforward subtraction. A geometric excess return
is the ratio of return-relatives of `self` to `benchmark`,
minus one.
Also known as: active return.
Reference
---------
.. _Essex River Analytics - A Case for Arithmetic Attribution
http://www.northinfo.com/documents/563.pdf
.. _Bacon, Carl. Excess Returns - Arithmetic or Geometric?
https://www.cfapubs.org/doi/full/10.2469/dig.v33.n1.1235 | [
"Excess",
"return",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L552-L583 | train | 204,283 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.gain_to_loss_ratio | def gain_to_loss_ratio(self):
"""Gain-to-loss ratio, ratio of positive to negative returns.
Formula:
(n pos. / n neg.) * (avg. up-month return / avg. down-month return)
[Source: CFA Institute]
Returns
-------
float
"""
gt = self > 0
lt = self < 0
return (nansum(gt) / nansum(lt)) * (self[gt].mean() / self[lt].mean()) | python | def gain_to_loss_ratio(self):
"""Gain-to-loss ratio, ratio of positive to negative returns.
Formula:
(n pos. / n neg.) * (avg. up-month return / avg. down-month return)
[Source: CFA Institute]
Returns
-------
float
"""
gt = self > 0
lt = self < 0
return (nansum(gt) / nansum(lt)) * (self[gt].mean() / self[lt].mean()) | [
"def",
"gain_to_loss_ratio",
"(",
"self",
")",
":",
"gt",
"=",
"self",
">",
"0",
"lt",
"=",
"self",
"<",
"0",
"return",
"(",
"nansum",
"(",
"gt",
")",
"/",
"nansum",
"(",
"lt",
")",
")",
"*",
"(",
"self",
"[",
"gt",
"]",
".",
"mean",
"(",
")"... | Gain-to-loss ratio, ratio of positive to negative returns.
Formula:
(n pos. / n neg.) * (avg. up-month return / avg. down-month return)
[Source: CFA Institute]
Returns
-------
float | [
"Gain",
"-",
"to",
"-",
"loss",
"ratio",
"ratio",
"of",
"positive",
"to",
"negative",
"returns",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L585-L599 | train | 204,284 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.msquared | def msquared(self, benchmark, rf=0.02, ddof=0):
"""M-squared, return scaled by relative total risk.
A measure of what a portfolio would have returned if it had
taken on the same *total* risk as the market index.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
rf : {float, TSeries, pd.Series}, default 0.02
If float, this represents an *compounded annualized*
risk-free rate; 2.0% is the default.
If a TSeries or pd.Series, this represents a time series
of periodic returns to a risk-free security.
To download a risk-free rate return series using
3-month US T-bill yields, see:`pyfinance.datasets.load_rf`.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
Returns
-------
float
"""
rf = self._validate_rf(rf)
scaling = benchmark.anlzd_stdev(ddof) / self.anlzd_stdev(ddof)
diff = self.anlzd_ret() - rf
return rf + diff * scaling | python | def msquared(self, benchmark, rf=0.02, ddof=0):
"""M-squared, return scaled by relative total risk.
A measure of what a portfolio would have returned if it had
taken on the same *total* risk as the market index.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
rf : {float, TSeries, pd.Series}, default 0.02
If float, this represents an *compounded annualized*
risk-free rate; 2.0% is the default.
If a TSeries or pd.Series, this represents a time series
of periodic returns to a risk-free security.
To download a risk-free rate return series using
3-month US T-bill yields, see:`pyfinance.datasets.load_rf`.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
Returns
-------
float
"""
rf = self._validate_rf(rf)
scaling = benchmark.anlzd_stdev(ddof) / self.anlzd_stdev(ddof)
diff = self.anlzd_ret() - rf
return rf + diff * scaling | [
"def",
"msquared",
"(",
"self",
",",
"benchmark",
",",
"rf",
"=",
"0.02",
",",
"ddof",
"=",
"0",
")",
":",
"rf",
"=",
"self",
".",
"_validate_rf",
"(",
"rf",
")",
"scaling",
"=",
"benchmark",
".",
"anlzd_stdev",
"(",
"ddof",
")",
"/",
"self",
".",
... | M-squared, return scaled by relative total risk.
A measure of what a portfolio would have returned if it had
taken on the same *total* risk as the market index.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
rf : {float, TSeries, pd.Series}, default 0.02
If float, this represents an *compounded annualized*
risk-free rate; 2.0% is the default.
If a TSeries or pd.Series, this represents a time series
of periodic returns to a risk-free security.
To download a risk-free rate return series using
3-month US T-bill yields, see:`pyfinance.datasets.load_rf`.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
Returns
-------
float | [
"M",
"-",
"squared",
"return",
"scaled",
"by",
"relative",
"total",
"risk",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L655-L685 | train | 204,285 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.pct_negative | def pct_negative(self, threshold=0.0):
"""Pct. of periods in which `self` is less than `threshold.`
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
Returns
-------
float
"""
return np.count_nonzero(self[self < threshold]) / self.count() | python | def pct_negative(self, threshold=0.0):
"""Pct. of periods in which `self` is less than `threshold.`
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
Returns
-------
float
"""
return np.count_nonzero(self[self < threshold]) / self.count() | [
"def",
"pct_negative",
"(",
"self",
",",
"threshold",
"=",
"0.0",
")",
":",
"return",
"np",
".",
"count_nonzero",
"(",
"self",
"[",
"self",
"<",
"threshold",
"]",
")",
"/",
"self",
".",
"count",
"(",
")"
] | Pct. of periods in which `self` is less than `threshold.`
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
Returns
-------
float | [
"Pct",
".",
"of",
"periods",
"in",
"which",
"self",
"is",
"less",
"than",
"threshold",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L687-L699 | train | 204,286 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.pct_positive | def pct_positive(self, threshold=0.0):
"""Pct. of periods in which `self` is greater than `threshold.`
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
Returns
-------
float
"""
return np.count_nonzero(self[self > threshold]) / self.count() | python | def pct_positive(self, threshold=0.0):
"""Pct. of periods in which `self` is greater than `threshold.`
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
Returns
-------
float
"""
return np.count_nonzero(self[self > threshold]) / self.count() | [
"def",
"pct_positive",
"(",
"self",
",",
"threshold",
"=",
"0.0",
")",
":",
"return",
"np",
".",
"count_nonzero",
"(",
"self",
"[",
"self",
">",
"threshold",
"]",
")",
"/",
"self",
".",
"count",
"(",
")"
] | Pct. of periods in which `self` is greater than `threshold.`
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
Returns
-------
float | [
"Pct",
".",
"of",
"periods",
"in",
"which",
"self",
"is",
"greater",
"than",
"threshold",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L701-L713 | train | 204,287 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.recov_date | def recov_date(self, return_date=False):
"""Drawdown recovery date.
Date at which `self` recovered to previous high-water mark.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
{datetime.date, pandas._libs.tslib.Timestamp, pd.NaT}
Returns NaT if recovery has not occured.
"""
dd = self.drawdown_idx()
# False beginning on trough date and all later dates.
mask = nancumprod(dd != nanmin(dd)).astype(bool)
res = dd.mask(mask) == 0
# If `res` is all False (recovery has not occured),
# .idxmax() will return `res.index[0]`.
if not res.any():
recov = pd.NaT
else:
recov = res.idxmax()
if return_date:
return recov.date()
return recov | python | def recov_date(self, return_date=False):
"""Drawdown recovery date.
Date at which `self` recovered to previous high-water mark.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
{datetime.date, pandas._libs.tslib.Timestamp, pd.NaT}
Returns NaT if recovery has not occured.
"""
dd = self.drawdown_idx()
# False beginning on trough date and all later dates.
mask = nancumprod(dd != nanmin(dd)).astype(bool)
res = dd.mask(mask) == 0
# If `res` is all False (recovery has not occured),
# .idxmax() will return `res.index[0]`.
if not res.any():
recov = pd.NaT
else:
recov = res.idxmax()
if return_date:
return recov.date()
return recov | [
"def",
"recov_date",
"(",
"self",
",",
"return_date",
"=",
"False",
")",
":",
"dd",
"=",
"self",
".",
"drawdown_idx",
"(",
")",
"# False beginning on trough date and all later dates.",
"mask",
"=",
"nancumprod",
"(",
"dd",
"!=",
"nanmin",
"(",
"dd",
")",
")",
... | Drawdown recovery date.
Date at which `self` recovered to previous high-water mark.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
{datetime.date, pandas._libs.tslib.Timestamp, pd.NaT}
Returns NaT if recovery has not occured. | [
"Drawdown",
"recovery",
"date",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L715-L746 | train | 204,288 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.rollup | def rollup(self, freq, **kwargs):
"""Downsample `self` through geometric linking.
Parameters
----------
freq : {'D', 'W', 'M', 'Q', 'A'}
The frequency of the result.
**kwargs
Passed to `self.resample()`.
Returns
-------
TSeries
Example
-------
# Derive quarterly returns from monthly returns.
>>> import numpy as np
>>> from pyfinance import TSeries
>>> np.random.seed(444)
>>> ts = TSeries(np.random.randn(12) / 100 + 0.002,
... index=pd.date_range('2016', periods=12, freq='M'))
>>> ts.rollup('Q')
2016-03-31 0.0274
2016-06-30 -0.0032
2016-09-30 -0.0028
2016-12-31 0.0127
Freq: Q-DEC, dtype: float64
"""
return self.ret_rels().resample(freq, **kwargs).prod() - 1.0 | python | def rollup(self, freq, **kwargs):
"""Downsample `self` through geometric linking.
Parameters
----------
freq : {'D', 'W', 'M', 'Q', 'A'}
The frequency of the result.
**kwargs
Passed to `self.resample()`.
Returns
-------
TSeries
Example
-------
# Derive quarterly returns from monthly returns.
>>> import numpy as np
>>> from pyfinance import TSeries
>>> np.random.seed(444)
>>> ts = TSeries(np.random.randn(12) / 100 + 0.002,
... index=pd.date_range('2016', periods=12, freq='M'))
>>> ts.rollup('Q')
2016-03-31 0.0274
2016-06-30 -0.0032
2016-09-30 -0.0028
2016-12-31 0.0127
Freq: Q-DEC, dtype: float64
"""
return self.ret_rels().resample(freq, **kwargs).prod() - 1.0 | [
"def",
"rollup",
"(",
"self",
",",
"freq",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"ret_rels",
"(",
")",
".",
"resample",
"(",
"freq",
",",
"*",
"*",
"kwargs",
")",
".",
"prod",
"(",
")",
"-",
"1.0"
] | Downsample `self` through geometric linking.
Parameters
----------
freq : {'D', 'W', 'M', 'Q', 'A'}
The frequency of the result.
**kwargs
Passed to `self.resample()`.
Returns
-------
TSeries
Example
-------
# Derive quarterly returns from monthly returns.
>>> import numpy as np
>>> from pyfinance import TSeries
>>> np.random.seed(444)
>>> ts = TSeries(np.random.randn(12) / 100 + 0.002,
... index=pd.date_range('2016', periods=12, freq='M'))
>>> ts.rollup('Q')
2016-03-31 0.0274
2016-06-30 -0.0032
2016-09-30 -0.0028
2016-12-31 0.0127
Freq: Q-DEC, dtype: float64 | [
"Downsample",
"self",
"through",
"geometric",
"linking",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L774-L804 | train | 204,289 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.semi_stdev | def semi_stdev(self, threshold=0.0, ddof=0, freq=None):
"""Semi-standard deviation; stdev of downside returns.
It is designed to address that fact that plain standard
deviation penalizes "upside volatility.""
Formula: `sqrt( sum([min(self - thresh, 0] **2 ) / (n - ddof) )`
Also known as: downside deviation.
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
While zero is the default, it is also customary to use
a "minimum acceptable return" (MAR) or a risk-free rate.
Note: this is assumed to be a *periodic*, not necessarily
annualized, return.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
A frequency string used to create an annualization factor.
If None, `self.freq` will be used. If that is also None,
a frequency will be inferred. If none can be inferred,
an exception is raised.
It may be any frequency string or anchored offset string
recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
'BQS-APR'.
Returns
-------
float
"""
if freq is None:
freq = self._try_get_freq()
if freq is None:
raise FrequencyError(msg)
n = self.count() - ddof
ss = (nansum(np.minimum(self - threshold, 0.0) ** 2) ** 0.5) / n
return ss * freq ** 0.5 | python | def semi_stdev(self, threshold=0.0, ddof=0, freq=None):
"""Semi-standard deviation; stdev of downside returns.
It is designed to address that fact that plain standard
deviation penalizes "upside volatility.""
Formula: `sqrt( sum([min(self - thresh, 0] **2 ) / (n - ddof) )`
Also known as: downside deviation.
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
While zero is the default, it is also customary to use
a "minimum acceptable return" (MAR) or a risk-free rate.
Note: this is assumed to be a *periodic*, not necessarily
annualized, return.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
A frequency string used to create an annualization factor.
If None, `self.freq` will be used. If that is also None,
a frequency will be inferred. If none can be inferred,
an exception is raised.
It may be any frequency string or anchored offset string
recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
'BQS-APR'.
Returns
-------
float
"""
if freq is None:
freq = self._try_get_freq()
if freq is None:
raise FrequencyError(msg)
n = self.count() - ddof
ss = (nansum(np.minimum(self - threshold, 0.0) ** 2) ** 0.5) / n
return ss * freq ** 0.5 | [
"def",
"semi_stdev",
"(",
"self",
",",
"threshold",
"=",
"0.0",
",",
"ddof",
"=",
"0",
",",
"freq",
"=",
"None",
")",
":",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"self",
".",
"_try_get_freq",
"(",
")",
"if",
"freq",
"is",
"None",
":",
"rai... | Semi-standard deviation; stdev of downside returns.
It is designed to address that fact that plain standard
deviation penalizes "upside volatility.""
Formula: `sqrt( sum([min(self - thresh, 0] **2 ) / (n - ddof) )`
Also known as: downside deviation.
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
While zero is the default, it is also customary to use
a "minimum acceptable return" (MAR) or a risk-free rate.
Note: this is assumed to be a *periodic*, not necessarily
annualized, return.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
A frequency string used to create an annualization factor.
If None, `self.freq` will be used. If that is also None,
a frequency will be inferred. If none can be inferred,
an exception is raised.
It may be any frequency string or anchored offset string
recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
'BQS-APR'.
Returns
-------
float | [
"Semi",
"-",
"standard",
"deviation",
";",
"stdev",
"of",
"downside",
"returns",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L847-L887 | train | 204,290 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.sharpe_ratio | def sharpe_ratio(self, rf=0.02, ddof=0):
"""Return over `rf` per unit of total risk.
The average return in excess of the risk-free rate divided
by the standard deviation of return; a measure of the average
excess return earned per unit of standard deviation of return.
[Source: CFA Institute]
Parameters
----------
rf : {float, TSeries, pd.Series}, default 0.02
If float, this represents an *compounded annualized*
risk-free rate; 2.0% is the default.
If a TSeries or pd.Series, this represents a time series
of periodic returns to a risk-free security.
To download a risk-free rate return series using
3-month US T-bill yields, see:`pyfinance.datasets.load_rf`.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
Returns
-------
float
"""
rf = self._validate_rf(rf)
stdev = self.anlzd_stdev(ddof=ddof)
return (self.anlzd_ret() - rf) / stdev | python | def sharpe_ratio(self, rf=0.02, ddof=0):
"""Return over `rf` per unit of total risk.
The average return in excess of the risk-free rate divided
by the standard deviation of return; a measure of the average
excess return earned per unit of standard deviation of return.
[Source: CFA Institute]
Parameters
----------
rf : {float, TSeries, pd.Series}, default 0.02
If float, this represents an *compounded annualized*
risk-free rate; 2.0% is the default.
If a TSeries or pd.Series, this represents a time series
of periodic returns to a risk-free security.
To download a risk-free rate return series using
3-month US T-bill yields, see:`pyfinance.datasets.load_rf`.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
Returns
-------
float
"""
rf = self._validate_rf(rf)
stdev = self.anlzd_stdev(ddof=ddof)
return (self.anlzd_ret() - rf) / stdev | [
"def",
"sharpe_ratio",
"(",
"self",
",",
"rf",
"=",
"0.02",
",",
"ddof",
"=",
"0",
")",
":",
"rf",
"=",
"self",
".",
"_validate_rf",
"(",
"rf",
")",
"stdev",
"=",
"self",
".",
"anlzd_stdev",
"(",
"ddof",
"=",
"ddof",
")",
"return",
"(",
"self",
"... | Return over `rf` per unit of total risk.
The average return in excess of the risk-free rate divided
by the standard deviation of return; a measure of the average
excess return earned per unit of standard deviation of return.
[Source: CFA Institute]
Parameters
----------
rf : {float, TSeries, pd.Series}, default 0.02
If float, this represents an *compounded annualized*
risk-free rate; 2.0% is the default.
If a TSeries or pd.Series, this represents a time series
of periodic returns to a risk-free security.
To download a risk-free rate return series using
3-month US T-bill yields, see:`pyfinance.datasets.load_rf`.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
Returns
-------
float | [
"Return",
"over",
"rf",
"per",
"unit",
"of",
"total",
"risk",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L889-L917 | train | 204,291 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.sortino_ratio | def sortino_ratio(self, threshold=0.0, ddof=0, freq=None):
"""Return over a threshold per unit of downside deviation.
A performance appraisal ratio that replaces standard deviation
in the Sharpe ratio with downside deviation.
[Source: CFA Institute]
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
While zero is the default, it is also customary to use
a "minimum acceptable return" (MAR) or a risk-free rate.
Note: this is assumed to be a *periodic*, not necessarily
annualized, return.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
A frequency string used to create an annualization factor.
If None, `self.freq` will be used. If that is also None,
a frequency will be inferred. If none can be inferred,
an exception is raised.
It may be any frequency string or anchored offset string
recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
'BQS-APR'.
Returns
-------
float
"""
stdev = self.semi_stdev(threshold=threshold, ddof=ddof, freq=freq)
return (self.anlzd_ret() - threshold) / stdev | python | def sortino_ratio(self, threshold=0.0, ddof=0, freq=None):
"""Return over a threshold per unit of downside deviation.
A performance appraisal ratio that replaces standard deviation
in the Sharpe ratio with downside deviation.
[Source: CFA Institute]
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
While zero is the default, it is also customary to use
a "minimum acceptable return" (MAR) or a risk-free rate.
Note: this is assumed to be a *periodic*, not necessarily
annualized, return.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
A frequency string used to create an annualization factor.
If None, `self.freq` will be used. If that is also None,
a frequency will be inferred. If none can be inferred,
an exception is raised.
It may be any frequency string or anchored offset string
recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
'BQS-APR'.
Returns
-------
float
"""
stdev = self.semi_stdev(threshold=threshold, ddof=ddof, freq=freq)
return (self.anlzd_ret() - threshold) / stdev | [
"def",
"sortino_ratio",
"(",
"self",
",",
"threshold",
"=",
"0.0",
",",
"ddof",
"=",
"0",
",",
"freq",
"=",
"None",
")",
":",
"stdev",
"=",
"self",
".",
"semi_stdev",
"(",
"threshold",
"=",
"threshold",
",",
"ddof",
"=",
"ddof",
",",
"freq",
"=",
"... | Return over a threshold per unit of downside deviation.
A performance appraisal ratio that replaces standard deviation
in the Sharpe ratio with downside deviation.
[Source: CFA Institute]
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
While zero is the default, it is also customary to use
a "minimum acceptable return" (MAR) or a risk-free rate.
Note: this is assumed to be a *periodic*, not necessarily
annualized, return.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
A frequency string used to create an annualization factor.
If None, `self.freq` will be used. If that is also None,
a frequency will be inferred. If none can be inferred,
an exception is raised.
It may be any frequency string or anchored offset string
recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
'BQS-APR'.
Returns
-------
float | [
"Return",
"over",
"a",
"threshold",
"per",
"unit",
"of",
"downside",
"deviation",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L919-L951 | train | 204,292 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.tracking_error | def tracking_error(self, benchmark, ddof=0):
"""Standard deviation of excess returns.
The standard deviation of the differences between
a portfolio's returns and its benchmark's returns.
[Source: CFA Institute]
Also known as: tracking risk; active risk
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
Returns
-------
float
"""
er = self.excess_ret(benchmark=benchmark)
return er.anlzd_stdev(ddof=ddof) | python | def tracking_error(self, benchmark, ddof=0):
"""Standard deviation of excess returns.
The standard deviation of the differences between
a portfolio's returns and its benchmark's returns.
[Source: CFA Institute]
Also known as: tracking risk; active risk
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
Returns
-------
float
"""
er = self.excess_ret(benchmark=benchmark)
return er.anlzd_stdev(ddof=ddof) | [
"def",
"tracking_error",
"(",
"self",
",",
"benchmark",
",",
"ddof",
"=",
"0",
")",
":",
"er",
"=",
"self",
".",
"excess_ret",
"(",
"benchmark",
"=",
"benchmark",
")",
"return",
"er",
".",
"anlzd_stdev",
"(",
"ddof",
"=",
"ddof",
")"
] | Standard deviation of excess returns.
The standard deviation of the differences between
a portfolio's returns and its benchmark's returns.
[Source: CFA Institute]
Also known as: tracking risk; active risk
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
Returns
-------
float | [
"Standard",
"deviation",
"of",
"excess",
"returns",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L953-L975 | train | 204,293 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.treynor_ratio | def treynor_ratio(self, benchmark, rf=0.02):
"""Return over `rf` per unit of systematic risk.
A measure of risk-adjusted performance that relates a
portfolio's excess returns to the portfolio's beta.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
rf : {float, TSeries, pd.Series}, default 0.02
If float, this represents an *compounded annualized*
risk-free rate; 2.0% is the default.
If a TSeries or pd.Series, this represents a time series
of periodic returns to a risk-free security.
To download a risk-free rate return series using
3-month US T-bill yields, see:`pyfinance.datasets.load_rf`.
Returns
-------
float
"""
benchmark = _try_to_squeeze(benchmark)
if benchmark.ndim > 1:
raise ValueError("Treynor ratio requires a single benchmark")
rf = self._validate_rf(rf)
beta = self.beta(benchmark)
return (self.anlzd_ret() - rf) / beta | python | def treynor_ratio(self, benchmark, rf=0.02):
"""Return over `rf` per unit of systematic risk.
A measure of risk-adjusted performance that relates a
portfolio's excess returns to the portfolio's beta.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
rf : {float, TSeries, pd.Series}, default 0.02
If float, this represents an *compounded annualized*
risk-free rate; 2.0% is the default.
If a TSeries or pd.Series, this represents a time series
of periodic returns to a risk-free security.
To download a risk-free rate return series using
3-month US T-bill yields, see:`pyfinance.datasets.load_rf`.
Returns
-------
float
"""
benchmark = _try_to_squeeze(benchmark)
if benchmark.ndim > 1:
raise ValueError("Treynor ratio requires a single benchmark")
rf = self._validate_rf(rf)
beta = self.beta(benchmark)
return (self.anlzd_ret() - rf) / beta | [
"def",
"treynor_ratio",
"(",
"self",
",",
"benchmark",
",",
"rf",
"=",
"0.02",
")",
":",
"benchmark",
"=",
"_try_to_squeeze",
"(",
"benchmark",
")",
"if",
"benchmark",
".",
"ndim",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Treynor ratio requires a single b... | Return over `rf` per unit of systematic risk.
A measure of risk-adjusted performance that relates a
portfolio's excess returns to the portfolio's beta.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
rf : {float, TSeries, pd.Series}, default 0.02
If float, this represents an *compounded annualized*
risk-free rate; 2.0% is the default.
If a TSeries or pd.Series, this represents a time series
of periodic returns to a risk-free security.
To download a risk-free rate return series using
3-month US T-bill yields, see:`pyfinance.datasets.load_rf`.
Returns
-------
float | [
"Return",
"over",
"rf",
"per",
"unit",
"of",
"systematic",
"risk",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L977-L1007 | train | 204,294 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.up_capture | def up_capture(self, benchmark, threshold=0.0, compare_op="ge"):
"""Upside capture ratio.
Measures the performance of `self` relative to benchmark
conditioned on periods where `benchmark` is gt or ge to
`threshold`.
Upside capture ratios are calculated by taking the fund's
monthly return during the periods of positive benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is gt/ge `threshold`.
compare_op : {'ge', 'gt'}
Comparison operator used to compare to `threshold`.
'gt' is greater-than; 'ge' is greater-than-or-equal.
Returns
-------
float
Note
----
This metric uses geometric, not arithmetic, mean return.
"""
slf, bm = self.upmarket_filter(
benchmark=benchmark,
threshold=threshold,
compare_op=compare_op,
include_benchmark=True,
)
return slf.geomean() / bm.geomean() | python | def up_capture(self, benchmark, threshold=0.0, compare_op="ge"):
"""Upside capture ratio.
Measures the performance of `self` relative to benchmark
conditioned on periods where `benchmark` is gt or ge to
`threshold`.
Upside capture ratios are calculated by taking the fund's
monthly return during the periods of positive benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is gt/ge `threshold`.
compare_op : {'ge', 'gt'}
Comparison operator used to compare to `threshold`.
'gt' is greater-than; 'ge' is greater-than-or-equal.
Returns
-------
float
Note
----
This metric uses geometric, not arithmetic, mean return.
"""
slf, bm = self.upmarket_filter(
benchmark=benchmark,
threshold=threshold,
compare_op=compare_op,
include_benchmark=True,
)
return slf.geomean() / bm.geomean() | [
"def",
"up_capture",
"(",
"self",
",",
"benchmark",
",",
"threshold",
"=",
"0.0",
",",
"compare_op",
"=",
"\"ge\"",
")",
":",
"slf",
",",
"bm",
"=",
"self",
".",
"upmarket_filter",
"(",
"benchmark",
"=",
"benchmark",
",",
"threshold",
"=",
"threshold",
"... | Upside capture ratio.
Measures the performance of `self` relative to benchmark
conditioned on periods where `benchmark` is gt or ge to
`threshold`.
Upside capture ratios are calculated by taking the fund's
monthly return during the periods of positive benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is gt/ge `threshold`.
compare_op : {'ge', 'gt'}
Comparison operator used to compare to `threshold`.
'gt' is greater-than; 'ge' is greater-than-or-equal.
Returns
-------
float
Note
----
This metric uses geometric, not arithmetic, mean return. | [
"Upside",
"capture",
"ratio",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L1065-L1104 | train | 204,295 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.upmarket_filter | def upmarket_filter(
self,
benchmark,
threshold=0.0,
compare_op="ge",
include_benchmark=False,
):
"""Drop elementwise samples where `benchmark` < `threshold`.
Filters `self` (and optionally, `benchmark`) to periods
where `benchmark` > `threshold`. (Or >= `threshold`.)
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.0
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is gt/ge `threshold`.
compare_op : {'ge', 'gt'}
Comparison operator used to compare to `threshold`.
'gt' is greater-than; 'ge' is greater-than-or-equal.
include_benchmark : bool, default False
If True, return tuple of (`self`, `benchmark`) both
filtered. If False, return only `self` filtered.
Returns
-------
TSeries or tuple of TSeries
TSeries if `include_benchmark=False`, otherwise, tuple.
"""
return self._mkt_filter(
benchmark=benchmark,
threshold=threshold,
compare_op=compare_op,
include_benchmark=include_benchmark,
) | python | def upmarket_filter(
self,
benchmark,
threshold=0.0,
compare_op="ge",
include_benchmark=False,
):
"""Drop elementwise samples where `benchmark` < `threshold`.
Filters `self` (and optionally, `benchmark`) to periods
where `benchmark` > `threshold`. (Or >= `threshold`.)
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.0
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is gt/ge `threshold`.
compare_op : {'ge', 'gt'}
Comparison operator used to compare to `threshold`.
'gt' is greater-than; 'ge' is greater-than-or-equal.
include_benchmark : bool, default False
If True, return tuple of (`self`, `benchmark`) both
filtered. If False, return only `self` filtered.
Returns
-------
TSeries or tuple of TSeries
TSeries if `include_benchmark=False`, otherwise, tuple.
"""
return self._mkt_filter(
benchmark=benchmark,
threshold=threshold,
compare_op=compare_op,
include_benchmark=include_benchmark,
) | [
"def",
"upmarket_filter",
"(",
"self",
",",
"benchmark",
",",
"threshold",
"=",
"0.0",
",",
"compare_op",
"=",
"\"ge\"",
",",
"include_benchmark",
"=",
"False",
",",
")",
":",
"return",
"self",
".",
"_mkt_filter",
"(",
"benchmark",
"=",
"benchmark",
",",
"... | Drop elementwise samples where `benchmark` < `threshold`.
Filters `self` (and optionally, `benchmark`) to periods
where `benchmark` > `threshold`. (Or >= `threshold`.)
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.0
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is gt/ge `threshold`.
compare_op : {'ge', 'gt'}
Comparison operator used to compare to `threshold`.
'gt' is greater-than; 'ge' is greater-than-or-equal.
include_benchmark : bool, default False
If True, return tuple of (`self`, `benchmark`) both
filtered. If False, return only `self` filtered.
Returns
-------
TSeries or tuple of TSeries
TSeries if `include_benchmark=False`, otherwise, tuple. | [
"Drop",
"elementwise",
"samples",
"where",
"benchmark",
"<",
"threshold",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L1106-L1144 | train | 204,296 |
bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.CAPM | def CAPM(self, benchmark, has_const=False, use_const=True):
"""Interface to OLS regression against `benchmark`.
`self.alpha()`, `self.beta()` and several other methods
stem from here. For the full method set, see
`pyfinance.ols.OLS`.
Parameters
----------
benchmark : {pd.Series, TSeries, pd.DataFrame, np.ndarray}
The benchmark securitie(s) to which `self` is compared.
has_const : bool, default False
Specifies whether `benchmark` includes a user-supplied constant
(a column vector). If False, it is added at instantiation.
use_const : bool, default True
Whether to include an intercept term in the model output. Note the
difference between `has_const` and `use_const`: the former
specifies whether a column vector of 1s is included in the
input; the latter specifies whether the model itself
should include a constant (intercept) term. Exogenous
data that is ~N(0,1) would have a constant equal to zero;
specify use_const=False in this situation.
Returns
-------
pyfinance.ols.OLS
"""
return ols.OLS(
y=self, x=benchmark, has_const=has_const, use_const=use_const
) | python | def CAPM(self, benchmark, has_const=False, use_const=True):
"""Interface to OLS regression against `benchmark`.
`self.alpha()`, `self.beta()` and several other methods
stem from here. For the full method set, see
`pyfinance.ols.OLS`.
Parameters
----------
benchmark : {pd.Series, TSeries, pd.DataFrame, np.ndarray}
The benchmark securitie(s) to which `self` is compared.
has_const : bool, default False
Specifies whether `benchmark` includes a user-supplied constant
(a column vector). If False, it is added at instantiation.
use_const : bool, default True
Whether to include an intercept term in the model output. Note the
difference between `has_const` and `use_const`: the former
specifies whether a column vector of 1s is included in the
input; the latter specifies whether the model itself
should include a constant (intercept) term. Exogenous
data that is ~N(0,1) would have a constant equal to zero;
specify use_const=False in this situation.
Returns
-------
pyfinance.ols.OLS
"""
return ols.OLS(
y=self, x=benchmark, has_const=has_const, use_const=use_const
) | [
"def",
"CAPM",
"(",
"self",
",",
"benchmark",
",",
"has_const",
"=",
"False",
",",
"use_const",
"=",
"True",
")",
":",
"return",
"ols",
".",
"OLS",
"(",
"y",
"=",
"self",
",",
"x",
"=",
"benchmark",
",",
"has_const",
"=",
"has_const",
",",
"use_const... | Interface to OLS regression against `benchmark`.
`self.alpha()`, `self.beta()` and several other methods
stem from here. For the full method set, see
`pyfinance.ols.OLS`.
Parameters
----------
benchmark : {pd.Series, TSeries, pd.DataFrame, np.ndarray}
The benchmark securitie(s) to which `self` is compared.
has_const : bool, default False
Specifies whether `benchmark` includes a user-supplied constant
(a column vector). If False, it is added at instantiation.
use_const : bool, default True
Whether to include an intercept term in the model output. Note the
difference between `has_const` and `use_const`: the former
specifies whether a column vector of 1s is included in the
input; the latter specifies whether the model itself
should include a constant (intercept) term. Exogenous
data that is ~N(0,1) would have a constant equal to zero;
specify use_const=False in this situation.
Returns
-------
pyfinance.ols.OLS | [
"Interface",
"to",
"OLS",
"regression",
"against",
"benchmark",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L1155-L1185 | train | 204,297 |
bsolomon1124/pyfinance | pyfinance/datasets.py | load_retaildata | def load_retaildata():
"""Monthly retail trade data from census.gov."""
# full = 'https://www.census.gov/retail/mrts/www/mrtssales92-present.xls'
# indiv = 'https://www.census.gov/retail/marts/www/timeseries.html'
db = {
"Auto, other Motor Vehicle": "https://www.census.gov/retail/marts/www/adv441x0.txt",
"Building Material and Garden Equipment and Supplies Dealers": "https://www.census.gov/retail/marts/www/adv44400.txt",
"Clothing and Clothing Accessories Stores": "https://www.census.gov/retail/marts/www/adv44800.txt",
"Dept. Stores (ex. leased depts)": "https://www.census.gov/retail/marts/www/adv45210.txt",
"Electronics and Appliance Stores": "https://www.census.gov/retail/marts/www/adv44300.txt",
"Food Services and Drinking Places": "https://www.census.gov/retail/marts/www/adv72200.txt",
"Food and Beverage Stores": "https://www.census.gov/retail/marts/www/adv44500.txt",
"Furniture and Home Furnishings Stores": "https://www.census.gov/retail/marts/www/adv44200.txt",
"Gasoline Stations": "https://www.census.gov/retail/marts/www/adv44700.txt",
"General Merchandise Stores": "https://www.census.gov/retail/marts/www/adv45200.txt",
"Grocery Stores": "https://www.census.gov/retail/marts/www/adv44510.txt",
"Health and Personal Care Stores": "https://www.census.gov/retail/marts/www/adv44600.txt",
"Miscellaneous Store Retailers": "https://www.census.gov/retail/marts/www/adv45300.txt",
"Motor Vehicle and Parts Dealers": "https://www.census.gov/retail/marts/www/adv44100.txt",
"Nonstore Retailers": "https://www.census.gov/retail/marts/www/adv45400.txt",
"Retail and Food Services, total": "https://www.census.gov/retail/marts/www/adv44x72.txt",
"Retail, total": "https://www.census.gov/retail/marts/www/adv44000.txt",
"Sporting Goods, Hobby, Book, and Music Stores": "https://www.census.gov/retail/marts/www/adv45100.txt",
"Total (excl. Motor Vehicle)": "https://www.census.gov/retail/marts/www/adv44y72.txt",
"Retail (excl. Motor Vehicle and Parts Dealers)": "https://www.census.gov/retail/marts/www/adv4400a.txt",
}
dct = {}
for key, value in db.items():
data = pd.read_csv(
value,
skiprows=5,
skip_blank_lines=True,
header=None,
sep="\s+",
index_col=0,
)
try:
cut = data.index.get_loc("SEASONAL")
except KeyError:
cut = data.index.get_loc("NO")
data = data.iloc[:cut]
data = data.apply(lambda col: pd.to_numeric(col, downcast="float"))
data = data.stack()
year = data.index.get_level_values(0)
month = data.index.get_level_values(1)
idx = pd.to_datetime(
{"year": year, "month": month, "day": 1}
) + offsets.MonthEnd(1)
data.index = idx
data.name = key
dct[key] = data
sales = pd.DataFrame(dct)
sales = sales.reindex(
pd.date_range(sales.index[0], sales.index[-1], freq="M")
)
# TODO: account for any skipped months; could specify a DateOffset to
# `freq` param of `pandas.DataFrame.shift`
yoy = sales.pct_change(periods=12)
return sales, yoy | python | def load_retaildata():
"""Monthly retail trade data from census.gov."""
# full = 'https://www.census.gov/retail/mrts/www/mrtssales92-present.xls'
# indiv = 'https://www.census.gov/retail/marts/www/timeseries.html'
db = {
"Auto, other Motor Vehicle": "https://www.census.gov/retail/marts/www/adv441x0.txt",
"Building Material and Garden Equipment and Supplies Dealers": "https://www.census.gov/retail/marts/www/adv44400.txt",
"Clothing and Clothing Accessories Stores": "https://www.census.gov/retail/marts/www/adv44800.txt",
"Dept. Stores (ex. leased depts)": "https://www.census.gov/retail/marts/www/adv45210.txt",
"Electronics and Appliance Stores": "https://www.census.gov/retail/marts/www/adv44300.txt",
"Food Services and Drinking Places": "https://www.census.gov/retail/marts/www/adv72200.txt",
"Food and Beverage Stores": "https://www.census.gov/retail/marts/www/adv44500.txt",
"Furniture and Home Furnishings Stores": "https://www.census.gov/retail/marts/www/adv44200.txt",
"Gasoline Stations": "https://www.census.gov/retail/marts/www/adv44700.txt",
"General Merchandise Stores": "https://www.census.gov/retail/marts/www/adv45200.txt",
"Grocery Stores": "https://www.census.gov/retail/marts/www/adv44510.txt",
"Health and Personal Care Stores": "https://www.census.gov/retail/marts/www/adv44600.txt",
"Miscellaneous Store Retailers": "https://www.census.gov/retail/marts/www/adv45300.txt",
"Motor Vehicle and Parts Dealers": "https://www.census.gov/retail/marts/www/adv44100.txt",
"Nonstore Retailers": "https://www.census.gov/retail/marts/www/adv45400.txt",
"Retail and Food Services, total": "https://www.census.gov/retail/marts/www/adv44x72.txt",
"Retail, total": "https://www.census.gov/retail/marts/www/adv44000.txt",
"Sporting Goods, Hobby, Book, and Music Stores": "https://www.census.gov/retail/marts/www/adv45100.txt",
"Total (excl. Motor Vehicle)": "https://www.census.gov/retail/marts/www/adv44y72.txt",
"Retail (excl. Motor Vehicle and Parts Dealers)": "https://www.census.gov/retail/marts/www/adv4400a.txt",
}
dct = {}
for key, value in db.items():
data = pd.read_csv(
value,
skiprows=5,
skip_blank_lines=True,
header=None,
sep="\s+",
index_col=0,
)
try:
cut = data.index.get_loc("SEASONAL")
except KeyError:
cut = data.index.get_loc("NO")
data = data.iloc[:cut]
data = data.apply(lambda col: pd.to_numeric(col, downcast="float"))
data = data.stack()
year = data.index.get_level_values(0)
month = data.index.get_level_values(1)
idx = pd.to_datetime(
{"year": year, "month": month, "day": 1}
) + offsets.MonthEnd(1)
data.index = idx
data.name = key
dct[key] = data
sales = pd.DataFrame(dct)
sales = sales.reindex(
pd.date_range(sales.index[0], sales.index[-1], freq="M")
)
# TODO: account for any skipped months; could specify a DateOffset to
# `freq` param of `pandas.DataFrame.shift`
yoy = sales.pct_change(periods=12)
return sales, yoy | [
"def",
"load_retaildata",
"(",
")",
":",
"# full = 'https://www.census.gov/retail/mrts/www/mrtssales92-present.xls'\r",
"# indiv = 'https://www.census.gov/retail/marts/www/timeseries.html'\r",
"db",
"=",
"{",
"\"Auto, other Motor Vehicle\"",
":",
"\"https://www.census.gov/retail/marts/www/ad... | Monthly retail trade data from census.gov. | [
"Monthly",
"retail",
"trade",
"data",
"from",
"census",
".",
"gov",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/datasets.py#L597-L659 | train | 204,298 |
bsolomon1124/pyfinance | pyfinance/utils.py | avail | def avail(df):
"""Return start & end availability for each column in a DataFrame."""
avail = DataFrame(
{
"start": df.apply(lambda col: col.first_valid_index()),
"end": df.apply(lambda col: col.last_valid_index()),
}
)
return avail[["start", "end"]] | python | def avail(df):
"""Return start & end availability for each column in a DataFrame."""
avail = DataFrame(
{
"start": df.apply(lambda col: col.first_valid_index()),
"end": df.apply(lambda col: col.last_valid_index()),
}
)
return avail[["start", "end"]] | [
"def",
"avail",
"(",
"df",
")",
":",
"avail",
"=",
"DataFrame",
"(",
"{",
"\"start\"",
":",
"df",
".",
"apply",
"(",
"lambda",
"col",
":",
"col",
".",
"first_valid_index",
"(",
")",
")",
",",
"\"end\"",
":",
"df",
".",
"apply",
"(",
"lambda",
"col"... | Return start & end availability for each column in a DataFrame. | [
"Return",
"start",
"&",
"end",
"availability",
"for",
"each",
"column",
"in",
"a",
"DataFrame",
"."
] | c95925209a809b4e648e79cbeaf7711d8e5ff1a6 | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/utils.py#L135-L143 | train | 204,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.