text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def list_models(filename):
    """Lists all models in given filename.

    Parameters
    ----------
    filename : str
        path to filename, where the model has been stored.

    Returns
    -------
    obj : dict
        A mapping by name and a comprehensive description like this:
    """
    from .h5file import H5File
    # open read-only; the context manager guarantees the file handle is closed
    with H5File(filename, mode='r') as handle:
        return handle.models_descriptive
def is_iterable_of_int(l):
    r"""Checks if l is iterable and contains only integral types"""
    # short-circuits exactly like the explicit early-return version
    return is_iterable(l) and all(is_int(item) for item in l)
def is_iterable_of_float(l):
    r"""Checks if l is iterable and contains only floating point types"""
    # short-circuits exactly like the explicit early-return version
    return is_iterable(l) and all(is_float(item) for item in l)
def is_int_vector(l):
    r"""Checks if l is a numpy array of integers"""
    # 1D ndarray whose dtype kind is signed ('i') or unsigned ('u') integer
    return isinstance(l, np.ndarray) and l.ndim == 1 and l.dtype.kind in 'iu'
def is_bool_matrix(l):
    r"""Checks if l is a 2D numpy array of bools"""
    return isinstance(l, np.ndarray) and l.ndim == 2 and l.dtype == bool
def is_float_array(l):
    r"""Checks if l is a numpy array of floats (any dimension)"""
    return isinstance(l, np.ndarray) and l.dtype.kind == 'f'
def ensure_int_vector(I, require_order=False):
    """Checks if the argument can be converted to an array of ints and does that.

    Parameters
    ----------
    I : int or iterable of int
    require_order : bool
        If False (default), an unordered set is accepted.
        If True, a set is not accepted.

    Returns
    -------
    arr : ndarray(n)
        numpy array with the integers contained in the argument
    """
    if is_int_vector(I):
        return I
    if is_int(I):
        return np.array([I])
    if is_list_of_int(I) or is_tuple_of_int(I):
        return np.array(I)
    if isinstance(I, set):
        if require_order:
            raise TypeError('Argument is an unordered set, but I require an ordered array of integers')
        ordered = list(I)
        if is_list_of_int(ordered):
            return np.array(ordered)
        # NOTE(review): a set containing non-ints falls through and returns
        # None here, mirroring the original control flow — confirm intended
        return None
    raise TypeError('Argument is not of a type that is convertible to an array of integers.')
def ensure_float_vector(F, require_order=False):
    """Ensures that F is a numpy array of floats

    If F is already a numpy array of floats, F is returned (not copied!).
    Otherwise, checks if the argument can be converted to an array of floats
    and does that.

    Parameters
    ----------
    F : float, or iterable of float
    require_order : bool
        If False (default), an unordered set is accepted.
        If True, a set is not accepted.

    Returns
    -------
    arr : ndarray(n)
        numpy array with the floats contained in the argument
    """
    if is_float_vector(F):
        return F
    if is_float(F):
        return np.array([F])
    if is_iterable_of_float(F):
        return np.array(F)
    if isinstance(F, set):
        if require_order:
            raise TypeError('Argument is an unordered set, but I require an ordered array of floats')
        ordered = list(F)
        if is_list_of_float(ordered):
            return np.array(ordered)
        # NOTE(review): a set containing non-floats falls through and returns
        # None here, mirroring the original control flow — confirm intended
        return None
    raise TypeError('Argument is not of a type that is convertible to an array of floats.')
def ensure_dtype_float(x, default=np.float64):
    r"""Makes sure that x is type of float

    Float arrays are returned unchanged; signed-integer arrays are cast to
    `default`; anything else raises TypeError.
    """
    if not isinstance(x, np.ndarray):
        raise TypeError('x is not an array')
    kind = x.dtype.kind
    if kind == 'f':
        return x
    if kind == 'i':
        return x.astype(default)
    raise TypeError('x is of type '+str(x.dtype)+' that cannot be converted to float')
def ensure_ndarray(A, shape=None, uniform=None, ndim=None, size=None, dtype=None, kind=None):
    r"""Ensures A is an ndarray and does an assert_array with the given parameters

    Returns
    -------
    A : ndarray
        If A is already an ndarray, it is just returned. Otherwise this is an
        independent copy as an ndarray

    Raises
    ------
    AssertionError
        If A cannot be converted to an ndarray, or if assert_array fails.
    """
    if not isinstance(A, np.ndarray):
        try:
            A = np.array(A)
        # was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrow to Exception and chain the original cause
        except Exception as e:
            raise AssertionError('Given argument cannot be converted to an ndarray:\n'+str(A)) from e
    assert_array(A, shape=shape, uniform=uniform, ndim=ndim, size=size, dtype=dtype, kind=kind)
    return A
def ensure_ndarray_or_sparse(A, shape=None, uniform=None, ndim=None, size=None, dtype=None, kind=None):
    r"""Ensures A is an ndarray or a scipy sparse matrix and does an assert_array
    with the given parameters

    Returns
    -------
    A : ndarray
        If A is already an ndarray, it is just returned. Otherwise this is an
        independent copy as an ndarray

    Raises
    ------
    AssertionError
        If A cannot be converted to an ndarray, or if assert_array fails.
    """
    if not isinstance(A, np.ndarray) and not scisp.issparse(A):
        try:
            A = np.array(A)
        # was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrow to Exception and chain the original cause
        except Exception as e:
            raise AssertionError('Given argument cannot be converted to an ndarray:\n'+str(A)) from e
    assert_array(A, shape=shape, uniform=uniform, ndim=ndim, size=size, dtype=dtype, kind=kind)
    return A
def ensure_ndarray_or_None(A, shape=None, uniform=None, ndim=None, size=None, dtype=None, kind=None):
    r"""Ensures A is None or an ndarray and does an assert_array with the given parameters"""
    # None passes straight through; everything else is validated/converted
    if A is None:
        return None
    return ensure_ndarray(A, shape=shape, uniform=uniform, ndim=ndim, size=size, dtype=dtype, kind=kind)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _describe_atom(topology, index): """ Returns a string describing the given atom :param topology: :param index: :return: """
at = topology.atom(index) if topology.n_chains > 1: return "%s %i %s %i %i" % (at.residue.name, at.residue.resSeq, at.name, at.index, at.residue.chain.index ) else: return "%s %i %s %i" % (at.residue.name, at.residue.resSeq, at.name, at.index)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _first_and_last_element(arr): """Returns first and last element of numpy array or sparse matrix."""
if isinstance(arr, np.ndarray) or hasattr(arr, 'data'): # numpy array or sparse matrix with .data attribute data = arr.data if sparse.issparse(arr) else arr return data.flat[0], data.flat[-1] else: # Sparse matrices without .data attribute. Only dok_matrix at # the time of writing, in this case indexing is fast return arr[0, 0], arr[-1, -1]
def running_covar(xx=True, xy=False, yy=False, remove_mean=False, symmetrize=False,
                  sparse_mode='auto', modify_data=False, column_selection=None,
                  diag_only=False, nsave=5):
    """Returns a running covariance estimator

    Returns an estimator object that can be fed chunks of X and Y data, and
    that can generate on-the-fly estimates of mean, covariance, running sum
    and second moment matrix.

    Parameters
    ----------
    xx, xy, yy : bool
        Estimate the covariance of X, the cross-covariance of X and Y,
        and/or the covariance of Y.
    remove_mean : bool
        Remove the data mean in the covariance estimation.
    symmetrize : bool
        Use symmetric estimates (sum_t x_t + y_t; X'X + Y'Y and Y'X + X'Y).
    sparse_mode : str
        One of 'dense', 'sparse', 'auto'.
    modify_data : bool
        If remove_mean=True, remove the mean in-place (faster; only safe if
        the input data is not used elsewhere).
    column_selection : ndarray(k, dtype=int) or None
        Indices of the columns to compute; None computes all columns.
    diag_only : bool
        Restrict computation to the diagonal entries (autocorrelations) only.
    nsave : int
        Depth of Moment storage; moments of similar statistical weight are
        combined pairwise (Chan/Golub/LeVeque algorithm [1]_).

    References
    ----------
    .. [1] http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
    """
    return RunningCovar(compute_XX=xx, compute_XY=xy, compute_YY=yy,
                        remove_mean=remove_mean, symmetrize=symmetrize,
                        sparse_mode=sparse_mode, modify_data=modify_data,
                        column_selection=column_selection,
                        diag_only=diag_only, nsave=nsave)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _can_merge_tail(self): """ Checks if the two last list elements can be merged """
if len(self.storage) < 2: return False return self.storage[-2].w <= self.storage[-1].w * self.rtol
def store(self, moments):
    """ Store object X with weight w """
    if len(self.storage) == self.nsave:
        # storage is full: fold the new moments directly into the newest entry
        self.storage[-1].combine(moments, mean_free=self.remove_mean)
    else:
        self.storage.append(moments)
    # keep merging the tail while neighbouring entries have similar weight
    while self._can_merge_tail():
        tail = self.storage.pop()
        self.storage[-1].combine(tail, mean_free=self.remove_mean)
def submodel(self, states=None, obs=None):
    """Returns a HMM with restricted state space

    Parameters
    ----------
    states : None or int-array
        Hidden states to restrict the model to (if not None).
    obs : None, str or int-array
        Observed states to restrict the model to (if not None).

    Returns
    -------
    hmm : HMM
        The restricted HMM.
    """
    # restrict the reference model and every sampled model consistently
    reference = super(SampledHMSM, self).submodel(states=states, obs=obs)
    restricted_samples = [s.submodel(states=states, obs=obs) for s in self.samples]
    return SampledHMSM(restricted_samples, ref=reference, conf=self.conf)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_lags(maxlag, multiplier): r"""Generate a set of lag times starting from 1 to maxlag, using the given multiplier between successive lags """
# determine lag times lags = [1] # build default lag list lag = 1.0 import decimal while lag <= maxlag: lag = lag*multiplier # round up, like python 2 lag = int(decimal.Decimal(lag).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP)) if lag <= maxlag: ilag = int(lag) lags.append(ilag) # always include the maximal requested lag time. if maxlag not in lags: lags.append(maxlag) return np.array(lags)
def combinations(seq, k):
    """ Return k length subsequences of elements from the input iterable.

    This version uses Numpy/Scipy and should be preferred over itertools.
    It avoids the creation of all intermediate Python objects.

    Examples
    --------
    [[0 1]
     [0 2]
     [1 2]]
    """
    from itertools import chain, combinations as _combinations
    from scipy.special import comb
    # number of rows in the result; lets fromiter pre-allocate exactly
    n_rows = comb(len(seq), k, exact=True)
    flat = chain.from_iterable(_combinations(seq, k))
    out = np.fromiter(flat, int, count=n_rows * k)
    return out.reshape(-1, k)
def folding_model_energy(rvec, rcut):
    r"""computes the potential energy at point rvec"""
    shifted = np.linalg.norm(rvec) - rcut
    squared = shifted * shifted
    # piecewise potential: -2.5 r^2 inside the cutoff, 0.5 (r - 2) r^2 outside
    return -2.5 * squared if shifted < 0.0 else 0.5 * (shifted - 2.0) * squared
def folding_model_gradient(rvec, rcut):
    r"""computes the potential's gradient at point rvec"""
    norm = np.linalg.norm(rvec)
    if norm == 0.0:
        # the radial direction is undefined at the origin; return zero there
        return np.zeros(rvec.shape)
    shifted = norm - rcut
    unit = rvec / norm
    if shifted < 0.0:
        return -5.0 * shifted * unit
    return (1.5 * shifted - 2.0) * unit
def get_asymmetric_double_well_data(nstep, x0=0., nskip=1, dt=0.01, kT=10.0, mass=1.0, damping=1.0):
    r"""wrapper for the asymmetric double well generator

    Builds an AsymmetricDoubleWell model and samples `nstep` steps from `x0`,
    keeping every `nskip`-th frame.
    """
    model = AsymmetricDoubleWell(dt, kT, mass=mass, damping=damping)
    return model.sample(x0, nstep, nskip=nskip)
def get_folding_model_data(
        nstep, rvec0=None, nskip=1, dt=0.01, kT=10.0,
        mass=1.0, damping=1.0, rcut=3.0):
    r"""wrapper for the folding model generator

    Builds a FoldingModel and samples `nstep` steps from `rvec0`, keeping
    every `nskip`-th frame.

    Parameters
    ----------
    nstep : int
        Number of steps to sample.
    rvec0 : ndarray(5) or None
        Initial position; defaults to the origin in five dimensions.
    """
    # the default used to be `rvec0=np.zeros((5))` — a mutable default
    # argument evaluated once and shared across all calls; create a fresh
    # array per call instead (behavior-identical for non-mutating samplers)
    if rvec0 is None:
        rvec0 = np.zeros(5)
    fm = FoldingModel(dt, kT, mass=mass, damping=damping, rcut=rcut)
    return fm.sample(rvec0, nstep, nskip=nskip)
def get_prinz_pot(nstep, x0=0., nskip=1, dt=0.01, kT=10.0, mass=1.0, damping=1.0):
    r"""wrapper for the Prinz model generator

    Builds a PrinzModel and samples `nstep` steps from `x0`, keeping every
    `nskip`-th frame.
    """
    model = PrinzModel(dt, kT, mass=mass, damping=damping)
    return model.sample(x0, nstep, nskip=nskip)
def step(self, x):
    r"""perform a single Brownian dynamics step

    x_new = x - coeff_A * grad(x) + coeff_B * gaussian_noise
    """
    drift = self.coeff_A * self.gradient(x)
    noise = self.coeff_B * np.random.normal(size=self.dim)
    return x - drift + noise
def plot_markov_model(
        P, pos=None, state_sizes=None, state_scale=1.0, state_colors='#ff5500',
        state_labels='auto', minflux=1e-6, arrow_scale=1.0, arrow_curvature=1.0,
        arrow_labels='weights', arrow_label_format='%2.e', max_width=12,
        max_height=12, figpadding=0.2, show_frame=False, ax=None, **textkwargs):
    r"""Network representation of MSM transition matrix

    Meant for small models with up to 10-20 states (e.g. obtained by a HMM
    coarse-graining); the automatic node positioning is very slow for large
    networks and may still look ugly.

    Parameters
    ----------
    P : ndarray(n,n) or MSM object with attribute 'transition matrix'
        Transition matrix or MSM object
    pos : ndarray(n,2), optional, default=None
        User-defined positions; if None, positions are computed automatically.
    state_sizes : ndarray(n), optional, default=None
        Relative disc areas per state; defaults to the stationary
        distribution of P.
    state_scale : float, optional, default=1.0
        Scaling of the state circles.
    state_colors : string, ndarray(n), or list, optional, default='#ff5500'
        A single hex code, grayscale values in [0,1], or one color per state.
    state_labels : list of strings, optional, default='auto'
        Labels shown at each node; 'auto' uses the state indices.
    minflux : float, optional, default=1e-6
        The minimal flux (p_i * p_ij) for a transition to be drawn.
    arrow_scale, arrow_curvature : float, optional, default=1.0
        Relative arrow width and curvature.
    arrow_labels : 'weights', None or ndarray(n,n) of strings, optional
        Strings placed upon arrows; 'weights' uses the elements of P.
    arrow_label_format : str, optional, default='%2.e'
        Numeric format for the arrow labels.
    max_width, max_height : int, optional, default=12
        Maximum figure width/height.
    figpadding : float, optional, default=0.2
        Relative figure size used for the padding.
    show_frame : boolean, optional, default=False
        Draw a frame around the network.
    ax : matplotlib Axes object, optional, default=None
        Axes to plot to; a new Axes (and Figure) is created when None.
    textkwargs : optional
        Text options for the state and arrow labels; 'size' and
        'arrow_label_size' are treated specially.

    Returns
    -------
    fig, pos : matplotlib.Figure, ndarray(n,2)
        The figure and the state positions, reusable for other network plots.
    """
    from msmtools import analysis as msmana
    # accept either a raw transition matrix or an MSM-like object
    if isinstance(P, _np.ndarray):
        P = P.copy()
    else:
        P = P.transition_matrix.copy()
    if state_sizes is None:
        state_sizes = msmana.stationary_distribution(P)
    if minflux > 0:
        # zero out transitions whose flux pi_i * p_ij falls below minflux
        flux_matrix = _np.dot(_np.diag(msmana.stationary_distribution(P)), P)
        P[flux_matrix < minflux] = 0.0
    plot = NetworkPlot(P, pos=pos, ax=ax)
    fig = plot.plot_network(
        state_sizes=state_sizes, state_scale=state_scale,
        state_colors=state_colors, state_labels=state_labels,
        arrow_scale=arrow_scale, arrow_curvature=arrow_curvature,
        arrow_labels=arrow_labels, arrow_label_format=arrow_label_format,
        max_width=max_width, max_height=max_height, figpadding=figpadding,
        xticks=False, yticks=False, show_frame=show_frame, **textkwargs)
    return fig, plot.pos
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_flux( flux, pos=None, state_sizes=None, flux_scale=1.0, state_scale=1.0, state_colors='#ff5500', state_labels='auto', minflux=1e-9, arrow_scale=1.0, arrow_curvature=1.0, arrow_labels='weights', arrow_label_format='%2.e', max_width=12, max_height=12, figpadding=0.2, attribute_to_plot='net_flux', show_frame=False, show_committor=True, ax=None, **textkwargs): r"""Network representation of reactive flux This visualization is not optimized for large fluxes. It is meant to be used for the visualization of small models with up to 10-20 states, e.g. obtained by a PCCA-based coarse-graining of the full flux. If used with large network, the automatic node positioning will be very slow and may still look ugly. Parameters flux : :class:`ReactiveFlux <pyemma.msm.flux.ReactiveFlux>` reactive flux object pos : ndarray(n,2), optional, default=None User-defined positions to draw the states on. If not given, will set the x coordinates equal to the committor probability and try to place the y coordinates automatically state_sizes : ndarray(n), optional, default=None User-defined areas of the discs drawn for each state. If not given, the stationary probability of P will be used flux_scale : float, optional, default=1.0 scaling of the flux values state_scale : float, optional, default=1.0 scaling of the state circles state_colors : string, ndarray(n), or list, optional, default='#ff5500' (orange) string : a Hex code for a single color used for all states array : n values in [0,1] which will result in a grayscale plot list : of len = nstates, with a color for each state. The list can mix strings, RGB values and hex codes, e.g. :py:obj:`state_colors` = ['g', 'red', [.23, .34, .35], '#ff5500'] is possible. 
state_labels : list of strings, optional, default is 'auto' A list with a label for each state, to be displayed at the center of each node/state. If left to 'auto', the labels are automatically set to the state indices. minflux : float, optional, default=1e-9 The minimal flux for a transition to be drawn arrow_scale : float, optional, default=1.0 Relative arrow scale. Set to a value different from 1 to increase or decrease the arrow width. arrow_curvature : float, optional, default=1.0 Relative arrow curvature. Set to a value different from 1 to make arrows more or less curved. arrow_labels : 'weights', None or a ndarray(n,n) with label strings. Optional, default='weights' Strings to be placed upon arrows. If None, no labels will be used. If 'weights', the elements of P will be used. If a matrix of strings is given by the user these will be used. arrow_label_format : str, optional, default='%10.2f' The numeric format to print the arrow labels max_width : int (default = 12) The maximum figure width max_height: int (default = 12) The maximum figure height figpadding: float (default = 0.2) The relative figure size used for the padding attribute_to_plot : str, optional, default='net_flux' specify the attribute of the flux object to plot. show_frame: boolean (default=False) Draw a frame around the network. show_committor: boolean (default=False) Print the committor value on the x-axis. ax : matplotlib Axes object, optional, default=None The axes to plot to. When set to None a new Axes (and Figure) object will be used. textkwargs : optional argument for the text of the state and arrow labels. See http://matplotlib.org/api/text_api.html#matplotlib.text.Text for more info. The parameter 'size' refers to the size of the state and arrow labels and overwrites the matplotlib default. 
The parameter 'arrow_label_size' is only used for the arrow labels; please note that 'arrow_label_size' is not part of matplotlib.text.Text's set of parameters and will raise an exception when passed to matplotlib.text.Text directly. Returns ------- (fig, pos) : matpotlib.Figure instance, ndarray Axes instances containing the plot. Use pyplot.show() to display it. The positions of states. Can be used later to plot a different network representation (e.g. the flux). Examples -------- We define first define a reactive flux by taking the following transition matrix and computing TPT from state 2 to 3 Scale the flux by 100 is basically a change of units to get numbers close to 1 (avoid printing many zeros). Now we visualize the flux: """
from matplotlib import pylab as plt
# quantity to visualize (e.g. net_flux), rescaled by flux_scale to get
# numbers in a friendlier range
F = flux_scale * getattr(flux, attribute_to_plot)
# committor values serve as fixed x-coordinates of the states
c = flux.committor
if state_sizes is None:
    # default: disc area proportional to the stationary probability
    state_sizes = flux.stationary_distribution
plot = NetworkPlot(F, pos=pos, xpos=c, ax=ax)
if minflux > 0:
    # suppress arrows for fluxes below the drawing threshold
    I, J = _np.where(F < minflux)
    F[I, J] = 0.0
if isinstance(state_labels, str) and state_labels == 'auto':
    # the first and last element correspond to A and B in ReactiveFlux
    state_labels = _np.array([str(i) for i in range(flux.nstates)])
    state_labels[_np.array(flux.A)] = "A"
    state_labels[_np.array(flux.B)] = "B"
elif isinstance(state_labels, (_np.ndarray, list, tuple)):
    # user-provided labels: must cover every state
    if len(state_labels) != flux.nstates:
        raise ValueError("length of state_labels({}) has to match length of states({})."
                         .format(len(state_labels), flux.nstates))
fig = plot.plot_network(
    state_sizes=state_sizes, state_scale=state_scale, state_colors=state_colors,
    state_labels=state_labels, arrow_scale=arrow_scale, arrow_curvature=arrow_curvature,
    arrow_labels=arrow_labels, arrow_label_format=arrow_label_format, max_width=max_width,
    max_height=max_height, figpadding=figpadding, xticks=show_committor, yticks=False,
    show_frame=show_frame, **textkwargs)
if show_committor:
    plt.xlabel('Committor probability')
return fig, plot.pos
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_network( weights, pos=None, xpos=None, ypos=None, state_sizes=None, state_scale=1.0, state_colors='#ff5500', state_labels='auto', arrow_scale=1.0, arrow_curvature=1.0, arrow_labels='weights', arrow_label_format='%2.e', max_width=12, max_height=12, figpadding=0.2, attribute_to_plot='net_flux', show_frame=False, xticks=False, yticks=False, ax=None, **textkwargs): r"""Network representation of given matrix This visualization is not optimized for large networks. It is meant to be used for the visualization of small models with up to 10-20 states. If used with large network, the automatic node positioning will be very slow and may still look ugly. Parameters weights : ndarray(n, n) weight matrix pos : ndarray(n,2), optional, default=None User-defined positions to draw the states on. xpos : ndarray(n,), optional, default=None Fixes the x positions while the y positions are optimized ypos : ndarray(n,), optional, default=None Fixes the y positions while the x positions are optimized state_sizes : ndarray(n), optional, default=None User-defined areas of the discs drawn for each state. If not given, the stationary probability of P will be used state_colors : string, ndarray(n), or list, optional, default='#ff5500' (orange) string : a Hex code for a single color used for all states array : n values in [0,1] which will result in a grayscale plot list : of len = nstates, with a color for each state. The list can mix strings, RGB values and hex codes, e.g. :py:obj:`state_colors` = ['g', 'red', [.23, .34, .35], '#ff5500'] is possible. state_labels : list of strings, optional, default is 'auto' A list with a label for each state, to be displayed at the center of each node/state. If left to 'auto', the labels are automatically set to the state indices. arrow_scale : float, optional, default=1.0 Relative arrow scale. 
Set to a value different from 1 to increase or decrease the arrow width. arrow_curvature : float, optional, default=1.0 Relative arrow curvature. Set to a value different from 1 to make arrows more or less curved. arrow_labels : 'weights', None or a ndarray(n,n) with label strings. Optional, default='weights' Strings to be placed upon arrows. If None, no labels will be used. If 'weights', the elements of P will be used. If a matrix of strings is given by the user these will be used. arrow_label_format : str, optional, default='%10.2f' The numeric format to print the arrow labels max_width : int (default = 12) The maximum figure width max_height: int (default = 12) The maximum figure height figpadding: float (default = 0.2) The relative figure size used for the padding show_frame: boolean (default=False) Draw a frame around the network. xticks: boolean (default=False) Show x ticks yticks: boolean (default=False) Show y ticks ax : matplotlib Axes object, optional, default=None The axes to plot to. When set to None a new Axes (and Figure) object will be used. textkwargs : optional argument for the text of the state and arrow labels. See http://matplotlib.org/api/text_api.html#matplotlib.text.Text for more info. The parameter 'size' refers to the size of the state and arrow labels and overwrites the matplotlib default. The parameter 'arrow_label_size' is only used for the arrow labels; please note that 'arrow_label_size' is not part of matplotlib.text.Text's set of parameters and will raise an exception when passed to matplotlib.text.Text directly. Returns ------- (fig, pos) : matpotlib.Figure instance, ndarray Axes instances containing the plot. Use pyplot.show() to display it. The positions of states. Can be used later to plot a different network representation (e.g. the flux). 
Examples -------- We define first define a reactive flux by taking the following transition matrix and computing TPT from state 2 to 3 Scale the flux by 100 is basically a change of units to get numbers close to 1 (avoid printing many zeros). Now we visualize the flux: """
plot = NetworkPlot(weights, pos=pos, xpos=xpos, ypos=ypos, ax=ax) fig = plot.plot_network( state_sizes=state_sizes, state_scale=state_scale, state_colors=state_colors, state_labels=state_labels, arrow_scale=arrow_scale, arrow_curvature=arrow_curvature, arrow_labels=arrow_labels, arrow_label_format=arrow_label_format, max_width=max_width, max_height=max_height, figpadding=figpadding, xticks=xticks, yticks=yticks, show_frame=show_frame, **textkwargs) return fig, plot.pos
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_csets_TRAM( connectivity, state_counts, count_matrices, equilibrium_state_counts=None, ttrajs=None, dtrajs=None, bias_trajs=None, nn=None, factor=1.0, callback=None): r""" Computes the largest connected sets in the produce space of Markov state and thermodynamic states for TRAM data. Parameters connectivity : string one of None, 'reversible_pathways', 'post_hoc_RE' or 'BAR_variance', 'neighbors', 'summed_count_matrix' or None. Selects the algorithm for measuring overlap between thermodynamic and Markov states. * 'reversible_pathways' : requires that every state in the connected set can be reached by following a pathway of reversible transitions. A reversible transition between two Markov states (within the same thermodynamic state k) is a pair of Markov states that belong to the same strongly connected component of the count matrix (from thermodynamic state k). A pathway of reversible transitions is a list of (i_(N-1), i_N)]. The thermodynamic state where the reversible transitions happen, is ignored in constructing the reversible pathways. This is equivalent to assuming that two ensembles overlap at some Markov state whenever there exist frames from both ensembles in that Markov state. * 'largest' : alias for reversible_pathways * 'post_hoc_RE' : similar to 'reversible_pathways' but with a more strict requirement for the overlap between thermodynamic states. It is required that every state in the connected set can be reached by following a pathway of reversible transitions or jumping between overlapping thermodynamic states while staying in the same Markov state. A reversible transition between two Markov states (within the same thermodynamic state k) is a pair of Markov states that belong to the same strongly connected component of the count matrix (from thermodynamic state k). 
Two thermodynamic states k and l are defined to overlap at Markov state n if a replica exchange simulation [2]_ restricted to state n would show at least one transition from k to l or one transition from from l to k. The expected number of replica exchanges is estimated from the simulation data. The minimal number required of replica exchanges per Markov state can be increased by decreasing `connectivity_factor`. * 'BAR_variance' : like 'post_hoc_RE' but with a different condition to define the thermodynamic overlap based on the variance of the BAR estimator [3]_. Two thermodynamic states k and l are defined to overlap at Markov state n if the variance of the free energy difference Delta f_{kl} computed with BAR (and restricted to conformations form Markov state n) is less or equal than one. The minimally required variance can be controlled with `connectivity_factor`. * 'neighbors' : like 'post_hoc_RE' or 'BAR_variance' but assume a overlap between "neighboring" thermodynamic states. It is assumed that the data comes from an Umbrella sampling simulation and the number of the thermodynamic state matches the position of the Umbrella along the order parameter. The overlap of thermodynamic states k and l within Markov state n is set according to the value of nn; if there are samples in both product-space states (k,n) and (l,n) and |l-n|<=nn, the states are overlapping. * 'summed_count_matrix' : all thermodynamic states are assumed to overlap. The connected set is then computed by summing the count matrices over all thermodynamic states and taking it's largest strongly connected set. Not recommended! * None : assume that everything is connected. For debugging. state_counts : numpy.ndarray((T, M), dtype=numpy.intc) Number of visits to the combinations of thermodynamic state t and Markov state m count_matrices : numpy.ndarray((T, M, M), dtype=numpy.intc) Count matrices for all T thermodynamic states. 
equilibrium_state_counts : numpy.dnarray((T, M)), optional Number of visits to the combinations of thermodynamic state t and Markov state m in the equilibrium data (for use with TRAMMBAR). ttrajs : list of numpy.ndarray(X_i, dtype=numpy.intc), optional List of generating thermodynamic state trajectories. dtrajs : list of numpy.ndarray(X_i, dtype=numpy.intc), optional List of configurational state trajectories (disctrajs). bias_trajs : list of numpy.ndarray((X_i, T), dtype=numpy.float64), optional List of bias energy trajectories. The last three parameters are only required for connectivity = 'post_hoc_RE' or connectivity = 'BAR_variance'. nn : int, optional Number of neighbors that are assumed to overlap when connectivity='neighbors' factor : int, default=1.0 scaling factor used for connectivity = 'post_hoc_RE' or 'BAR_variance'. Values greater than 1.0 weaken the connectivity conditions. For 'post_hoc_RE' this multiplies the number of hypothetically observed transitions. For 'BAR_variance' this scales the threshold for the minimal allowed variance of free energy differences. Returns ------- csets, projected_cset csets : list of ndarrays((X_i,), dtype=int) List indexed by thermodynamic state. Every element csets[k] is the largest connected set at thermodynamic state k. projected_cset : ndarray(M, dtype=int) The overall connected set. This is the union of the individual connected sets of the thermodynamic states. References: [1]_ Hukushima et al, Exchange Monte Carlo method and application to spin glass simulations, J. Phys. Soc. Jan. 65, 1604 (1996) [2]_ Shirts and Chodera, Statistically optimal analysis of samples from multiple equilibrium states, J. Chem. Phys. 129, 124105 (2008) """
return _compute_csets( connectivity, state_counts, count_matrices, ttrajs, dtrajs, bias_trajs, nn=nn, equilibrium_state_counts=equilibrium_state_counts, factor=factor, callback=callback)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_csets_dTRAM(connectivity, count_matrices, nn=None, callback=None): r""" Computes the largest connected sets for dTRAM data. Parameters connectivity : string one 'reversible_pathways', 'neighbors', 'summed_count_matrix' or None. Selects the algorithm for measuring overlap between thermodynamic and Markov states. * 'reversible_pathways' : requires that every state in the connected set can be reached by following a pathway of reversible transitions. A reversible transition between two Markov states (within the same thermodynamic state k) is a pair of Markov states that belong to the same strongly connected component of the count matrix (from thermodynamic state k). A pathway of reversible transitions is a list of (i_(N-1), i_N)]. The thermodynamic state where the reversible transitions happen, is ignored in constructing the reversible pathways. This is equivalent to assuming that two ensembles overlap at some Markov state whenever there exist frames from both ensembles in that Markov state. * 'largest' : alias for reversible_pathways * 'neighbors' : similar to 'reversible_pathways' but with a more strict requirement for the overlap between thermodynamic states. It is required that every state in the connected set can be reached by following a pathway of reversible transitions or jumping between overlapping thermodynamic states while staying in the same Markov state. A reversible transition between two Markov states (within the same thermodynamic state k) is a pair of Markov states that belong to the same strongly connected component of the count matrix (from thermodynamic state k). It is assumed that the data comes from an Umbrella sampling simulation and the number of the thermodynamic state matches the position of the Umbrella along the order parameter. 
The overlap of thermodynamic states k and l within Markov state n is set according to the value of nn; if there are samples in both product-space states (k,n) and (l,n) and |l-n|<=nn, the states are overlapping. * 'summed_count_matrix' : all thermodynamic states are assumed to overlap. The connected set is then computed by summing the count matrices over all thermodynamic states and taking it's largest strongly connected set. Not recommended! * None : assume that everything is connected. For debugging. count_matrices : numpy.ndarray((T, M, M)) Count matrices for all T thermodynamic states. nn : int or None, optional Number of neighbors that are assumed to overlap when connectivity='neighbors' Returns ------- csets, projected_cset csets : list of numpy.ndarray((M_prime_k,), dtype=int) List indexed by thermodynamic state. Every element csets[k] is the largest connected set at thermodynamic state k. projected_cset : numpy.ndarray(M_prime, dtype=int) The overall connected set. This is the union of the individual connected sets of the thermodynamic states. """
if connectivity=='post_hoc_RE' or connectivity=='BAR_variance': raise Exception('Connectivity type %s not supported for dTRAM data.'%connectivity) state_counts = _np.maximum(count_matrices.sum(axis=1), count_matrices.sum(axis=2)) return _compute_csets( connectivity, state_counts, count_matrices, None, None, None, nn=nn, callback=callback)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _indexes(arr): """ Returns the list of all indexes of the given array. Currently works for one and two-dimensional arrays """
myarr = np.array(arr) if myarr.ndim == 1: return list(range(len(myarr))) elif myarr.ndim == 2: return tuple(itertools.product(list(range(arr.shape[0])), list(range(arr.shape[1])))) else: raise NotImplementedError('Only supporting arrays of dimension 1 and 2 as yet.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _column(arr, indexes): """ Returns a column with given indexes from a deep array For example, if the array is a matrix and indexes is a single int, will return arr[:,indexes]. If the array is an order 3 tensor and indexes is a pair of ints, will return arr[:,indexes[0],indexes[1]], etc. """
if arr.ndim == 2 and types.is_int(indexes): return arr[:, indexes] elif arr.ndim == 3 and len(indexes) == 2: return arr[:, indexes[0], indexes[1]] else: raise NotImplementedError('Only supporting arrays of dimension 2 and 3 as yet.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def statistical_inefficiency(X, truncate_acf=True): """ Estimates the statistical inefficiency from univariate time series X The statistical inefficiency [1]_ is a measure of the correlatedness of samples in a signal. Given a signal :math:`{x_t}` with :math:`N` samples and statistical inefficiency :math:`I \in (0,1]`, there are only :math:`I \cdot N` effective or uncorrelated samples in the signal. This means that :math:`I \cdot N` should be used in order to compute statistical uncertainties. See [2]_ for a review. The statistical inefficiency is computed as :math:`I = (2 \tau)^{-1}` using the damped autocorrelation time ..1: \tau = \frac{1}{2}+\sum_{K=1}^{N} A(k) \left(1-\frac{k}{N}\right) where ..1: A(k) = \frac{\langle x_t x_{t+k} \rangle_t - \langle x^2 \rangle_t}{\mathrm{var}(x)} is the autocorrelation function of the signal :math:`{x_t}`, which is computed either for a single or multiple trajectories. Parameters X : float array or list of float arrays Univariate time series (single or multiple trajectories) truncate_acf : bool, optional, default=True When the normalized autocorrelation function passes through 0, it is truncated in order to avoid integrating random noise References .. [1] Anderson, T. W.: The Statistical Analysis of Time Series (Wiley, New York, 1971) .. [2] Janke, W: Statistical Analysis of Simulations: Data Correlations and Error Estimation Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms, Lecture Notes, J. Grotendorst, D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for Computing, Juelich NIC Series 10, pp. 423-445, 2002. """
# check input assert np.ndim(X[0]) == 1, 'Data must be 1-dimensional' N = _maxlength(X) # length # mean-free data xflat = np.concatenate(X) Xmean = np.mean(xflat) X0 = [x-Xmean for x in X] # moments x2m = np.mean(xflat ** 2) # integrate damped autocorrelation corrsum = 0.0 for lag in range(N): acf = 0.0 n = 0.0 for x in X0: Nx = len(x) # length of this trajectory if (Nx > lag): # only use trajectories that are long enough acf += np.sum(x[0:Nx-lag] * x[lag:Nx]) n += float(Nx-lag) acf /= n if acf <= 0 and truncate_acf: # zero autocorrelation. Exit break elif lag > 0: # start integrating at lag 1 (effect of lag 0 is contained in the 0.5 below corrsum += acf * (1.0 - (float(lag)/float(N))) # compute damped correlation time corrtime = 0.5 + corrsum / x2m # return statistical inefficiency return 1.0 / (2 * corrtime)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _database_from_key(self, key): """ gets the database name for the given key. Should ensure a uniform spread of keys over the databases in order to minimize waiting times. Since the database has to be locked for updates and multiple processes want to write, each process has to wait until the lock has been released. By default the LRU databases will be stored in a sub directory "traj_info_usage" lying next to the main database. :param key: hash of the TrajInfo instance :return: str, database path """
if not self.filename: return None from pyemma.util.files import mkdir_p hash_value_long = int(key, 16) # bin hash to one of either 10 different databases # TODO: make a configuration parameter out of this number db_name = str(hash_value_long)[-1] + '.db' directory = os.path.dirname(self.filename) + os.path.sep + 'traj_info_usage' mkdir_p(directory) return os.path.join(directory, db_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _clean(self, n): """ obtain n% oldest entries by looking into the usage databases. Then these entries are deleted first from the traj_info db and afterwards from the associated LRU dbs. :param n: delete n% entries in traj_info db [and associated LRU (usage) dbs]. """
# delete the n% oldest entries in the database
import sqlite3
num_delete = int(self.num_entries / 100.0 * n)
logger.debug("removing %i entries from db" % num_delete)
# map each stored hash to the LRU (usage) database holding its timestamp
lru_dbs = self._database.execute("select hash, lru_db from traj_info").fetchall()
lru_dbs.sort(key=itemgetter(1))
hashs_by_db = {}
age_by_hash = []
for k, v in itertools.groupby(lru_dbs, key=itemgetter(1)):
    hashs_by_db[k] = list(x[0] for x in v)

# debug: distribution of entries over the usage databases
len_by_db = {os.path.basename(db): len(hashs_by_db[db]) for db in hashs_by_db.keys()}
logger.debug("distribution of lru: %s", str(len_by_db))
### end dbg

# collect (hash, timestamp, db) triples from all usage databases
for db in hashs_by_db.keys():
    with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
        rows = conn.execute("select hash, last_read from usage").fetchall()
        for r in rows:
            age_by_hash.append((r[0], float(r[1]), db))

# sort by age, oldest first
age_by_hash.sort(key=itemgetter(1))
if len(age_by_hash) >= 2:
    # sanity check: after sorting, the last entry is the most recent one.
    # Bug fix: the original wrote "assert[...]", which asserted the truth of
    # a one-element list and therefore could never fail.
    assert age_by_hash[-1][1] >= age_by_hash[-2][1]

# entries scheduled for deletion: the num_delete oldest ones
to_delete = age_by_hash[:num_delete]
ids = tuple(str(entry[0]) for entry in to_delete)
sql_compatible_ids = SqliteDB._format_tuple_for_sql(ids)
with self._database as c:
    c.execute("DELETE FROM traj_info WHERE hash in (%s)" % sql_compatible_ids)

# Iterate over all LRU databases and delete those ids we've just deleted from
# the main db. Do this within the same execution block of the main database,
# because we do not want the entry to be deleted in case of a subsequent failure.
# Bug fix: the original grouped over *all* entries here and thus also removed
# usage rows for hashes that still exist in traj_info; only the deleted slice
# is purged now, matching the comment's stated intent.
to_delete.sort(key=itemgetter(2))
for db, values in itertools.groupby(to_delete, key=itemgetter(2)):
    values = tuple(v[0] for v in values)
    with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
        stmnt = "DELETE FROM usage WHERE hash IN (%s)" \
                % SqliteDB._format_tuple_for_sql(values)
        curr = conn.execute(stmnt)
        assert curr.rowcount == len(values), curr.rowcount
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def log_likelihood(self): r""" Returns the value of the log-likelihood of the converged TRAM estimate. """
# TODO: check that we are estimated... return _tram.log_likelihood_lower_bound( self.log_lagrangian_mult, self.biased_conf_energies, self.count_matrices, self.btrajs, self.dtrajs, self.state_counts, None, None, None, None, None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def histogram(transform, dimensions, nbins):
    '''Computes the N-dimensional histogram of the transformed data.

    Parameters
    ----------
    transform : pyemma.coordinates.transfrom.Transformer object
        transform that provides the input data
    dimensions : tuple of indices
        indices of the dimensions you want to examine
    nbins : tuple of ints
        number of bins along each dimension

    Returns
    -------
    counts : (bins[0],bins[1],...) ndarray of ints
        counts compatible with pyplot.pcolormesh and pyplot.bar
    edges : list of (bins[i]) ndarrays
        bin edges compatible with pyplot.pcolormesh and pyplot.bar,
        see below.

    Examples
    --------

    >>> import matplotlib.pyplot as plt # doctest: +SKIP

    Only for ipython notebook

    >> %matplotlib inline  # doctest: +SKIP

    >>> counts, edges=histogram(transform, dimensions=(0,1), nbins=(20, 30)) # doctest: +SKIP
    >>> plt.pcolormesh(edges[0], edges[1], counts.T) # doctest: +SKIP
    >>> counts, edges=histogram(transform, dimensions=(1,), nbins=(50,)) # doctest: +SKIP
    >>> plt.bar(edges[0][:-1], counts, width=edges[0][1:]-edges[0][:-1]) # doctest: +SKIP
    '''
    # First pass over the data: find per-dimension minima and maxima.
    lo = np.full(len(dimensions), np.inf)
    hi = np.full(len(dimensions), -np.inf)
    for _, chunk in transform:
        selected = chunk[:, dimensions]
        lo = np.minimum(lo, selected.min(axis=0))
        hi = np.maximum(hi, selected.max(axis=0))
    # Bin edges span [min, max] in each requested dimension; nbins[i] edges
    # give nbins[i]-1 bins per dimension.
    edges = [np.linspace(a, b, num=k) for a, b, k in zip(lo, hi, nbins)]
    counts = np.zeros([k - 1 for k in nbins])
    # Second pass: accumulate the histogram chunk by chunk.
    for _, chunk in transform:
        chunk_counts, _ = np.histogramdd(chunk[:, dimensions], bins=edges)
        counts += chunk_counts
    return counts, edges
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, filename=None): """ load runtime configuration from given filename. If filename is None try to read from default file from default location. """
if not filename:
    filename = self.default_config_file
files = self._cfgs_to_read()
# Append the requested file last: files read later override values set by
# files read earlier, so this file wins over every default.
# Bug fix: the original used files.insert(-1, filename), which places the
# file *before* the final entry and contradicts the comment's stated intent
# ("insert last, so it will override all values").
files.append(filename)
try:
    config = self.__read_cfg(files)
except ReadConfigException as e:
    # reading failed: report, but keep the previously loaded values intact
    print(Config._format_msg('config.load("{file}") failed with {error}'.format(file=filename, error=e)))
else:
    self._conf_values = config

# notice user?
if self.show_config_notification and not self.cfg_dir:
    print(Config._format_msg("no configuration directory set or usable."
                             " Falling back to defaults."))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, filename=None): """ Saves the runtime configuration to disk. Parameters filename: str or None, default=None writeable path to configuration filename. If None, use default location and filename. """
if not filename: filename = self.DEFAULT_CONFIG_FILE_NAME else: filename = str(filename) # try to extract the path from filename and use is as cfg_dir head, tail = os.path.split(filename) if head: self._cfg_dir = head # we are search for .cfg files in cfg_dir so make sure it contains the proper extension. base, ext = os.path.splitext(tail) if ext != ".cfg": filename += ".cfg" # if we have no cfg dir, try to create it first. Return if it failed. if not self.cfg_dir or not os.path.isdir(self.cfg_dir) or not os.stat(self.cfg_dir) != os.W_OK: try: self.cfg_dir = self.DEFAULT_CONFIG_DIR except ConfigDirectoryException as cde: print(Config._format_msg('Could not create configuration directory "{dir}"! config.save() failed.' ' Please set a writeable location with config.cfg_dir = val. Error was {exc}' .format(dir=self.cfg_dir, exc=cde))) return filename = os.path.join(self.cfg_dir, filename) try: with open(filename, 'w') as fh: self._conf_values.write(fh) except IOError as ioe: print(Config._format_msg("Save failed with error %s" % ioe))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def default_config_file(self): """ default config file living in PyEMMA package """
import os.path as p import pyemma return p.join(pyemma.__path__[0], Config.DEFAULT_CONFIG_FILE_NAME)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def default_logging_file(self): """ default logging configuration"""
import os.path as p import pyemma return p.join(pyemma.__path__[0], Config.DEFAULT_LOGGING_FILE_NAME)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cfg_dir(self, pyemma_cfg_dir): """ Sets PyEMMAs configuration directory. Also creates it with some default files, if does not exists. """
# create the directory (including parents) on first use
if not os.path.exists(pyemma_cfg_dir):
    try:
        mkdir_p(pyemma_cfg_dir)
    except NotADirectoryError:  # on Python 3
        raise ConfigDirectoryException("pyemma cfg dir (%s) is not a directory" % pyemma_cfg_dir)
    except EnvironmentError:
        raise ConfigDirectoryException("could not create configuration directory '%s'" % pyemma_cfg_dir)

# validate: must be an existing, writeable directory
if not os.path.isdir(pyemma_cfg_dir):
    raise ConfigDirectoryException("%s is no valid directory" % pyemma_cfg_dir)
if not os.access(pyemma_cfg_dir, os.W_OK):
    raise ConfigDirectoryException("%s is not writeable" % pyemma_cfg_dir)

# give user the default cfg file, if its not there
self.__copy_default_files_to_cfg_dir(pyemma_cfg_dir)
self._cfg_dir = pyemma_cfg_dir

# tell the user how to make the change permanent
if self.show_config_notification:
    stars = '*' * 80
    print(stars, '\n',
          'Changed PyEMMAs config directory to "{dir}".\n'
          'To make this change permanent, export the environment variable'
          ' "PYEMMA_CFG_DIR" \nto point to this location. Eg. edit your .bashrc file!'
          .format(dir=pyemma_cfg_dir), '\n', stars, sep='')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def logging_config(self): """ currently used logging configuration file. Can not be changed during runtime. """
cfg = self._conf_values.get('pyemma', 'logging_config') if cfg == 'DEFAULT': cfg = os.path.join(self.cfg_dir, Config.DEFAULT_LOGGING_FILE_NAME) return cfg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _cfgs_to_read(self): """ reads config files from various locations to build final config. """
# use these files to extend/overwrite the conf_values. # Last red file always overwrites existing values! cfg = Config.DEFAULT_CONFIG_FILE_NAME filenames = [ self.default_config_file, cfg, # conf_values in current directory os.path.join(os.path.expanduser('~' + os.path.sep), cfg), # config in user dir '.pyemma.cfg', ] # look for user defined files if self.cfg_dir: from glob import glob filenames.extend(glob(self.cfg_dir + os.path.sep + "*.cfg")) return filenames
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _calculate_new_overlap(stride, traj_len, skip): """ Given two trajectories T_1 and T_2, this function calculates for the first trajectory an overlap, i.e., a skip parameter for T_2 such that the trajectory fragments T_1 and T_2 appear as one under the given stride. Idea for deriving the formula: It is K = ((traj_len - skip - 1) // stride + 1) = #(data points in trajectory of length (traj_len - skip)). Therefore, the first point's position that is not contained in T_1 anymore is given by pos = skip + s * K. Thus the needed skip of T_2 such that the same stride parameter makes T_1 and T_2 "look as one" is overlap = pos - traj_len. :param stride: the (global) stride parameter :param traj_len: length of T_1 :param skip: skip of T_1 :return: skip of T_2 """
overlap = stride * ((traj_len - skip - 1) // stride + 1) - traj_len + skip return overlap
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def assert_allclose(actual, desired, rtol=1.e-5, atol=1.e-8, err_msg='', verbose=True): r"""wrapper for numpy.testing.allclose with default tolerances of numpy.allclose. Needed since testing method has different values."""
# Forward to numpy.testing.assert_allclose; this wrapper exists only to pin
# the default tolerances to those of numpy.allclose (rtol=1e-5, atol=1e-8),
# which differ from numpy.testing's own defaults.
return assert_allclose_np(actual, desired, rtol=rtol, atol=atol, err_msg=err_msg, verbose=verbose)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def topology_to_numpy(top): """Convert this topology into a pandas dataframe Returns ------- atoms : np.ndarray dtype=[("serial", 'i4'), ("name", 'S4'), ("element", 'S3'), ("resSeq", 'i4'), ("resName",'S4'), ("chainID", 'i4'), ("segmentID", 'S4')] The atoms in the topology, represented as a data frame. bonds : np.ndarray The bonds in this topology, represented as an n_bonds x 2 array of the indices of the atoms involved in each bond. """
# one record per atom, columns following the wwPDB 3.0 naming convention
data = [(atom.serial, atom.name, atom.element.symbol,
         atom.residue.resSeq, atom.residue.name,
         atom.residue.chain.index, atom.segment_id) for atom in top.atoms]
atoms = np.array(data, dtype=[("serial", 'i4'), ("name", 'S4'), ("element", 'S3'),
                              ("resSeq", 'i4'), ("resName", 'S4'), ("chainID", 'i4'),
                              ("segmentID", 'S4')])
# bonds as an (n_bonds,) structured array of (i4, i4) atom-index pairs
bonds = np.fromiter(((a.index, b.index) for (a, b) in top.bonds),
                    dtype='i4,i4', count=top.n_bonds)
return atoms, bonds
def topology_from_numpy(atoms, bonds=None):
    """Create an mdtraj Topology from structured numpy arrays.

    Parameters
    ----------
    atoms : np.ndarray
        Structured array with fields "serial", "name", "element", "resSeq",
        "resName", "chainID" and optionally "segmentID" (wwPDB 3.0
        conventions), one row per atom.
    bonds : np.ndarray, shape=(n_bonds, 2), dtype=int, optional
        Atom index pairs, one row per bond. Optional; standard protein bonds
        can be created afterwards with ``create_standard_bonds``.

    Returns
    -------
    mdtraj.Topology

    See Also
    --------
    create_standard_bonds
    """
    if bonds is None:
        bonds = np.zeros((0, 2))
    for col in ("name", "element", "resSeq", "resName", "chainID", "serial"):
        if col not in atoms.dtype.names:
            raise ValueError('dataframe must have column %s' % col)
    if "segmentID" not in atoms.dtype.names:
        # NOTE(review): numpy structured arrays cannot grow new fields via
        # assignment, so this line presumably raises for inputs without a
        # "segmentID" field -- confirm intended behavior.
        atoms["segmentID"] = ""
    from mdtraj.core.topology import Atom
    from mdtraj.core import element as elem
    out = mdtraj.Topology()
    # TODO: allow for h5py data sets here, is there a way to check a generic
    # ndarray interface?
    out._atoms = [None for _ in range(len(atoms))]
    all_indices = np.arange(0, len(atoms))
    for chain_id in np.unique(atoms['chainID']):
        in_chain = atoms['chainID'] == chain_id
        chain_atoms = atoms[in_chain]
        chain_indices = all_indices[in_chain]
        chain = out.add_chain()
        for res_seq in np.unique(chain_atoms['resSeq']):
            in_residue = chain_atoms['resSeq'] == res_seq
            residue_atoms = chain_atoms[in_residue]
            indices = all_indices[chain_indices[in_residue]]
            res_names = residue_atoms['resName']
            residue_name = np.array(res_names)[0]
            segment_id = np.array(residue_atoms['segmentID'])[0]
            if not np.all(res_names == residue_name):
                raise ValueError('All of the atoms with residue index %d '
                                 'do not share the same residue name' % res_seq)
            residue = out.add_residue(residue_name.decode('ascii'), chain,
                                      res_seq, segment_id.decode('ascii'))
            for pos, atom_row in enumerate(residue_atoms):
                symbol = atom_row['element'].decode('ascii')
                atom = Atom(atom_row['name'].decode('ascii'),
                            elem.get_by_symbol(symbol),
                            int(indices[pos]), residue,
                            serial=atom_row['serial'])
                out._atoms[indices[pos]] = atom
                residue._atoms.append(atom)
    for ai1, ai2 in bonds:
        out.add_bond(out.atom(ai1), out.atom(ai2))
    out._numAtoms = out.n_atoms
    return out
def read_discrete_trajectory(filename):
    """Read a discrete trajectory from an ascii file.

    The file is expected to contain a single column of integer entries, which
    is read into an integer array.

    Parameters
    ----------
    filename : str
        Path (full or relative) to the discrete state trajectory file.

    Returns
    -------
    dtraj : (M,) ndarray
        Discrete state trajectory.
    """
    # np.fromstring is deprecated for text parsing; split on whitespace and
    # convert explicitly instead.
    with open(filename, "r") as f:
        dtraj = np.array([int(token) for token in f.read().split()], dtype=int)
    return dtraj
def write_discrete_trajectory(filename, dtraj):
    r"""Write a discrete trajectory to an ascii file.

    The trajectory is stored as a single column of integer entries.

    Parameters
    ----------
    filename : str
        Path (full or relative) of the output file.
    dtraj : array-like
        Discrete state trajectory.
    """
    data = np.asarray(dtraj)
    with open(filename, 'w') as fh:
        data.tofile(fh, sep='\n', format='%d')
def save_discrete_trajectory(filename, dtraj):
    r"""Write a discrete trajectory to a binary file.

    The trajectory is stored as an integer ndarray in numpy .npy format.

    Parameters
    ----------
    filename : str
        Path (full or relative) of the output file.
    dtraj : array-like
        Discrete state trajectory.
    """
    np.save(filename, np.asarray(dtraj))
def count_states(dtrajs, ignore_negative=False):
    r"""Histogram of state occurrences over one or more discrete trajectories.

    Parameters
    ----------
    dtrajs : array_like or list of array_like
        Discretized trajectory or list of discretized trajectories.
    ignore_negative : bool, default=False
        Drop negative elements before counting. By default a negative element
        causes an exception (np.bincount rejects negative values).

    Returns
    -------
    count : ndarray((n,), dtype=int)
        Number of occurrences of each state, with n = max + 1 where max is
        the largest state index found.
    """
    dtrajs = _ensure_dtraj_list(dtrajs)
    # per-trajectory histograms
    histograms = []
    for dtraj in dtrajs:
        if ignore_negative:
            dtraj = dtraj[np.where(dtraj >= 0)]
        histograms.append(np.bincount(dtraj))
    # accumulate into a single histogram of the maximal length
    nmax = max((h.shape[0] for h in histograms), default=0)
    total = np.zeros(nmax, dtype=int)
    for h in histograms:
        total[:h.shape[0]] += h
    return total
def number_of_states(dtrajs, only_used=False):
    r"""Number of states in the given discrete trajectories.

    Parameters
    ----------
    dtrajs : array_like or list of array_like
        Discretized trajectory or list of discretized trajectories.
    only_used : bool, default=False
        If False, return max + 1 where max is the largest index used.
        If True, return the number of states occurring at least once.
    """
    dtrajs = _ensure_dtraj_list(dtrajs)
    if only_used:
        # count states with nonzero occupancy
        return np.count_nonzero(count_states(dtrajs))
    # include unpopulated states: largest index + 1
    largest = 0
    for dtraj in dtrajs:
        largest = max(largest, np.max(dtraj))
    return largest + 1
def stdchannel_redirected(stdchannel, dest_filename, fake=False):
    """Context manager body that temporarily redirects stdout or stderr at the
    file-descriptor level, e.g.::

        with stdchannel_redirected(sys.stderr, os.devnull):
            if compiler.has_function('clock_gettime', libraries=['rt']):
                libraries.append('rt')

    With ``fake=True`` no redirection happens (useful to disable the manager
    without changing call sites).
    """
    if fake:
        yield
        return
    saved_fd = None
    dest_file = None
    try:
        # duplicate the channel's fd so it can be restored afterwards
        saved_fd = os.dup(stdchannel.fileno())
        dest_file = open(dest_filename, 'w')
        os.dup2(dest_file.fileno(), stdchannel.fileno())
        yield
    finally:
        if saved_fd is not None:
            os.dup2(saved_fd, stdchannel.fileno())
        if dest_file is not None:
            dest_file.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _transform_array(self, X): r"""Projects the data onto the dominant independent components. Parameters X : ndarray(n, m) the input data Returns ------- Y : ndarray(n,) the projected data """
X_meanfree = X - self.mean Y = np.dot(X_meanfree, self.eigenvectors[:, 0:self.dimension()]) return Y.astype(self.output_type())
def timescales(self):
    r"""Implied timescales of the TICA transformation.

    For the :math:`i`-th eigenvalue this returns

    .. math::

        t_i = -\frac{\tau}{\log(|\lambda_i|)}

    where :math:`\tau` is the lag of the TICA object and :math:`\lambda_i`
    its :math:`i`-th eigenvalue.

    Returns
    -------
    timescales : 1D np.array
        Implied timescales; fewer values may be returned than input
        coordinates if the TICA matrices were rank-deficient or a variance
        cutoff was applied.
    """
    abs_eigenvalues = np.abs(self.eigenvalues)
    return -self.lag / np.log(abs_eigenvalues)
def feature_TIC_correlation(self):
    r"""Instantaneous correlation matrix between mean-free input features and TICs.

    With input features :math:`X_i` and TICs :math:`\theta_j`, the
    instantaneous linear correlation is

    .. math::

        \mathbf{Corr}(X_i - \mu_i, \mathbf{\theta}_j) =
        \frac{1}{\sigma_{X_i - \mu_i}}
        \sum_l \sigma_{(X_i - \mu_i)(X_l - \mu_l)} \mathbf{U}_{li}

    where :math:`\mathbf{U}` holds, as columns, the eigenvectors of the TICA
    generalized eigenvalue problem.

    Returns
    -------
    feature_TIC_correlation : ndarray(n, m)
        One row per input feature, one column per TIC.
    """
    feature_std = np.sqrt(np.diag(self.cov))
    projection = self.eigenvectors[:, :self.dimension()]
    return np.dot(self.cov, projection) / feature_std[:, np.newaxis]
def _svd_sym_koopman(K, C00_train, Ctt_train):
    """SVD of the symmetrized Koopman operator in the empirical distribution.

    Returns (U, S, V) with the singular vectors projected back from the
    whitened basis to the original basis.
    """
    from pyemma._ext.variational.solvers.direct import spd_inv_sqrt
    C00_isqrt = spd_inv_sqrt(C00_train)
    Ctt_isqrt = spd_inv_sqrt(Ctt_train)
    # reweight the operator to the empirical distribution
    C0t_re = mdot(C00_train, K)
    # symmetrize and decompose
    K_sym = mdot(C00_isqrt, C0t_re, Ctt_isqrt)
    U, S, Vt = np.linalg.svd(K_sym, compute_uv=True, full_matrices=False)
    # project back to the singular functions of K
    U = mdot(C00_isqrt, U)
    Vt = mdot(Vt, Ctt_isqrt)
    return U, S, Vt.T
def vamp_1_score(K, C00_train, C0t_train, Ctt_train,
                 C00_test, C0t_test, Ctt_test, k=None):
    """Compute the VAMP-1 score of a kinetic model.

    The model is defined by its (possibly symmetrized or reweighted) training
    covariances and the Koopman matrix K, where
    :math:`(C00^{train})^{-(1/2)} C0t^{train} (Ctt^{train})^{-(1/2)} = U S V^T`.
    The score is evaluated against the empirical test covariances.

    Parameters
    ----------
    K : ndarray(n, k)
        Koopman matrix (or symmetrized transition matrix) of the model.
    C00_train, C0t_train, Ctt_train : ndarray(n, n)
        Instantaneous and time-lagged covariance matrices of the training
        data, e.g. :math:`C_{0t} = (T-\\tau)^{-1} \\sum_t x_t x_{t+\\tau}^T`.
    C00_test, C0t_test, Ctt_test : ndarray(n, n)
        Direct empirical covariance estimates of the test data.
    k : int, optional
        Number of slow processes to consider in the score.

    Returns
    -------
    vamp1 : float
        VAMP-1 score.
    """
    from pyemma._ext.variational.solvers.direct import spd_inv_sqrt
    # SVD of the symmetrized operator in the empirical distribution
    U, S, V = _svd_sym_koopman(K, C00_train, Ctt_train)
    if k is not None:
        U = U[:, :k]
        V = V[:, :k]
    left = spd_inv_sqrt(mdot(U.T, C00_test, U))
    middle = mdot(U.T, C0t_test, V)
    right = spd_inv_sqrt(mdot(V.T, Ctt_test, V))
    # trace norm (nuclear norm) equals the sum of singular values
    return np.linalg.norm(mdot(left, middle, right), ord='nuc')
def vamp_e_score(K, C00_train, C0t_train, Ctt_train,
                 C00_test, C0t_test, Ctt_test, k=None):
    """Compute the VAMP-E score of a kinetic model.

    The model is defined by its (possibly symmetrized or reweighted) training
    covariances and the Koopman matrix K, where
    :math:`(C00^{train})^{-(1/2)} C0t^{train} (Ctt^{train})^{-(1/2)} = U S V^T`.
    The score is evaluated against the empirical test covariances.

    Parameters
    ----------
    K : ndarray(n, k)
        Koopman matrix (or symmetrized transition matrix) of the model.
    C00_train, C0t_train, Ctt_train : ndarray(n, n)
        Instantaneous and time-lagged covariance matrices of the training
        data, e.g. :math:`C_{0t} = (T-\\tau)^{-1} \\sum_t x_t x_{t+\\tau}^T`.
    C00_test, C0t_test, Ctt_test : ndarray(n, n)
        Direct empirical covariance estimates of the test data.
    k : int, optional
        Number of slow processes to consider in the score.

    Returns
    -------
    vampE : float
        VAMP-E score.
    """
    # SVD of the symmetrized operator in the empirical distribution
    U, s, V = _svd_sym_koopman(K, C00_train, Ctt_train)
    if k is not None:
        U = U[:, :k]
        s = s[:k]
        V = V[:, :k]
    # BUG FIX: previously S was only defined inside the `k is not None`
    # branch, so calling with k=None raised a NameError; build it for both
    # paths here.
    S = np.diag(s)
    score = np.trace(2.0 * mdot(V, S, U.T, C0t_test)
                     - mdot(V, S, U.T, C00_test, U, S, V.T, Ctt_test))
    return score
def get_culprit(omit_top_frames=1):
    """Return filename and line number of the code calling this function.

    Parameters
    ----------
    omit_top_frames : int, default=1
        Omit n frames from the top of the stack, so intermediate functions on
        the stack are skipped and the real culprit is reported.

    Returns
    -------
    (filename: str, fileno: int)
        Filename and line number of the culprit.
    """
    try:
        frames = stack()[omit_top_frames:]
        while frames:
            frame = frames.pop(0)
            filename = frame[1]
            # skip callee frames belonging to other decorators or this module
            if '<decorator' in filename or __file__ in filename:
                continue
            break
        lineno = frame[2]
        # avoid cyclic references!
        del frames, frame
    except OSError:
        # e.g. os.getcwd() fails in conda-test, since cwd gets deleted
        filename = 'unknown'
        lineno = -1
    return filename, lineno
def get_averaged_bias_matrix(bias_sequences, dtrajs, nstates=None):
    r"""Bias matrix via an exponential average of frame-wise bias energies.

    Parameters
    ----------
    bias_sequences : list of numpy.ndarray(T_i, num_therm_states)
        Reduced bias energy trajectories; ``bias_sequences[i][t, k]`` is the
        reduced bias energy of frame t of trajectory i evaluated at the k'th
        thermodynamic state (Umbrella/Hamiltonian/temperature).
    dtrajs : list of numpy.ndarray(T_i) of int
        Discrete trajectories matching ``bias_sequences``.
    nstates : int, optional, default=None
        Number of configuration states; inferred from ``dtrajs`` if None.

    Returns
    -------
    bias_matrix : numpy.ndarray(shape=(num_therm_states, num_conf_states))
        ``bias_matrix[j, i]`` is the bias energy in units of kT for discrete
        state i at thermodynamic state j.
    """
    from pyemma.thermo.extensions.util import (
        logsumexp as _logsumexp, logsumexp_pair as _logsumexp_pair)
    max_state = int(_np.max([dtraj.max() for dtraj in dtrajs]))
    if nstates is None:
        nstates = max_state + 1
    elif nstates < max_state + 1:
        raise ValueError("nstates is smaller than the number of observed microstates")
    nthermo = bias_sequences[0].shape[1]
    bias_matrix = -_np.ones(shape=(nthermo, nstates), dtype=_np.float64) * _np.inf
    counts = _np.zeros(shape=(nstates,), dtype=_np.intc)
    for seq_idx, bias_sequence in enumerate(bias_sequences):
        for state in range(nstates):
            in_state = (dtrajs[seq_idx] == state)
            n_in_state = in_state.sum()
            if n_in_state == 0:
                continue
            counts[state] += n_in_state
            selected = bias_sequence[in_state, :]
            # log-sum-exp accumulation of exp(-bias) per thermodynamic state
            for thermo in range(nthermo):
                bias_matrix[thermo, state] = _logsumexp_pair(
                    bias_matrix[thermo, state],
                    _logsumexp(_np.ascontiguousarray(-selected[:, thermo]),
                               inplace=False))
    visited = counts.nonzero()
    log_counts = _np.log(counts[visited])
    # convert accumulated log-averages back to reduced bias energies
    bias_matrix *= -1.0
    bias_matrix[:, visited] += log_counts[_np.newaxis, :]
    return bias_matrix
def get_umbrella_sampling_data(us_trajs, us_centers, us_force_constants,
                               md_trajs=None, kT=None, width=None):
    r"""Wrap umbrella sampling data or a mix of umbrella sampling and direct
    molecular dynamics.

    Parameters
    ----------
    us_trajs : list of N arrays, each of shape (T_i, d)
        Umbrella-sampled order parameter trajectories; often d=1 and thus a
        list of 1d-arrays.
    us_centers : array-like of size N
        Umbrella center positions, each a d-dimensional vector.
    us_force_constants : float or array-like of float
        Umbrella force constants, unit-less (e.g. kT per length unit); a
        list/array of N constants if umbrellas differ, a force matrix for
        multidimensional umbrella sampling.
    md_trajs : list of M arrays, each of shape (T_i, d), optional, default=None
        Unbiased molecular dynamics trajectories, formatted like ``us_trajs``.
    kT : float, optional
        Use if the supplied force constants are NOT unit-less.
    width : array-like of float, optional, default=None
        Per-dimension periodicity; each positive entry makes the
        corresponding dimension periodic with the given width, None/zero
        entries are non-periodic.

    Returns
    -------
    ttrajs : list of N+M int arrays, each of shape (T_i,)
        Thermodynamic state indices for every frame.
    btrajs : list of N+M float arrays, each of shape (T_i, K)
        Reduced bias energies for each thermodynamic state and frame.
    umbrella_centers : float array of shape (K, d)
        Umbrella centers labelled consistently with ``ttrajs``.
    force_constants : float array of shape (K, d, d)
        Force matrices labelled consistently with ``ttrajs``.
    unbiased_state : int or None
        Index of the unbiased thermodynamic state (if present).
    """
    ttrajs, umbrella_centers, force_constants, unbiased_state = \
        _get_umbrella_sampling_parameters(
            us_trajs, us_centers, us_force_constants, md_trajs=md_trajs, kT=kT)
    if md_trajs is None:
        md_trajs = []
    if width is None:
        width = _np.zeros(shape=(umbrella_centers.shape[1],), dtype=_np.float64)
    else:
        # BUG FIX: np.asarray(map(...)) wraps the map iterator in a 0-d
        # object array under Python 3; materialize the values first.
        width = _np.asarray(
            [w if w is not None and w > 0.0 else 0.0 for w in width],
            dtype=_np.float64)
    if width.shape[0] != umbrella_centers.shape[1]:
        raise ValueError('Unmatching number of width components.')
    btrajs = _get_umbrella_bias_sequences(
        us_trajs + md_trajs, umbrella_centers, force_constants, width)
    return ttrajs, btrajs, umbrella_centers, force_constants, unbiased_state
def get_multi_temperature_data(energy_trajs, temp_trajs, energy_unit, temp_unit,
                               reference_temperature=None):
    r"""Wrap data from multi-temperature molecular dynamics.

    Parameters
    ----------
    energy_trajs : list of N arrays, each of shape (T_i,)
        Potential energy time series in units of kT, kcal/mol or kJ/mol.
    temp_trajs : list of N int arrays, each of shape (T_i,)
        Heat bath temperature time series (K or C) at which the frames were
        created; may alternatively contain kT values.
    energy_unit : str
        Physical unit of the energies: kcal/mol, kJ/mol or kT.
    temp_unit : str
        Physical unit of the temperatures: K, C or kT.
    reference_temperature : float or None, optional, default=None
        Temperature against which the bias energies are computed; defaults to
        the lowest temperature or kT value. Must use the same unit as
        ``temp_trajs``.

    Returns
    -------
    ttrajs : list of int arrays, each of shape (T_i,)
        Thermodynamic state indices for every frame.
    btrajs : list of float arrays, each of shape (T_i, K)
        Reduced bias energies for each thermodynamic state and frame.
    temperatures : float array of length K
        Temperatures labelled consistently with ``ttrajs``.
    unbiased_state : int or None
        Index of the unbiased thermodynamic state (if present).
    """
    ttrajs, temperatures = _get_multi_temperature_parameters(temp_trajs)
    if reference_temperature is None:
        reference_temperature = temperatures.min()
    else:
        assert isinstance(reference_temperature, (int, float)), \
            'reference_temperature must be numeric'
        assert reference_temperature > 0.0, 'reference_temperature must be positive'
    btrajs = _get_multi_temperature_bias_sequences(
        energy_trajs, temp_trajs, temperatures, reference_temperature,
        energy_unit, temp_unit)
    unbiased_state = None
    if reference_temperature in temperatures:
        matches = _np.where(temperatures == reference_temperature)[0]
        if matches.size > 0:
            unbiased_state = matches[0]
    return ttrajs, btrajs, temperatures, unbiased_state
def assign_unbiased_state_label(memm_list, unbiased_state):
    r"""Set the msm label on the given list of estimated MEMM objects.

    Parameters
    ----------
    memm_list : list of estimated MEMM objects
        The MEMM objects which shall have the msm label set.
    unbiased_state : int or None
        Index of the unbiased thermodynamic state (if present); None is a
        no-op.
    """
    if unbiased_state is None:
        return
    for memm in memm_list:
        assert 0 <= unbiased_state < len(memm.models), \
            "invalid state: " + str(unbiased_state)
        memm._unbiased_state = unbiased_state
def timescales_msm(dtrajs, lags=None, nits=None, reversible=True, connected=True,
                   weights='empirical', errors=None, nsamples=50, n_jobs=None,
                   show_progress=True, mincount_connectivity='1/n',
                   only_timescales=False):
    r"""Implied timescales from Markov state models estimated at a series of
    lag times.

    Parameters
    ----------
    dtrajs : array-like or list of array-likes
        Discrete trajectories.
    lags : int, array-like with integers or None, optional
        Lag times at which the implied timescales are computed; None
        auto-generates a list, a single int generates lags from 1 to lags
        with a multiplier of 1.5 between successive lags.
    nits : int, optional
        Number of implied timescales to compute (fewer if the number of
        states is smaller); None determines it automatically.
    reversible : bool, optional
        Estimate the transition matrix reversibly (True) or not (False).
    connected : bool, optional
        If True, compute the connected set before estimation at each lag.
    weights : str, optional
        'empirical' (each frame counts as one, default) or 'oom'
        (OOM-reweighted transitions).
    errors : None | 'bayes', optional
        Whether/how to compute statistical uncertainties; 'bayes' samples the
        posterior. Ignored (reset to None) for weights='oom'.
    nsamples : int, optional
        Number of approximately independent transition matrix samples per lag
        time; only used if errors is not None.
    n_jobs : int, optional
        Number of subprocesses used to estimate the models per lag time.
    show_progress : bool, default=True
        Whether to show estimation progress.
    mincount_connectivity : float or '1/n'
        Minimum number of counts to consider a connection between two states;
        the default evaluates to 1/nstates.
    only_timescales : bool, default=False
        Keep only timescales (and samples) to save memory.

    Returns
    -------
    itsobj : :class:`ImpliedTimescales <pyemma.msm.estimators.implied_timescales.ImpliedTimescales>` object

    See also
    --------
    ImpliedTimescales, pyemma.plots.plot_implied_timescales
    """
    # validate the weights option up front
    if not isinstance(weights, str) or weights not in ('empirical', 'oom'):
        raise ValueError("Weights must be either \'empirical\' or \'oom\'")
    # OOM reweighting has no error estimation; silently disable it
    if weights == 'oom' and errors is not None:
        errors = None
    dtrajs = _types.ensure_dtraj_list(dtrajs)
    connectivity = 'largest' if connected else 'none'
    # pick the estimator matching the requested weights/errors combination
    if errors is None:
        if weights == 'empirical':
            estimator = _ML_MSM(reversible=reversible, connectivity=connectivity)
        else:
            estimator = _OOM_MSM(reversible=reversible, connectivity=connectivity)
    elif errors == 'bayes':
        estimator = _Bayes_MSM(reversible=reversible, connectivity=connectivity,
                               nsamples=nsamples, show_progress=show_progress)
    else:
        raise NotImplementedError(
            'Error estimation method {errors} currently not implemented'.format(errors=errors))
    if hasattr(estimator, 'mincount_connectivity'):
        estimator.mincount_connectivity = mincount_connectivity
    itsobj = _ImpliedTimescales(estimator, lags=lags, nits=nits, n_jobs=n_jobs,
                                show_progress=show_progress,
                                only_timescales=only_timescales)
    itsobj.estimate(dtrajs)
    return itsobj
def estimate_markov_model(dtrajs, lag, reversible=True, statdist=None, count_mode='sliding', weights='empirical',
                          sparse=False, connectivity='largest', dt_traj='1 step', maxiter=1000000, maxerr=1e-8,
                          score_method='VAMP2', score_k=10,
                          mincount_connectivity='1/n'):
    r""" Estimates a Markov model from discrete trajectories.

    Returns an estimator containing the estimated transition matrix and
    allowing to compute a large number of quantities related to Markov models.

    Parameters
    ----------
    dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int)
        Discrete trajectories, stored as integer ndarrays (arbitrary size)
        or a single ndarray for only one trajectory.
    lag : int
        Lag time at which transitions are counted and the transition matrix
        is estimated.
    reversible : bool, optional
        If True compute a reversible MSM, else a non-reversible MSM.
    statdist : (M,) ndarray, optional
        Stationary vector on the full state space. The transition matrix will
        be estimated such that statdist is its equilibrium distribution.
    count_mode : str, optional, default='sliding'
        Mode to obtain count matrices: 'sliding', 'effective' or 'sample'.
    weights : str, optional
        'empirical' (each trajectory frame counts as one, default) or
        'oom' (transitions re-weighted using OOM theory).
    sparse : bool, optional
        If True, use sparse matrix algebra for count/transition matrices and
        derived quantities (suggested for > 4000 states).
    connectivity : str, optional
        Connectivity mode: 'largest' (default), 'all' or 'none'
        (only 'largest' is currently implemented downstream).
    dt_traj : str, optional
        Physical time corresponding to one trajectory step, e.g. '10 ps'.
    maxiter : int, optional
        Maximum number of iterations of the reversible estimator
        (ignored with weights='oom').
    maxerr : float, optional
        Convergence tolerance of the reversible estimator
        (ignored with weights='oom').
    score_method : str, optional, default='VAMP2'
        Score used by the MSM score function: 'VAMP1' or 'VAMP2'.
    score_k : int or None
        Maximum number of eigenvalues or singular values used in the score.
        None means all available ones are used.
    mincount_connectivity : float or '1/n'
        Minimum number of counts to consider a connection between two states;
        '1/n' evaluates to 1/nstates.

    Returns
    -------
    msm : :class:`MaximumLikelihoodMSM <pyemma.msm.MaximumLikelihoodMSM>`
        Estimator object containing the MSM and estimation information.
    """
    # Reject anything that is not one of the two supported weighting schemes
    # (non-string inputs and unknown strings get the same error).
    if not isinstance(weights, str) or weights not in ('empirical', 'oom'):
        raise ValueError("Weights must be either 'empirical' or 'oom'")

    if weights == 'empirical':
        # plain maximum-likelihood transition matrix estimator
        estimator = _ML_MSM(lag=lag, reversible=reversible, statdist_constraint=statdist,
                            count_mode=count_mode, sparse=sparse, connectivity=connectivity,
                            dt_traj=dt_traj, maxiter=maxiter, maxerr=maxerr,
                            score_method=score_method, score_k=score_k,
                            mincount_connectivity=mincount_connectivity)
    else:
        # OOM-corrected estimator; the iterative ML solver parameters do not apply
        if statdist is not None or maxiter != 1000000 or maxerr != 1e-8:
            import warnings
            warnings.warn("Values for statdist, maxiter or maxerr are ignored if OOM-correction is used.")
        estimator = _OOM_MSM(lag=lag, reversible=reversible, count_mode=count_mode, sparse=sparse,
                             connectivity=connectivity, dt_traj=dt_traj,
                             score_method=score_method, score_k=score_k,
                             mincount_connectivity=mincount_connectivity)

    # estimate and return
    return estimator.estimate(dtrajs)
def bayesian_markov_model(dtrajs, lag, reversible=True, statdist=None, sparse=False, connectivity='largest',
                          count_mode='effective', nsamples=100, conf=0.95, dt_traj='1 step',
                          show_progress=True, mincount_connectivity='1/n'):
    r""" Bayesian Markov model estimate using Gibbs sampling of the posterior.

    Returns a :class:`BayesianMSM` that contains the estimated transition
    matrix and allows to compute a large number of quantities related to
    Markov models as well as their statistical uncertainties.

    Parameters
    ----------
    dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int)
        Discrete trajectories, stored as integer ndarrays (arbitrary size)
        or a single ndarray for only one trajectory.
    lag : int
        Lag time for the MSM estimation in multiples of trajectory steps.
    reversible : bool, optional, default=True
        If True compute a reversible MSM, else a non-reversible MSM.
    statdist : (M,) ndarray, optional
        Stationary vector on the full state space, used as an equilibrium
        constraint for the estimate.
    sparse : bool, optional, default=False
        If True, use sparse matrix algebra (suggested for > 4000 states).
    connectivity : str, optional, default='largest'
        Connectivity mode: None/'all', 'largest' or 'populous'.
    count_mode : str, optional, default='effective'
        Mode to obtain count matrices: 'sliding', 'effective' or 'sample'.
        'effective' (statistically uncorrelated counts) is recommended for
        Bayesian MSMs.
    nsamples : int, optional, default=100
        Number of transition matrix samples to compute and store.
    conf : float, optional, default=0.95
        Size of confidence intervals.
    dt_traj : str, optional, default='1 step'
        Physical time corresponding to one trajectory step, e.g. '10 ps'.
    show_progress : bool, default=True
        Show progress bars during calculation.
    mincount_connectivity : float or '1/n'
        Minimum number of counts to consider a connection between two states;
        '1/n' evaluates to 1/nstates.

    Returns
    -------
    An :class:`BayesianMSM` object containing the Bayesian MSM estimator
    and the model.
    """
    # TODO: store_data=True
    # Construct the Bayesian estimator and run it on the input trajectories.
    return _Bayes_MSM(lag=lag, reversible=reversible, statdist_constraint=statdist,
                      count_mode=count_mode, sparse=sparse, connectivity=connectivity,
                      dt_traj=dt_traj, nsamples=nsamples, conf=conf,
                      show_progress=show_progress,
                      mincount_connectivity=mincount_connectivity).estimate(dtrajs)
def timescales_hmsm(dtrajs, nstates, lags=None, nits=None, reversible=True, stationary=False,
                    connectivity=None, mincount_connectivity='1/n', separate=None, errors=None,
                    nsamples=100, stride=None, n_jobs=None, show_progress=True):
    r""" Calculate implied timescales from Hidden Markov state models estimated
    at a series of lag times. Warning: this can be slow!

    Parameters
    ----------
    dtrajs : array-like or list of array-likes
        Discrete trajectories.
    nstates : int
        Number of hidden states.
    lags : int, array-like with integers or None, optional
        Integer lag times at which the implied timescales will be calculated.
        If None (default), a list of lag times is generated automatically.
    nits : int, optional
        Number of implied timescales to compute. Fewer are computed if the
        number of states is smaller; None means automatic.
    reversible : bool, optional
        Estimate the transition matrix reversibly (True) or nonreversibly
        (False).
    stationary : bool, optional, default=False
        If True, the initial distribution of hidden states is computed as the
        stationary distribution of the transition matrix; if False it is
        estimated from the starting states.
    connectivity : str, optional, default=None
        None/'all', 'largest' or 'populous' hidden-state connectivity mode.
    mincount_connectivity : float or '1/n'
        Minimum number of counts to consider a connection between two states;
        '1/n' evaluates to 1/nstates.
    separate : None or iterable of int
        Force the given set of observed states into a separate hidden state.
    errors : None | 'bayes'
        Whether to compute statistical uncertainties (by default not) and
        which algorithm to use. The only option is currently 'bayes'.
    nsamples : int
        Number of approximately independent HMSM samples generated per lag
        time for uncertainty quantification. Only used if errors is not None.
    stride : int or None
        Stride between lagged trajectories. Defaults to 1 for the MLE and
        'effective' for Bayesian sampling (see below).
    n_jobs : int
        Number of subprocesses started to estimate the models per lag time.
    show_progress : bool, default=True
        Show progress bars during calculation.

    Returns
    -------
    itsobj : :class:`ImpliedTimescales <pyemma.msm.ImpliedTimescales>` object
    """
    # format data
    dtrajs = _types.ensure_dtraj_list(dtrajs)

    # MLE or error estimation?
    if errors is None:
        if stride is None:
            stride = 1
        estimator = _ML_HMSM(nstates=nstates, reversible=reversible, stationary=stationary,
                             connectivity=connectivity, stride=stride,
                             mincount_connectivity=mincount_connectivity, separate=separate)
    elif errors == 'bayes':
        if stride is None:
            # use statistically uncorrelated trajectories for Bayesian sampling
            stride = 'effective'
        estimator = _Bayes_HMSM(nstates=nstates, reversible=reversible, stationary=stationary,
                                connectivity=connectivity,
                                mincount_connectivity=mincount_connectivity,
                                stride=stride, separate=separate, show_progress=show_progress,
                                nsamples=nsamples)
    else:
        # FIX: the original message was built with '+' and no separating spaces,
        # producing e.g. "Error estimation methodbayescurrently not implemented".
        raise NotImplementedError(
            "Error estimation method '{}' currently not implemented".format(errors))

    # go
    itsobj = _ImpliedTimescales(estimator, lags=lags, nits=nits, n_jobs=n_jobs,
                                show_progress=show_progress)
    itsobj.estimate(dtrajs)
    return itsobj
def estimate_hidden_markov_model(dtrajs, nstates, lag, reversible=True, stationary=False, connectivity=None,
                                 mincount_connectivity='1/n', separate=None, observe_nonempty=True,
                                 stride=1, dt_traj='1 step', accuracy=1e-3, maxit=1000):
    r""" Estimates a Hidden Markov state model from discrete trajectories.

    Returns a :class:`MaximumLikelihoodHMSM` containing a transition matrix
    between a few (hidden) metastable states, each with a probability
    distribution over the observed discrete 'microstates'.

    Parameters
    ----------
    dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int)
        Discrete trajectories, stored as integer ndarrays (arbitrary size)
        or a single ndarray for only one trajectory.
    nstates : int
        Number of metastable (hidden) states in the resulting HMM.
    lag : int
        Lag time for the estimation in multiples of trajectory steps.
    reversible : bool, optional, default=True
        If True compute a reversible model, else a non-reversible one.
    stationary : bool, optional, default=False
        If True, the initial distribution of hidden states is computed as the
        stationary distribution of the transition matrix; if False it is
        estimated from the starting states.
    connectivity : str, optional, default=None
        None/'all', 'largest' or 'populous' hidden-state connectivity mode.
    mincount_connectivity : float or '1/n'
        Minimum number of counts to consider a connection between two states;
        '1/n' evaluates to 1/nstates.
    separate : None or iterable of int
        Force the given set of observed states into a separate hidden state.
    observe_nonempty : bool
        If True, restrict the observed states to those with at least one
        observation in the lagged input trajectories.
    stride : int
        Stride between lagged trajectories extracted from the input.
    dt_traj : str, optional, default='1 step'
        Physical time corresponding to one trajectory step, e.g. '10 ps'.
    accuracy : float
        Convergence threshold of the EM iteration; iteration stops when the
        likelihood increases by less than this.
    maxit : int
        Maximum number of EM iterations before stopping without convergence
        (a warning is given).

    Returns
    -------
    hmsm : :class:`MaximumLikelihoodHMSM <pyemma.msm.MaximumLikelihoodHMSM>`
        Estimator object containing the HMSM and estimation information.
    """
    # Build the maximum-likelihood HMM estimator (initialized from the largest
    # strongly connected MSM) and run the estimation on the input trajectories.
    return _ML_HMSM(lag=lag, nstates=nstates, reversible=reversible, stationary=stationary,
                    msm_init='largest-strong', connectivity=connectivity,
                    mincount_connectivity=mincount_connectivity, separate=separate,
                    observe_nonempty=observe_nonempty, stride=stride,
                    dt_traj=dt_traj, accuracy=accuracy, maxit=maxit).estimate(dtrajs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bayesian_hidden_markov_model(dtrajs, nstates, lag, nsamples=100, reversible=True, stationary=False, connectivity=None, mincount_connectivity='1/n', separate=None, observe_nonempty=True, stride='effective', conf=0.95, dt_traj='1 step', store_hidden=False, show_progress=True): r""" Bayesian Hidden Markov model estimate using Gibbs sampling of the posterior Returns a :class:`BayesianHMSM` that contains the estimated hidden Markov model [1]_ and a Bayesian estimate [2]_ that contains samples around this estimate to estimate uncertainties. Parameters dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int) discrete trajectories, stored as integer ndarrays (arbitrary size) or a single ndarray for only one trajectory. lag : int lagtime for the MSM estimation in multiples of trajectory steps nstates : int the number of metastable states in the resulting HMM reversible : bool, optional, default = True If true compute reversible MSM, else non-reversible MSM stationary : bool, optional, default=False If True, the initial distribution of hidden states is self-consistently computed as the stationary distribution of the transition matrix. If False, it will be estimated from the starting states. Only set this to true if you're sure that the observation trajectories are initiated from a global equilibrium distribution. connectivity : str, optional, default = None Defines if the resulting HMM will be defined on all hidden states or on a connected subset. Connectivity is defined by counting only transitions with at least mincount_connectivity counts. If a subset of states is used, all estimated quantities (transition matrix, stationary distribution, etc) are only defined on this subset and are correspondingly smaller than nstates. 
Following modes are available: * None or 'all' : The active set is the full set of states. Estimation is done on all weakly connected subsets separately. The resulting transition matrix may be disconnected. * 'largest' : The active set is the largest reversibly connected set. * 'populous' : The active set is the reversibly connected set with most counts. mincount_connectivity : float or '1/n' minimum number of counts to consider a connection between two states. Counts lower than that will count zero in the connectivity check and may thus separate the resulting transition matrix. The default evaluates to 1/nstates. separate : None or iterable of int Force the given set of observed states to stay in a separate hidden state. The remaining nstates-1 states will be assigned by a metastable decomposition. observe_nonempty : bool If True, will restricted the observed states to the states that have at least one observation in the lagged input trajectories. nsamples : int, optional, default=100 number of transition matrix samples to compute and store stride : str or int, default='effective' stride between two lagged trajectories extracted from the input trajectories. Given trajectory s[t], stride and lag will result in trajectories Setting stride = 1 will result in using all data (useful for maximum likelihood estimator), while a Bayesian estimator requires a longer stride in order to have statistically uncorrelated trajectories. Setting stride = None 'effective' uses the largest neglected timescale as an estimate for the correlation time and sets the stride accordingly. conf : float, optional, default=0.95 size of confidence intervals dt_traj : str, optional, default='1 step' Description of the physical time corresponding to the trajectory time step. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. 
Permitted units are (* is an arbitrary string): | 'fs', 'femtosecond*' | 'ps', 'picosecond*' | 'ns', 'nanosecond*' | 'us', 'microsecond*' | 'ms', 'millisecond*' | 's', 'second*' store_hidden : bool, optional, default=False store hidden trajectories in sampled HMMs show_progress : bool, default=True Show progressbars for calculation? Returns ------- An :class:`BayesianHMSM` object containing a transition matrix and various other HMM-related quantities and statistical uncertainties. Example ------- Note that the following example is only qualitative and not quantitatively reproducible because random numbers are involved We compute the stationary distribution (here given by the maximum likelihood estimate), and the 1-sigma uncertainty interval. You can see that the uncertainties are quite large (we have seen only very few transitions between the metastable states: 0.459176653019 - 0.268314552886 + 0.715326151685 0.540823346981 - 0.284761476984 + 0.731730375713 Let's look at the lifetimes of metastable states. Now we have really huge uncertainties. In states where one state is more probable than the other, the mean first passage time from the more probable to the less probable state is much higher than the reverse: 7.18543434854 - 6.03617757784 + 80.1298222741 8.65699332061 - 5.35089540896 + 30.1719505772 In contrast the relaxation timescale is less uncertain. This is because for a two-state system the relaxation timescale is dominated by the faster passage, which is less uncertain than the slower passage time: 3.35310468086 - 2.24574587978 + 8.34383177258 .. autoclass:: pyemma.msm.estimators.bayesian_hmsm.BayesianHMSM :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.msm.estimators.bayesian_hmsm.BayesianHMSM :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.msm.estimators.bayesian_hmsm.BayesianHMSM :attributes: References .. [1] F. Noe, H. Wu, J.-H. Prinz and N. 
Plattner: Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules. J. Chem. Phys. 139, 184114 (2013) .. [2] J. D. Chodera Et Al: Bayesian hidden Markov model analysis of single-molecule force spectroscopy: Characterizing kinetics under measurement uncertainty. arXiv:1108.1430 (2011) """
bhmsm_estimator = _Bayes_HMSM(lag=lag, nstates=nstates, stride=stride, nsamples=nsamples, reversible=reversible, stationary=stationary, connectivity=connectivity, mincount_connectivity=mincount_connectivity, separate=separate, observe_nonempty=observe_nonempty, dt_traj=dt_traj, conf=conf, store_hidden=store_hidden, show_progress=show_progress) return bhmsm_estimator.estimate(dtrajs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def estimate_augmented_markov_model(dtrajs, ftrajs, lag, m, sigmas, count_mode='sliding', connectivity='largest', dt_traj='1 step', maxiter=1000000, eps=0.05, maxcache=3000): r""" Estimates an Augmented Markov model from discrete trajectories and experimental data Returns a :class:`AugmentedMarkovModel` that contains the estimated transition matrix and allows to compute a large number of quantities related to Markov models. Parameters dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int) discrete trajectories, stored as integer ndarrays (arbitrary size) or a single ndarray for only one trajectory. ftrajs : list of trajectories of microscopic observables. Has to have the same shape (number of trajectories and timesteps) as dtrajs. Each timestep in each trajectory should match the shape of m and sigma, k. lag : int lag time at which transitions are counted and the transition matrix is estimated. m : ndarray(k) Experimental averages. sigmas : ndarray(k) Standard error for each experimental observable. count_mode : str, optional, default='sliding' mode to obtain count matrices from discrete trajectories. Should be one of: * 'sliding' : A trajectory of length T will have :math:`T-\tau` counts at time indexes .. math:: * 'effective' : Uses an estimate of the transition counts that are statistically uncorrelated. Recommended when used with a Bayesian MSM. * 'sample' : A trajectory of length T will have :math:`T/\tau` counts at time indexes .. math:: connectivity : str, optional Connectivity mode. Three methods are intended (currently only 'largest' is implemented) * 'largest' : The active set is the largest reversibly connected set. 
All estimation will be done on this subset and all quantities (transition matrix, stationary distribution, etc) are only defined on this subset and are correspondingly smaller than the full set of states * 'all' : The active set is the full set of states. Estimation will be conducted on each reversibly connected set separately. That means the transition matrix will decompose into disconnected submatrices, the stationary vector is only defined within subsets, etc. Currently not implemented. * 'none' : The active set is the full set of states. Estimation will be conducted on the full set of states without ensuring connectivity. This only permits nonreversible estimation. Currently not implemented. dt_traj : str, optional Description of the physical time corresponding to the lag. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. Permitted units are (* is an arbitrary string): * 'fs', 'femtosecond*' * 'ps', 'picosecond*' * 'ns', 'nanosecond*' * 'us', 'microsecond*' * 'ms', 'millisecond*' * 's', 'second*' maxiter : int, optional Optional parameter with specifies the maximum number of updates for Lagrange multiplier estimation. eps : float, optional Additional convergence criterion used when some experimental data are outside the support of the simulation. The value of the eps parameter is the threshold of the relative change in the predicted observables as a function of fixed-point iteration: $$ \mathrm{eps} > \frac{\mid o_{\mathrm{pred}}^{(i+1)}-o_{\mathrm{pred}}^{(i)}\mid }{\sigma}. $$ maxcache : int, optional Parameter which specifies the maximum size of cache used when performing estimation of AMM, in megabytes. Returns ------- amm : :class:`AugmentedMarkovModel <pyemma.msm.AugmentedMarkovModel>` Estimator object containing the AMM and estimation information. 
See also -------- AugmentedMarkovModel An AMM object that has been estimated from data .. autoclass:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.AugmentedMarkovModel :attributes: References .. [1] Olsson S, Wu H, Paul F, Clementi C, Noe F "Combining experimental and simulation data of molecular processes via augmented Markov models" PNAS (2017), 114(31), pp. 8265-8270 doi: 10.1073/pnas.1704803114 """
# check input if _np.all(sigmas>0): _w = 1./(2*sigmas**2.) else: raise ValueError('Zero or negative standard errors supplied. Please revise input') if ftrajs[0].ndim < 2: raise ValueError("Supplied feature trajectories have inappropriate dimensions (%d) should be atleast 2."%ftrajs[0].ndim) if len(dtrajs) != len(ftrajs): raise ValueError("A different number of dtrajs and ftrajs were supplied as input. They must have exactly a one-to-one correspondence.") elif not _np.all([len(dt)==len(ft) for dt,ft in zip(dtrajs, ftrajs)]): raise ValueError("One or more supplied dtraj-ftraj pairs do not have the same length.") else: # MAKE E matrix dta = _np.concatenate(dtrajs) fta = _np.concatenate(ftrajs) all_markov_states = set(dta) _E = _np.zeros((len(all_markov_states), fta.shape[1])) for i, s in enumerate(all_markov_states): _E[i, :] = fta[_np.where(dta == s)].mean(axis = 0) # transition matrix estimator mlamm = _ML_AMM(lag=lag, count_mode=count_mode, connectivity=connectivity, dt_traj=dt_traj, maxiter=maxiter, max_cache=maxcache, E=_E, w=_w, m=m) # estimate and return return mlamm.estimate(dtrajs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_zero(x): """ Returns True if x is numerically 0 or an array with 0's. """
if x is None: return True if isinstance(x, numbers.Number): return x == 0.0 if isinstance(x, np.ndarray): return np.all(x == 0) return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _sparsify(X, remove_mean=False, modify_data=False, sparse_mode='auto', sparse_tol=0.0): """ Determines the sparsity of X and returns a selected sub-matrix Only conducts sparsification if the number of constant columns is at least max(a N - b, min_const_col_number), Parameters X : ndarray data matrix remove_mean : bool True: remove column mean from the data, False: don't remove mean. modify_data : bool If remove_mean=True, the mean will be removed in the data matrix X, without creating an independent copy. This option is faster but might lead to surprises because your input array is changed. sparse_mode : str one of: * 'dense' : always use dense mode * 'sparse' : always use sparse mode if possible * 'auto' : automatic Returns ------- X0 : ndarray (view of X) Either X itself (if not sufficiently sparse), or a sliced view of X, containing only the variable columns mask : ndarray(N, dtype=bool) or None Bool selection array that indicates which columns of X were selected for X0, i.e. X0 = X[:, mask]. mask is None if no sparse selection was made. xconst : ndarray(N) Constant column values that are outside the sparse selection, i.e. X[i, ~mask] = xconst for any row i. xconst=0 if no sparse selection was made. """
if sparse_mode.lower() == 'sparse': min_const_col_number = 0 # enforce sparsity. A single constant column will lead to sparse treatment elif sparse_mode.lower() == 'dense': min_const_col_number = X.shape[1] + 1 # never use sparsity else: if remove_mean and not modify_data: # in this case we have to copy the data anyway, and can be permissive min_const_col_number = max(0.1 * X.shape[1], 50) else: # This is a rough heuristic to choose a minimum column number for which sparsity may pay off. # This heuristic is good for large number of samples, i.e. it may be inadequate for small matrices X. if X.shape[1] < 250: min_const_col_number = X.shape[1] - 0.25 * X.shape[1] elif X.shape[1] < 1000: min_const_col_number = X.shape[1] - (0.5 * X.shape[1] - 100) else: min_const_col_number = X.shape[1] - (0.8 * X.shape[1] - 400) # ensure we have an integer again. min_const_col_number = int(min_const_col_number) if X.shape[1] > min_const_col_number: mask = covartools.variable_cols(X, tol=sparse_tol, min_constant=min_const_col_number) # bool vector nconst = len(np.where(~mask)[0]) if nconst > min_const_col_number: xconst = X[0, ~mask] X = X[:, mask] # sparsify else: xconst = None mask = None else: xconst = None mask = None return X, mask, xconst
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _copy_convert(X, const=None, remove_mean=False, copy=True): r""" Makes a copy or converts the data type if needed Copies the data and converts the data type if unsuitable for covariance calculation. The standard data type for covariance computations is float64, because the double precision (but not single precision) is usually sufficient to compute the long sums involved in covariance matrix computations. Integer types are avoided even if the data is integer, because the BLAS matrix multiplication is very fast with floats, but very slow with integers. If X is of boolean type (0/1), the standard data type is float32, because this will be sufficient to represent numbers up to 2^23 without rounding error, which is usually sufficient sufficient as the largest element in np.dot(X.T, X) can then be T, the number of data points. Parameters remove_mean : bool If True, will enforce float64 even if the input is boolean copy : bool If True, enforces a copy even if the data type doesn't require it. Return ------ X : ndarray copy or reference to X if no copy was needed. const : ndarray or None copy or reference to const if no copy was needed. """
# determine type dtype = np.float64 # default: convert to float64 in order to avoid cancellation errors if X.dtype.kind == 'b' and X.shape[0] < 2**23 and not remove_mean: dtype = np.float32 # convert to float32 if we can represent all numbers # copy/convert if needed if X.dtype not in (np.float64, dtype): # leave as float64 (conversion is expensive), otherwise convert to dtype X = X.astype(dtype, order='C') if const is not None: const = const.astype(dtype, order='C') elif copy: X = X.copy(order='C') if const is not None: const = const.copy(order='C') return X, const
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _sum(X, xmask=None, xconst=None, Y=None, ymask=None, yconst=None, symmetric=False, remove_mean=False, weights=None): r""" Computes the column sums and centered column sums. If symmetric = False, the sums will be determined as .. math: sx &=& \frac{1}{2} \sum_t x_t sy &=& \frac{1}{2} \sum_t y_t If symmetric, the sums will be determined as .. math: sx = sy = \frac{1}{2T} \sum_t x_t + y_t Returns ------- w : float statistical weight of sx, sy sx : ndarray effective row sum of X (including symmetrization if requested) sx_raw_centered : ndarray centered raw row sum of X optional returns (only if Y is given): sy : ndarray effective row sum of X (including symmetrization if requested) sy_raw_centered : ndarray centered raw row sum of Y """
T = X.shape[0] # Check if weights are given: if weights is not None: X = weights[:, None] * X if Y is not None: Y = weights[:, None] * Y # compute raw sums on variable data sx_raw = X.sum(axis=0) # this is the mean before subtracting it. sy_raw = 0 if Y is not None: sy_raw = Y.sum(axis=0) # expand raw sums to full data if xmask is not None: if weights is not None: sx_raw = _sum_sparse(sx_raw, xmask, xconst, weights.sum()) else: sx_raw = _sum_sparse(sx_raw, xmask, xconst, T) if ymask is not None: if weights is not None: sy_raw = _sum_sparse(sy_raw, ymask, yconst, weights.sum()) else: sy_raw = _sum_sparse(sy_raw, ymask, yconst, T) # compute effective sums and centered sums if Y is not None and symmetric: sx = sx_raw + sy_raw sy = sx if weights is not None: w = 2*np.sum(weights) else: w = 2 * T else: sx = sx_raw sy = sy_raw if weights is not None: w = np.sum(weights) else: w = T sx_raw_centered = sx_raw.copy() if Y is not None: sy_raw_centered = sy_raw.copy() # center mean. if remove_mean: if Y is not None and symmetric: sx_raw_centered -= 0.5 * sx sy_raw_centered -= 0.5 * sy else: sx_raw_centered = np.zeros(sx.size) if Y is not None: sy_raw_centered = np.zeros(sy.size) # return if Y is not None: return w, sx, sx_raw_centered, sy, sy_raw_centered else: return w, sx, sx_raw_centered
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _center(X, w, s, mask=None, const=None, inplace=True): """ Centers the data. Parameters w : float statistical weight of s inplace : bool center in place Returns ------- sx : ndarray uncentered row sum of X sx_centered : ndarray row sum of X after centering optional returns (only if Y is given): sy_raw : ndarray uncentered row sum of Y sy_centered : ndarray row sum of Y after centering """
xmean = s / float(w) if mask is None: X = np.subtract(X, xmean, out=X if inplace else None) else: X = np.subtract(X, xmean[mask], out=X if inplace else None) const = np.subtract(const, xmean[~mask], const if inplace else None) return X, const
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _filter_variable_indices(mask, column_selection): """ Returns column indices restricted to the variable columns as determined by the given mask. Parameters mask : ndarray(N, dtype=bool) Array indicating the variable columns. column_selection : ndarray(k, dtype=int) Column indices to be filtered and mapped. Returns ------- ix : ndarray(l, dtype=int) Column indices restricted to the variable columns, mapped to the correct index range. """
a = np.where(mask)[0] b = column_selection[np.in1d(column_selection, a)] return np.searchsorted(a, b)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _M2_dense(X, Y, weights=None, diag_only=False): """ 2nd moment matrix using dense matrix computations. This function is encapsulated such that we can make easy modifications of the basic algorithms """
if weights is not None: if diag_only: return np.sum(weights[:, None] * X * Y, axis=0) else: return np.dot((weights[:, None] * X).T, Y) else: if diag_only: return np.sum(X * Y, axis=0) else: return np.dot(X.T, Y)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _M2_const(Xvar, mask_X, xvarsum, xconst, Yvar, mask_Y, yvarsum, yconst, weights=None): r""" Computes the unnormalized covariance matrix between X and Y, exploiting constant input columns Computes the unnormalized covariance matrix :math:`C = X^\top Y` (for symmetric=False) or :math:`C = \frac{1}{2} (X^\top Y + Y^\top X)` (for symmetric=True). Suppose the data matrices can be column-permuted to have the form .. math: X &=& (X_{\mathrm{var}}, X_{\mathrm{const}}) Y &=& (Y_{\mathrm{var}}, Y_{\mathrm{const}}) with rows: .. math: x_t &=& (x_{\mathrm{var},t}, x_{\mathrm{const}}) y_t &=& (y_{\mathrm{var},t}, y_{\mathrm{const}}) where :math:`x_{\mathrm{const}},\:y_{\mathrm{const}}` are constant vectors. The resulting matrix has the general form: .. math: C &=& [X_{\mathrm{var}}^\top Y_{\mathrm{var}} x_{sum} y_{\mathrm{const}}^\top ] & & [x_{\mathrm{const}}^\top y_{sum}^\top x_{sum} x_{sum}^\top ] where :math:`x_{sum} = \sum_t x_{\mathrm{var},t}` and :math:`y_{sum} = \sum_t y_{\mathrm{var},t}`. Parameters Xvar : ndarray (T, m) Part of the data matrix X with :math:`m \le M` variable columns. mask_X : ndarray (M) Boolean array of size M of the full columns. False for constant column, True for variable column in X. xvarsum : ndarray (m) Column sum of variable part of data matrix X xconst : ndarray (M-m) Values of the constant part of data matrix X Yvar : ndarray (T, n) Part of the data matrix Y with :math:`n \le N` variable columns. mask_Y : ndarray (N) Boolean array of size N of the full columns. False for constant column, True for variable column in Y. yvarsum : ndarray (n) Column sum of variable part of data matrix Y yconst : ndarray (N-n) Values of the constant part of data matrix Y weights : None or ndarray (N) weights for all time steps. Returns ------- C : ndarray (M, N) Unnormalized covariance matrix. """
C = np.zeros((len(mask_X), len(mask_Y))) # Block 11 C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights) # other blocks xsum_is_0 = _is_zero(xvarsum) ysum_is_0 = _is_zero(yvarsum) xconst_is_0 = _is_zero(xconst) yconst_is_0 = _is_zero(yconst) # TODO: maybe we don't need the checking here, if we do the decision in the higher-level function M2 # TODO: if not zero, we could still exploit the zeros in const and compute (and write!) this outer product # TODO: only to a sub-matrix # Block 12 and 21 if weights is not None: wsum = np.sum(weights) xvarsum = np.sum(weights[:, None] * Xvar, axis=0) yvarsum = np.sum(weights[:, None] * Yvar, axis=0) else: wsum = Xvar.shape[0] if not (xsum_is_0 or yconst_is_0) or not (ysum_is_0 or xconst_is_0): C[np.ix_(mask_X, ~mask_Y)] = np.outer(xvarsum, yconst) C[np.ix_(~mask_X, mask_Y)] = np.outer(xconst, yvarsum) # Block 22 if not (xconst_is_0 or yconst_is_0): C[np.ix_(~mask_X, ~mask_Y)] = np.outer(wsum*xconst, yconst) return C
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _M2_sparse(Xvar, mask_X, Yvar, mask_Y, weights=None): """ 2nd moment matrix exploiting zero input columns """
# The product over variable columns is computed densely, then scattered
# into a full-size zero matrix at the positions of the variable columns;
# all rows/columns belonging to zero input columns stay zero.
C = np.zeros((len(mask_X), len(mask_Y)))
C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
return C
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=None, column_selection=None): """ 2nd self-symmetric moment matrix exploiting zero input columns Computes X'X + Y'Y and X'Y + Y'X """
# symmetrization only makes sense for equally sized X and Y
assert len(mask_X) == len(mask_Y), 'X and Y need to have equal sizes for symmetrization'
if column_selection is None:
    mask_Xk = mask_X
    mask_Yk = mask_Y
    Xvark = Xvar
    Yvark = Yvar
else:
    # restrict masks and variable data to the selected columns
    mask_Xk = mask_X[column_selection]
    mask_Yk = mask_Y[column_selection]
    Xvark = Xvar[:, _filter_variable_indices(mask_X, column_selection)]
    Yvark = Yvar[:, _filter_variable_indices(mask_Y, column_selection)]
# Cxxyy = X'X + Y'Y, each term scattered into the full-size matrix
Cxxyy = np.zeros((len(mask_X), len(mask_Yk)))
Cxxyy[np.ix_(mask_X, mask_Xk)] = _M2_dense(Xvar, Xvark, weights=weights)
Cxxyy[np.ix_(mask_Y, mask_Yk)] += _M2_dense(Yvar, Yvark, weights=weights)
# Cxyyx = X'Y + Y'X
Cxyyx = np.zeros((len(mask_X), len(mask_Yk)))
Cxy = _M2_dense(Xvar, Yvark, weights=weights)
Cyx = _M2_dense(Yvar, Xvark, weights=weights)
Cxyyx[np.ix_(mask_X, mask_Yk)] = Cxy
Cxyyx[np.ix_(mask_Y, mask_Xk)] += Cyx
return Cxxyy, Cxyyx
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _M2_symmetric(Xvar, Yvar, mask_X=None, mask_Y=None, xsum=0, xconst=0, ysum=0, yconst=0, weights=None, column_selection=None, diag_only=False): """ symmetric second moment matrices. Decide if we need dense, sparse, const"""
if mask_X is None and mask_Y is None: if column_selection is None: Xvark = Xvar Yvark = Yvar else: Xvark = Xvar[:, column_selection] Yvark = Yvar[:, column_selection] Cxxyy = _M2_dense(Xvar, Xvark, weights=weights, diag_only=diag_only) \ + _M2_dense(Yvar, Yvark, weights=weights, diag_only=diag_only) Cxy = _M2_dense(Xvar, Yvark, weights=weights, diag_only=diag_only) Cyx = _M2_dense(Yvar, Xvark, weights=weights, diag_only=diag_only) Cxyyx = Cxy + Cyx else: # Check if one of the masks is not None, modify it and also adjust the constant columns: if mask_X is None: mask_X = np.ones(Xvar.shape[1], dtype=np.bool) xconst = np.ones(0, dtype=float) if mask_Y is None: mask_Y = np.ones(Yvar.shape[1], dtype=np.bool) yconst = np.ones(0, dtype=float) if _is_zero(xsum) and _is_zero(ysum) or _is_zero(xconst) and _is_zero(yconst): Cxxyy, Cxyyx = _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=weights, column_selection=column_selection) else: xvarsum = xsum[mask_X] # to variable part yvarsum = ysum[mask_Y] # to variable part if column_selection is None: Xvark = Xvar mask_Xk = mask_X xkvarsum = xvarsum xkconst = xconst Yvark = Yvar mask_Yk = mask_Y ykvarsum = yvarsum ykconst = yconst else: Xvark = Xvar[:, _filter_variable_indices(mask_X, column_selection)] mask_Xk = mask_X[column_selection] xksum = xsum[column_selection] xkvarsum = xksum[mask_Xk] xkconst = xconst[_filter_variable_indices(~mask_X, column_selection)] Yvark = Yvar[:, _filter_variable_indices(mask_Y, column_selection)] mask_Yk = mask_Y[column_selection] yksum = ysum[column_selection] ykvarsum = yksum[mask_Yk] ykconst = yconst[_filter_variable_indices(~mask_Y, column_selection)] Cxxyy = _M2_const(Xvar, mask_X, xvarsum, xconst, Xvark, mask_Xk, xkvarsum, xkconst, weights=weights) \ + _M2_const(Yvar, mask_Y, yvarsum, yconst, Yvark, mask_Yk, ykvarsum, ykconst, weights=weights) Cxy = _M2_const(Xvar, mask_X, xvarsum, xconst, Yvark, mask_Yk, ykvarsum, ykconst, weights=weights) Cyx = _M2_const(Yvar, mask_Y, yvarsum, yconst, 
Xvark, mask_Xk, xkvarsum, xkconst, weights=weights) Cxyyx = Cxy + Cyx return Cxxyy, Cxyyx
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def moments_XX(X, remove_mean=False, modify_data=False, weights=None, sparse_mode='auto', sparse_tol=0.0, column_selection=None, diag_only=False): r""" Computes the first two unnormalized moments of X Computes :math:`s = \sum_t x_t` and :math:`C = X^\top X` while exploiting zero or constant columns in the data matrix. Parameters X : ndarray (T, M) Data matrix remove_mean : bool True: remove column mean from the data, False: don't remove mean. modify_data : bool If remove_mean=True, the mean will be removed in the data matrix X, without creating an independent copy. This option is faster but might lead to surprises because your input array is changed. weights: None or ndarray(T, ) weights assigned to each trajectory point. If None, all data points have weight one. If ndarray, each data point is assigned a separate weight. sparse_mode : str one of: * 'dense' : always use dense mode * 'sparse' : always use sparse mode if possible * 'auto' : automatic sparse_tol: float Threshold for considering column to be zero in order to save computing effort when the data is sparse or almost sparse. If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y is not given) of the covariance matrix will be set to zero. If Y is given and max(abs(Y[:, i])) < sparse_tol, then column i of the covariance matrix will be set to zero. column_selection: ndarray(k, dtype=int) or None Indices of those columns that are to be computed. If None, all columns are computed. diag_only: bool If True, the computation is restricted to the diagonal entries (autocorrelations) only. Returns ------- w : float statistical weight s : ndarray (M) sum C : ndarray (M, M) unnormalized covariance matrix """
# Check consistency of inputs: if weights is not None: assert X.shape[0] == weights.shape[0], 'X and weights_x must have equal length' # diag_only is only implemented for dense mode if diag_only and sparse_mode is not 'dense': if sparse_mode is 'sparse': import warnings warnings.warn('Computing diagonal entries only is not implemented for sparse mode. Switching to dense mode.') sparse_mode = 'dense' # sparsify X0, mask_X, xconst = _sparsify(X, remove_mean=remove_mean, modify_data=modify_data, sparse_mode=sparse_mode, sparse_tol=sparse_tol) is_sparse = mask_X is not None # copy / convert # TODO: do we need to copy xconst? X0, xconst = _copy_convert(X0, const=xconst, remove_mean=remove_mean, copy=is_sparse or (remove_mean and not modify_data)) # sum / center w, sx, sx0_centered = _sum(X0, xmask=mask_X, xconst=xconst, symmetric=False, remove_mean=remove_mean, weights=weights) if remove_mean: _center(X0, w, sx, mask=mask_X, const=xconst, inplace=True) # fast in-place centering # TODO: we could make a second const check here. If after summation not enough zeros have appeared in the # TODO: consts, we switch back to dense treatment here. # compute covariance matrix if column_selection is not None: if is_sparse: Xk = X[:, column_selection] mask_Xk = mask_X[column_selection] X0k = Xk[:, mask_Xk] xksum = sx0_centered[column_selection] xkconst = Xk[0, ~mask_Xk] X0k, xkconst = _copy_convert(X0k, const=xkconst, remove_mean=remove_mean, copy=True) C = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_Xk, xsum=sx0_centered, xconst=xconst, ysum=xksum, yconst=xkconst, weights=weights) else: X0k = X0[:, column_selection] C = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_X, xsum=sx0_centered, xconst=xconst, ysum=sx0_centered[column_selection], yconst=xconst, weights=weights) else: C = _M2(X0, X0, mask_X=mask_X, mask_Y=mask_X, xsum=sx0_centered, xconst=xconst, ysum=sx0_centered, yconst=xconst, weights=weights, diag_only=diag_only) return w, sx, C
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def covar(X, remove_mean=False, modify_data=False, weights=None, sparse_mode='auto', sparse_tol=0.0): """ Computes the covariance matrix of X Computes .. math: C_XX &=& X^\top X while exploiting zero or constant columns in the data matrix. WARNING: Directly use moments_XX if you can. This function does an additional constant-matrix multiplication and does not return the mean. Parameters X : ndarray (T, M) Data matrix remove_mean : bool True: remove column mean from the data, False: don't remove mean. modify_data : bool If remove_mean=True, the mean will be removed in the data matrix X, without creating an independent copy. This option is faster but might lead to surprises because your input array is changed. weights : None or ndarray(T, ) weights assigned to each trajectory point of X. If None, all data points have weight one. If ndarray, each data point is assigned a separate weight. sparse_mode : str one of: * 'dense' : always use dense mode * 'sparse' : always use sparse mode if possible * 'auto' : automatic sparse_tol: float Threshold for considering column to be zero in order to save computing effort when the data is sparse or almost sparse. If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y is not given) of the covariance matrix will be set to zero. If Y is given and max(abs(Y[:, i])) < sparse_tol, then column i of the covariance matrix will be set to zero. Returns ------- C_XX : ndarray (M, M) Covariance matrix of X See also -------- moments_XX """
w, s, M = moments_XX(X, remove_mean=remove_mean, weights=weights, modify_data=modify_data, sparse_mode=sparse_mode, sparse_tol=sparse_tol) return M / float(w)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def covars(X, Y, remove_mean=False, modify_data=False, symmetrize=False, weights=None, sparse_mode='auto', sparse_tol=0.0): """ Computes the covariance and cross-covariance matrix of X and Y If symmetrize is False, computes .. math: C_XX &=& X^\top X C_XY &=& X^\top Y If symmetrize is True, computes .. math: C_XX &=& \frac{1}{2} (X^\top X + Y^\top Y) C_XY &=& \frac{1}{2} (X^\top Y + Y^\top X) while exploiting zero or constant columns in the data matrix. WARNING: Directly use moments_XXXY if you can. This function does an additional constant-matrix multiplication and does not return the mean. Parameters X : ndarray (T, M) Data matrix Y : ndarray (T, N) Second data matrix remove_mean : bool True: remove column mean from the data, False: don't remove mean. modify_data : bool If remove_mean=True, the mean will be removed in the data matrix X, without creating an independent copy. This option is faster but might lead to surprises because your input array is changed. symmetrize : bool Computes symmetrized means and moments (see above) weights : None or ndarray(T, ) weights assigned to each trajectory point of X. If None, all data points have weight one. If ndarray, each data point is assigned a separate weight. sparse_mode : str one of: * 'dense' : always use dense mode * 'sparse' : always use sparse mode if possible * 'auto' : automatic sparse_tol: float Threshold for considering column to be zero in order to save computing effort when the data is sparse or almost sparse. If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y is not given) of the covariance matrix will be set to zero. If Y is given and max(abs(Y[:, i])) < sparse_tol, then column i of the covariance matrix will be set to zero. 
Returns ------- C_XX : ndarray (M, M) Covariance matrix of X C_XY : ndarray (M, N) Covariance matrix of XY See also -------- moments_XXXY """
w, sx, sy, Mxx, Mxy = moments_XXXY(X, Y, remove_mean=remove_mean, modify_data=modify_data, weights=weights, symmetrize=symmetrize, sparse_mode=sparse_mode, sparse_tol=sparse_tol) return Mxx / float(w), Mxy / float(w)
def getargspec_no_self(func):
    """inspect.getargspec replacement using inspect.signature.

    inspect.getargspec is deprecated in python 3. This is a replacement
    based on the (new in python 3.3) `inspect.signature`.

    Parameters
    ----------
    func : callable
        A callable to inspect

    Returns
    -------
    argspec : ArgSpec(args, varargs, varkw, defaults)
        This is similar to the result of inspect.getargspec(func) under
        python 2.x.
        NOTE: if the first argument of `func` is self, it is *not*, I repeat
        *not*, included in argspec.args. This is done for consistency between
        inspect.getargspec() under python 2.x, and inspect.signature() under
        python 3.x.
    """
    sig = inspect.signature(func)
    params = list(sig.parameters.values())
    # Plain positional-or-keyword parameters map to getargspec's "args".
    args = [p.name for p in params
            if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
    # *args parameter name, if any (there can be at most one).
    varargs = [p.name for p in params
               if p.kind == inspect.Parameter.VAR_POSITIONAL]
    varargs = varargs[0] if varargs else None
    # **kwargs parameter name, if any (there can be at most one).
    varkw = [p.name for p in params
             if p.kind == inspect.Parameter.VAR_KEYWORD]
    varkw = varkw[0] if varkw else None
    # Defaults of the positional-or-keyword parameters only, mirroring
    # getargspec; keyword-only parameters are intentionally not reported.
    defaults = [p.default for p in params
                if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
                    and p.default is not p.empty)] or None
    # Bug fix: guard the args[0] access. The previous unconditional
    # `args[0] == 'self'` raised IndexError for callables whose signature
    # has no positional-or-keyword parameters (e.g. only *args/**kwargs).
    if args and args[0] == 'self':
        args.pop(0)
    return ArgSpec(args, varargs, varkw, defaults)
def blocksplit_dtrajs(dtrajs, lag=1, sliding=True, shift=None):
    """ Splits the discrete trajectories into approximately uncorrelated fragments

    Will split trajectories into fragments of lengths lag or longer. These
    fragments are overlapping in order to conserve the transition counts at
    given lag. If sliding=True, the resulting trajectories will lead to
    exactly the same count matrix as when counted from dtrajs. If
    sliding=False (sampling at lag), the count matrices are only equal when
    also setting shift=0.

    Parameters
    ----------
    dtrajs : list of ndarray(int)
        Discrete trajectories
    lag : int
        Lag time at which counting will be done.
    sliding : bool
        True for splitting trajectories for sliding count, False if
        lag-sampling will be applied
    shift : None or int
        Start of first full tau-window. If None, shift will be randomly
        generated

    Returns
    -------
    dtrajs_new : list of ndarray(int)
        Overlapping trajectory fragments.
    """
    fragments = []
    for traj in dtrajs:
        # Trajectories not longer than one lag window contribute no counts.
        if len(traj) <= lag:
            continue
        # Random window start unless the caller pinned it via `shift`.
        start = np.random.randint(min(lag, traj.size - lag)) if shift is None else shift
        if sliding:
            # Keep the initial stub so sliding-window counts are conserved.
            if start > 0:
                fragments.append(traj[:lag + start])
            fragments.extend(traj[t0:t0 + 2 * lag]
                             for t0 in range(start, traj.size - lag, lag))
        else:
            # Lag-sampling: windows of length lag+1 placed every lag steps.
            fragments.extend(traj[t0:t0 + lag + 1]
                             for t0 in range(start, traj.size - lag, lag))
    return fragments
def cvsplit_dtrajs(dtrajs):
    """ Splits the trajectories into a training and test set with approximately equal number of trajectories

    Parameters
    ----------
    dtrajs : list of ndarray(int)
        Discrete trajectories

    Returns
    -------
    dtrajs_train : list of ndarray(int)
        Trajectories drawn for the training set (half of the input, rounded down).
    dtrajs_test : list of ndarray(int)
        The remaining trajectories, forming the test set.
    """
    n = len(dtrajs)
    if n == 1:
        raise ValueError('Only have a single trajectory. Cannot be split into train and test set')
    # Draw half of the trajectory indices uniformly, without replacement.
    train_idx = np.random.choice(n, int(n / 2), replace=False)
    # Every index not drawn above goes to the test set.
    test_idx = np.array(list(set(list(np.arange(n))) - set(list(train_idx))))
    return [dtrajs[i] for i in train_idx], [dtrajs[i] for i in test_idx]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def numpy_random_seed(seed=42): """ sets the random seed of numpy within the context. Example ------- >>> with numpy_random_seed(seed=42): ... data = np.random.rand(3) """
# Save the complete NumPy RNG state so it can be restored afterwards.
old_state = np.random.get_state()
np.random.seed(seed)
try:
    # Run the caller's with-block under the fixed seed.
    yield
finally:
    # Always restore the previous RNG state, even if the body raised.
    np.random.set_state(old_state)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def random_seed(seed=42): """ sets the random seed of Python within the context. Example ------- >>> with random_seed(seed=42): ... value = random.random() """
# Save the Python RNG state so it can be restored afterwards.
old_state = random.getstate()
random.seed(seed)
try:
    # Run the caller's with-block under the fixed seed.
    yield
finally:
    # Always restore the previous RNG state, even if the body raised.
    random.setstate(old_state)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def settings(**kwargs): """ apply given PyEMMA config values temporarily within the given context."""
from pyemma import config
# Snapshot of the settings we are about to override (filled lazily below).
old_settings = {}
try:
    # Remember each old setting, then apply the new one. setattr may raise
    # ValueError if an invalid setting value is given; already-applied
    # settings are still rolled back by the finally block.
    for k, v in kwargs.items():
        old_settings[k] = getattr(config, k)
        setattr(config, k, v)
    # Run the caller's with-block under the temporary configuration.
    yield
finally:
    # Restore the old settings regardless of how the body exited.
    for k, v in old_settings.items():
        setattr(config, k, v)
def get_histogram(
        xall, yall, nbins=100, weights=None, avoid_zero_count=False):
    """Compute a two-dimensional histogram.

    Parameters
    ----------
    xall : ndarray(T)
        Sample x-coordinates.
    yall : ndarray(T)
        Sample y-coordinates.
    nbins : int, optional, default=100
        Number of histogram bins used in each dimension.
    weights : ndarray(T), optional, default=None
        Sample weights; by default all samples have the same weight.
    avoid_zero_count : bool, optional, default=False
        Avoid zero counts by lifting all histogram elements to the
        minimum nonzero count. If False, empty bins keep a zero count
        (which would yield infinity in a derived free energy).

    Returns
    -------
    x : ndarray(nbins,)
        The bins' x-axis center positions.
    y : ndarray(nbins,)
        The bins' y-axis center positions.
    z : ndarray(nbins, nbins)
        Histogram counts, transposed so the first axis corresponds to y.
    """
    counts, xedges, yedges = _np.histogram2d(
        xall, yall, bins=nbins, weights=weights)
    # Bin centers are the midpoints of consecutive edges.
    xcenters = 0.5 * (xedges[:-1] + xedges[1:])
    ycenters = 0.5 * (yedges[:-1] + yedges[1:])
    if avoid_zero_count:
        # Lift empty bins up to the smallest populated count.
        counts = _np.maximum(counts, _np.min(counts[counts.nonzero()]))
    return xcenters, ycenters, counts.T
def get_grid_data(xall, yall, zall, nbins=100, method='nearest'):
    """Interpolate unstructured two-dimensional data onto a regular grid.

    Parameters
    ----------
    xall : ndarray(T)
        Sample x-coordinates.
    yall : ndarray(T)
        Sample y-coordinates.
    zall : ndarray(T)
        Sample z-coordinates.
    nbins : int, optional, default=100
        Number of grid points used in the x/y-dimensions.
    method : str, optional, default='nearest'
        Assignment method; scipy.interpolate.griddata supports the
        methods 'nearest', 'linear', and 'cubic'.

    Returns
    -------
    x : ndarray(nbins, nbins)
        The bins' x-coordinates in meshgrid format.
    y : ndarray(nbins, nbins)
        The bins' y-coordinates in meshgrid format.
    z : ndarray(nbins, nbins)
        Interpolated z-data in meshgrid format.
    """
    from scipy.interpolate import griddata
    # Regular grid spanning the full data range; 'ij' indexing keeps the
    # x-axis on the first array dimension.
    grid_x, grid_y = _np.meshgrid(
        _np.linspace(xall.min(), xall.max(), nbins),
        _np.linspace(yall.min(), yall.max(), nbins),
        indexing='ij')
    # Stack the scattered sample coordinates as (T, 2) points for griddata.
    sample_points = _np.hstack([xall[:, None], yall[:, None]])
    grid_z = griddata(sample_points, zall, (grid_x, grid_y), method=method)
    return grid_x, grid_y, grid_z
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _to_free_energy(z, minener_zero=False): """Compute free energies from histogram counts. Parameters z : ndarray(T) Histogram counts. minener_zero : boolean, optional, default=False Shifts the energy minimum to zero. Returns ------- free_energy : ndarray(T) The free energy values in units of kT. """
pi = _to_density(z) free_energy = _np.inf * _np.ones(shape=z.shape) nonzero = pi.nonzero() free_energy[nonzero] = -_np.log(pi[nonzero]) if minener_zero: free_energy[nonzero] -= _np.min(free_energy[nonzero]) return free_energy
def _prune_kwargs(kwargs):
    """Remove non-allowed keys from a kwargs dictionary (in place).

    Emits a warning for every removed key so the caller learns that the
    parameter had no effect.

    Parameters
    ----------
    kwargs : dict
        Named parameters to prune.

    Returns
    -------
    kwargs : dict
        The same dictionary, restricted to keys contourf accepts.
    """
    permitted = (
        'corner_mask', 'alpha', 'locator', 'extend', 'xunits',
        'yunits', 'antialiased', 'nchunk', 'hatches', 'zorder')
    # Snapshot the offending keys first; never mutate while iterating.
    for key in [k for k in kwargs.keys() if k not in permitted]:
        _warn(
            '{}={} is not an allowed optional parameter and will'
            ' be ignored'.format(key, kwargs[key]))
        kwargs.pop(key, None)
    return kwargs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_map( x, y, z, ax=None, cmap=None, ncontours=100, vmin=None, vmax=None, levels=None, cbar=True, cax=None, cbar_label=None, cbar_orientation='vertical', norm=None, **kwargs): """Plot a two-dimensional map from data on a grid. Parameters x : ndarray(T) Binned x-coordinates. y : ndarray(T) Binned y-coordinates. z : ndarray(T) Binned z-coordinates. ax : matplotlib.Axes object, optional, default=None The ax to plot to; if ax=None, a new ax (and fig) is created. cmap : matplotlib colormap, optional, default=None The color map to use. ncontours : int, optional, default=100 Number of contour levels. vmin : float, optional, default=None Lowest z-value to be plotted. vmax : float, optional, default=None Highest z-value to be plotted. levels : iterable of float, optional, default=None Contour levels to plot. cbar : boolean, optional, default=True Plot a color bar. cax : matplotlib.Axes object, optional, default=None Plot the colorbar into a custom axes object instead of stealing space from ax. cbar_label : str, optional, default=None Colorbar label string; use None to suppress it. cbar_orientation : str, optional, default='vertical' Colorbar orientation; choose 'vertical' or 'horizontal'. norm : matplotlib norm, optional, default=None Use a norm when coloring the contour plot. Optional parameters for contourf (**kwargs) corner_mask : boolean, optional Enable/disable corner masking, which only has an effect if z is a masked array. If False, any quad touching a masked point is masked out. If True, only the triangular corners of quads nearest those points are always masked out, other triangular corners comprising three unmasked points are contoured as usual. Defaults to rcParams['contour.corner_mask'], which defaults to True. alpha : float The alpha blending value. 
locator : [ None | ticker.Locator subclass ] If locator is None, the default MaxNLocator is used. The locator is used to determine the contour levels if they are not given explicitly via the levels argument. extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ] Unless this is ‘neither’, contour levels are automatically added to one or both ends of the range so that all data are included. These added ranges are then mapped to the special colormap values which default to the ends of the colormap range, but can be set via matplotlib.colors.Colormap.set_under() and matplotlib.colors.Colormap.set_over() methods. xunits, yunits : [ None | registered units ] Override axis units by specifying an instance of a matplotlib.units.ConversionInterface. antialiased : boolean, optional Enable antialiasing, overriding the defaults. For filled contours, the default is True. For line contours, it is taken from rcParams[‘lines.antialiased’]. nchunk : [ 0 | integer ] If 0, no subdivision of the domain. Specify a positive integer to divide the domain into subdomains of nchunk by nchunk quads. Chunking reduces the maximum length of polygons generated by the contouring algorithm which reduces the rendering workload passed on to the backend and also requires slightly less RAM. It can however introduce rendering artifacts at chunk boundaries depending on the backend, the antialiased flag and value of alpha. hatches : A list of cross hatch patterns to use on the filled areas. If None, no hatching will be added to the contour. Hatching is supported in the PostScript, PDF, SVG and Agg backends only. zorder : float Set the zorder for the artist. Artists with lower zorder values are drawn first. Returns ------- fig : matplotlib.Figure object The figure in which the used ax resides. ax : matplotlib.Axes object The ax in which the map was plotted. misc : dict Contains a matplotlib.contour.QuadContourSet 'mappable' and, if requested, a matplotlib.Colorbar object 'cbar'. """
import matplotlib.pyplot as _plt if ax is None: fig, ax = _plt.subplots() else: fig = ax.get_figure() mappable = ax.contourf( x, y, z, ncontours, norm=norm, vmin=vmin, vmax=vmax, cmap=cmap, levels=levels, **_prune_kwargs(kwargs)) misc = dict(mappable=mappable) if cbar_orientation not in ('horizontal', 'vertical'): raise ValueError( 'cbar_orientation must be "horizontal" or "vertical"') if cbar: if cax is None: cbar_ = fig.colorbar( mappable, ax=ax, orientation=cbar_orientation) else: cbar_ = fig.colorbar( mappable, cax=cax, orientation=cbar_orientation) if cbar_label is not None: cbar_.set_label(cbar_label) misc.update(cbar=cbar_) return fig, ax, misc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_density( xall, yall, ax=None, cmap=None, ncontours=100, vmin=None, vmax=None, levels=None, cbar=True, cax=None, cbar_label='sample density', cbar_orientation='vertical', logscale=False, nbins=100, weights=None, avoid_zero_count=False, **kwargs): """Plot a two-dimensional density map using a histogram of scattered data. Parameters xall : ndarray(T) Sample x-coordinates. yall : ndarray(T) Sample y-coordinates. ax : matplotlib.Axes object, optional, default=None The ax to plot to; if ax=None, a new ax (and fig) is created. cmap : matplotlib colormap, optional, default=None The color map to use. ncontours : int, optional, default=100 Number of contour levels. vmin : float, optional, default=None Lowest z-value to be plotted. vmax : float, optional, default=None Highest z-value to be plotted. levels : iterable of float, optional, default=None Contour levels to plot. cbar : boolean, optional, default=True Plot a color bar. cax : matplotlib.Axes object, optional, default=None Plot the colorbar into a custom axes object instead of stealing space from ax. cbar_label : str, optional, default='sample density' Colorbar label string; use None to suppress it. cbar_orientation : str, optional, default='vertical' Colorbar orientation; choose 'vertical' or 'horizontal'. logscale : boolean, optional, default=False Plot the z-values in logscale. nbins : int, optional, default=100 Number of histogram bins used in each dimension. weights : ndarray(T), optional, default=None Sample weights; by default all samples have the same weight. avoid_zero_count : bool, optional, default=True Avoid zero counts by lifting all histogram elements to the minimum value before computing the free energy. If False, zero histogram counts would yield infinity in the free energy. 
Optional parameters for contourf (**kwargs) corner_mask : boolean, optional Enable/disable corner masking, which only has an effect if z is a masked array. If False, any quad touching a masked point is masked out. If True, only the triangular corners of quads nearest those points are always masked out, other triangular corners comprising three unmasked points are contoured as usual. Defaults to rcParams['contour.corner_mask'], which defaults to True. alpha : float The alpha blending value. locator : [ None | ticker.Locator subclass ] If locator is None, the default MaxNLocator is used. The locator is used to determine the contour levels if they are not given explicitly via the levels argument. extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ] Unless this is ‘neither’, contour levels are automatically added to one or both ends of the range so that all data are included. These added ranges are then mapped to the special colormap values which default to the ends of the colormap range, but can be set via matplotlib.colors.Colormap.set_under() and matplotlib.colors.Colormap.set_over() methods. xunits, yunits : [ None | registered units ] Override axis units by specifying an instance of a matplotlib.units.ConversionInterface. antialiased : boolean, optional Enable antialiasing, overriding the defaults. For filled contours, the default is True. For line contours, it is taken from rcParams[‘lines.antialiased’]. nchunk : [ 0 | integer ] If 0, no subdivision of the domain. Specify a positive integer to divide the domain into subdomains of nchunk by nchunk quads. Chunking reduces the maximum length of polygons generated by the contouring algorithm which reduces the rendering workload passed on to the backend and also requires slightly less RAM. It can however introduce rendering artifacts at chunk boundaries depending on the backend, the antialiased flag and value of alpha. hatches : A list of cross hatch patterns to use on the filled areas. 
If None, no hatching will be added to the contour. Hatching is supported in the PostScript, PDF, SVG and Agg backends only. zorder : float Set the zorder for the artist. Artists with lower zorder values are drawn first. Returns ------- fig : matplotlib.Figure object The figure in which the used ax resides. ax : matplotlib.Axes object The ax in which the map was plotted. misc : dict Contains a matplotlib.contour.QuadContourSet 'mappable' and, if requested, a matplotlib.Colorbar object 'cbar'. """
x, y, z = get_histogram( xall, yall, nbins=nbins, weights=weights, avoid_zero_count=avoid_zero_count) pi = _to_density(z) pi = _np.ma.masked_where(pi <= 0, pi) if logscale: from matplotlib.colors import LogNorm norm = LogNorm(vmin=vmin, vmax=vmax) if levels is None: levels = _np.logspace( _np.floor(_np.log10(pi.min())), _np.ceil(_np.log10(pi.max())), ncontours + 1) else: norm = None fig, ax, misc = plot_map( x, y, pi, ax=ax, cmap=cmap, ncontours=ncontours, vmin=vmin, vmax=vmax, levels=levels, cbar=cbar, cax=cax, cbar_label=cbar_label, cbar_orientation=cbar_orientation, norm=norm, **kwargs) if cbar and logscale: from matplotlib.ticker import LogLocator misc['cbar'].set_ticks(LogLocator(base=10.0, subs=range(10))) return fig, ax, misc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_free_energy( xall, yall, weights=None, ax=None, nbins=100, ncontours=100, offset=-1, avoid_zero_count=False, minener_zero=True, kT=1.0, vmin=None, vmax=None, cmap='nipy_spectral', cbar=True, cbar_label='free energy / kT', cax=None, levels=None, legacy=True, ncountours=None, cbar_orientation='vertical', **kwargs): """Plot a two-dimensional free energy map using a histogram of scattered data. Parameters xall : ndarray(T) Sample x-coordinates. yall : ndarray(T) Sample y-coordinates. weights : ndarray(T), optional, default=None Sample weights; by default all samples have the same weight. ax : matplotlib.Axes object, optional, default=None The ax to plot to; if ax=None, a new ax (and fig) is created. Number of contour levels. nbins : int, optional, default=100 Number of histogram bins used in each dimension. ncontours : int, optional, default=100 Number of contour levels. offset : float, optional, default=-1 Deprecated and ineffective; raises a ValueError outside legacy mode. avoid_zero_count : bool, optional, default=False Avoid zero counts by lifting all histogram elements to the minimum value before computing the free energy. If False, zero histogram counts would yield infinity in the free energy. minener_zero : boolean, optional, default=True Shifts the energy minimum to zero. kT : float, optional, default=1.0 The value of kT in the desired energy unit. By default, energies are computed in kT (setting 1.0). If you want to measure the energy in kJ/mol at 298 K, use kT=2.479 and change the cbar_label accordingly. vmin : float, optional, default=None Lowest free energy value to be plotted. (default=0.0 in legacy mode) vmax : float, optional, default=None Highest free energy value to be plotted. cmap : matplotlib colormap, optional, default='nipy_spectral' The color map to use. 
cbar : boolean, optional, default=True Plot a color bar. cbar_label : str, optional, default='free energy / kT' Colorbar label string; use None to suppress it. cax : matplotlib.Axes object, optional, default=None Plot the colorbar into a custom axes object instead of stealing space from ax. levels : iterable of float, optional, default=None Contour levels to plot. legacy : boolean, optional, default=True Switch to use the function in legacy mode (deprecated). ncountours : int, optional, default=None Legacy parameter (typo) for number of contour levels. cbar_orientation : str, optional, default='vertical' Colorbar orientation; choose 'vertical' or 'horizontal'. Optional parameters for contourf (**kwargs) corner_mask : boolean, optional Enable/disable corner masking, which only has an effect if z is a masked array. If False, any quad touching a masked point is masked out. If True, only the triangular corners of quads nearest those points are always masked out, other triangular corners comprising three unmasked points are contoured as usual. Defaults to rcParams['contour.corner_mask'], which defaults to True. alpha : float The alpha blending value. locator : [ None | ticker.Locator subclass ] If locator is None, the default MaxNLocator is used. The locator is used to determine the contour levels if they are not given explicitly via the levels argument. extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ] Unless this is ‘neither’, contour levels are automatically added to one or both ends of the range so that all data are included. These added ranges are then mapped to the special colormap values which default to the ends of the colormap range, but can be set via matplotlib.colors.Colormap.set_under() and matplotlib.colors.Colormap.set_over() methods. xunits, yunits : [ None | registered units ] Override axis units by specifying an instance of a matplotlib.units.ConversionInterface. antialiased : boolean, optional Enable antialiasing, overriding the defaults. 
For filled contours, the default is True. For line contours, it is taken from rcParams[‘lines.antialiased’]. nchunk : [ 0 | integer ] If 0, no subdivision of the domain. Specify a positive integer to divide the domain into subdomains of nchunk by nchunk quads. Chunking reduces the maximum length of polygons generated by the contouring algorithm which reduces the rendering workload passed on to the backend and also requires slightly less RAM. It can however introduce rendering artifacts at chunk boundaries depending on the backend, the antialiased flag and value of alpha. hatches : A list of cross hatch patterns to use on the filled areas. If None, no hatching will be added to the contour. Hatching is supported in the PostScript, PDF, SVG and Agg backends only. zorder : float Set the zorder for the artist. Artists with lower zorder values are drawn first. Returns ------- fig : matplotlib.Figure object The figure in which the used ax resides. ax : matplotlib.Axes object The ax in which the map was plotted. misc : dict Contains a matplotlib.contour.QuadContourSet 'mappable' and, if requested, a matplotlib.Colorbar object 'cbar'. """
if legacy:
    # Legacy mode keeps the historical defaults/parameters but is slated
    # for removal; warn on every use.
    _warn(
        'Legacy mode is deprecated is will be removed in the'
        ' next major release. Until then use legacy=False',
        DeprecationWarning)
    # Resolve the colormap name via the helper (legacy behavior only).
    cmap = _get_cmap(cmap)
    if offset != -1:
        # `offset` is accepted but has no effect anymore.
        _warn(
            'Parameter offset is deprecated and will be ignored',
            DeprecationWarning)
    if ncountours is not None:
        # Historical typo parameter: forward its value to ncontours.
        _warn(
            'Parameter ncountours is deprecated;'
            ' use ncontours instead', DeprecationWarning)
        ncontours = ncountours
    if vmin is None:
        # Legacy default: clip the free energy color range at 0.
        vmin = 0.0
else:
    # Outside legacy mode the deprecated parameters are hard errors.
    if offset != -1:
        raise ValueError(
            'Parameter offset is not allowed outside legacy mode')
    if ncountours is not None:
        raise ValueError(
            'Parameter ncountours is not allowed outside'
            ' legacy mode; use ncontours instead')
# Histogram the samples and convert counts to free energies in units of kT.
x, y, z = get_histogram(
    xall, yall, nbins=nbins, weights=weights,
    avoid_zero_count=avoid_zero_count)
f = _to_free_energy(z, minener_zero=minener_zero) * kT
fig, ax, misc = plot_map(
    x, y, f, ax=ax, cmap=cmap, ncontours=ncontours, vmin=vmin,
    vmax=vmax, levels=levels, cbar=cbar, cax=cax,
    cbar_label=cbar_label, cbar_orientation=cbar_orientation,
    norm=None, **kwargs)
# NOTE: legacy mode returns only (fig, ax); the misc dict (mappable/cbar)
# is available in non-legacy mode only.
if legacy:
    return fig, ax
return fig, ax, misc