code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
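Each record below pairs a function's source (flattened to one line by the viewer) with its signature and docstring, plus two per-token losses. Judging from the values (e.g. 4.524621 / 4.386999 ≈ 1.03137), the factor column is the ratio loss_without_docstring / loss_with_docstring. A minimal sketch for recomputing it, assuming the records are loaded into a hypothetical pandas DataFrame named `df` with the column names above:

```python
import pandas as pd

# Hypothetical: `df` holds the rows below under the schema listed above.
def add_factor(df: pd.DataFrame) -> pd.DataFrame:
    # factor > 1 means the docstring lowered the per-token loss on the code.
    df["factor"] = df["loss_without_docstring"] / df["loss_with_docstring"]
    return df
```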
n_f = self.partial_transform(traj).shape[1] zippy=zip(itertools.repeat("N/A", n_f), itertools.repeat("N/A", n_f), itertools.repeat("N/A", n_f), itertools.repeat(("N/A","N/A","N/A","N/A"), n_f)) return dict_maker(zippy)
def describe_features(self, traj)
Generic method for describing features. Parameters ---------- traj : mdtraj.Trajectory Trajectory to use Returns ------- feature_descs : list of dict Dictionary describing each feature with the following information about the atoms pa...
4.524621
4.386999
1.03137
traj.superpose(self.reference_traj, atom_indices=self.superpose_atom_indices) diff2 = (traj.xyz[:, self.atom_indices] - self.reference_traj.xyz[0, self.atom_indices]) ** 2 x = np.sqrt(np.sum(diff2, axis=2)) return x
def partial_transform(self, traj)
Featurize an MD trajectory into a vector space via distance after superposition Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, shape=(n_samples, n_featu...
4.119519
4.471361
0.921312
if self.atom_indices is not None: sliced_traj = traj.atom_slice(self.atom_indices) else: sliced_traj = traj result = libdistance.cdist( sliced_traj, self.sliced_reference_traj, 'rmsd' ) return self._transform(result)
def partial_transform(self, traj)
Featurize an MD trajectory into a vector space via distance after superposition Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, shape=(n_frames, n_ref_frames) ...
4.755688
5.294437
0.898242
feature_descs = [] # fill in the atom indices using just the first frame self.partial_transform(traj[0]) top = traj.topology aind_tuples = [self.atom_indices for _ in range(self.sliced_reference_traj.n_frames)] zippy = zippy_maker(aind_tuples, top) zipp...
def describe_features(self, traj)
Return a list of dictionaries describing the LandmarkRMSD features. Parameters ---------- traj : mdtraj.Trajectory The trajectory to describe Returns ------- feature_descs : list of dict Dictionary describing each feature with the following infor...
12.608794
11.000287
1.146224
d = md.geometry.compute_distances(traj, self.pair_indices, periodic=self.periodic) return d ** self.exponent
def partial_transform(self, traj)
Featurize an MD trajectory into a vector space via pairwise atom-atom distances Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, shape=(n_samples, n_featu...
8.844834
12.62221
0.700736
feature_descs = [] top = traj.topology residue_indices = [[top.atom(i[0]).residue.index, top.atom(i[1]).residue.index] \ for i in self.atom_indices] aind = [] resseqs = [] resnames = [] for ind,resid_ids in enumerate(residue_i...
def describe_features(self, traj)
Return a list of dictionaries describing the atom pair features. Parameters ---------- traj : mdtraj.Trajectory The trajectory to describe Returns ------- feature_descs : list of dict Dictionary describing each feature with the following informat...
4.599507
3.796385
1.211549
feature_descs = [] for dihed_type in self.types: # TODO: Don't recompute dihedrals, just get the indices func = getattr(md, 'compute_%s' % dihed_type) # ainds is a list of four-tuples of atoms participating # in each dihedral aind_tup...
def describe_features(self, traj)
Return a list of dictionaries describing the dihedral features. Parameters ---------- traj : mdtraj.Trajectory The trajectory to describe Returns ------- feature_descs : list of dict Dictionary describing each feature with the following informati...
6.474621
5.366882
1.206403
x = [] for a in self.types: func = getattr(md, 'compute_%s' % a) _, y = func(traj) if self.sincos: x.extend([np.sin(y), np.cos(y)]) else: x.append(y) return np.hstack(x)
def partial_transform(self, traj)
Featurize an MD trajectory into a vector space via calculation of dihedral (torsion) angles Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, shape=(n_samp...
4.489706
5.531665
0.811637
feature_descs = [] for dihed_type in self.types: # TODO: Don't recompute dihedrals, just get the indices func = getattr(md, 'compute_%s' % dihed_type) # ainds is a list of four-tuples of atoms participating # in each dihedral aind_tupl...
def describe_features(self, traj)
Return a list of dictionaries describing the dihedral features. Parameters ---------- traj : mdtraj.Trajectory The trajectory to describe Returns ------- feature_descs : list of dict Dictionary describing each feature with the following informati...
4.835691
4.215271
1.147184
x = [] for a in self.types: func = getattr(md, 'compute_%s' % a) _, y = func(traj) res = vm.pdf(y[..., np.newaxis], loc=self.loc, kappa=self.kappa) #we reshape the results using a Fortran-like index order, #so...
def partial_transform(self, traj)
Featurize an MD trajectory into a vector space via calculation of soft-bins over dihedral angle space. Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, sha...
10.44646
11.608897
0.899867
ca = [a.index for a in traj.top.atoms if a.name == 'CA'] if len(ca) < 4: return np.zeros((len(traj), 0), dtype=np.float32) alpha_indices = np.array( [(ca[i - 1], ca[i], ca[i + 1], ca[i + 2]) for i in range(1, len(ca) - 2)]) result = md.compute_dihedrals...
def partial_transform(self, traj)
Featurize an MD trajectory into a vector space via calculation of dihedral (torsion) angles of alpha carbon backbone Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, d...
3.100617
2.899705
1.069287
feature_descs = [] # fill in the atom indices using just the first frame self.partial_transform(traj[0]) top = traj.topology if self.atom_indices is None: raise ValueError("Cannot describe features for " "trajectories with " ...
def describe_features(self, traj)
Return a list of dictionaries describing the dihedral features. Parameters ---------- traj : mdtraj.Trajectory The trajectory to describe Returns ------- feature_descs : list of dict Dictionary describing each feature with the following informati...
9.269378
7.721204
1.200509
feature_descs = [] _, mapping = md.geometry.sasa.shrake_rupley(traj, mode=self.mode, get_mapping=True) top = traj.topology if self.mode == "residue": resids = np.unique(mapping) resseqs = [top.residue(ri).resSeq for ri in resids] resnames = ...
def describe_features(self, traj)
Return a list of dictionaries describing the SASA features. Parameters ---------- traj : mdtraj.Trajectory The trajectory to describe Returns ------- feature_descs : list of dict Dictionary describing each feature with the following information ...
3.450904
3.055352
1.129462
if self.soft_min: distances, _ = md.compute_contacts(traj, self.contacts, self.scheme, self.ignore_nonprotein, soft_min=self.soft_min, soft_min_beta=s...
def partial_transform(self, traj)
Featurize an MD trajectory into a vector space derived from residue-residue distances Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, shape=(n_samples, n...
3.299571
3.680676
0.896458
feature_descs = [] # fill in the atom indices using just the first frame if self.soft_min: distances, residue_indices = md.compute_contacts(traj[0], self.contacts, self.scheme, ...
def describe_features(self, traj)
Return a list of dictionaries describing the contacts features. Parameters ---------- traj : mdtraj.Trajectory The trajectory to describe Returns ------- feature_descs : list of dict Dictionary describing each feature with the following informati...
3.376592
3.1327
1.077854
# The result vector fingerprints = np.zeros((traj.n_frames, self.n_features)) atom_pairs = np.zeros((len(self.solvent_indices), 2)) sigma = self.sigma for i, solute_i in enumerate(self.solute_indices): # For each solute atom, calculate distance to all solven...
def partial_transform(self, traj)
Featurize an MD trajectory into a vector space via calculation of solvent fingerprints Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, shape=(n_samples, ...
3.737714
3.838824
0.973661
# Optionally take only certain atoms if self.atom_indices is not None: p_traj = traj.atom_slice(self.atom_indices) else: p_traj = traj # Optionally superpose to a reference trajectory. if self.ref_traj is not None: p_traj.superpose(se...
def partial_transform(self, traj)
Featurize an MD trajectory into a vector space with the raw cartesian coordinates. Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, shape=(n_samples, n_fe...
3.655636
4.247855
0.860584
if self.index is not None: return traj[:, self.index] else: return traj[:, :self.first]
def partial_transform(self, traj)
Slice a single input array to select a subset of features. Parameters ---------- traj : np.ndarray, shape=(n_samples, n_features) A sample to slice. Returns ------- sliced_traj : np.ndarray shape=(n_samples, n_feature_subset) Slice of traj
5.570136
8.107821
0.687008
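A small sketch of what this slicing does on a plain array; `traj`, `index`, and the "first two" cutoff here are made-up stand-ins for the attributes the method reads:

```python
import numpy as np

traj = np.arange(12).reshape(3, 4)   # shape (n_samples=3, n_features=4)
index = [0, 2]

sliced = traj[:, index]              # keep features 0 and 2 -> shape (3, 2)
first_two = traj[:, :2]              # fallback path: the first `first` features
```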
MultiSequenceClusterMixin.fit(self, sequences) self.distances_ = self._split(self.distances_) return self
def fit(self, sequences, y=None)
Fit the kcenters clustering on the data Parameters ---------- sequences : list of array-like, each of shape [sequence_length, n_features] A list of multivariate timeseries, or ``md.Trajectory``. Each sequence may have a different length, but they all must have the ...
10.580265
20.947176
0.505093
if isinstance(param_grid, dict): param_grid = ParameterGrid(param_grid) elif not isinstance(param_grid, ParameterGrid): raise ValueError("param_grid must be a dict or ParameterGrid instance") # iterable with (model, sequence) as items iter_args = ((clone(model).set_params(**params...
def param_sweep(model, sequences, param_grid, n_jobs=1, verbose=0)
Fit a series of models over a range of parameters. Parameters ---------- model : msmbuilder.BaseEstimator An *instance* of an estimator to be used to fit data. sequences : list of array-like List of sequences, or a single sequence. Each sequence should be a 1D iterable o...
2.983254
3.249949
0.917939
if mode == 'r' and fmt is None: fmt = _guess_format(path) elif mode in 'wa' and fmt is None: raise ValueError('mode="%s", but no fmt. fmt=%s' % (mode, fmt)) if fmt == 'dir-npy': return NumpyDirDataset(path, mode=mode, verbose=verbose) elif fmt == 'mdtraj': return M...
def dataset(path, mode='r', fmt=None, verbose=False, **kwargs)
Open a dataset object MSMBuilder supports several dataset 'formats' for storing lists of sequences on disk. This function can also be used as a context manager. Parameters ---------- path : str The path to the dataset on the filesystem mode : {'r', 'w', 'a'} Open a dataset...
3.737693
3.246167
1.151417
if os.path.isdir(path): return 'dir-npy' if path.endswith('.h5') or path.endswith('.hdf5'): # TODO: Check for mdtraj .h5 file return 'hdf5' # TODO: What about a list of trajectories, e.g. from command line nargs='+' return 'mdtraj'
def _guess_format(path)
Guess the format of a dataset based on its filename / filenames.
7.497886
6.837175
1.096635
r = [] for c in string: if c.isdigit(): if r and isinstance(r[-1], int): r[-1] = r[-1] * 10 + int(c) else: r.append(int(c)) else: r.append(9 + ord(c)) return r
def _keynat(string)
A natural sort helper function for sort() and sorted() without using regular expression.
2.302766
2.290545
1.005335
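To see the ordering this key produces, here is the function re-assembled and exercised on invented filenames. Because letters are also stored as ints (9 + ord(c)), a digit run merges into the preceding entry's code, which still yields the intended natural order for names differing only in a trailing number:

```python
def _keynat(string):
    r = []
    for c in string:
        if c.isdigit():
            # digits accumulate into the previous entry (letters are ints here too),
            # so a numeric run compares by magnitude rather than character by character
            if r and isinstance(r[-1], int):
                r[-1] = r[-1] * 10 + int(c)
            else:
                r.append(int(c))
        else:
            r.append(9 + ord(c))
    return r

# Plain sort puts 'traj10' before 'traj2'; the natural key does not.
assert sorted(['traj10.npy', 'traj2.npy'], key=_keynat) == ['traj2.npy', 'traj10.npy']
```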
if isinstance(out_ds, str): out_ds = self.create_derived(out_ds, fmt=fmt) elif isinstance(out_ds, _BaseDataset): err = "Dataset must be opened in write mode." assert out_ds.mode in ('w', 'a'), err else: err = "Please specify a dataset path...
def transform_with(self, estimator, out_ds, fmt=None)
Call the partial_transform method of the estimator on this dataset Parameters ---------- estimator : object with ``partial_transform`` method This object will be used to transform this dataset into a new dataset. The estimator should be fitted prior to calling this...
3.219316
3.186921
1.010165
self.fit_with(estimator) return self.transform_with(estimator, out_ds, fmt=fmt)
def fit_transform_with(self, estimator, out_ds, fmt=None)
Create a new dataset with the given estimator. The estimator will be fit by this dataset, and then each trajectory will be transformed by the estimator. Parameters ---------- estimator : BaseEstimator This object will be fit and used to transform this dataset ...
2.77322
5.707515
0.485889
if self.max_landmarks is not None: if self.n_clusters > self.n_landmarks: self.n_landmarks = self.max_landmarks if self.n_landmarks is None: distances = pdist(X, self.metric) tree = linkage(distances, method=self.linkage) ...
def fit(self, X, y=None)
Compute agglomerative clustering. Parameters ---------- X : array-like, shape=(n_samples, n_features) Returns ------- self
1.980719
2.004216
0.988276
dists = cdist(X, self.landmarks_, self.metric) pfunc_name = self.ward_predictor if self.linkage == 'ward' else self.linkage try: pooling_func = POOLING_FUNCTIONS[pfunc_name] except KeyError: raise ValueError("linkage {} is not supported".format(pfun...
def predict(self, X)
Predict the closest cluster each sample in X belongs to. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to predict. Returns ------- labels : array, shape [n_samples,] Index of the cluster each samp...
3.621857
3.657675
0.990207
return_vect = np.zeros(mdl1.n_states_) for i in range(mdl1.n_states_): try: #there has to be a better way to do this mdl1_unmapped = mdl1.inverse_transform([i])[0][0] mdl2_mapped = mdl2.mapping_[mdl1_unmapped] return_vect[i] = mdl2.populations_[mdl2_...
def _mapped_populations(mdl1, mdl2)
Method to get the populations for states in mdl 1 from populations inferred in mdl 2. Resorts to 0 if population is not present.
3.362734
3.350452
1.003666
net_flux = copy.copy(net_flux) bottleneck_ind = net_flux[path[:-1], path[1:]].argmin() net_flux[path[bottleneck_ind], path[bottleneck_ind + 1]] = 0.0 return net_flux
def _remove_bottleneck(net_flux, path)
Internal function for modifying the net flux matrix by removing a particular edge, corresponding to the bottleneck of a particular path.
3.19778
2.754642
1.160869
net_flux = copy.copy(net_flux) net_flux[path[:-1], path[1:]] -= net_flux[path[:-1], path[1:]].min() # The above *should* make the bottleneck have zero flux, but # numerically that may not be the case, so just set it to zero # to be sure. bottleneck_ind = net_flux[path[:-1], path[1:]].arg...
def _subtract_path_flux(net_flux, path)
Internal function for modifying the net flux matrix by subtracting a path's flux from every edge in the path.
3.590731
3.450504
1.04064
p = len(S) assert S.shape == (p, p) alpha = (n-2)/(n*(n+2)) beta = ((p+1)*n - 2) / (n*(n+2)) trace_S2 = np.sum(S*S) # np.trace(S.dot(S)) U = ((p * trace_S2 / np.trace(S)**2) - 1) rho = min(alpha + beta/U, 1) F = (np.trace(S) / p) * np.eye(p) return (1-rho)*S + rho*F, rho
def rao_blackwell_ledoit_wolf(S, n)
Rao-Blackwellized Ledoit-Wolf shrinkage estimator of the covariance matrix. Parameters ---------- S : array, shape=(n, n) Sample covariance matrix (e.g. estimated with np.cov(X.T)) n : int Number of data points. Returns ------- sigma : array, shape=(n, n) shrinkage...
4.764742
5.327794
0.894318
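A self-contained restatement of the shrinkage formula from the code cell above, plus a toy usage; the random data and sizes are arbitrary:

```python
import numpy as np

def rao_blackwell_ledoit_wolf(S, n):
    # Shrink the p x p sample covariance S toward the scaled identity target F.
    p = len(S)
    alpha = (n - 2) / (n * (n + 2))
    beta = ((p + 1) * n - 2) / (n * (n + 2))
    U = p * np.sum(S * S) / np.trace(S) ** 2 - 1
    rho = min(alpha + beta / U, 1)
    F = (np.trace(S) / p) * np.eye(p)
    return (1 - rho) * S + rho * F, rho

rng = np.random.RandomState(0)
X = rng.randn(20, 5)                          # n=20 observations, p=5 features
sigma, rho = rao_blackwell_ledoit_wolf(np.cov(X.T), n=20)
```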
self._initialized = False check_iter_of_sequences(sequences, max_iter=3) # we might be lazy-loading for X in sequences: self._fit(X) if self.n_sequences_ == 0: raise ValueError('All sequences were shorter than ' 'the lag tim...
def fit(self, sequences, y=None)
Fit the model with a collection of sequences. This method is not online. Any state accumulated from previous calls to fit() or partial_fit() will be cleared. For online learning, use `partial_fit`. Parameters ---------- sequences: list of array-like, each of shape (n_s...
8.946213
10.825713
0.826386
check_iter_of_sequences(sequences, max_iter=3) # we might be lazy-loading sequences_new = [] for X in sequences: X = array2d(X) if self.means_ is not None: X = X - self.means_ X_transformed = np.dot(X, self.components_.T) ...
def transform(self, sequences)
Apply the dimensionality reduction on X. Parameters ---------- sequences: list of array-like, each of shape (n_samples_i, n_features) Training data, where n_samples_i is the number of samples in sequence i and n_features is the number of features. Returns ...
9.223295
9.525697
0.968254
# force shrinkage to be calculated self.covariance_ return .format(n_components=self.n_components, lag_time=self.lag_time, shrinkage=self.shrinkage_, kinetic_mapping=self.kinetic_mapping, timescales=self.timescales_[:5], eigenvalues=self.eigenvalues_[:5])
def summarize(self)
Some summary information.
8.894125
8.545626
1.040781
if self._sliding_window: return [X[k::self._lag_time] for k in range(self._lag_time) for X in X_all] else: return [X[::self._lag_time] for X in X_all]
def transform(self, X_all, y=None)
Subsample several time series. Parameters ---------- X_all : list(np.ndarray) List of feature time series Returns ------- features : list(np.ndarray), length = len(X_all) The subsampled trajectories.
3.303708
3.127441
1.056362
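The two subsampling modes above differ in whether the offsets are kept; a toy illustration with invented data and lag:

```python
import numpy as np

X_all = [np.arange(10).reshape(-1, 1)]       # one toy feature trajectory
lag = 3

# sliding window: one subsampled series per offset k in [0, lag)
windowed = [X[k::lag] for k in range(lag) for X in X_all]   # 3 series
# no sliding window: a single strided series per input
strided = [X[::lag] for X in X_all]                         # 1 series
```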
us, lvs, rvs = self._get_eigensystem() # make sure to leave off equilibrium distribution timescales = - self.lag_time / np.log(us[:, 1:]) return timescales
def all_timescales_(self)
Implied relaxation timescales for each sample in the ensemble Returns ------- timescales : array-like, shape = (n_samples, n_timescales,) The longest implied relaxation timescales of each sample in the ensemble of transition matrices, expressed in units of ti...
23.120064
20.610544
1.121759
last_hash = None last_hash_count = 0 arr = yield for i in xrange(maxiter): arr = yield i if arr is not None: hsh = hashlib.sha1(arr.view(np.uint8)).hexdigest() if last_hash == hsh: last_hash_count += 1 else: la...
def iterate_tracker(maxiter, max_nc, verbose=False)
Generator that breaks after maxiter, or after the same array has been sent in more max_nc times in a row.
3.455852
3.309603
1.044189
assert self._initialized V = self.eigenvectors_ # Note: How do we deal with regularization parameters like gamma # here? I'm not sure. Should C and S be estimated using self's # regularization parameters? m2 = self.__class__(shrinkage=self.shrinkage, n_componen...
def score(self, sequences, y=None)
Score the model on new data using the generalized matrix Rayleigh quotient Parameters ---------- sequences : list of array, each of shape (n_samples_i, n_features) Test data. A list of sequences in a feature space, each of which is a 2D array of possibly different length...
5.296618
4.94315
1.071507
super(PCCA, self).fit(sequences, y=y) self._do_lumping() return self
def fit(self, sequences, y=None)
Fit a PCCA lumping model using a sequence of cluster assignments. Parameters ---------- sequences : list(np.ndarray(dtype='int')) List of arrays of cluster assignments y : None Unused, present for sklearn compatibility only. Returns ------- ...
10.40243
7.308234
1.423385
# Extract non-perron eigenvectors right_eigenvectors = self.right_eigenvectors_[:, 1:] assert self.n_states_ > 0 microstate_mapping = np.zeros(self.n_states_, dtype=int) def spread(x): return x.max() - x.min() for i in range(self.n_macrostates - 1...
def _do_lumping(self)
Do the PCCA lumping. Notes ------- 1. Iterate over the eigenvectors, starting with the slowest. 2. Calculate the spread of that eigenvector within each existing macrostate. 3. Pick the macrostate with the largest eigenvector spread. 4. Split the macrostat...
4.352676
3.579731
1.215922
params = msm.get_params() lumper = cls(n_macrostates=n_macrostates, objective_function=objective_function, **params) lumper.transmat_ = msm.transmat_ lumper.populations_ = msm.populations_ lumper.mapping_ = msm.mapping_ lumper.countsmat_ = m...
def from_msm(cls, msm, n_macrostates, objective_function=None)
Create and fit lumped model from pre-existing MSM. Parameters ---------- msm : MarkovStateModel The input microstate msm to use. n_macrostates : int The number of macrostates Returns ------- lumper : cls The fit PCCA(+) object...
2.73164
2.841434
0.96136
num_micro, num_eigen = right_eigenvectors.shape A, chi, mapping = calculate_fuzzy_chi(alpha, square_map, right_eigenvectors) # If current point is infeasible or leads to degenerate lumping. if (len(np.unique(mapping)) != right_eigenvectors.shape[1] or ...
def metastability(alpha, T, right_eigenvectors, square_map, pi)
Return the metastability PCCA+ objective function. Parameters ---------- alpha : ndarray Parameters of objective function (e.g. flattened A) T : csr sparse matrix Transition matrix right_eigenvectors : ndarray The right eigenvectors. square_map : ndarray Mapping ...
7.069219
7.248174
0.97531
A, chi, mapping = calculate_fuzzy_chi(alpha, square_map, right_eigenvectors) # If current point is infeasible or leads to degenerate lumping. if (len(np.unique(mapping)) != right_eigenvectors.shape[1] or has_constraint_violation(A, right_eigenvect...
def crispness(alpha, T, right_eigenvectors, square_map, pi)
Return the crispness PCCA+ objective function. Parameters ---------- alpha : ndarray Parameters of objective function (e.g. flattened A) T : csr sparse matrix Transition matrix right_eigenvectors : ndarray The right eigenvectors. square_map : ndarray Mapping from...
8.769701
9.164303
0.956941
N = A.shape[0] flat_map = [] for i in range(1, N): for j in range(1, N): flat_map.append([i, j]) flat_map = np.array(flat_map) square_map = np.zeros(A.shape, 'int') for k in range((N - 1) ** 2): i, j = flat_map[k] square_map[i, j] = k return flat...
def get_maps(A)
Get mappings from the square array A to the flat vector of parameters alpha. Helper function for PCCA+ optimization. Parameters ---------- A : ndarray The transformation matrix A. Returns ------- flat_map : ndarray Mapping from flat indices (k) to square (i,j) indices....
2.493094
2.155917
1.156396
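For a 3 x 3 transformation matrix the mapping is small enough to write out; this sketch inlines the function above to show it:

```python
import numpy as np

def get_maps(A):
    # Only the (N-1) x (N-1) lower-right block of A is free; map it to a
    # flat parameter vector alpha and back.
    N = A.shape[0]
    flat_map = np.array([[i, j] for i in range(1, N) for j in range(1, N)])
    square_map = np.zeros(A.shape, 'int')
    for k in range((N - 1) ** 2):
        i, j = flat_map[k]
        square_map[i, j] = k
    return flat_map, square_map

flat_map, square_map = get_maps(np.zeros((3, 3)))
# flat_map -> [[1, 1], [1, 2], [2, 1], [2, 2]]; square_map[2, 1] == 2
```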
lhs = 1 - A[0, 1:].sum() rhs = dot(right_eigenvectors[:, 1:], A[1:, 0]) rhs = -1 * rhs.min() if abs(lhs - rhs) > epsilon: return True else: return False
def has_constraint_violation(A, right_eigenvectors, epsilon=1E-8)
Check for constraint violations in transformation matrix. Parameters ---------- A : ndarray The transformation matrix. right_eigenvectors : ndarray The right eigenvectors. epsilon : float, optional Tolerance of constraint violation. Returns ------- truth : bool ...
3.997733
4.294612
0.930872
num_micro, num_eigen = right_eigenvectors.shape index = np.zeros(num_eigen, 'int') # first vertex: row with largest norm index[0] = np.argmax( [norm(right_eigenvectors[i]) for i in range(num_micro)]) ortho_sys = right_eigenvectors - np.outer(np.ones(num_micro), ...
def index_search(right_eigenvectors)
Find simplex structure in eigenvectors to begin PCCA+. Parameters ---------- right_eigenvectors : ndarray Right eigenvectors of transition matrix Returns ------- index : ndarray Indices of simplex
3.197362
3.246283
0.98493
num_micro, num_eigen = right_eigenvectors.shape A = A.copy() # compute 1st column of A by row sum condition A[1:, 0] = -1 * A[1:, 1:].sum(1) # compute 1st row of A by maximum condition A[0] = -1 * dot(right_eigenvectors[:, 1:].real, A[1:]).min(0) # rescale A to be in the feasible se...
def fill_A(A, right_eigenvectors)
Construct feasible initial guess for transformation matrix A. Parameters ---------- A : ndarray Possibly non-feasible transformation matrix. right_eigenvectors : ndarray Right eigenvectors of transition matrix Returns ------- A : ndarray Feasible transformation ma...
5.217433
5.523253
0.94463
# Convert parameter vector into matrix A A = to_square(alpha, square_map) # Make A feasible. A = fill_A(A, right_eigenvectors) # Calculate the fuzzy membership matrix. chi_fuzzy = np.dot(right_eigenvectors, A) # Calculate the microstate mapping. mapping = np.argmax(chi_fuzzy, 1) ...
def calculate_fuzzy_chi(alpha, square_map, right_eigenvectors)
Calculate the membership matrix (chi) from parameters alpha. Parameters ---------- alpha : ndarray Parameters of objective function (e.g. flattened A) square_map : ndarray Mapping from square indices (i,j) to flat indices (k). right_eigenvectors : ndarray The right eigenvect...
5.440626
3.635806
1.496402
right_eigenvectors = self.right_eigenvectors_[:, :self.n_macrostates] index = index_search(right_eigenvectors) # compute transformation matrix A as initial guess for local # optimization (maybe not feasible) A = right_eigenvectors[index, :] A = inv(A) A...
def _do_lumping(self)
Perform PCCA+ algorithm by optimizing transformation matrix A. Creates the following member variables: ------- A : ndarray The transformation matrix. chi : ndarray The membership matrix microstate_mapping : ndarray Mapping from microstates to ...
5.591986
4.613251
1.212157
right_eigenvectors = self.right_eigenvectors_[:, :self.n_macrostates] flat_map, square_map = get_maps(A) alpha = to_flat(1.0 * A, flat_map) def obj(x): return -1 * self._objective_function( x, self.transmat_, right_eigenvectors, square_map, ...
def _optimize_A(self, A)
Find optimal transformation matrix A by minimization. Parameters ---------- A : ndarray The transformation matrix A. Returns ------- A : ndarray The transformation matrix.
5.091238
5.065453
1.00509
S = np.zeros((n, n)) pi = np.exp(theta[-n:]) pi = pi / pi.sum() _ratematrix.build_ratemat(theta, n, S, which='S') u, lv, rv = map(np.asarray, _ratematrix.eig_K(S, n, pi, 'S')) order = np.argsort(-u) u = u[order[:k]] lv = lv[:, order[:k]] rv = rv[:, order[:k]] return _norma...
def _solve_ratemat_eigensystem(theta, k, n)
Find the dominant eigenpairs of a reversible rate matrix (master equation) Parameters ---------- theta : ndarray, shape=(n_params,) The free parameters of the rate matrix k : int The number of eigenpairs to find n : int The number of states Notes ----- Norma...
4.907184
5.116841
0.959026
u, lv, rv = scipy.linalg.eig(transmat, left=True, right=True) order = np.argsort(-np.real(u)) u = np.real_if_close(u[order[:k]]) lv = np.real_if_close(lv[:, order[:k]]) rv = np.real_if_close(rv[:, order[:k]]) return _normalize_eigensystem(u, lv, rv)
def _solve_msm_eigensystem(transmat, k)
Find the dominant eigenpairs of an MSM transition matrix Parameters ---------- transmat : np.ndarray, shape=(n_states, n_states) The transition matrix k : int The number of eigenpairs to find. Notes ----- Normalize the left (:math:`\phi`) and right (:math:`\psi`) eigenfun...
2.326972
2.361149
0.985526
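On a concrete 2-state transition matrix the dominant eigenpair is easy to check; this sketch follows the same eig-then-sort steps as the code cell above, leaving out the final _normalize_eigensystem call:

```python
import numpy as np
import scipy.linalg

transmat = np.array([[0.9, 0.1],
                     [0.2, 0.8]])
u, lv, rv = scipy.linalg.eig(transmat, left=True, right=True)
order = np.argsort(-np.real(u))       # sort eigenvalues in descending order
k = 2
u = np.real_if_close(u[order[:k]])    # u[0] == 1.0, the stationary eigenvalue
lv = np.real_if_close(lv[:, order[:k]])
rv = np.real_if_close(rv[:, order[:k]])
```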
# first normalize the stationary distribution separately lv[:, 0] = lv[:, 0] / np.sum(lv[:, 0]) for i in range(1, lv.shape[1]): # the remaining left eigenvectors to satisfy # <\phi_i, \phi_i>_{\mu^{-1}} = 1 lv[:, i] = lv[:, i] / np.sqrt(np.dot(lv[:, i], lv[:, i] / lv[:, 0])) ...
def _normalize_eigensystem(u, lv, rv)
Normalize the eigenvectors of a reversible Markov state model according to our preferred scheme.
3.562584
3.3977
1.048528
n_states_input = counts.shape[0] n_components, component_assignments = csgraph.connected_components( csr_matrix(counts >= weight), connection="strong") populations = np.array(counts.sum(0)).flatten() component_pops = np.array([populations[component_assignments == i].sum() for ...
def _strongly_connected_subgraph(counts, weight=1, verbose=True)
Trim a transition count matrix down to its maximal strongly ergodic subgraph. From the counts matrix, we define a graph where there exists a directed edge between two nodes, `i` and `j` if `counts[i][j] > weight`. We then find the nodes belonging to the largest strongly connected subgraph of this g...
3.847552
3.705092
1.03845
return {k: dict2.get(v) for k, v in dict1.items() if v in dict2}
def _dict_compose(dict1, dict2)
Example ------- >>> dict1 = {'a': 0, 'b': 1, 'c': 2} >>> dict2 = {0: 'A', 1: 'B'} >>> _dict_compose(dict1, dict2) {'a': 'A', 'b': 'B'}
2.982762
3.333877
0.894683
if mode not in ['clip', 'fill']: raise ValueError('mode must be one of ["clip", "fill"]: %s' % mode) sequence = np.asarray(sequence) if sequence.ndim != 1: raise ValueError("Each sequence must be 1D") f = np.vectorize(lambda k: self.mapping_.get(k, np.na...
def partial_transform(self, sequence, mode='clip')
Transform a sequence to internal indexing Recall that `sequence` can be arbitrary labels, whereas ``transmat_`` and ``countsmat_`` are indexed with integers between 0 and ``n_states - 1``. This method maps a set of sequences from the labels onto this internal indexing. Paramet...
3.064626
2.876623
1.065355
if mode not in ['clip', 'fill']: raise ValueError('mode must be one of ["clip", "fill"]: %s' % mode) sequences = list_of_1d(sequences) result = [] for y in sequences: if mode == 'fill': result.append(self.partial_transform(y, mode)) ...
def transform(self, sequences, mode='clip')
Transform a list of sequences to internal indexing Recall that `sequences` can be arbitrary labels, whereas ``transmat_`` and ``countsmat_`` are indexed with integers between 0 and ``n_states - 1``. This method maps a set of sequences from the labels onto this internal indexing. ...
3.265855
4.025589
0.811274
ec_is_str = isinstance(self.ergodic_cutoff, str) if ec_is_str and self.ergodic_cutoff.lower() == 'on': if self.sliding_window: return 1.0 / self.lag_time else: return 1.0 elif ec_is_str and self.ergodic_cutoff.lower() == 'off': ...
def _parse_ergodic_cutoff(self)
Get a numeric value from the ergodic_cutoff input, which can be 'on' or 'off'.
2.653208
2.237772
1.185647
sequences = list_of_1d(sequences) inverse_mapping = {v: k for k, v in self.mapping_.items()} f = np.vectorize(inverse_mapping.get) result = [] for y in sequences: uq = np.unique(y) if not np.all(np.logical_and(0 <= uq, uq < self.n_states_)): ...
def inverse_transform(self, sequences)
Transform a list of sequences from internal indexing into labels Parameters ---------- sequences : list List of sequences, each of which is one-dimensional array of integers in ``0, ..., n_states_ - 1``. Returns ------- sequences : list ...
3.588136
3.687976
0.972928
random = check_random_state(random_state) r = random.rand(1 + n_steps) if state is None: initial = np.sum(np.cumsum(self.populations_) < r[0]) elif hasattr(state, '__len__') and len(state) == self.n_states_: initial = np.sum(np.cumsum(state) < r[0]) ...
def sample_discrete(self, state=None, n_steps=100, random_state=None)
r"""Generate a random sequence of states by propagating the model using discrete time steps given by the model lagtime. Parameters ---------- state : {None, ndarray, label} Specify the starting state for the chain. ``None`` Choose the initial sta...
3.401778
3.441306
0.988514
if not any([isinstance(seq, collections.Iterable) for seq in sequences]): sequences = [sequences] random = check_random_state(random_state) selected_pairs_by_state = [] for state in range(self.n_states_): all_frames = [np.where(a == ...
def draw_samples(self, sequences, n_samples, random_state=None)
Sample conformations for a sequences of states. Parameters ---------- sequences : list or list of lists A sequence or list of sequences, in which each element corresponds to a state label. n_samples : int How many samples to return for any given state...
3.000938
2.607856
1.15073
model = LandmarkAgglomerative(linkage='ward', n_clusters=self.n_macrostates, metric=self.metric, n_landmarks=self.n_landmarks, landmark_strategy=self.l...
def _do_lumping(self)
Do the MVCA lumping.
4.487997
4.345141
1.032877
params = msm.get_params() lumper = cls(n_macrostates, metric=metric, fit_only=fit_only, n_landmarks=n_landmarks, landmark_strategy=landmark_strategy, random_state=random_state, **params) lumper.transmat_ = msm.transmat_ lumper.populations_ = ms...
def from_msm(cls, msm, n_macrostates, metric=js_metric_array, n_landmarks=None, landmark_strategy='stride', random_state=None, get_linkage=False, fit_only=False)
Create and fit lumped model from pre-existing MSM. Parameters ---------- msm : MarkovStateModel The input microstate msm to use. n_macrostates : int The number of macrostates get_linkage : boolean, default=False Whether to return linkage and e...
2.491796
2.340312
1.064728
not_F = np.argsort(np.abs(x))[:-k] x[not_F] = 0 return x
def _truncate(self, x, k)
given a vector x, leave its top-k absolute-value entries alone, and set the rest to 0
8.112921
3.435658
2.361387
''' given a matrix A, an initial guess x0, and a maximum cardinality k, find the best k-sparse approximation to its dominant eigenvector References ---------- [1] Yuan, X-T. and Zhang, T. "Truncated Power Method for Sparse Eigenvalue Problems." Journal of Machine...
def _truncated_power_method(self, A, x0, k, max_iter=10000, thresh=1e-8)
given a matrix A, an initial guess x0, and a maximum cardinality k, find the best k-sparse approximation to its dominant eigenvector References ---------- [1] Yuan, X-T. and Zhang, T. "Truncated Power Method for Sparse Eigenvalue Problems." Journal of Machine Learning Research. ...
3.806417
1.665096
2.286004
nonzeros = np.sum(np.abs(self.eigenvectors_) > 0, axis=0) active = '[%s]' % ', '.join(['%d/%d' % (n, self.n_features) for n in nonzeros[:n_timescales_to_report]]) return .format(n_components=self.n_components, shrinkage=self.shrinkage_, lag_time=self.lag_time, kinetic_mappin...
def summarize(self, n_timescales_to_report=5)
Some summary information.
3.435649
3.374477
1.018128
labels, inertia = libdistance.assign_nearest( X, self.cluster_centers_, metric=self.metric) return labels
def predict(self, X)
Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : array-like, shape...
14.765899
18.474138
0.799274
MultiSequenceClusterMixin.fit(self, sequences) self.cluster_center_indices_ = self._split_indices(self.cluster_center_indices_) return self
def fit(self, sequences, y=None)
Fit the kcenters clustering on the data Parameters ---------- sequences : list of array-like, each of shape [sequence_length, n_features] A list of multivariate timeseries, or ``md.Trajectory``. Each sequence may have a different length, but they all must have the ...
6.911815
10.889477
0.634724
# X = check_array(X) t0 = time.time() self.X = X self._run() t1 = time.time() # print("APM clustering Time Cost:", t1 - t0) return self
def fit(self, X, y=None)
Perform clustering. Parameters ----------- X : array-like, shape=[n_samples, n_features] Samples to cluster.
5.942219
6.567494
0.904792
# print("Doing APM Clustering...") # Start looping for maxIter times n_macrostates = 1 # initialized as 1 because no macrostate exist in loop 0 metaQ = -1.0 prevQ = -1.0 global_maxQ = -1.0 local_maxQ = -1.0 for iter in range(self.max_iter): ...
def _run(self)
Do the APM lumping.
4.241953
4.036024
1.051023
if pbar.currval == 0: return 'ETA: --:--:--' elif pbar.finished: return 'Time: %s' % self.format_time(pbar.seconds_elapsed) else: elapsed = pbar.seconds_elapsed currval1, elapsed1 = self._update_samples(pbar.currval, elapsed) ...
def update(self, pbar)
Updates the widget to show the ETA or total time when finished.
3.250688
3.062123
1.06158
if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: # =~ 0 scaled = power = 0 else: speed = pbar.currval / pbar.seconds_elapsed power = int(math.log(speed, 1000)) scaled = speed / 1000.**power return self.FORMAT % (scaled, self.PREFIX...
def update(self, pbar)
Updates the widget with the current SI prefixed speed.
6.134021
5.130699
1.195553
if keep_atoms is None: keep_atoms = ATOM_NAMES top, bonds = reference_traj.top.to_dataframe() if keep_atoms is not None: atom_indices = top[top.name.isin(keep_atoms) == True].index.values if exclude_atoms is not None: atom_indices = top[top.name.isin(exclude_atoms) == Fal...
def get_atompair_indices(reference_traj, keep_atoms=None, exclude_atoms=None, reject_bonded=True)
Get a list of acceptable atom pairs. Parameters ---------- reference_traj : mdtraj.Trajectory Trajectory to grab atom pairs from keep_atoms : np.ndarray, dtype=string, optional Select only these atom names. Defaults to N, CA, CB, C, O, H exclude_atoms : np.ndarray, dtype=string, opt...
2.646785
2.549942
1.037979
fixed_indices = list(trajs.keys()) trajs = [trajs[k][:, [dimension]] for k in fixed_indices] txx = np.concatenate([traj[:,0] for traj in trajs]) if scheme == "linear": spaced_points = np.linspace(np.min(txx), np.max(txx), n_frames) spaced_points = spaced_points[:, np.newaxis] e...
def sample_dimension(trajs, dimension, n_frames, scheme="linear")
Sample a dimension of the data. This method uses one of 3 schemes. All other dimensions are ignored, so this might result in a really "jumpy" sampled trajectory. Parameters ---------- trajs : dictionary of np.ndarray Dictionary of tica-transformed trajectories, keyed by arbitrary keys. ...
2.554786
2.624496
0.973439
X = array2d(X) self.n_features = X.shape[1] self.n_bins = self.n_bins_per_feature ** self.n_features if self.min is None: min = np.min(X, axis=0) elif isinstance(self.min, numbers.Number): min = self.min * np.ones(self.n_features) else: ...
def fit(self, X, y=None)
Fit the grid Parameters ---------- X : array-like, shape = [n_samples, n_features] Data points Returns ------- self
1.777171
1.884676
0.942959
if np.any(X < self.grid[:, 0]) or np.any(X > self.grid[:, -1]): raise ValueError('data out of min/max bounds') binassign = np.zeros((self.n_features, len(X)), dtype=int) for i in range(self.n_features): binassign[i] = np.digitize(X[:, i], self.grid[i]) - 1 ...
def predict(self, X)
Get the index of the grid cell containing each sample in X Parameters ---------- X : array-like, shape = [n_samples, n_features] New data Returns ------- y : array, shape = [n_samples,] Index of the grid cell containing each sample
3.155688
3.526485
0.894854
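np.digitize does the per-feature binning in the cell above; a one-feature toy example with an invented grid:

```python
import numpy as np

grid = np.array([[0.0, 0.5, 1.0]])              # one feature, two bins
X = np.array([[0.1], [0.7]])

binassign = np.digitize(X[:, 0], grid[0]) - 1   # -> array([0, 1])
```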
return np.concatenate([self._dim_match(traj) / norm for traj, norm in zip(traj_zip, self._norms)], axis=1)
def partial_transform(self, traj_zip)
Featurize an MD trajectory into a vector space. Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, shape=(n_samples, n_features) A featurized trajectory...
7.646486
12.839734
0.595533
lens = [len(trajs) for trajs in trajs_tuple] if len(set(lens)) > 1: err = "Each dataset must be the same length. You gave: {}" err = err.format(lens) raise ValueError(err)
def _check_same_length(self, trajs_tuple)
Check that the datasets are the same length
3.168447
2.774047
1.142175
return [self.partial_transform(traj_zip) for traj_zip in zip(*trajs_tuple)]
def transform(self, trajs_tuple, y=None)
Featurize several trajectories. Parameters ---------- traj_list : list(mdtraj.Trajectory) Trajectories to be featurized. Returns ------- features : list(np.ndarray), length = len(traj_list) The featurized trajectories. features[i] is the featu...
7.48658
9.87602
0.758056
n_states = np.shape(populations)[0] if sinks is None: # Use Thm 11.16 in [1] limiting_matrix = np.vstack([populations] * n_states) # Fundamental matrix fund_matrix = scipy.linalg.inv(np.eye(n_states) - tprob + limiting_matrix) ...
def _mfpts(tprob, populations, sinks, lag_time)
Gets the Mean First Passage Time (MFPT) for all states to a *set* of sinks. Parameters ---------- tprob : np.ndarray Transition matrix populations : np.ndarray, (n_states,) MSM populations sinks : array_like, int, optional Indices of the sink states. There are two use-ca...
4.642923
4.168859
1.113716
def inner(s): if s == '': return s first, last = os.path.splitext(s) return first + suffix return inner
def exttype(suffix)
Type for use with argument(... type=) that will force a specific suffix Especially for output files, so that we can enforce the use of appropriate file-type specific suffixes
5.289034
6.477236
0.816557
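Re-assembled and exercised (the filenames are invented):

```python
import os

def exttype(suffix):
    # Factory for an argparse `type=` callable that forces `suffix`.
    def inner(s):
        if s == '':
            return s
        first, last = os.path.splitext(s)
        return first + suffix
    return inner

to_h5 = exttype('.h5')
assert to_h5('output.txt') == 'output.h5'
assert to_h5('') == ''
```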
if hasattr(klass, '_init_argspec'): return _shim_argspec(klass._init_argspec()) elif PY2: return _shim_argspec(inspect.getargspec(klass.__init__)) else: return inspect.signature(klass.__init__)
def get_init_argspec(klass)
Wrapper around inspect.getargspec(klass.__init__) which, for cython classes uses an auxiliary '_init_argspec' method, since they don't play nice with the inspect module. By convention, a cython class should define the classmethod _init_argspec that, when called, returns what ``inspect.getargspec`` woul...
3.100283
3.368975
0.920245
assert cls.klass is not None sig = get_init_argspec(cls.klass) doc = numpydoc.docscrape.ClassDoc(cls.klass) # mapping from the name of the argument to the helptext helptext = {d[0]: ' '.join(d[2]) for d in doc['Parameters']} # mapping from the name of the arg...
def _register_arguments(cls, subparser)
this is a special method that gets called to construct the argparse parser. it uses the python inspect module to introspect the __init__ method of `klass`, and add an argument for each parameter. it also uses numpydoc to read the class docstring of klass (which is supposed to be in numpydoc ...
4.271119
4.133193
1.03337
if not os.path.exists(fn): return backnum = 1 backfmt = "{fn}.bak.{backnum}" trial_fn = backfmt.format(fn=fn, backnum=backnum) while os.path.exists(trial_fn): backnum += 1 trial_fn = backfmt.format(fn=fn, backnum=backnum) warnings.warn("{fn} exists. Moving it to {n...
def backup(fn)
If ``fn`` exists, rename it and issue a warning This function will rename an existing filename {fn}.bak.{i} where i is the smallest integer that gives a filename that doesn't exist. This naively uses a while loop to find such a filename, so there shouldn't be too many existing backups or performance wi...
2.632516
2.458804
1.070649
if isinstance(key, tuple): paths = [dfmt.format(k) for k in key[:-1]] paths += [ffmt.format(key[-1])] return os.path.join(*paths) else: return ffmt.format(key)
def default_key_to_path(key, dfmt="{}", ffmt="{}.npy")
Turn an arbitrary python object into a filename This uses string formatting, so make sure your keys map to unique strings. If the key is a tuple, it will join each element of the tuple with '/', resulting in a filesystem hierarchy of files.
2.017959
2.179191
0.926013
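Re-assembled with two example keys (the key values are invented); note the tuple form builds a directory hierarchy:

```python
import os

def default_key_to_path(key, dfmt="{}", ffmt="{}.npy"):
    # Tuples map to nested directories; anything else maps to a single file.
    if isinstance(key, tuple):
        paths = [dfmt.format(k) for k in key[:-1]]
        paths += [ffmt.format(key[-1])]
        return os.path.join(*paths)
    return ffmt.format(key)

assert default_key_to_path('traj0') == 'traj0.npy'
assert default_key_to_path(('run1', 7)) == os.path.join('run1', '7.npy')
```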
top_fns = set(meta['top_fn']) tops = {} for tfn in top_fns: tops[tfn] = md.load_topology(tfn) return tops
def preload_tops(meta)
Load all topology files into memory. This might save some performance compared to re-parsing the topology file for each trajectory you try to load in. Typically, you have far fewer (possibly 1) topologies than trajectories Parameters ---------- meta : pd.DataFrame The DataFrame of meta...
3.983028
2.841919
1.401527
top_fns = set(meta['top_fn']) if len(top_fns) != 1: raise ValueError("More than one topology is used in this project!") return md.load_topology(top_fns.pop())
def preload_top(meta)
Load one topology file into memory. This function checks to make sure there's only one topology file in play. When sampling frames, you have to have all the same topology to concatenate. Parameters ---------- meta : pd.DataFrame The DataFrame of metadata with a column named 'top_fn' ...
5.92475
3.842821
1.541771
tops = preload_tops(meta) for i, row in meta.iterrows(): yield i, md.join(md.iterload(row['traj_fn'], top=tops[row['top_fn']], stride=stride), discard_overlapping_frames=False, ...
def itertrajs(meta, stride=1)
Load one mdtraj trajectory at a time and yield it. MDTraj does striding badly. It reads in the whole trajectory and then performs a stride. We join(iterload) to conserve memory.
8.97506
6.679388
1.343695
if pandas_kwargs is None: pandas_kwargs = {} kwargs_with_defaults = { 'classes': ('table', 'table-condensed', 'table-hover'), } kwargs_with_defaults.update(**pandas_kwargs) env = Environment(loader=PackageLoader('msmbuilder', 'io_templates')) templ = env.get_template("twit...
def render_meta(meta, fn="meta.pandas.html", title="Project Metadata - MSMBuilder", pandas_kwargs=None)
Render a metadata dataframe as an html webpage for inspection. Parameters ---------- meta : pd.Dataframe The DataFrame of metadata fn : str Output filename (should end in html) title : str Page title pandas_kwargs : dict Arguments to be passed to pandas
3.354024
3.5604
0.942036
backup(fn) with open(fn, 'wb') as f: pickle.dump(obj, f)
def save_generic(obj, fn)
Save Python objects, including msmbuilder Estimators. This is a convenience wrapper around Python's ``pickle`` serialization scheme. This protocol is backwards-compatible among Python versions, but may not be "forwards-compatible". A file saved with Python 3 won't be able to be opened under Python 2. ...
3.152029
6.550192
0.481212
if key_to_path is None: key_to_path = default_key_to_path validate_keys(meta.index, key_to_path) backup(fn) os.mkdir(fn) for k in meta.index: v = trajs[k] npy_fn = os.path.join(fn, key_to_path(k)) os.makedirs(os.path.dirname(npy_fn), exist_ok=True) np.sa...
def save_trajs(trajs, fn, meta, key_to_path=None)
Save trajectory-like data Data is stored in individual numpy binary files in the directory given by ``fn``. This method will automatically back up existing files named ``fn``. Parameters ---------- trajs : dict of (key, np.ndarray) Dictionary of trajectory-like ndarrays keyed on ``me...
2.530778
2.486887
1.017649
if key_to_path is None: key_to_path = default_key_to_path if isinstance(meta, str): meta = load_meta(meta_fn=meta) trajs = {} for k in meta.index: trajs[k] = np.load(os.path.join(fn, key_to_path(k))) return meta, trajs
def load_trajs(fn, meta='meta.pandas.pickl', key_to_path=None)
Load trajectory-like data Data is expected to be stored as if saved by ``save_trajs``. This method finds trajectories based on the ``meta`` dataframe. If you remove a file (trajectory) from disk, be sure to remove its row from the dataframe. If you remove a row from the dataframe, be aware that th...
2.234631
2.434287
0.917982
cdists, cinds = self._kdtree.query(x, k, p, distance_upper_bound) return cdists, self._split_indices(cinds)
def query(self, x, k=1, p=2, distance_upper_bound=np.inf)
Query the kd-tree for nearest neighbors Parameters ---------- x : array_like, last dimension self.m An array of points to query. k : int, optional The number of nearest neighbors to return. eps : nonnegative float, optional Return approximate ...
4.434827
7.900272
0.561351
clengths = np.append([0], np.cumsum(self.__lengths)) mapping = np.zeros((clengths[-1], 2), dtype=int) for traj_i, (start, end) in enumerate(zip(clengths[:-1], clengths[1:])): mapping[start:end, 0] = traj_i mapping[start:end, 1] = np.arange(end - start) re...
def _split_indices(self, concat_inds)
Take indices in 'concatenated space' and return as pairs of (traj_i, frame_i)
2.782436
2.362219
1.177891
check_iter_of_sequences(sequences) transforms = [] for X in sequences: transforms.append(self.partial_transform(X)) return transforms
def transform(self, sequences)
Apply dimensionality reduction to sequences Parameters ---------- sequences: list of array-like, each of shape (n_samples_i, n_features) Sequence data to transform, where n_samples_i is the number of samples in sequence i and n_features is the number of features. ...
6.775969
12.116138
0.559251
self.fit(sequences) transforms = self.transform(sequences) return transforms
def fit_transform(self, sequences, y=None)
Fit the model and apply dimensionality reduction Parameters ---------- sequences: list of array-like, each of shape (n_samples_i, n_features) Training data, where n_samples_i is the number of samples in sequence i and n_features is the number of features. y : Non...
5.872624
10.201693
0.575652
check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory) super(MultiSequenceClusterMixin, self).fit(self._concat(sequences)) if hasattr(self, 'labels_'): self.labels_ = self._split(self.labels_) return self
def fit(self, sequences, y=None)
Fit the clustering on the data Parameters ---------- sequences : list of array-like, each of shape [sequence_length, n_features] A list of multivariate timeseries. Each sequence may have a different length, but they all must have the same number of features....
7.917976
10.099009
0.784035
predictions = [] check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory) for X in sequences: predictions.append(self.partial_predict(X)) return predictions
def predict(self, sequences, y=None)
Predict the closest cluster each sample in each sequence in sequences belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters -------...
7.541344
13.767659
0.547758
if isinstance(X, md.Trajectory): X.center_coordinates() return super(MultiSequenceClusterMixin, self).predict(X)
def partial_predict(self, X, y=None)
Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : array-like shape=...
14.953556
27.606785
0.541662
if hasattr(super(MultiSequenceClusterMixin, self), 'fit_predict'): check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory) labels = super(MultiSequenceClusterMixin, self).fit_predict(sequences) else: self.fit(sequences) labels ...
def fit_predict(self, sequences, y=None)
Performs clustering on X and returns cluster labels. Parameters ---------- sequences : list of array-like, each of shape [sequence_length, n_features] A list of multivariate timeseries. Each sequence may have a different length, but they all must have the same number ...
4.636979
5.20788
0.890377