idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
15,300
def default_logging_file(self):
    """Return the path of the default logging configuration file shipped with PyEMMA."""
    import os.path as p
    import pyemma
    # the default logging config lives at the root of the installed package
    pkg_root = pyemma.__path__[0]
    return p.join(pkg_root, Config.DEFAULT_LOGGING_FILE_NAME)
default logging configuration
15,301
def cfg_dir(self, pyemma_cfg_dir):
    """Sets PyEMMA's configuration directory.

    Creates the directory (populated with the default config files) if it
    does not exist yet.

    Parameters
    ----------
    pyemma_cfg_dir : str
        Path to use as the configuration directory.

    Raises
    ------
    ConfigDirectoryException
        If the path cannot be created, is not a directory, or is not writeable.
    """
    if not os.path.exists(pyemma_cfg_dir):
        try:
            mkdir_p(pyemma_cfg_dir)
        except NotADirectoryError:
            # a component of the path exists but is a file
            raise ConfigDirectoryException("pyemma cfg dir (%s) is not a directory" % pyemma_cfg_dir)
        except EnvironmentError:
            raise ConfigDirectoryException("could not create configuration directory '%s'" % pyemma_cfg_dir)
    if not os.path.isdir(pyemma_cfg_dir):
        raise ConfigDirectoryException("%s is no valid directory" % pyemma_cfg_dir)
    if not os.access(pyemma_cfg_dir, os.W_OK):
        raise ConfigDirectoryException("%s is not writeable" % pyemma_cfg_dir)
    # seed the directory with the bundled default configuration files
    self.__copy_default_files_to_cfg_dir(pyemma_cfg_dir)
    self._cfg_dir = pyemma_cfg_dir
    if self.show_config_notification:
        stars = '*' * 80
        print(stars, '\n',
              'Changed PyEMMAs config directory to "{dir}".\n'
              'To make this change permanent, export the environment variable'
              ' "PYEMMA_CFG_DIR" \nto point to this location. Eg. edit your .bashrc file!'
              .format(dir=pyemma_cfg_dir), '\n', stars, sep='')
Sets PyEMMA's configuration directory. Also creates it with some default files if it does not exist.
15,302
def logging_config(self):
    """Currently used logging configuration file; cannot be changed at runtime.

    The special value 'DEFAULT' resolves to the default logging file
    inside the configuration directory.
    """
    configured = self._conf_values.get('pyemma', 'logging_config')
    if configured == 'DEFAULT':
        return os.path.join(self.cfg_dir, Config.DEFAULT_LOGGING_FILE_NAME)
    return configured
currently used logging configuration file . Can not be changed during runtime .
15,303
def _cfgs_to_read(self):
    """Build the ordered list of config files merged into the final config."""
    base_name = Config.DEFAULT_CONFIG_FILE_NAME
    candidates = [
        self.default_config_file,
        base_name,  # current working directory
        os.path.join(os.path.expanduser('~' + os.path.sep), base_name),  # user home
        '.pyemma.cfg',
    ]
    # every *.cfg in the configuration directory is read last (wins)
    if self.cfg_dir:
        from glob import glob
        candidates.extend(glob(self.cfg_dir + os.path.sep + "*.cfg"))
    return candidates
reads config files from various locations to build final config .
15,304
def _calculate_new_overlap ( stride , traj_len , skip ) : overlap = stride * ( ( traj_len - skip - 1 ) // stride + 1 ) - traj_len + skip return overlap
Given two trajectories T_1 and T_2 this function calculates for the first trajectory an overlap i . e . a skip parameter for T_2 such that the trajectory fragments T_1 and T_2 appear as one under the given stride .
15,305
def assert_allclose(actual, desired, rtol=1.e-5, atol=1.e-8, err_msg='', verbose=True):
    r"""Wrapper for numpy.testing.assert_allclose using numpy.allclose's
    default tolerances.

    Needed because the testing helper ships with different default values.
    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    return assert_allclose_np(actual, desired, rtol=rtol, atol=atol,
                              err_msg=err_msg, verbose=verbose)
r wrapper for numpy . testing . allclose with default tolerances of numpy . allclose . Needed since testing method has different values .
15,306
def topology_to_numpy(top):
    """Convert an mdtraj topology into numpy record arrays.

    Returns (atoms, bonds): a structured atom array and an (index, index)
    record array of bonds.
    """
    rows = [(a.serial, a.name, a.element.symbol, a.residue.resSeq,
             a.residue.name, a.residue.chain.index, a.segment_id)
            for a in top.atoms]
    atom_dtype = [("serial", 'i4'), ("name", 'S4'), ("element", 'S3'),
                  ("resSeq", 'i4'), ("resName", 'S4'), ("chainID", 'i4'),
                  ("segmentID", 'S4')]
    atoms = np.array(rows, dtype=atom_dtype)
    bonds = np.fromiter(((a.index, b.index) for (a, b) in top.bonds),
                        dtype='i4,i4', count=top.n_bonds)
    return atoms, bonds
Convert this topology into a pandas dataframe
15,307
def topology_from_numpy(atoms, bonds=None):
    """Create an mdtraj Topology from numpy arrays.

    Parameters
    ----------
    atoms : structured ndarray
        Must provide fields name, element, resSeq, resName, chainID, serial
        (segmentID is filled with "" if missing).
    bonds : ndarray of shape (n, 2), optional
        Pairs of atom indices; defaults to no bonds.

    Returns
    -------
    mdtraj.Topology
    """
    if bonds is None:
        bonds = np.zeros((0, 2))
    for col in ["name", "element", "resSeq", "resName", "chainID", "serial"]:
        if col not in atoms.dtype.names:
            raise ValueError('dataframe must have column %s' % col)
    if "segmentID" not in atoms.dtype.names:
        atoms["segmentID"] = ""
    from mdtraj.core.topology import Atom
    from mdtraj.core import element as elem
    out = mdtraj.Topology()
    # pre-size the internal atom list; filled by global index below
    out._atoms = [None for _ in range(len(atoms))]
    N = np.arange(0, len(atoms))
    for ci in np.unique(atoms['chainID']):
        chain_atoms = atoms[atoms['chainID'] == ci]
        subN = N[atoms['chainID'] == ci]
        c = out.add_chain()
        for ri in np.unique(chain_atoms['resSeq']):
            residue_atoms = chain_atoms[chain_atoms['resSeq'] == ri]
            # global indices of the atoms belonging to this residue
            mask = subN[chain_atoms['resSeq'] == ri]
            indices = N[mask]
            rnames = residue_atoms['resName']
            residue_name = np.array(rnames)[0]
            segids = residue_atoms['segmentID']
            segment_id = np.array(segids)[0]
            if not np.all(rnames == residue_name):
                raise ValueError('All of the atoms with residue index %d '
                                 'do not share the same residue name' % ri)
            r = out.add_residue(residue_name.decode('ascii'), c, ri, segment_id.decode('ascii'))
            for ix, atom in enumerate(residue_atoms):
                e = atom['element'].decode('ascii')
                a = Atom(atom['name'].decode('ascii'), elem.get_by_symbol(e),
                         int(indices[ix]), r, serial=atom['serial'])
                out._atoms[indices[ix]] = a
                r._atoms.append(a)
    for ai1, ai2 in bonds:
        out.add_bond(out.atom(ai1), out.atom(ai2))
    out._numAtoms = out.n_atoms
    return out
Create a mdtraj topology from numpy arrays
15,308
def read_discrete_trajectory(filename):
    """Read a discrete (integer state) trajectory from an ascii file.

    Parameters
    ----------
    filename : str
        Path to a text file containing one integer state per line.

    Returns
    -------
    numpy.ndarray
        1-d integer array of states.

    Note: the previous implementation used np.fromstring for text parsing,
    which is deprecated; splitting the text is equivalent and warning-free.
    """
    with open(filename, "r") as f:
        content = f.read()
    return np.array(content.split(), dtype=int)
Read discrete trajectory from ascii file .
15,309
def write_discrete_trajectory(filename, dtraj):
    r"""Write a discrete trajectory to an ascii file, one state per line.

    Parameters
    ----------
    filename : str
        Output path.
    dtraj : array-like of int
        Discrete trajectory to write.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    dtraj = np.asarray(dtraj)
    with open(filename, 'w') as f:
        dtraj.tofile(f, sep='\n', format='%d')
r Write discrete trajectory to ascii file .
15,310
def save_discrete_trajectory(filename, dtraj):
    r"""Write a discrete trajectory to a binary .npy file.

    Parameters
    ----------
    filename : str
        Output path (np.save appends '.npy' if missing).
    dtraj : array-like of int
        Discrete trajectory to save.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    dtraj = np.asarray(dtraj)
    np.save(filename, dtraj)
r Write discrete trajectory to binary file .
15,311
def count_states(dtrajs, ignore_negative=False):
    r"""Histogram of state visit counts over all given discrete trajectories.

    Parameters
    ----------
    dtrajs : array-like or list of array-like
        One or more discrete trajectories.
    ignore_negative : bool, default=False
        If True, negative state indices are excluded from the count.

    Returns
    -------
    numpy.ndarray of int
        res[i] is the total number of frames in state i.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    dtrajs = _ensure_dtraj_list(dtrajs)
    nmax = 0
    per_traj_counts = []
    for dtraj in dtrajs:
        if ignore_negative:
            dtraj = dtraj[np.where(dtraj >= 0)]
        bc = np.bincount(dtraj)
        nmax = max(nmax, bc.shape[0])
        per_traj_counts.append(bc)
    res = np.zeros(nmax, dtype=int)
    for bc in per_traj_counts:
        res[:bc.shape[0]] += bc
    return res
r returns a histogram count
15,312
def number_of_states(dtrajs, only_used=False):
    r"""Number of states in the given discrete trajectories.

    Parameters
    ----------
    dtrajs : array-like or list of array-like
        One or more discrete trajectories.
    only_used : bool, default=False
        If False, return max(state index) + 1; if True, count only the
        states actually visited.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    dtrajs = _ensure_dtraj_list(dtrajs)
    if only_used:
        # only count states that have nonzero visit counts
        return np.count_nonzero(count_states(dtrajs))
    imax = 0
    for dtraj in dtrajs:
        imax = max(imax, np.max(dtraj))
    return imax + 1
r returns the number of states in the given trajectories .
15,313
def stdchannel_redirected(stdchannel, dest_filename, fake=False):
    """A context-manager generator to temporarily redirect stdout or stderr
    to `dest_filename`.

    NOTE(review): presumably decorated with @contextmanager at the original
    definition site — the decorator is not visible in this excerpt; confirm.

    Parameters
    ----------
    stdchannel : file object
        sys.stdout or sys.stderr (must have a real fileno()).
    dest_filename : str
        File receiving the redirected output.
    fake : bool, default=False
        If True, do nothing (yield immediately).

    Fix: the duplicated file descriptor created by os.dup was never closed,
    leaking one fd per use; it is now released after restoration.
    """
    if fake:
        yield
        return
    oldstdchannel = dest_file = None
    try:
        # keep a duplicate of the original fd so it can be restored
        oldstdchannel = os.dup(stdchannel.fileno())
        dest_file = open(dest_filename, 'w')
        os.dup2(dest_file.fileno(), stdchannel.fileno())
        yield
    finally:
        if oldstdchannel is not None:
            os.dup2(oldstdchannel, stdchannel.fileno())
            os.close(oldstdchannel)  # release the duplicated descriptor
        if dest_file is not None:
            dest_file.close()
A context manager to temporarily redirect stdout or stderr
15,314
def _transform_array ( self , X ) : r X_meanfree = X - self . mean Y = np . dot ( X_meanfree , self . eigenvectors [ : , 0 : self . dimension ( ) ] ) return Y . astype ( self . output_type ( ) )
r Projects the data onto the dominant independent components .
15,315
def timescales(self):
    r"""Implied timescales of the TICA transformation.

    Computed as -lag / ln|eigenvalue_i| for each eigenvalue.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    return -self.lag / np.log(np.abs(self.eigenvalues))
r Implied timescales of the TICA transformation
15,316
def feature_TIC_correlation(self):
    r"""Instantaneous correlation matrix between mean-free input features
    and the TICs.

    Each entry (i, j) is cov(feature_i, TIC_j) / std(feature_i).

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    feature_sigma = np.sqrt(np.diag(self.cov))
    return np.dot(self.cov, self.eigenvectors[:, :self.dimension()]) / feature_sigma[:, np.newaxis]
r Instantaneous correlation matrix between mean - free input features and TICs
15,317
def _svd_sym_koopman(K, C00_train, Ctt_train):
    """SVD of the Koopman operator symmetrized w.r.t. the empirical distribution."""
    from pyemma._ext.variational.solvers.direct import spd_inv_sqrt
    # whitening transforms for the instantaneous and time-lagged covariances
    C00_inv_sqrt = spd_inv_sqrt(C00_train)
    Ctt_inv_sqrt = spd_inv_sqrt(Ctt_train)
    C0t_re = mdot(C00_train, K)
    K_sym = mdot(C00_inv_sqrt, C0t_re, Ctt_inv_sqrt)
    U, S, Vt = np.linalg.svd(K_sym, compute_uv=True, full_matrices=False)
    # map singular vectors back to the original (unwhitened) basis
    U = mdot(C00_inv_sqrt, U)
    Vt = mdot(Vt, Ctt_inv_sqrt)
    return U, S, Vt.T
Computes the SVD of the symmetrized Koopman operator in the empirical distribution .
15,318
def vamp_1_score(K, C00_train, C0t_train, Ctt_train, C00_test, C0t_test, Ctt_test, k=None):
    """VAMP-1 score of a kinetic model: nuclear norm of the whitened test matrix."""
    from pyemma._ext.variational.solvers.direct import spd_inv_sqrt
    U, S, V = _svd_sym_koopman(K, C00_train, Ctt_train)
    if k is not None:
        # restrict scoring to the top-k singular functions
        U = U[:, :k]
        V = V[:, :k]
    A = spd_inv_sqrt(mdot(U.T, C00_test, U))
    B = mdot(U.T, C0t_test, V)
    C = spd_inv_sqrt(mdot(V.T, Ctt_test, V))
    return np.linalg.norm(mdot(A, B, C), ord='nuc')
Computes the VAMP - 1 score of a kinetic model .
15,319
def vamp_e_score(K, C00_train, C0t_train, Ctt_train, C00_test, C0t_test, Ctt_test, k=None):
    """VAMP-E score of a kinetic model.

    Parameters mirror vamp_1_score; k optionally truncates to the top-k
    singular functions.

    Bug fix: previously ``S`` was only assigned inside the ``if k is not
    None`` branch, so calling with k=None raised NameError. S is now
    always defined from the singular values.
    """
    U, s, V = _svd_sym_koopman(K, C00_train, Ctt_train)
    if k is not None:
        U = U[:, :k]
        S = np.diag(s[:k])
        V = V[:, :k]
    else:
        S = np.diag(s)
    score = np.trace(2.0 * mdot(V, S, U.T, C0t_test)
                     - mdot(V, S, U.T, C00_test, U, S, V.T, Ctt_test))
    return score
Computes the VAMP - E score of a kinetic model .
15,320
def get_culprit(omit_top_frames=1):
    """Return (filename, lineno) of the first caller outside decorator frames
    and this module.

    omit_top_frames : number of innermost stack frames to skip before searching.
    Returns ('unknown', -1) if the stack cannot be inspected.
    """
    try:
        frames = stack()[omit_top_frames:]
        while len(frames) > 0:
            frame = frames.pop(0)
            filename = frame[1]
            # skip frames generated by decorators and frames from this module
            if '<decorator' in filename or __file__ in filename:
                continue
            else:
                break
        lineno = frame[2]
        # drop frame references promptly to avoid reference cycles
        del frames, frame
    except OSError:
        filename = 'unknown'
        lineno = -1
    return filename, lineno
get the filename and line number calling this .
15,321
def get_averaged_bias_matrix(bias_sequences, dtrajs, nstates=None):
    r"""Computes a bias matrix via an exponential average of the observed
    frame-wise bias energies.

    Parameters
    ----------
    bias_sequences : list of ndarray (T_i, nthermo)
        Frame-wise bias energies per trajectory and thermodynamic state.
    dtrajs : list of ndarray (T_i,)
        Discrete trajectories matching bias_sequences.
    nstates : int, optional
        Number of microstates; inferred from dtrajs if None.

    Returns
    -------
    ndarray (nthermo, nstates)
        Averaged bias matrix.
    """
    from pyemma.thermo.extensions.util import (
        logsumexp as _logsumexp, logsumexp_pair as _logsumexp_pair)
    nmax = int(_np.max([dtraj.max() for dtraj in dtrajs]))
    if nstates is None:
        nstates = nmax + 1
    elif nstates < nmax + 1:
        raise ValueError("nstates is smaller than the number of observed microstates")
    nthermo = bias_sequences[0].shape[1]
    # accumulate log-sum-exp of -bias per (thermo state, microstate);
    # -inf is the identity element for logsumexp
    bias_matrix = -_np.ones(shape=(nthermo, nstates), dtype=_np.float64) * _np.inf
    counts = _np.zeros(shape=(nstates,), dtype=_np.intc)
    for s in range(len(bias_sequences)):
        for i in range(nstates):
            idx = (dtrajs[s] == i)
            nidx = idx.sum()
            if nidx == 0:
                continue
            counts[i] += nidx
            selected_bias_sequence = bias_sequences[s][idx, :]
            for k in range(nthermo):
                bias_matrix[k, i] = _logsumexp_pair(
                    bias_matrix[k, i],
                    _logsumexp(_np.ascontiguousarray(-selected_bias_sequence[:, k]), inplace=False))
    idx = counts.nonzero()
    log_counts = _np.log(counts[idx])
    # convert the accumulated logsumexp back to an average bias energy
    bias_matrix *= -1.0
    bias_matrix[:, idx] += log_counts[_np.newaxis, :]
    return bias_matrix
r Computes a bias matrix via an exponential average of the observed frame wise bias energies .
15,322
def get_umbrella_sampling_data(us_trajs, us_centers, us_force_constants, md_trajs=None, kT=None, width=None):
    r"""Wraps umbrella sampling data, or a mix of umbrella sampling and direct
    molecular dynamics, into (ttrajs, btrajs, centers, force_constants,
    unbiased_state).

    Bug fix: on Python 3, ``_np.asarray(map(...))`` produced a useless 0-d
    object array holding the map iterator; the iterator is now materialized
    with a list comprehension before conversion.
    """
    ttrajs, umbrella_centers, force_constants, unbiased_state = _get_umbrella_sampling_parameters(
        us_trajs, us_centers, us_force_constants, md_trajs=md_trajs, kT=kT)
    if md_trajs is None:
        md_trajs = []
    if width is None:
        width = _np.zeros(shape=(umbrella_centers.shape[1],), dtype=_np.float64)
    else:
        # clamp missing / non-positive widths to 0.0 (meaning: no periodicity)
        width = _np.asarray(
            [w if w is not None and w > 0.0 else 0.0 for w in width],
            dtype=_np.float64)
    if width.shape[0] != umbrella_centers.shape[1]:
        raise ValueError('Unmatching number of width components.')
    btrajs = _get_umbrella_bias_sequences(us_trajs + md_trajs, umbrella_centers, force_constants, width)
    return ttrajs, btrajs, umbrella_centers, force_constants, unbiased_state
Wraps umbrella sampling data, or a mix of umbrella sampling and direct molecular dynamics.
15,323
def get_multi_temperature_data(energy_trajs, temp_trajs, energy_unit, temp_unit, reference_temperature=None):
    r"""Wraps data from multi-temperature molecular dynamics into
    (ttrajs, btrajs, temperatures, unbiased_state).

    reference_temperature defaults to the lowest observed temperature; if
    given, it must be a positive number.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    ttrajs, temperatures = _get_multi_temperature_parameters(temp_trajs)
    if reference_temperature is None:
        reference_temperature = temperatures.min()
    else:
        assert isinstance(reference_temperature, (int, float)), 'reference_temperature must be numeric'
        assert reference_temperature > 0.0, 'reference_temperature must be positive'
    btrajs = _get_multi_temperature_bias_sequences(
        energy_trajs, temp_trajs, temperatures, reference_temperature, energy_unit, temp_unit)
    # the unbiased state is the thermodynamic state at the reference temperature
    if reference_temperature in temperatures:
        unbiased_state = _np.where(temperatures == reference_temperature)[0]
        try:
            unbiased_state = unbiased_state[0]
        except IndexError:
            unbiased_state = None
    else:
        unbiased_state = None
    return ttrajs, btrajs, temperatures, unbiased_state
r Wraps data from multi - temperature molecular dynamics .
15,324
def assign_unbiased_state_label(memm_list, unbiased_state):
    r"""Set the unbiased-msm label on every estimated MEMM object in the list.

    Parameters
    ----------
    memm_list : list of MEMM
        Estimated MEMM objects.
    unbiased_state : int or None
        Index of the unbiased thermodynamic state; no-op if None.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    if unbiased_state is None:
        return
    for memm in memm_list:
        assert 0 <= unbiased_state < len(memm.models), "invalid state: " + str(unbiased_state)
        memm._unbiased_state = unbiased_state
r Sets the msm label for the given list of estimated MEMM objects .
15,325
def timescales_msm(dtrajs, lags=None, nits=None, reversible=True, connected=True,
                   weights='empirical', errors=None, nsamples=50, n_jobs=None,
                   show_progress=True, mincount_connectivity='1/n', only_timescales=False):
    r"""Implied timescales from Markov state models estimated at a series of
    lag times.

    weights must be 'empirical' or 'oom'; errors may be None or 'bayes'
    (ignored and reset when OOM weights are requested).

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    # non-string weights and unknown strings are rejected with the same message
    if not isinstance(weights, str) or weights not in ['empirical', 'oom']:
        raise ValueError("Weights must be either \'empirical\' or \'oom\'")
    if weights == 'oom' and (errors is not None):
        # error estimation is not available for OOM-corrected models
        errors = None
    dtrajs = _types.ensure_dtraj_list(dtrajs)
    connectivity = 'largest' if connected else 'none'
    if errors is None:
        if weights == 'empirical':
            estimator = _ML_MSM(reversible=reversible, connectivity=connectivity)
        else:
            estimator = _OOM_MSM(reversible=reversible, connectivity=connectivity)
    elif errors == 'bayes':
        estimator = _Bayes_MSM(reversible=reversible, connectivity=connectivity,
                               nsamples=nsamples, show_progress=show_progress)
    else:
        raise NotImplementedError('Error estimation method {errors} currently not implemented'.format(errors=errors))
    if hasattr(estimator, 'mincount_connectivity'):
        estimator.mincount_connectivity = mincount_connectivity
    itsobj = _ImpliedTimescales(estimator, lags=lags, nits=nits, n_jobs=n_jobs,
                                show_progress=show_progress, only_timescales=only_timescales)
    itsobj.estimate(dtrajs)
    return itsobj
r Implied timescales from Markov state models estimated at a series of lag times .
15,326
def estimate_markov_model(dtrajs, lag, reversible=True, statdist=None, count_mode='sliding',
                          weights='empirical', sparse=False, connectivity='largest',
                          dt_traj='1 step', maxiter=1000000, maxerr=1e-8,
                          score_method='VAMP2', score_k=10, mincount_connectivity='1/n'):
    r"""Estimates a Markov model from discrete trajectories.

    weights selects the estimator: 'empirical' (maximum likelihood) or
    'oom' (OOM-corrected). Returns the fitted estimator.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    if not isinstance(weights, str) or weights not in ['empirical', 'oom']:
        raise ValueError("Weights must be either \'empirical\' or \'oom\'")
    if weights == 'empirical':
        mlmsm = _ML_MSM(lag=lag, reversible=reversible, statdist_constraint=statdist,
                        count_mode=count_mode, sparse=sparse, connectivity=connectivity,
                        dt_traj=dt_traj, maxiter=maxiter, maxerr=maxerr,
                        score_method=score_method, score_k=score_k,
                        mincount_connectivity=mincount_connectivity)
        return mlmsm.estimate(dtrajs)
    # weights == 'oom' (the only remaining valid value)
    if (statdist is not None) or (maxiter != 1000000) or (maxerr != 1e-8):
        import warnings
        warnings.warn("Values for statdist, maxiter or maxerr are ignored if OOM-correction is used.")
    oom_msm = _OOM_MSM(lag=lag, reversible=reversible, count_mode=count_mode, sparse=sparse,
                       connectivity=connectivity, dt_traj=dt_traj, score_method=score_method,
                       score_k=score_k, mincount_connectivity=mincount_connectivity)
    return oom_msm.estimate(dtrajs)
r Estimates a Markov model from discrete trajectories
15,327
def bayesian_markov_model(dtrajs, lag, reversible=True, statdist=None, sparse=False,
                          connectivity='largest', count_mode='effective', nsamples=100,
                          conf=0.95, dt_traj='1 step', show_progress=True,
                          mincount_connectivity='1/n'):
    r"""Bayesian Markov model estimate using Gibbs sampling of the posterior.

    Returns the fitted Bayesian MSM estimator.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    bmsm_estimator = _Bayes_MSM(lag=lag, reversible=reversible, statdist_constraint=statdist,
                                count_mode=count_mode, sparse=sparse, connectivity=connectivity,
                                dt_traj=dt_traj, nsamples=nsamples, conf=conf,
                                show_progress=show_progress,
                                mincount_connectivity=mincount_connectivity)
    return bmsm_estimator.estimate(dtrajs)
r Bayesian Markov model estimate using Gibbs sampling of the posterior
15,328
def timescales_hmsm(dtrajs, nstates, lags=None, nits=None, reversible=True, stationary=False,
                    connectivity=None, mincount_connectivity='1/n', separate=None, errors=None,
                    nsamples=100, stride=None, n_jobs=None, show_progress=True):
    r"""Implied timescales from Hidden Markov state models estimated at a
    series of lag times.

    errors may be None (maximum likelihood) or 'bayes' (Gibbs sampling).

    Fixes: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the NotImplementedError message previously lacked spaces
    around the method name.
    """
    dtrajs = _types.ensure_dtraj_list(dtrajs)
    if errors is None:
        if stride is None:
            stride = 1
        estimator = _ML_HMSM(nstates=nstates, reversible=reversible, stationary=stationary,
                             connectivity=connectivity, stride=stride,
                             mincount_connectivity=mincount_connectivity, separate=separate)
    elif errors == 'bayes':
        if stride is None:
            stride = 'effective'
        estimator = _Bayes_HMSM(nstates=nstates, reversible=reversible, stationary=stationary,
                                connectivity=connectivity,
                                mincount_connectivity=mincount_connectivity, stride=stride,
                                separate=separate, show_progress=show_progress,
                                nsamples=nsamples)
    else:
        raise NotImplementedError('Error estimation method ' + str(errors) + ' currently not implemented')
    itsobj = _ImpliedTimescales(estimator, lags=lags, nits=nits, n_jobs=n_jobs,
                                show_progress=show_progress)
    itsobj.estimate(dtrajs)
    return itsobj
r Calculate implied timescales from Hidden Markov state models estimated at a series of lag times .
15,329
def estimate_hidden_markov_model(dtrajs, nstates, lag, reversible=True, stationary=False,
                                 connectivity=None, mincount_connectivity='1/n', separate=None,
                                 observe_nonempty=True, stride=1, dt_traj='1 step',
                                 accuracy=1e-3, maxit=1000):
    r"""Estimates a Hidden Markov state model from discrete trajectories.

    Returns the fitted maximum-likelihood HMSM estimator.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    hmsm_estimator = _ML_HMSM(lag=lag, nstates=nstates, reversible=reversible,
                              stationary=stationary, msm_init='largest-strong',
                              connectivity=connectivity,
                              mincount_connectivity=mincount_connectivity, separate=separate,
                              observe_nonempty=observe_nonempty, stride=stride,
                              dt_traj=dt_traj, accuracy=accuracy, maxit=maxit)
    return hmsm_estimator.estimate(dtrajs)
r Estimates a Hidden Markov state model from discrete trajectories
15,330
def bayesian_hidden_markov_model(dtrajs, nstates, lag, nsamples=100, reversible=True,
                                 stationary=False, connectivity=None,
                                 mincount_connectivity='1/n', separate=None,
                                 observe_nonempty=True, stride='effective', conf=0.95,
                                 dt_traj='1 step', store_hidden=False, show_progress=True):
    r"""Bayesian Hidden Markov model estimate using Gibbs sampling of the
    posterior.

    Returns the fitted Bayesian HMSM estimator.

    Fix: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the docstring is reconstructed.
    """
    bhmsm_estimator = _Bayes_HMSM(lag=lag, nstates=nstates, stride=stride, nsamples=nsamples,
                                  reversible=reversible, stationary=stationary,
                                  connectivity=connectivity,
                                  mincount_connectivity=mincount_connectivity, separate=separate,
                                  observe_nonempty=observe_nonempty, dt_traj=dt_traj, conf=conf,
                                  store_hidden=store_hidden, show_progress=show_progress)
    return bhmsm_estimator.estimate(dtrajs)
r Bayesian Hidden Markov model estimate using Gibbs sampling of the posterior
15,331
def estimate_augmented_markov_model(dtrajs, ftrajs, lag, m, sigmas, count_mode='sliding',
                                    connectivity='largest', dt_traj='1 step', maxiter=1000000,
                                    eps=0.05, maxcache=3000):
    r"""Estimates an Augmented Markov model from discrete trajectories and
    experimental data.

    Fixes: a stray ``r`` token (detached raw-docstring prefix) caused a
    NameError; the visited Markov states are now enumerated with np.unique
    (sorted, deterministic) instead of iterating a Python set, whose
    arbitrary ordering made the row order of the expectation matrix E
    nondeterministic across runs.
    """
    if _np.all(sigmas > 0):
        # experimental weights from the standard errors
        _w = 1. / (2 * sigmas ** 2.)
    else:
        raise ValueError('Zero or negative standard errors supplied. Please revise input')
    if ftrajs[0].ndim < 2:
        raise ValueError("Supplied feature trajectories have inappropriate dimensions (%d) should be atleast 2." % ftrajs[0].ndim)
    if len(dtrajs) != len(ftrajs):
        raise ValueError("A different number of dtrajs and ftrajs were supplied as input. They must have exactly a one-to-one correspondence.")
    if not _np.all([len(dt) == len(ft) for dt, ft in zip(dtrajs, ftrajs)]):
        raise ValueError("One or more supplied dtraj-ftraj pairs do not have the same length.")
    dta = _np.concatenate(dtrajs)
    fta = _np.concatenate(ftrajs)
    all_markov_states = _np.unique(dta)
    # E[i, :] = mean feature vector over all frames assigned to state i
    _E = _np.zeros((len(all_markov_states), fta.shape[1]))
    for i, s in enumerate(all_markov_states):
        _E[i, :] = fta[_np.where(dta == s)].mean(axis=0)
    mlamm = _ML_AMM(lag=lag, count_mode=count_mode, connectivity=connectivity, dt_traj=dt_traj,
                    maxiter=maxiter, max_cache=maxcache, E=_E, w=_w, m=m)
    return mlamm.estimate(dtrajs)
r Estimates an Augmented Markov model from discrete trajectories and experimental data
15,332
def _is_zero ( x ) : if x is None : return True if isinstance ( x , numbers . Number ) : return x == 0.0 if isinstance ( x , np . ndarray ) : return np . all ( x == 0 ) return False
Returns True if x is numerically 0 or an array with 0 s .
15,333
def _sparsify(X, remove_mean=False, modify_data=False, sparse_mode='auto', sparse_tol=0.0):
    """Determine the sparsity of X and return (X_variable, mask, const_values).

    mask and const_values are None when sparsification does not pay off;
    otherwise X is reduced to its variable columns and const_values holds
    the first-row values of the constant columns.
    """
    mode = sparse_mode.lower()
    if mode == 'sparse':
        min_const_col_number = 0
    elif mode == 'dense':
        # threshold above the column count => never sparsify
        min_const_col_number = X.shape[1] + 1
    else:
        # 'auto': sparsification only pays off beyond a heuristic number of
        # constant columns, depending on whether a data copy is needed
        if remove_mean and not modify_data:
            min_const_col_number = max(0.1 * X.shape[1], 50)
        else:
            ncol = X.shape[1]
            if ncol < 250:
                min_const_col_number = ncol - 0.25 * ncol
            elif ncol < 1000:
                min_const_col_number = ncol - (0.5 * ncol - 100)
            else:
                min_const_col_number = ncol - (0.8 * ncol - 400)
        min_const_col_number = int(min_const_col_number)
    mask = None
    xconst = None
    if X.shape[1] > min_const_col_number:
        mask = covartools.variable_cols(X, tol=sparse_tol, min_constant=min_const_col_number)
        nconst = len(np.where(~mask)[0])
        if nconst > min_const_col_number:
            xconst = X[0, ~mask]
            X = X[:, mask]
        else:
            mask = None
    return X, mask, xconst
Determines the sparsity of X and returns a selected sub - matrix
15,334
def _copy_convert ( X , const = None , remove_mean = False , copy = True ) : r dtype = np . float64 if X . dtype . kind == 'b' and X . shape [ 0 ] < 2 ** 23 and not remove_mean : dtype = np . float32 if X . dtype not in ( np . float64 , dtype ) : X = X . astype ( dtype , order = 'C' ) if const is not None : const = const . astype ( dtype , order = 'C' ) elif copy : X = X . copy ( order = 'C' ) if const is not None : const = const . copy ( order = 'C' ) return X , const
r Makes a copy or converts the data type if needed
15,335
def _sum(X, xmask=None, xconst=None, Y=None, ymask=None, yconst=None,
         symmetric=False, remove_mean=False, weights=None):
    r"""Computes the (optionally weighted) column sums and centered column sums.

    Returns (w, sx, sx_centered) or, if Y is given,
    (w, sx, sx_centered, sy, sy_centered), where w is the total weight.
    When sparsity masks are given, constant columns are accounted for via
    _sum_sparse. With symmetric=True the X and Y sums are pooled.
    """
    T = X.shape[0]
    # apply frame weights up front so all sums below are weighted sums
    if weights is not None:
        X = weights[:, None] * X
        if Y is not None:
            Y = weights[:, None] * Y
    sx_raw = X.sum(axis=0)
    sy_raw = 0
    if Y is not None:
        sy_raw = Y.sum(axis=0)
    # fold the contribution of constant columns into the raw sums
    if xmask is not None:
        if weights is not None:
            sx_raw = _sum_sparse(sx_raw, xmask, xconst, weights.sum())
        else:
            sx_raw = _sum_sparse(sx_raw, xmask, xconst, T)
    if ymask is not None:
        if weights is not None:
            sy_raw = _sum_sparse(sy_raw, ymask, yconst, weights.sum())
        else:
            sy_raw = _sum_sparse(sy_raw, ymask, yconst, T)
    if Y is not None and symmetric:
        # pooled sums over both X and Y; total weight doubles accordingly
        sx = sx_raw + sy_raw
        sy = sx
        if weights is not None:
            w = 2 * np.sum(weights)
        else:
            w = 2 * T
    else:
        sx = sx_raw
        sy = sy_raw
        if weights is not None:
            w = np.sum(weights)
        else:
            w = T
    sx_raw_centered = sx_raw.copy()
    if Y is not None:
        sy_raw_centered = sy_raw.copy()
    if remove_mean:
        if Y is not None and symmetric:
            # after symmetric centering, each raw sum loses half the pooled mean
            sx_raw_centered -= 0.5 * sx
            sy_raw_centered -= 0.5 * sy
        else:
            # centering by the own mean makes the centered sums exactly zero
            sx_raw_centered = np.zeros(sx.size)
            if Y is not None:
                sy_raw_centered = np.zeros(sy.size)
    if Y is not None:
        return w, sx, sx_raw_centered, sy, sy_raw_centered
    else:
        return w, sx, sx_raw_centered
r Computes the column sums and centered column sums .
15,336
def _center ( X , w , s , mask = None , const = None , inplace = True ) : xmean = s / float ( w ) if mask is None : X = np . subtract ( X , xmean , out = X if inplace else None ) else : X = np . subtract ( X , xmean [ mask ] , out = X if inplace else None ) const = np . subtract ( const , xmean [ ~ mask ] , const if inplace else None ) return X , const
Centers the data .
15,337
def _filter_variable_indices ( mask , column_selection ) : a = np . where ( mask ) [ 0 ] b = column_selection [ np . in1d ( column_selection , a ) ] return np . searchsorted ( a , b )
Returns column indices restricted to the variable columns as determined by the given mask .
15,338
def _M2_dense ( X , Y , weights = None , diag_only = False ) : if weights is not None : if diag_only : return np . sum ( weights [ : , None ] * X * Y , axis = 0 ) else : return np . dot ( ( weights [ : , None ] * X ) . T , Y ) else : if diag_only : return np . sum ( X * Y , axis = 0 ) else : return np . dot ( X . T , Y )
2nd moment matrix using dense matrix computations .
15,339
def _M2_const(Xvar, mask_X, xvarsum, xconst, Yvar, mask_Y, yvarsum, yconst, weights=None):
    r"""Computes the unnormalized covariance matrix between X and Y,
    exploiting known constant input columns.

    Xvar/Yvar hold only the variable columns; mask_X/mask_Y mark their
    positions in the full matrix; xconst/yconst are the constant column
    values; xvarsum/yvarsum are the variable-column sums.
    """
    C = np.zeros((len(mask_X), len(mask_Y)))
    # variable x variable block: plain dense second moment
    C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
    xsum_is_0 = _is_zero(xvarsum)
    ysum_is_0 = _is_zero(yvarsum)
    xconst_is_0 = _is_zero(xconst)
    yconst_is_0 = _is_zero(yconst)
    if weights is not None:
        # with weights, the plain sums are replaced by weighted sums
        wsum = np.sum(weights)
        xvarsum = np.sum(weights[:, None] * Xvar, axis=0)
        yvarsum = np.sum(weights[:, None] * Yvar, axis=0)
    else:
        wsum = Xvar.shape[0]
    # variable x constant blocks are outer products of sums and constants;
    # skipped entirely when they would be all zero
    if not (xsum_is_0 or yconst_is_0) or not (ysum_is_0 or xconst_is_0):
        C[np.ix_(mask_X, ~mask_Y)] = np.outer(xvarsum, yconst)
        C[np.ix_(~mask_X, mask_Y)] = np.outer(xconst, yvarsum)
    # constant x constant block scales with the total weight
    if not (xconst_is_0 or yconst_is_0):
        C[np.ix_(~mask_X, ~mask_Y)] = np.outer(wsum * xconst, yconst)
    return C
r Computes the unnormalized covariance matrix between X and Y exploiting constant input columns
15,340
def _M2_sparse(Xvar, mask_X, Yvar, mask_Y, weights=None):
    """Second moment matrix in full coordinates when all constant columns are zero.

    Only the variable x variable block can be nonzero; it is computed densely
    and scattered into the full-size result.
    """
    full = np.zeros((len(mask_X), len(mask_Y)))
    full[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
    return full
2nd moment matrix exploiting zero input columns
15,341
def _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=None, column_selection=None):
    """Symmetric second moment matrices (Cxx+Cyy, Cxy+Cyx) exploiting zero
    constant columns.

    column_selection optionally restricts the second operand to a subset of
    columns; indices are mapped into the variable-column subsets via
    _filter_variable_indices.
    """
    assert len(mask_X) == len(mask_Y), 'X and Y need to have equal sizes for symmetrization'
    if column_selection is None:
        mask_Xk = mask_X
        mask_Yk = mask_Y
        Xvark = Xvar
        Yvark = Yvar
    else:
        # restrict masks and variable columns to the selected columns
        mask_Xk = mask_X[column_selection]
        mask_Yk = mask_Y[column_selection]
        Xvark = Xvar[:, _filter_variable_indices(mask_X, column_selection)]
        Yvark = Yvar[:, _filter_variable_indices(mask_Y, column_selection)]
    # Cxxyy = X^T Xk + Y^T Yk scattered into full coordinates
    Cxxyy = np.zeros((len(mask_X), len(mask_Yk)))
    Cxxyy[np.ix_(mask_X, mask_Xk)] = _M2_dense(Xvar, Xvark, weights=weights)
    Cxxyy[np.ix_(mask_Y, mask_Yk)] += _M2_dense(Yvar, Yvark, weights=weights)
    # Cxyyx = X^T Yk + Y^T Xk scattered into full coordinates
    Cxyyx = np.zeros((len(mask_X), len(mask_Yk)))
    Cxy = _M2_dense(Xvar, Yvark, weights=weights)
    Cyx = _M2_dense(Yvar, Xvark, weights=weights)
    Cxyyx[np.ix_(mask_X, mask_Yk)] = Cxy
    Cxyyx[np.ix_(mask_Y, mask_Xk)] += Cyx
    return Cxxyy, Cxyyx
2nd self - symmetric moment matrix exploiting zero input columns
15,342
def _M2_symmetric(Xvar, Yvar, mask_X=None, mask_Y=None, xsum=0, xconst=0, ysum=0, yconst=0,
                  weights=None, column_selection=None, diag_only=False):
    """Symmetric second moment matrices (Cxx+Cyy, Cxy+Cyx); dispatches
    between the dense, sparse and constant-column implementations.

    Fix: np.bool (removed in numpy >= 1.24) replaced by the builtin bool.
    """
    if mask_X is None and mask_Y is None:
        # fully dense path
        if column_selection is None:
            Xvark = Xvar
            Yvark = Yvar
        else:
            Xvark = Xvar[:, column_selection]
            Yvark = Yvar[:, column_selection]
        Cxxyy = (_M2_dense(Xvar, Xvark, weights=weights, diag_only=diag_only)
                 + _M2_dense(Yvar, Yvark, weights=weights, diag_only=diag_only))
        Cxy = _M2_dense(Xvar, Yvark, weights=weights, diag_only=diag_only)
        Cyx = _M2_dense(Yvar, Xvark, weights=weights, diag_only=diag_only)
        Cxyyx = Cxy + Cyx
    else:
        # fill in trivial masks so both operands have mask/const defined
        if mask_X is None:
            mask_X = np.ones(Xvar.shape[1], dtype=bool)
            xconst = np.ones(0, dtype=float)
        if mask_Y is None:
            mask_Y = np.ones(Yvar.shape[1], dtype=bool)
            yconst = np.ones(0, dtype=float)
        if _is_zero(xsum) and _is_zero(ysum) or _is_zero(xconst) and _is_zero(yconst):
            # constant contributions vanish -> sparse path
            Cxxyy, Cxyyx = _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y,
                                          weights=weights, column_selection=column_selection)
        else:
            xvarsum = xsum[mask_X]
            yvarsum = ysum[mask_Y]
            if column_selection is None:
                Xvark = Xvar
                mask_Xk = mask_X
                xkvarsum = xvarsum
                xkconst = xconst
                Yvark = Yvar
                mask_Yk = mask_Y
                ykvarsum = yvarsum
                ykconst = yconst
            else:
                # restrict sums, constants and masks to the selected columns
                Xvark = Xvar[:, _filter_variable_indices(mask_X, column_selection)]
                mask_Xk = mask_X[column_selection]
                xksum = xsum[column_selection]
                xkvarsum = xksum[mask_Xk]
                xkconst = xconst[_filter_variable_indices(~mask_X, column_selection)]
                Yvark = Yvar[:, _filter_variable_indices(mask_Y, column_selection)]
                mask_Yk = mask_Y[column_selection]
                yksum = ysum[column_selection]
                ykvarsum = yksum[mask_Yk]
                ykconst = yconst[_filter_variable_indices(~mask_Y, column_selection)]
            Cxxyy = (_M2_const(Xvar, mask_X, xvarsum, xconst,
                               Xvark, mask_Xk, xkvarsum, xkconst, weights=weights)
                     + _M2_const(Yvar, mask_Y, yvarsum, yconst,
                                 Yvark, mask_Yk, ykvarsum, ykconst, weights=weights))
            Cxy = _M2_const(Xvar, mask_X, xvarsum, xconst,
                            Yvark, mask_Yk, ykvarsum, ykconst, weights=weights)
            Cyx = _M2_const(Yvar, mask_Y, yvarsum, yconst,
                            Xvark, mask_Xk, xkvarsum, xkconst, weights=weights)
            Cxyyx = Cxy + Cyx
    return Cxxyy, Cxyyx
Symmetric second moment matrices. Decides whether the dense, sparse, or constant-column implementation is needed.
15,343
def moments_XX(X, remove_mean=False, modify_data=False, weights=None, sparse_mode='auto',
               sparse_tol=0.0, column_selection=None, diag_only=False):
    r"""Computes the first two unnormalized moments of X.

    Returns
    -------
    w : float
        Total (sample or user-supplied) weight.
    sx : ndarray
        (Weighted) column sums of X.
    C : ndarray
        Unnormalized second moment matrix (mean-free if remove_mean=True).
    """
    if weights is not None:
        assert X.shape[0] == weights.shape[0], 'X and weights_x must have equal length'
    # FIX: the original used "is"/"is not" to compare against string literals,
    # which relies on interning and is a SyntaxWarning since Python 3.8;
    # compare by value instead.
    if diag_only and sparse_mode != 'dense':
        if sparse_mode == 'sparse':
            import warnings
            warnings.warn('Computing diagonal entries only is not implemented for sparse mode. Switching to dense mode.')
        sparse_mode = 'dense'
    # split X into variable columns (X0) and constant columns (xconst)
    X0, mask_X, xconst = _sparsify(X, remove_mean=remove_mean, modify_data=modify_data,
                                   sparse_mode=sparse_mode, sparse_tol=sparse_tol)
    is_sparse = mask_X is not None
    # copy when the data would otherwise be modified in place
    X0, xconst = _copy_convert(X0, const=xconst, remove_mean=remove_mean,
                               copy=is_sparse or (remove_mean and not modify_data))
    w, sx, sx0_centered = _sum(X0, xmask=mask_X, xconst=xconst, symmetric=False,
                               remove_mean=remove_mean, weights=weights)
    if remove_mean:
        _center(X0, w, sx, mask=mask_X, const=xconst, inplace=True)
    if column_selection is not None:
        if is_sparse:
            Xk = X[:, column_selection]
            mask_Xk = mask_X[column_selection]
            X0k = Xk[:, mask_Xk]
            xksum = sx0_centered[column_selection]
            xkconst = Xk[0, ~mask_Xk]
            X0k, xkconst = _copy_convert(X0k, const=xkconst, remove_mean=remove_mean, copy=True)
            C = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_Xk, xsum=sx0_centered, xconst=xconst,
                    ysum=xksum, yconst=xkconst, weights=weights)
        else:
            X0k = X0[:, column_selection]
            C = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_X, xsum=sx0_centered, xconst=xconst,
                    ysum=sx0_centered[column_selection], yconst=xconst, weights=weights)
    else:
        C = _M2(X0, X0, mask_X=mask_X, mask_Y=mask_X, xsum=sx0_centered, xconst=xconst,
                ysum=sx0_centered, yconst=xconst, weights=weights, diag_only=diag_only)
    return w, sx, C
r Computes the first two unnormalized moments of X
15,344
def covar(X, remove_mean=False, modify_data=False, weights=None, sparse_mode='auto', sparse_tol=0.0):
    """Compute the covariance matrix of X.

    Thin wrapper around moments_XX that normalizes the second moment matrix
    by the total weight.
    """
    weight, _sums, moments = moments_XX(X, remove_mean=remove_mean, weights=weights,
                                        modify_data=modify_data, sparse_mode=sparse_mode,
                                        sparse_tol=sparse_tol)
    return moments / float(weight)
Computes the covariance matrix of X
15,345
def covars(X, Y, remove_mean=False, modify_data=False, symmetrize=False, weights=None,
           sparse_mode='auto', sparse_tol=0.0):
    """Compute the covariance and cross-covariance matrices of X and Y.

    Thin wrapper around moments_XXXY that normalizes both moment matrices by
    the total weight.
    """
    weight, _sx, _sy, moments_xx, moments_xy = moments_XXXY(
        X, Y, remove_mean=remove_mean, modify_data=modify_data, weights=weights,
        symmetrize=symmetrize, sparse_mode=sparse_mode, sparse_tol=sparse_tol)
    return moments_xx / float(weight), moments_xy / float(weight)
Computes the covariance and cross - covariance matrix of X and Y
15,346
def getargspec_no_self(func):
    """inspect.getargspec replacement built on inspect.signature.

    Returns an ArgSpec(args, varargs, varkw, defaults) tuple for *func*, with
    a leading 'self' parameter stripped when present.
    """
    sig = inspect.signature(func)
    args = [p.name for p in sig.parameters.values()
            if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
    varargs = [p.name for p in sig.parameters.values()
               if p.kind == inspect.Parameter.VAR_POSITIONAL]
    varargs = varargs[0] if varargs else None
    varkw = [p.name for p in sig.parameters.values()
             if p.kind == inspect.Parameter.VAR_KEYWORD]
    varkw = varkw[0] if varkw else None
    defaults = [p.default for p in sig.parameters.values()
                if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
                    and p.default is not p.empty)] or None
    # FIX: guard against functions with no positional parameters, which
    # previously raised IndexError on args[0]
    if args and args[0] == 'self':
        args.pop(0)
    return ArgSpec(args, varargs, varkw, defaults)
inspect . getargspec replacement using inspect . signature .
15,347
def blocksplit_dtrajs(dtrajs, lag=1, sliding=True, shift=None):
    """Split discrete trajectories into approximately uncorrelated fragments.

    Fragments are blocks of roughly one lag time length; a random (or given)
    shift decorrelates the block boundaries between trajectories.
    """
    fragments = []
    for dtraj in dtrajs:
        if len(dtraj) <= lag:
            continue  # too short to produce even one fragment
        s = np.random.randint(min(lag, dtraj.size - lag)) if shift is None else shift
        if sliding:
            if s > 0:
                fragments.append(dtraj[0:lag + s])
            fragments.extend(dtraj[t0:t0 + 2 * lag]
                             for t0 in range(s, dtraj.size - lag, lag))
        else:
            fragments.extend(dtraj[t0:t0 + lag + 1]
                             for t0 in range(s, dtraj.size - lag, lag))
    return fragments
Splits the discrete trajectories into approximately uncorrelated fragments
15,348
def cvsplit_dtrajs(dtrajs):
    """Randomly split trajectories into a train and a test set of (approximately) equal size."""
    if len(dtrajs) == 1:
        raise ValueError('Only have a single trajectory. Cannot be split into train and test set')
    n = len(dtrajs)
    train_idx = np.random.choice(n, int(n / 2), replace=False)
    test_idx = np.array(list(set(list(np.arange(n))) - set(list(train_idx))))
    return [dtrajs[i] for i in train_idx], [dtrajs[i] for i in test_idx]
Splits the trajectories into a training and test set with approximately equal number of trajectories
15,349
def numpy_random_seed(seed=42):
    """Set numpy's random seed within the context; restore the previous state on exit."""
    saved_state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        # always roll back, even if the body raised
        np.random.set_state(saved_state)
sets the random seed of numpy within the context .
15,350
def random_seed(seed=42):
    """Set Python's random seed within the context; restore the previous state on exit."""
    saved_state = random.getstate()
    random.seed(seed)
    try:
        yield
    finally:
        # always roll back, even if the body raised
        random.setstate(saved_state)
sets the random seed of Python within the context .
15,351
def settings(**kwargs):
    """Apply the given PyEMMA config values within the context; restore old values on exit."""
    from pyemma import config
    previous = {}
    try:
        for key, value in kwargs.items():
            previous[key] = getattr(config, key)
            setattr(config, key, value)
        yield
    finally:
        # restore whatever was successfully recorded
        for key, value in previous.items():
            setattr(config, key, value)
apply given PyEMMA config values temporarily within the given context .
15,352
def get_histogram(xall, yall, nbins=100, weights=None, avoid_zero_count=False):
    """Compute a two-dimensional histogram of scattered data.

    Returns the bin-center coordinates x, y and the transposed count matrix
    (matplotlib convention: first axis is y).
    """
    counts, xedges, yedges = _np.histogram2d(xall, yall, bins=nbins, weights=weights)
    xcenters = 0.5 * (xedges[:-1] + xedges[1:])
    ycenters = 0.5 * (yedges[:-1] + yedges[1:])
    if avoid_zero_count:
        # replace empty bins by the smallest populated count
        counts = _np.maximum(counts, _np.min(counts[counts.nonzero()]))
    return xcenters, ycenters, counts.T
Compute a two - dimensional histogram .
15,353
def get_grid_data(xall, yall, zall, nbins=100, method='nearest'):
    """Interpolate unstructured 2D samples (xall, yall, zall) onto a regular nbins x nbins grid."""
    from scipy.interpolate import griddata
    xi = _np.linspace(xall.min(), xall.max(), nbins)
    yi = _np.linspace(yall.min(), yall.max(), nbins)
    gridx, gridy = _np.meshgrid(xi, yi, indexing='ij')
    sample_points = _np.hstack([xall[:, None], yall[:, None]])
    gridz = griddata(sample_points, zall, (gridx, gridy), method=method)
    return gridx, gridy, gridz
Interpolate unstructured two - dimensional data .
15,354
def _to_free_energy(z, minener_zero=False):
    """Convert histogram counts z into free energies F = -log(pi).

    Empty bins get F = inf; with minener_zero=True the minimum finite free
    energy is shifted to zero.
    """
    pi = _to_density(z)
    free_energy = _np.inf * _np.ones(shape=z.shape)
    populated = pi.nonzero()
    free_energy[populated] = -_np.log(pi[populated])
    if minener_zero:
        free_energy[populated] -= _np.min(free_energy[populated])
    return free_energy
Compute free energies from histogram counts .
15,355
def _prune_kwargs ( kwargs ) : allowed_keys = [ 'corner_mask' , 'alpha' , 'locator' , 'extend' , 'xunits' , 'yunits' , 'antialiased' , 'nchunk' , 'hatches' , 'zorder' ] ignored = [ key for key in kwargs . keys ( ) if key not in allowed_keys ] for key in ignored : _warn ( '{}={} is not an allowed optional parameter and will' ' be ignored' . format ( key , kwargs [ key ] ) ) kwargs . pop ( key , None ) return kwargs
Remove non - allowed keys from a kwargs dictionary .
15,356
def plot_map(x, y, z, ax=None, cmap=None, ncontours=100, vmin=None, vmax=None,
             levels=None, cbar=True, cax=None, cbar_label=None,
             cbar_orientation='vertical', norm=None, **kwargs):
    """Draw a filled-contour map of z over the grid (x, y).

    Returns (fig, ax, misc) where misc holds the contour mappable and, if
    requested, the colorbar object.
    """
    import matplotlib.pyplot as _plt
    if ax is None:
        fig, ax = _plt.subplots()
    else:
        fig = ax.get_figure()
    mappable = ax.contourf(x, y, z, ncontours, norm=norm, vmin=vmin, vmax=vmax,
                           cmap=cmap, levels=levels, **_prune_kwargs(kwargs))
    misc = dict(mappable=mappable)
    if cbar_orientation not in ('horizontal', 'vertical'):
        raise ValueError('cbar_orientation must be "horizontal" or "vertical"')
    if cbar:
        # attach the colorbar either next to ax or into the explicit cax
        target = dict(ax=ax) if cax is None else dict(cax=cax)
        cbar_ = fig.colorbar(mappable, orientation=cbar_orientation, **target)
        if cbar_label is not None:
            cbar_.set_label(cbar_label)
        misc.update(cbar=cbar_)
    return fig, ax, misc
Plot a two - dimensional map from data on a grid .
15,357
def plot_density(xall, yall, ax=None, cmap=None, ncontours=100, vmin=None, vmax=None,
                 levels=None, cbar=True, cax=None, cbar_label='sample density',
                 cbar_orientation='vertical', logscale=False, nbins=100, weights=None,
                 avoid_zero_count=False, **kwargs):
    """Plot a two-dimensional sample-density map from scattered data via a histogram."""
    x, y, z = get_histogram(xall, yall, nbins=nbins, weights=weights,
                            avoid_zero_count=avoid_zero_count)
    density = _to_density(z)
    # mask empty bins so they stay blank instead of being drawn at zero
    density = _np.ma.masked_where(density <= 0, density)
    if logscale:
        from matplotlib.colors import LogNorm
        norm = LogNorm(vmin=vmin, vmax=vmax)
        if levels is None:
            levels = _np.logspace(_np.floor(_np.log10(density.min())),
                                  _np.ceil(_np.log10(density.max())), ncontours + 1)
    else:
        norm = None
    fig, ax, misc = plot_map(x, y, density, ax=ax, cmap=cmap, ncontours=ncontours,
                             vmin=vmin, vmax=vmax, levels=levels, cbar=cbar, cax=cax,
                             cbar_label=cbar_label, cbar_orientation=cbar_orientation,
                             norm=norm, **kwargs)
    if cbar and logscale:
        from matplotlib.ticker import LogLocator
        misc['cbar'].set_ticks(LogLocator(base=10.0, subs=range(10)))
    return fig, ax, misc
Plot a two - dimensional density map using a histogram of scattered data .
15,358
def plot_free_energy(xall, yall, weights=None, ax=None, nbins=100, ncontours=100, offset=-1,
                     avoid_zero_count=False, minener_zero=True, kT=1.0, vmin=None, vmax=None,
                     cmap='nipy_spectral', cbar=True, cbar_label='free energy / kT', cax=None,
                     levels=None, legacy=True, ncountours=None, cbar_orientation='vertical',
                     **kwargs):
    """Plot a two-dimensional free energy map from scattered data.

    The free energy is F = -kT * log(density) of a 2D histogram of (xall, yall).
    In legacy mode the deprecated parameters `offset` and `ncountours` (a
    historical typo for `ncontours`) are accepted with warnings and only
    (fig, ax) is returned; otherwise (fig, ax, misc) is returned.
    """
    if legacy:
        _warn('Legacy mode is deprecated is will be removed in the'
              ' next major release. Until then use legacy=False', DeprecationWarning)
        cmap = _get_cmap(cmap)
        if offset != -1:
            _warn('Parameter offset is deprecated and will be ignored', DeprecationWarning)
        if ncountours is not None:
            # accept the misspelled parameter for backwards compatibility
            _warn('Parameter ncountours is deprecated;'
                  ' use ncontours instead', DeprecationWarning)
            ncontours = ncountours
        if vmin is None:
            vmin = 0.0
    else:
        # outside legacy mode the deprecated parameters are hard errors
        if offset != -1:
            raise ValueError('Parameter offset is not allowed outside legacy mode')
        if ncountours is not None:
            raise ValueError('Parameter ncountours is not allowed outside'
                             ' legacy mode; use ncontours instead')
    # histogram -> free energies in units of kT
    x, y, z = get_histogram(xall, yall, nbins=nbins, weights=weights,
                            avoid_zero_count=avoid_zero_count)
    f = _to_free_energy(z, minener_zero=minener_zero) * kT
    fig, ax, misc = plot_map(x, y, f, ax=ax, cmap=cmap, ncontours=ncontours, vmin=vmin,
                             vmax=vmax, levels=levels, cbar=cbar, cax=cax,
                             cbar_label=cbar_label, cbar_orientation=cbar_orientation,
                             norm=None, **kwargs)
    if legacy:
        return fig, ax
    return fig, ax, misc
Plot a two - dimensional free energy map using a histogram of scattered data .
15,359
def plot_contour(xall, yall, zall, ax=None, cmap=None, ncontours=100, vmin=None, vmax=None,
                 levels=None, cbar=True, cax=None, cbar_label=None, cbar_orientation='vertical',
                 norm=None, nbins=100, method='nearest', mask=False, **kwargs):
    """Contour plot of scattered data (xall, yall, zall) interpolated onto a regular grid."""
    x, y, z = get_grid_data(xall, yall, zall, nbins=nbins, method=method)
    if vmin is None:
        vmin = _np.min(zall[zall > -_np.inf])  # ignore -inf entries
    if vmax is None:
        vmax = _np.max(zall[zall < _np.inf])  # ignore +inf entries
    if levels == 'legacy':
        eps = (vmax - vmin) / float(ncontours)
        levels = _np.linspace(vmin - eps, vmax + eps)
    if mask:
        # hide grid cells that contain no samples at all
        _, _, counts = get_histogram(xall, yall, nbins=nbins, weights=None,
                                     avoid_zero_count=None)
        z = _np.ma.masked_where(counts.T <= 0, z)
    return plot_map(x, y, z, ax=ax, cmap=cmap, ncontours=ncontours, vmin=vmin, vmax=vmax,
                    levels=levels, cbar=cbar, cax=cax, cbar_label=cbar_label,
                    cbar_orientation=cbar_orientation, norm=norm, **kwargs)
Plot a two - dimensional contour map by interpolating scattered data on a grid .
15,360
def plot_state_map(xall, yall, states, ax=None, ncontours=100, cmap=None, cbar=True, cax=None,
                   cbar_label='state', cbar_orientation='vertical', nbins=100, mask=True,
                   **kwargs):
    """Contour map of discrete state labels interpolated from scattered data."""
    from matplotlib.cm import get_cmap
    nstates = int(_np.max(states) + 1)
    discrete_cmap = get_cmap(cmap, nstates)
    fig, ax, misc = plot_contour(
        xall, yall, states, ax=ax, cmap=discrete_cmap, ncontours=ncontours, vmin=None,
        vmax=None, levels=None, cbar=cbar, cax=cax, cbar_label=cbar_label,
        cbar_orientation=cbar_orientation, norm=None, nbins=nbins, method='nearest',
        mask=mask, **kwargs)
    if cbar:
        # put one tick in the middle of every state's color band
        cmin, cmax = misc['mappable'].get_clim()
        step = (cmax - cmin) / float(nstates)
        labels = _np.arange(nstates)
        misc['cbar'].set_ticks((labels + 0.5) * step)
        misc['cbar'].set_ticklabels(labels)
    return fig, ax, misc
Plot a two - dimensional contour map of states by interpolating labels of scattered data on a grid .
15,361
def single_traj_from_n_files(file_list, top):
    """Load every file in file_list with topology top and join them into one trajectory."""
    traj = None
    for filename in file_list:
        piece = md.load(filename, top=top)
        traj = piece if traj is None else traj.join(piece)
    return traj
Creates a single trajectory object from a list of files
15,362
def add_element(self, e):
    """Append pipeline stage e; wire its data producer and propagate the chunksize."""
    if not isinstance(e, Iterable):
        raise TypeError("given element {} is not iterable in terms of "
                        "PyEMMAs coordinate pipeline.".format(e))
    # non-reader stages consume the output of the current last stage
    if not e.is_reader and len(self._chain) >= 1:
        e._data_producer = self._chain[-1]
    e.chunksize = self.chunksize
    self._chain.append(e)
r Appends a pipeline stage .
15,363
def set_element(self, index, e):
    """Replace the pipeline stage at index with e, rewiring both neighbors.

    Returns the replaced stage, or None when e is already at that position.
    """
    if index > len(self._chain):
        raise IndexError("tried to access element %i, but chain has only %i"
                         " elements" % (index, len(self._chain)))
    if type(index) is not int:
        raise ValueError("index is not a integer but '%s'" % str(type(index)))
    if self._chain[index] is e:
        return  # nothing to do
    replaced = self._chain.pop(index)
    if not replaced.is_reader:
        replaced.data_producer = None
    self._chain.insert(index, e)
    # the first stage produces its own data; everyone else reads the predecessor
    e.data_producer = e if index == 0 else self._chain[index - 1]
    try:
        self._chain[index + 1].data_producer = e
    except IndexError:
        pass
    self._chain[index]._estimated = False  # force re-estimation of the new stage
    return replaced
r Replaces a pipeline stage .
15,364
def parametrize(self):
    """Run the estimation of every not-yet-estimated, non-reader pipeline stage in order."""
    for element in self._chain:
        if element.is_reader or element._estimated:
            continue
        element.estimate(element.data_producer, stride=self.param_stride,
                         chunksize=self.chunksize)
    self._estimated = True
r Reads all data and discretizes it into discrete trajectories .
15,365
def _is_estimated ( self ) : r result = self . _estimated for el in self . _chain : if not el . is_reader : result &= el . _estimated return result
r Iterates through the pipeline elements and checks if every element is parametrized .
15,366
def dtrajs(self):
    """Discrete trajectories of the final pipeline stage; parametrizes on demand."""
    if not self._estimated:
        self.logger.info("not yet parametrized, running now.")
        self.parametrize()
    final_stage = self._chain[-1]
    return final_stage.dtrajs
get discrete trajectories
15,367
def save_dtrajs(self, prefix='', output_dir='.', output_format='ascii', extension='.dtraj'):
    """Write the discrete trajectories of the final (clustering) stage to disk.

    File names are derived from the reader stage when available; otherwise a
    default file name is used.
    """
    from pyemma.coordinates.clustering.interface import AbstractClustering
    clustering = self._chain[-1]
    assert isinstance(clustering, AbstractClustering)
    reader = self._chain[0]
    trajfiles = reader.filenames if isinstance(reader, FeatureReader) else None
    clustering.save_dtrajs(trajfiles, prefix, output_dir, output_format, extension)
r Saves calculated discrete trajectories . Filenames are taken from given reader . If data comes from memory dtrajs are written to a default filename .
15,368
def save(self, file_name, model_name='default', overwrite=False, save_streaming_chain=False):
    r"""Save the current state of this object under the given name in an HDF5 file.

    Parameters
    ----------
    file_name : str
        Target file; created if missing, appended otherwise.
    model_name : str
        Key under which the object is stored.
    overwrite : bool
        Replace an existing model of the same name.
    save_streaming_chain : bool
        Also store the attached data-processing chain.
    """
    from pyemma._base.serialization.h5file import H5File
    try:
        with H5File(file_name=file_name, mode='a') as f:
            f.add_serializable(model_name, obj=self, overwrite=overwrite,
                               save_streaming_chain=save_streaming_chain)
    except Exception as e:
        # FIX: the original format string contained a stray '")' sequence
        msg = ('During saving the object {obj} '
               'the following error occurred: {error}'.format(obj=self, error=e))
        if isinstance(self, Loggable):
            self.logger.exception(msg)
        else:
            logger.exception(msg)
        raise
r saves the current state of this object to given file and name .
15,369
def load(cls, file_name, model_name='default'):
    """Load a previously saved PyEMMA object stored under model_name in file_name."""
    from .h5file import H5File
    with H5File(file_name, model_name=model_name, mode='r') as fh:
        return fh.model
Loads a previously saved PyEMMA object from disk .
15,370
def _get_version_for_class_from_state(state, klass):
    """Look up the serialization version stored for klass in state.

    Also tries all former names of the class (rename registry) to map old
    stored locations to the current one; returns +inf when nothing matches.
    """
    candidates = [_importable_name(klass)]
    from .util import class_rename_registry
    candidates.extend(class_rename_registry.old_handled_by(klass))
    for candidate in candidates:
        try:
            return state['class_tree_versions'][candidate]
        except KeyError:
            continue  # try the next known name
    if _debug:
        logger.debug('unable to obtain a __serialize_version for class %s', klass)
    return float('inf')
Retrieves the serialization version of the given klass from the state, also checking old class names mapped to the new location.
15,371
def partial_fit(self, X):
    """Incrementally update the estimator with a new chunk of data X; returns self."""
    from pyemma.coordinates import source
    data_source = source(X)
    self._estimate(data_source, partial_fit=True)
    self._estimated = True
    return self
incrementally update the estimates
15,372
def C00_(self):
    """Instantaneous covariance matrix from the running covariance estimator."""
    self._check_estimated()
    running_covar = self._rc
    return running_covar.cov_XX(bessel=self.bessel)
Instantaneous covariance matrix
15,373
def C0t_(self):
    """Time-lagged covariance matrix from the running covariance estimator."""
    self._check_estimated()
    running_covar = self._rc
    return running_covar.cov_XY(bessel=self.bessel)
Time - lagged covariance matrix
15,374
def Ctt_(self):
    """Covariance matrix of the time-shifted data from the running covariance estimator."""
    self._check_estimated()
    running_covar = self._rc
    return running_covar.cov_YY(bessel=self.bessel)
Covariance matrix of the time shifted data
15,375
def tram(ttrajs, dtrajs, bias, lag, unbiased_state=None, count_mode='sliding',
         connectivity='post_hoc_RE', maxiter=10000, maxerr=1.0E-15, save_convergence_info=0,
         dt_traj='1 step', connectivity_factor=1.0, nn=None, direct_space=False,
         N_dtram_accelerations=0, callback=None, init='mbar', init_maxiter=10000,
         init_maxerr=1e-8, equilibrium=None, overcounting_factor=1.0):
    r"""Transition-based reweighting analysis method (TRAM).

    Estimates one TRAM model per lag time in `lag` from the thermodynamic
    state trajectories `ttrajs`, the discrete configuration trajectories
    `dtrajs` and the bias energy trajectories `bias`.

    Returns a single estimator when one lag time was given, otherwise a list
    of estimators (one per lag time).
    """
    ttrajs = _types.ensure_dtraj_list(ttrajs)
    dtrajs = _types.ensure_dtraj_list(dtrajs)
    # all three inputs must describe the same trajectories, frame by frame
    if len(ttrajs) != len(dtrajs):
        raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (len(dtrajs), len(ttrajs)))
    if len(ttrajs) != len(bias):
        raise ValueError("Unmatching number of ttraj/bias elements: %d!=%d" % (len(ttrajs), len(bias)))
    for ttraj, dtraj, btraj in zip(ttrajs, dtrajs, bias):
        if len(ttraj) != len(dtraj):
            raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (len(ttraj), len(dtraj)))
        if len(ttraj) != btraj.shape[0]:
            raise ValueError("Unmatching number of data points in ttraj/bias trajectory: %d!=%d" % (len(ttraj), len(btraj)))
    # accept a scalar lag or an array of lag times
    lags = _np.asarray(lag, dtype=_np.intc).reshape((-1,)).tolist()
    from pyemma.thermo import TRAM as _TRAM
    tram_estimators = []
    from pyemma._base.progress import ProgressReporter
    pg = ProgressReporter()
    pg.register(amount_of_work=len(lags), description='Estimating TRAM for lags')
    with pg.context():
        # one independent estimation per lag time
        for lag in lags:
            t = _TRAM(
                lag, count_mode=count_mode, connectivity=connectivity, maxiter=maxiter,
                maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj,
                connectivity_factor=connectivity_factor, nn=nn, direct_space=direct_space,
                N_dtram_accelerations=N_dtram_accelerations, callback=callback, init=init,
                init_maxiter=init_maxiter, init_maxerr=init_maxerr, equilibrium=equilibrium,
                overcounting_factor=overcounting_factor).estimate((ttrajs, dtrajs, bias))
            tram_estimators.append(t)
            pg.update(1)
    # mark the unbiased thermodynamic state on all estimators, if requested
    _assign_unbiased_state_label(tram_estimators, unbiased_state)
    if len(tram_estimators) == 1:
        return tram_estimators[0]
    return tram_estimators
r Transition - based reweighting analysis method
15,376
def dtram(ttrajs, dtrajs, bias, lag, unbiased_state=None, count_mode='sliding',
          connectivity='reversible_pathways', maxiter=10000, maxerr=1.0E-15,
          save_convergence_info=0, dt_traj='1 step', init=None, init_maxiter=10000,
          init_maxerr=1.0E-8):
    r"""Discrete transition-based reweighting analysis method (dTRAM).

    Estimates one DTRAM model per lag time in `lag`. Returns a single
    estimator when one lag time was given, otherwise a list.
    """
    ttrajs = _types.ensure_dtraj_list(ttrajs)
    dtrajs = _types.ensure_dtraj_list(dtrajs)
    # ttrajs and dtrajs must describe the same trajectories, frame by frame
    if len(ttrajs) != len(dtrajs):
        raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (len(dtrajs), len(ttrajs)))
    for ttraj, dtraj in zip(ttrajs, dtrajs):
        if len(ttraj) != len(dtraj):
            raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (len(ttraj), len(dtraj)))
    # accept a scalar lag or an array of lag times
    lags = _np.asarray(lag, dtype=_np.intc).reshape((-1,)).tolist()
    from pyemma.thermo import DTRAM
    from pyemma._base.progress import ProgressReporter
    pg = ProgressReporter()
    pg.register(len(lags), description='Estimating DTRAM for lags')
    dtram_estimators = []
    with pg.context():
        # one independent estimation per lag time
        for _lag in lags:
            d = DTRAM(
                bias, _lag, count_mode=count_mode, connectivity=connectivity,
                maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info,
                dt_traj=dt_traj, init=init, init_maxiter=init_maxiter,
                init_maxerr=init_maxerr).estimate((ttrajs, dtrajs))
            dtram_estimators.append(d)
            pg.update(1)
    # mark the unbiased thermodynamic state on all estimators, if requested
    _assign_unbiased_state_label(dtram_estimators, unbiased_state)
    if len(dtram_estimators) == 1:
        return dtram_estimators[0]
    return dtram_estimators
r Discrete transition - based reweighting analysis method
15,377
def wham(ttrajs, dtrajs, bias, maxiter=100000, maxerr=1.0E-15, save_convergence_info=0,
         dt_traj='1 step'):
    r"""Weighted histogram analysis method (WHAM).

    Validates that ttrajs and dtrajs match element-wise, then runs a single
    WHAM estimation on them.
    """
    ttrajs = _types.ensure_dtraj_list(ttrajs)
    dtrajs = _types.ensure_dtraj_list(dtrajs)
    if len(ttrajs) != len(dtrajs):
        raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (len(dtrajs), len(ttrajs)))
    for ttraj, dtraj in zip(ttrajs, dtrajs):
        if len(ttraj) != len(dtraj):
            raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (len(ttraj), len(dtraj)))
    from pyemma.thermo import WHAM
    estimator = WHAM(bias, maxiter=maxiter, maxerr=maxerr,
                     save_convergence_info=save_convergence_info, dt_traj=dt_traj)
    return estimator.estimate((ttrajs, dtrajs))
r Weighted histogram analysis method
15,378
def mbar(ttrajs, dtrajs, bias, maxiter=100000, maxerr=1.0E-15, save_convergence_info=0,
         dt_traj='1 step', direct_space=False):
    r"""Multi-state Bennett acceptance ratio (MBAR).

    Validates that ttrajs, dtrajs and bias match element-wise, then runs a
    single MBAR estimation on them.
    """
    ttrajs = _types.ensure_dtraj_list(ttrajs)
    dtrajs = _types.ensure_dtraj_list(dtrajs)
    if len(ttrajs) != len(dtrajs):
        raise ValueError("Unmatching number of dtraj/ttraj elements: %d!=%d" % (len(dtrajs), len(ttrajs)))
    if len(ttrajs) != len(bias):
        raise ValueError("Unmatching number of ttraj/bias elements: %d!=%d" % (len(ttrajs), len(bias)))
    for ttraj, dtraj, btraj in zip(ttrajs, dtrajs, bias):
        if len(ttraj) != len(dtraj):
            raise ValueError("Unmatching number of data points in ttraj/dtraj: %d!=%d" % (len(ttraj), len(dtraj)))
        if len(ttraj) != btraj.shape[0]:
            raise ValueError("Unmatching number of data points in ttraj/bias trajectory: %d!=%d" % (len(ttraj), btraj.shape[0]))
    from pyemma.thermo import MBAR
    estimator = MBAR(maxiter=maxiter, maxerr=maxerr,
                     save_convergence_info=save_convergence_info, dt_traj=dt_traj,
                     direct_space=direct_space)
    return estimator.estimate((ttrajs, dtrajs, bias))
r Multi - state Bennet acceptance ratio
15,379
def default_chunksize(self):
    """How much data will be processed at once when no chunksize has been provided.

    Computed lazily from the output dimension and dtype; falls back to a
    fixed value when either is not yet known.
    """
    if self._default_chunksize is None:
        try:
            self.dimension()
            self.output_type()
        # FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt
        except Exception:
            self._default_chunksize = Iterable._FALLBACK_CHUNKSIZE
        else:
            self._default_chunksize = Iterable._compute_default_cs(
                self.dimension(), self.output_type().itemsize, self.logger)
    return self._default_chunksize
How much data will be processed at once in case no chunksize has been provided .
15,380
def fit(self, X, y, lengths):
    """Fit the HMM by supervised, count-based maximum likelihood with smoothing.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Feature matrix.
    y : array-like, shape (n_samples,)
        Per-sample class labels.
    lengths : array-like of int
        Lengths of the individual label sequences within X/y.
    """
    alpha = self.alpha  # Lidstone/Laplace smoothing parameter
    if alpha <= 0:
        raise ValueError("alpha should be >0, got {0!r}".format(alpha))
    X = atleast2d_or_csr(X)
    classes, y = np.unique(y, return_inverse=True)
    lengths = np.asarray(lengths)
    Y = y.reshape(-1, 1) == np.arange(len(classes))  # one-hot label indicator
    end = np.cumsum(lengths)
    start = end - lengths
    # initial-state log-probabilities from the first label of each sequence
    init_prob = np.log(Y[start].sum(axis=0) + alpha)
    init_prob -= logsumexp(init_prob)
    # FIX: final-state probabilities must come from the LAST label of each
    # sequence; the original mistakenly reused Y[start] here
    final_prob = np.log(Y[end - 1].sum(axis=0) + alpha)
    final_prob -= logsumexp(final_prob)
    # per-class emission log-probabilities
    feature_prob = np.log(safe_sparse_dot(Y.T, X) + alpha)
    feature_prob -= logsumexp(feature_prob, axis=0)
    # class-to-class transition log-probabilities
    trans_prob = np.log(count_trans(y, len(classes)) + alpha)
    trans_prob -= logsumexp(trans_prob, axis=0)
    self.coef_ = feature_prob
    self.intercept_init_ = init_prob
    self.intercept_final_ = final_prob
    self.intercept_trans_ = trans_prob
    self.classes_ = classes
    return self
Fit HMM model to data .
15,381
def _lstree ( files , dirs ) : for f , sha1 in files : yield "100644 blob {}\t{}\0" . format ( sha1 , f ) for d , sha1 in dirs : yield "040000 tree {}\t{}\0" . format ( sha1 , d )
Make git ls - tree like output .
15,382
def hash_dir(path):
    """Hash the directory tree at path the way Git does; return its tree SHA1 as a string."""
    tree_sha = {}
    # bottom-up walk guarantees that child tree hashes exist before the parent is hashed
    for root, dirs, files in os.walk(path, topdown=False):
        file_entries = ((name, hash_file(join(root, name))) for name in files)
        dir_entries = ((name, tree_sha[join(root, name)]) for name in dirs)
        tree_sha[join(*split(root))] = _mktree(file_entries, dir_entries)
    return tree_sha[path]
Write directory at path to Git index return its SHA1 as a string .
15,383
def features(sentence, i):
    """Generate feature strings for the i-th token in sentence.

    Emits the lowercased token, a CAP flag for capitalized tokens, and the
    (up to) two neighboring tokens on each side when they exist.
    """
    word = sentence[i]
    # FIX: the original wrote '"word:{}" + word.lower()', leaving a literal
    # '{}' placeholder in every feature name; format it instead
    yield "word:{}".format(word.lower())
    if word[0].isupper():
        yield "CAP"
    if i > 0:
        yield "word-1:{}".format(sentence[i - 1].lower())
    if i > 1:
        yield "word-2:{}".format(sentence[i - 2].lower())
    if i + 1 < len(sentence):
        yield "word+1:{}".format(sentence[i + 1].lower())
    if i + 2 < len(sentence):
        yield "word+2:{}".format(sentence[i + 2].lower())
Features for i th token in sentence .
15,384
def whole_sequence_accuracy(y_true, y_pred, lengths):
    """Fraction of sequences whose labels are predicted entirely correctly."""
    lengths = np.asarray(lengths)
    end = np.cumsum(lengths)
    start = end - lengths
    n_wrong = sum(1. for i, j in zip(start, end) if np.any(y_true[i:j] != y_pred[i:j]))
    return 1 - n_wrong / len(lengths)
Average accuracy measured on whole sequences .
15,385
def load_conll(f, features, n_features=(2 ** 16), split=False):
    """Load a CoNLL file, extract per-token features and hash them into a sparse matrix.

    Returns (X, y, lengths): hashed features, labels, and sequence lengths.
    """
    hasher = FeatureHasher(n_features=n_features, input_type="string")
    labels = []
    lengths = []
    with _open(f) as fh:
        # the sequence generator is lazy, so it must be consumed while the file is open
        raw_X = _conll_sequences(fh, features, labels, lengths, split)
        X = hasher.transform(raw_X)
    return X, np.asarray(labels), np.asarray(lengths, dtype=np.int32)
Load CoNLL file extract features on the tokens and vectorize them .
15,386
def atleast2d_or_csr(X, dtype=None, order=None, copy=False):
    """Like numpy.atleast_2d, but converts sparse input to CSR format.

    Delegates to _atleast2d_or_sparse with the CSR constructor, the 'tocsr'
    conversion method name and the CSR type predicate.
    """
    return _atleast2d_or_sparse(X, dtype, order, copy, sp.csr_matrix, "tocsr", sp.isspmatrix_csr)
Like numpy . atleast_2d but converts sparse matrices to CSR format
15,387
def validate_lengths(n_samples, lengths):
    """Validate the lengths array against n_samples.

    Returns (start, end): per-sequence offset arrays into the sample axis.
    """
    if lengths is None:
        lengths = [n_samples]  # treat the whole input as a single sequence
    lengths = np.asarray(lengths, dtype=np.int32)
    if lengths.sum() > n_samples:
        raise ValueError("More than {0:d} samples in lengths array {1!s}".format(n_samples, lengths))
    end = np.cumsum(lengths)
    return end - lengths, end
Validate lengths array against n_samples .
15,388
def make_trans_matrix(y, n_classes, dtype=np.float64):
    """Make a sparse indicator matrix of the label transitions in y.

    Row i has a single 1 in column y[i] * n_classes + y[i+1]; the last row is
    empty because the last label has no successor.
    """
    # FIX: the original computed y[i] * i + y[i + 1], using the loop index
    # instead of n_classes, which yields wrong (and potentially out-of-range)
    # column indices. Also use zeros instead of empty so the unused trailing
    # index is a valid value rather than garbage.
    indices = np.zeros(len(y), dtype=np.int32)
    for i in range(len(y) - 1):
        indices[i] = y[i] * n_classes + y[i + 1]
    indptr = np.arange(len(y) + 1)
    indptr[-1] = indptr[-2]  # last row holds no transition
    return csr_matrix((np.ones(len(y), dtype=dtype), indices, indptr),
                      shape=(len(y), n_classes ** 2))
Make a sparse transition matrix for y .
15,389
async def passthrough(self, request):
    """Make a non-mocked network request and return the live response."""
    connector = TCPConnector()
    # restore the original (un-mocked) host resolver on this fresh connector
    connector._resolve_host = partial(self._old_resolver_mock, connector)
    # temporarily put the original is_ssl back so the outgoing request is real
    new_is_ssl = ClientRequest.is_ssl
    ClientRequest.is_ssl = self._old_is_ssl
    try:
        # the AResponsesIsSSL marker header tells us which scheme to use upstream
        original_request = request.clone(scheme="https" if request.headers["AResponsesIsSSL"] else "http")
        # strip the marker header before forwarding
        headers = {k: v for k, v in request.headers.items() if k != "AResponsesIsSSL"}
        async with ClientSession(connector=connector) as session:
            # dispatch by HTTP verb (session.get / session.post / ...)
            async with getattr(session, request.method.lower())(original_request.url, headers=headers, data=(await request.read())) as r:
                # only the content-type header is propagated back
                headers = {k: v for k, v in r.headers.items() if k.lower() == "content-type"}
                text = await r.text()
                response = self.Response(text=text, status=r.status, headers=headers)
                return response
    finally:
        # put the (mocked) is_ssl back in place
        ClientRequest.is_ssl = new_is_ssl
Make non - mocked network request
15,390
def to_one_line_string(tiles):
    """Convert a 136-format tile list to one-line notation, e.g. 123s123p123m33z."""
    tiles = sorted(tiles)

    def render(subset, offset, suffix):
        # 136-ids come in groups of four per tile face
        digits = [str(((t - offset) // 4) + 1) for t in subset]
        return ''.join(digits) + suffix if digits else ''

    man = render([t for t in tiles if t < 36], 0, 'm')
    pin = render([t for t in tiles if 36 <= t < 72], 36, 'p')
    sou = render([t for t in tiles if 72 <= t < 108], 72, 's')
    honors = render([t for t in tiles if t >= 108], 108, 'z')
    return man + pin + sou + honors
Convert 136 tiles array to the one line string Example of output 123s123p123m33z
15,391
def to_136_array(tiles):
    """Convert a 34-format count array into a 136-format tile list."""
    results = []
    for tile34 in range(34):
        base = tile34 * 4
        # the n-th copy of a face receives 136-id base + n
        results.extend(base + copy for copy in range(tiles[tile34]))
    return results
Convert 34 array to the 136 tiles array
15,392
def string_to_136_array(sou=None, pin=None, man=None, honors=None, has_aka_dora=False):
    """Convert one-line string notation to a 136-format tile list (test helper).

    'r' denotes a red five of the suit; it is honored only when
    has_aka_dora=True, to stay compatible with older call sites.
    """
    def _parse(string, offset, red=None):
        if not string:
            return []
        result = []
        seen = []
        for char in string:
            if char == 'r' and has_aka_dora:
                seen.append(red)
                result.append(red)
                continue
            tile = offset + (int(char) - 1) * 4
            if tile == red and has_aka_dora:
                tile += 1  # ordinary five must skip the id reserved for the red five
            # duplicates get consecutive 136-ids
            copies = len([t for t in seen if t == tile])
            result.append(tile + copies)
            seen.append(tile)
        return result

    results = _parse(man, 0, FIVE_RED_MAN)
    results += _parse(pin, 36, FIVE_RED_PIN)
    results += _parse(sou, 72, FIVE_RED_SOU)
    results += _parse(honors, 108)
    return results
Method to convert one line string tiles format to the 136 array . You can pass r instead of 5 for it to become a red five from that suit . To prevent old usage without red has_aka_dora has to be True for this to do that . We need it to increase readability of our tests
15,393
def string_to_34_array(sou=None, pin=None, man=None, honors=None):
    """Convert one-line string tile notation into the 34-slot count array.

    Thin wrapper: parse to 136 format first, then collapse to 34 counts.
    Used to make tests readable.
    """
    tiles_136 = TilesConverter.string_to_136_array(sou, pin, man, honors)
    return TilesConverter.to_34_array(tiles_136)
Convert one-line string tile notation to the 34-slot count array. We use this to increase the readability of our tests.
15,394
def find_34_tile_in_136_array(tile34, tiles):
    """Find the first 136-format id of the given 34-format tile type.

    The shanten calculator works in 34 format; afterwards we need to
    locate that tile among the player's 136-format tiles.

    :param tile34: tile type index 0..33, or None
    :param tiles: list of 136-format tile ids
    :return: the lowest matching 136-id present in ``tiles``, or None
        when tile34 is None/out of range or no copy is present
    """
    # Guard negatives too (original only rejected > 33, letting a negative
    # index silently probe wrong ids).
    if tile34 is None or not 0 <= tile34 <= 33:
        return None
    base = tile34 * 4
    # The four copies of the type occupy ids base..base+3.
    return next((t for t in range(base, base + 4) if t in tiles), None)
Our shanten calculator operates in the 34-tile format; after calculations we need to find the calculated 34-format tile among the player's 136-format tiles.
15,395
def value(self):
    """Option value under the Black-Scholes-Merton model.

    self._sign[1] flips the payoff direction (presumably +1 for a call,
    -1 for a put -- TODO confirm against the _sign definition).
    """
    sign = self._sign[1]
    spot_term = sign * self.S0 * norm.cdf(sign * self.d1, 0.0, 1.0)
    strike_term = sign * self.K * np.exp(-self.r * self.T) * norm.cdf(sign * self.d2, 0.0, 1.0)
    return spot_term - strike_term
Compute option value according to BSM model .
15,396
def add_option(self, K=None, price=None, St=None, kind="call", pos="long"):
    """Add an option to the object's options container.

    :param K: strike
    :param price: option premium
    :param St: underlying price; defaults to self.St when None
    :param kind: option type -- 'call'/'c' or 'put'/'p', case-insensitive
        (generalizes the original fixed alias list; all old spellings work)
    :param pos: 'long' or 'short'
    :raises KeyError: on an unrecognized ``kind`` (as before)
    """
    kinds = {"call": Call, "c": Call, "put": Put, "p": Put}
    option_cls = kinds[kind.lower()]
    St = self.St if St is None else St
    self.options.append(option_cls(St=St, K=K, price=price, pos=pos))
Add an option to the object s options container .
15,397
def _rolling_lstsq ( x , y ) : if x . ndim == 2 : x = x [ : , : , None ] elif x . ndim <= 1 : raise np . AxisError ( "x should have ndmi >= 2" ) return np . squeeze ( np . matmul ( np . linalg . inv ( np . matmul ( x . swapaxes ( 1 , 2 ) , x ) ) , np . matmul ( x . swapaxes ( 1 , 2 ) , np . atleast_3d ( y ) ) , ) )
Finds solution for the rolling case . Matrix formulation .
15,398
def _confirm_constant ( a ) : a = np . asanyarray ( a ) return np . isclose ( a , 1.0 ) . all ( axis = 0 ) . any ( )
Confirm that `a` has a column vector of 1s.
15,399
def _pvalues_all ( self ) : return 2.0 * ( 1.0 - scs . t . cdf ( np . abs ( self . _tstat_all ) , self . df_err ) )
Two - tailed p values for t - stats of all parameters .