idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
23,900
def _do_lumping(self):
    """Do the PCCA lumping.

    Iteratively splits the macrostate with the largest spread along each
    successive dynamical eigenvector, storing the result in
    ``self.microstate_mapping_``.
    """
    # Skip the first (stationary) eigenvector.
    right_eigenvectors = self.right_eigenvectors_[:, 1:]
    assert self.n_states_ > 0
    microstate_mapping = np.zeros(self.n_states_, dtype=int)

    def spread(x):
        # Range of eigenvector values within one current macrostate.
        return x.max() - x.min()

    for i in range(self.n_macrostates - 1):
        v = right_eigenvectors[:, i]
        # Spread of eigenvector i within each macrostate found so far.
        all_spreads = np.array([spread(v[microstate_mapping == k])
                                for k in range(i + 1)])
        state_to_split = np.argmax(all_spreads)
        # Members of the chosen state above the sign tolerance go to a new state.
        inds = ((microstate_mapping == state_to_split) &
                (v >= self.pcca_tolerance))
        microstate_mapping[inds] = i + 1

    self.microstate_mapping_ = microstate_mapping
Do the PCCA lumping .
23,901
def metastability(alpha, T, right_eigenvectors, square_map, pi):
    """Return the metastability PCCA+ objective function.

    Parameters: flat parameter vector ``alpha``, transition matrix ``T``,
    dominant right eigenvectors, the ``square_map`` from ``get_maps``, and
    the stationary distribution ``pi``.  Returns -inf for infeasible alpha.
    """
    num_micro, num_eigen = right_eigenvectors.shape
    A, chi, mapping = calculate_fuzzy_chi(alpha, square_map, right_eigenvectors)
    # Reject alphas that leave a macrostate empty or violate the
    # feasibility constraints on the transformation matrix A.
    if (len(np.unique(mapping)) != right_eigenvectors.shape[1]
            or has_constraint_violation(A, right_eigenvectors)):
        return -1.0 * np.inf
    obj = 0.0
    # Sum of fuzzy self-transition probabilities of each macrostate.
    for i in range(num_eigen):
        obj += np.dot(T.dot(chi[:, i]), pi * chi[:, i]) / np.dot(chi[:, i], pi)
    return obj
Return the metastability PCCA+ objective function.
23,902
def crispness(alpha, T, right_eigenvectors, square_map, pi):
    """Return the crispness PCCA+ objective function.

    ``T`` and ``pi`` are accepted for interface parity with
    ``metastability`` but are not used.  Returns -inf for infeasible alpha.
    """
    A, chi, mapping = calculate_fuzzy_chi(alpha, square_map, right_eigenvectors)
    if (len(np.unique(mapping)) != right_eigenvectors.shape[1]
            or has_constraint_violation(A, right_eigenvectors)):
        return -1.0 * np.inf
    # trace(diag(1/A[0]) . A^T . A); tr/dot/diag are module-level numpy helpers.
    obj = tr(dot(diag(1. / A[0]), dot(A.transpose(), A)))
    return obj
Return the crispness PCCA + objective function .
23,903
def get_maps(A):
    """Get mappings between the square array A and the flat parameter vector alpha.

    Returns ``(flat_map, square_map)``: ``flat_map[k]`` is the (i, j)
    position of flat parameter k inside A (first row/column excluded), and
    ``square_map[i, j]`` is the flat index of position (i, j).
    """
    n = A.shape[0]
    # Enumerate the free block A[1:, 1:] in row-major order.
    flat_map = np.array([[i, j] for i in range(1, n) for j in range(1, n)])
    square_map = np.zeros(A.shape, 'int')
    for k, (i, j) in enumerate(flat_map):
        square_map[i, j] = k
    return flat_map, square_map
Get mappings from the square array A to the flat vector of parameters alpha .
23,904
def has_constraint_violation(A, right_eigenvectors, epsilon=1E-8):
    """Check for constraint violations in the transformation matrix A.

    The feasibility condition requires ``1 - sum(A[0, 1:])`` to equal the
    negated minimum of ``right_eigenvectors[:, 1:] . A[1:, 0]`` (up to
    ``epsilon``).  Returns True when that condition is violated.
    """
    lhs = 1 - A[0, 1:].sum()
    rhs = -1 * dot(right_eigenvectors[:, 1:], A[1:, 0]).min()
    return bool(abs(lhs - rhs) > epsilon)
Check for constraint violations in transformation matrix .
23,905
def index_search(right_eigenvectors):
    """Find a simplex structure in the eigenvectors to begin PCCA+.

    Returns the indices of the microstate rows that span the simplex;
    these rows seed the initial guess for the transformation matrix A.
    """
    num_micro, num_eigen = right_eigenvectors.shape
    index = np.zeros(num_eigen, 'int')
    # Start from the row with the largest norm.
    index[0] = np.argmax([norm(right_eigenvectors[i]) for i in range(num_micro)])
    ortho_sys = right_eigenvectors - np.outer(np.ones(num_micro), right_eigenvectors[index[0]])
    # Gram-Schmidt-like sweep: remove the component along the previously
    # selected row, then pick the farthest remaining row.
    for j in range(1, num_eigen):
        temp = ortho_sys[index[j - 1]].copy()
        for l in range(num_micro):
            ortho_sys[l] -= temp * dot(ortho_sys[l], temp)
        dist_list = np.array([norm(ortho_sys[l]) for l in range(num_micro)])
        index[j] = np.argmax(dist_list)
        ortho_sys /= dist_list.max()
    return index
Find simplex structure in eigenvectors to begin PCCA + .
23,906
def fill_A(A, right_eigenvectors):
    """Construct a feasible (normalized) guess for the transformation matrix A.

    The first row and column of A are overwritten so the resulting fuzzy
    memberships are nonnegative and normalized.  The input A is not
    modified in place.
    """
    num_micro, num_eigen = right_eigenvectors.shape
    A = A.copy()
    # First column balances the row sums of the free block...
    A[1:, 0] = -1 * A[1:, 1:].sum(1)
    # ...and the first row enforces nonnegativity of chi.
    A[0] = -1 * dot(right_eigenvectors[:, 1:].real, A[1:]).min(0)
    A /= A[0].sum()
    return A
Construct feasible initial guess for transformation matrix A .
23,907
def _do_lumping(self):
    """Perform the PCCA+ algorithm by optimizing the transformation matrix A."""
    right_eigenvectors = self.right_eigenvectors_[:, :self.n_macrostates]
    # Initial guess: invert the simplex-spanning rows, then make feasible.
    index = index_search(right_eigenvectors)
    A = right_eigenvectors[index, :]
    A = inv(A)
    A = fill_A(A, right_eigenvectors)
    if self.do_minimization:
        A = self._optimize_A(A)
    self.A_ = fill_A(A, right_eigenvectors)
    # Fuzzy memberships; hard assignment takes the most probable macrostate.
    self.chi_ = dot(right_eigenvectors, self.A_)
    self.microstate_mapping_ = np.argmax(self.chi_, 1)
Perform PCCA + algorithm by optimizing transformation matrix A .
23,908
def _optimize_A(self, A):
    """Find an optimal transformation matrix A by minimization.

    Runs basin hopping followed by a Nelder-Mead refinement on the
    (negated) objective function.  Raises ValueError if the optimizer
    ends at an infeasible point.
    """
    right_eigenvectors = self.right_eigenvectors_[:, :self.n_macrostates]
    flat_map, square_map = get_maps(A)
    alpha = to_flat(1.0 * A, flat_map)

    def obj(x):
        # Negated: scipy minimizes, but the PCCA+ objective is maximized.
        return -1 * self._objective_function(x, self.transmat_, right_eigenvectors, square_map, self.populations_)

    alpha = scipy.optimize.basinhopping(obj, alpha, niter_success=1000,)['x']
    alpha = scipy.optimize.fmin(obj, alpha, full_output=True, xtol=1E-4, ftol=1E-4, maxfun=5000, maxiter=100000)[0]
    if np.isneginf(obj(alpha)):
        raise ValueError("Error: minimization has not located a feasible point.")
    A = to_square(alpha, square_map)
    return A
Find optimal transformation matrix A by minimization .
23,909
def _solve_msm_eigensystem(transmat, k):
    """Find the k dominant eigenpairs of an MSM transition matrix.

    Returns (eigenvalues, left eigenvectors, right eigenvectors) sorted by
    decreasing real eigenvalue and normalized by ``_normalize_eigensystem``.
    """
    u, lv, rv = scipy.linalg.eig(transmat, left=True, right=True)
    order = np.argsort(-np.real(u))  # descending by real part
    u = np.real_if_close(u[order[:k]])
    lv = np.real_if_close(lv[:, order[:k]])
    rv = np.real_if_close(rv[:, order[:k]])
    return _normalize_eigensystem(u, lv, rv)
Find the dominant eigenpairs of an MSM transition matrix
23,910
def _normalize_eigensystem(u, lv, rv):
    """Normalize the eigenvectors of a reversible Markov state model.

    The first left eigenvector is scaled to a probability distribution
    (the stationary distribution); the remaining left eigenvectors are
    normalized under the stationary-weighted inner product; the right
    eigenvectors are rescaled to be biorthonormal to the left ones.
    lv and rv are modified in place.
    """
    lv[:, 0] = lv[:, 0] / np.sum(lv[:, 0])
    for i in range(1, lv.shape[1]):
        # <lv_i, lv_i / pi> == 1 after this scaling.
        lv[:, i] = lv[:, i] / np.sqrt(np.dot(lv[:, i], lv[:, i] / lv[:, 0]))
    for i in range(rv.shape[1]):
        # <lv_i, rv_i> == 1 (biorthonormality).
        rv[:, i] = rv[:, i] / np.dot(lv[:, i], rv[:, i])
    return u, lv, rv
Normalize the eigenvectors of a reversible Markov state model according to our preferred scheme .
23,911
def _strongly_connected_subgraph(counts, weight=1, verbose=True):
    """Trim a transition count matrix down to its maximal strongly connected subgraph.

    Keeps the strongly connected component with the largest population and
    returns ``(trimmed_counts, mapping, percent_retained)`` where mapping
    takes old state indices to new contiguous indices.
    """
    n_states_input = counts.shape[0]
    # Edges exist where the count meets the weight threshold.
    n_components, component_assignments = csgraph.connected_components(csr_matrix(counts >= weight), connection="strong")
    populations = np.array(counts.sum(0)).flatten()
    component_pops = np.array([populations[component_assignments == i].sum() for i in range(n_components)])
    which_component = component_pops.argmax()

    def cpop(which):
        # Percentage of total population in one component (nan if empty).
        csum = component_pops.sum()
        return 100 * component_pops[which] / csum if csum != 0 else np.nan

    percent_retained = cpop(which_component)
    if verbose:
        print("MSM contains %d strongly connected component%s "
              "above weight=%.2f. Component %d selected, with "
              "population %f%%" % (n_components, 's' if (n_components != 1) else '', weight, which_component, percent_retained))
    keys = np.arange(n_states_input)[component_assignments == which_component]
    # NOTE(review): `counts[np.ix_(keys, keys)] == 0` is an array comparison
    # whose truthiness only works for a single-element selection -- confirm
    # this branch is reached only when the kept component has one state.
    if n_components == n_states_input and counts[np.ix_(keys, keys)] == 0:
        return np.zeros((0, 0)), {}, percent_retained
    values = np.arange(len(keys))
    mapping = dict(zip(keys, values))
    n_states_output = len(mapping)
    trimmed_counts = np.zeros((n_states_output, n_states_output), dtype=counts.dtype)
    trimmed_counts[np.ix_(values, values)] = counts[np.ix_(keys, keys)]
    return trimmed_counts, mapping, percent_retained
Trim a transition count matrix down to its maximal strongly ergodic subgraph .
23,912
def _transition_counts ( sequences , lag_time = 1 , sliding_window = True ) : if ( not sliding_window ) and lag_time > 1 : return _transition_counts ( [ X [ : : lag_time ] for X in sequences ] , lag_time = 1 ) classes = np . unique ( np . concatenate ( sequences ) ) contains_nan = ( classes . dtype . kind == 'f' ) and np . any ( np . isnan ( classes ) ) contains_none = any ( c is None for c in classes ) if contains_nan : classes = classes [ ~ np . isnan ( classes ) ] if contains_none : classes = [ c for c in classes if c is not None ] n_states = len ( classes ) mapping = dict ( zip ( classes , range ( n_states ) ) ) mapping_is_identity = ( not contains_nan and not contains_none and classes . dtype . kind == 'i' and np . all ( classes == np . arange ( n_states ) ) ) mapping_fn = np . vectorize ( mapping . get , otypes = [ np . int ] ) none_to_nan = np . vectorize ( lambda x : np . nan if x is None else x , otypes = [ np . float ] ) counts = np . zeros ( ( n_states , n_states ) , dtype = float ) _transitions = [ ] for y in sequences : y = np . asarray ( y ) from_states = y [ : - lag_time : 1 ] to_states = y [ lag_time : : 1 ] if contains_none : from_states = none_to_nan ( from_states ) to_states = none_to_nan ( to_states ) if contains_nan or contains_none : mask = ~ ( np . isnan ( from_states ) + np . isnan ( to_states ) ) from_states = from_states [ mask ] to_states = to_states [ mask ] if ( not mapping_is_identity ) and len ( from_states ) > 0 and len ( to_states ) > 0 : from_states = mapping_fn ( from_states ) to_states = mapping_fn ( to_states ) _transitions . append ( np . row_stack ( ( from_states , to_states ) ) ) transitions = np . hstack ( _transitions ) C = coo_matrix ( ( np . ones ( transitions . shape [ 1 ] , dtype = int ) , transitions ) , shape = ( n_states , n_states ) ) counts = counts + np . asarray ( C . todense ( ) ) counts /= float ( lag_time ) return counts , mapping
Count the number of directed transitions in a collection of sequences in a discrete space .
23,913
def partial_transform(self, sequence, mode='clip'):
    """Transform one sequence of labels to internal indexing.

    mode='clip' returns a list of contiguous valid sub-sequences (labels
    missing from ``self.mapping_`` split the sequence); mode='fill'
    returns a single array with unmapped labels as NaN (cast to int when
    every value is integral).
    """
    if mode not in ['clip', 'fill']:
        raise ValueError('mode must be one of ["clip", "fill"]: %s' % mode)
    sequence = np.asarray(sequence)
    if sequence.ndim != 1:
        raise ValueError("Each sequence must be 1D")
    # FIX: np.float was removed from numpy >= 1.24; builtin float is correct.
    f = np.vectorize(lambda k: self.mapping_.get(k, np.nan), otypes=[float])
    a = f(sequence)
    if mode == 'fill':
        if np.all(np.mod(a, 1) == 0):
            result = a.astype(int)
        else:
            result = a
    elif mode == 'clip':
        # Each run of non-NaN values becomes its own integer sub-sequence.
        result = [a[s].astype(int) for s in np.ma.clump_unmasked(np.ma.masked_invalid(a))]
    else:
        raise RuntimeError()
    return result
Transform a sequence to internal indexing
23,914
def transform(self, sequences, mode='clip'):
    """Transform a list of sequences to internal indexing.

    With mode='fill' each input sequence maps to exactly one output
    sequence (append); with mode='clip' one sequence may split into
    several valid chunks, which are flattened into the result (extend).
    """
    if mode not in ['clip', 'fill']:
        raise ValueError('mode must be one of ["clip", "fill"]: %s' % mode)
    sequences = list_of_1d(sequences)
    result = []
    for y in sequences:
        if mode == 'fill':
            result.append(self.partial_transform(y, mode))
        elif mode == 'clip':
            result.extend(self.partial_transform(y, mode))
        else:
            raise RuntimeError()
    return result
Transform a list of sequences to internal indexing
23,915
def _parse_ergodic_cutoff ( self ) : ec_is_str = isinstance ( self . ergodic_cutoff , str ) if ec_is_str and self . ergodic_cutoff . lower ( ) == 'on' : if self . sliding_window : return 1.0 / self . lag_time else : return 1.0 elif ec_is_str and self . ergodic_cutoff . lower ( ) == 'off' : return 0.0 else : return self . ergodic_cutoff
Get a numeric value from the ergodic_cutoff input which can be on or off .
23,916
def inverse_transform(self, sequences):
    """Transform sequences from internal indexing back into labels.

    Raises ValueError if any index falls outside ``[0, n_states_)``.
    """
    sequences = list_of_1d(sequences)
    inverse_mapping = {v: k for k, v in self.mapping_.items()}
    f = np.vectorize(inverse_mapping.get)
    result = []
    for y in sequences:
        uq = np.unique(y)
        if not np.all(np.logical_and(0 <= uq, uq < self.n_states_)):
            raise ValueError('sequence must be between 0 and n_states-1')
        result.append(f(y))
    return result
Transform a list of sequences from internal indexing into labels
23,917
def sample_discrete(self, state=None, n_steps=100, random_state=None):
    r"""Generate a random sequence of states by propagating the model.

    Each discrete step corresponds to the model lag time.  ``state`` may
    be None (draw the initial state from the stationary distribution), a
    distribution over all states, or a single state label.

    FIX: the original had a stray bare ``r`` expression (residue of a
    stripped raw docstring) that raised UnboundLocalError at call time.
    """
    random = check_random_state(random_state)
    r = random.rand(1 + n_steps)
    if state is None:
        # Inverse-CDF sample from the stationary distribution.
        initial = np.sum(np.cumsum(self.populations_) < r[0])
    elif hasattr(state, '__len__') and len(state) == self.n_states_:
        initial = np.sum(np.cumsum(state) < r[0])
    else:
        initial = self.mapping_[state]
    cstr = np.cumsum(self.transmat_, axis=1)
    chain = [initial]
    # Inverse-CDF sampling of each successive state from its row.
    for i in range(1, n_steps):
        chain.append(np.sum(cstr[chain[i - 1], :] < r[i]))
    return self.inverse_transform([chain])[0]
Generate a random sequence of states by propagating the model using discrete time steps given by the model lag time.
23,918
def draw_samples(self, sequences, n_samples, random_state=None):
    """Sample conformations for a sequence of states.

    Returns, per state, ``n_samples`` randomly chosen
    (trajectory index, frame index) pairs; states never visited get an
    empty list.
    """
    # FIX: collections.Iterable was removed in Python 3.10; the abc
    # submodule is the supported location.
    from collections.abc import Iterable
    if not any([isinstance(seq, Iterable) for seq in sequences]):
        # A single flat sequence was passed; wrap it.
        sequences = [sequences]
    random = check_random_state(random_state)
    selected_pairs_by_state = []
    for state in range(self.n_states_):
        all_frames = [np.where(a == state)[0] for a in sequences]
        pairs = [(trj, frame) for (trj, frames) in enumerate(all_frames) for frame in frames]
        if pairs:
            selected_pairs_by_state.append([pairs[random.choice(len(pairs))] for i in range(n_samples)])
        else:
            selected_pairs_by_state.append([])
    return np.array(selected_pairs_by_state)
Sample conformations for a sequences of states .
23,919
def _do_lumping(self):
    """Do the MVCA lumping.

    Clusters the rows of the transition matrix with Ward-linkage landmark
    agglomerative clustering; each microstate's cluster label becomes its
    macrostate assignment.
    """
    model = LandmarkAgglomerative(linkage='ward', n_clusters=self.n_macrostates, metric=self.metric, n_landmarks=self.n_landmarks, landmark_strategy=self.landmark_strategy, random_state=self.random_state)
    model.fit([self.transmat_])
    if self.fit_only:
        # Use the labels assigned during fitting (landmarks only).
        microstate_mapping_ = model.landmark_labels_
    else:
        microstate_mapping_ = model.transform([self.transmat_])[0]
    self.microstate_mapping_ = microstate_mapping_
Do the MVCA lumping .
23,920
def _truncate ( self , x , k ) : not_F = np . argsort ( np . abs ( x ) ) [ : - k ] x [ not_F ] = 0 return x
Given a vector x, leave its top-k absolute-value entries alone and set the rest to 0.
23,921
def _truncated_power_method(self, A, x0, k, max_iter=10000, thresh=1e-8):
    """Find the best k-sparse approximation to the dominant eigenvector of A.

    Iterates x <- normalize(truncate(A x, k)) from the initial guess x0
    until successive iterates differ by less than ``thresh`` (or
    ``max_iter`` iterations).
    """
    xts = [x0]
    for t in range(max_iter):
        xts.append(self._normalize(self._truncate(np.dot(A, xts[-1]), k)))
        if np.linalg.norm(xts[-1] - xts[-2]) < thresh:
            break
    return xts[-1]
Given a matrix A, an initial guess x0, and a maximum cardinality k, find the best k-sparse approximation to its dominant eigenvector.
23,922
def _run(self):
    """Do the APM lumping.

    Monte-Carlo-style loop: repeatedly split microstates, cluster in time,
    lump to macrostates, and accept or reject the move based on the change
    in the metastability Q.

    NOTE(review): block reconstructed from whitespace-mangled source; the
    nesting of the accept/store branch should be confirmed.
    """
    n_macrostates = 1
    metaQ = -1.0
    prevQ = -1.0
    global_maxQ = -1.0
    local_maxQ = -1.0
    # FIX: renamed loop variable from `iter`, which shadowed the builtin.
    for iteration in range(self.max_iter):
        self.__max_state = -1
        self.__micro_stack = []
        for k in range(n_macrostates):
            self._do_split(micro_state=k, sub_clus=self.sub_clus)
            self._do_time_clustering(macro_state=k)
        n_micro_states = np.amax(self.__temp_labels_) + 1
        if n_micro_states > self.n_macrostates:
            self.__temp_MacroAssignments_ = self._do_lumping(n_macrostates=n_macrostates)
            prevQ = metaQ
            # Metastability: mean self-transition probability.
            metaQ = self.__temp_transmat_.diagonal().sum()
            metaQ /= len(self.__temp_transmat_)
        else:
            self.__temp_MacroAssignments_ = [copy.copy(element) for element in self.__temp_labels_]
        acceptedMove = False
        # Metropolis-like acceptance on the Q^2 difference, capped at 1.
        MCacc = np.exp(metaQ * metaQ - prevQ * prevQ)
        if MCacc > 1.0:
            MCacc = 1.0
        optLim = 0.95
        if MCacc > optLim:
            acceptedMove = True
        if acceptedMove:
            local_maxQ = metaQ
            if metaQ > global_maxQ:
                global_maxQ = metaQ
                self.MacroAssignments_ = [copy.copy(element) for element in self.__temp_MacroAssignments_]
                self.labels_ = [copy.copy(element) for element in self.__temp_labels_]
                self.transmat_ = self.__temp_transmat_
        n_macrostates = self.n_macrostates
        self.__temp_labels_ = [copy.copy(element) for element in self.__temp_MacroAssignments_]
Do the APM lumping .
23,923
def get_atompair_indices(reference_traj, keep_atoms=None, exclude_atoms=None, reject_bonded=True):
    """Get a list of acceptable atom pairs.

    Returns ``(atom_indices, pair_indices)`` for all 2-combinations of the
    selected atoms, optionally dropping directly bonded pairs.

    NOTE(review): when both keep_atoms and exclude_atoms are given,
    exclude_atoms *overwrites* the keep_atoms selection rather than
    intersecting with it -- confirm this is intended.
    """
    if keep_atoms is None:
        keep_atoms = ATOM_NAMES
    top, bonds = reference_traj.top.to_dataframe()
    if keep_atoms is not None:
        atom_indices = top[top.name.isin(keep_atoms) == True].index.values
    if exclude_atoms is not None:
        atom_indices = top[top.name.isin(exclude_atoms) == False].index.values
    pair_indices = np.array(list(itertools.combinations(atom_indices, 2)))
    if reject_bonded:
        # Hash each unordered pair as a + b*n so bonded pairs can be
        # removed with one vectorized membership test.
        a_list = bonds.min(1)
        b_list = bonds.max(1)
        n = atom_indices.max() + 1
        bond_hashes = a_list + b_list * n
        pair_hashes = pair_indices[:, 0] + pair_indices[:, 1] * n
        not_bonds = ~np.in1d(pair_hashes, bond_hashes)
        pair_indices = np.array([(a, b) for k, (a, b) in enumerate(pair_indices) if not_bonds[k]])
    return atom_indices, pair_indices
Get a list of acceptable atom pairs .
23,924
def sample_dimension(trajs, dimension, n_frames, scheme="linear"):
    """Sample a dimension of the data.

    Selects ``n_frames`` frames along one feature dimension, choosing the
    target feature values on an even grid ("linear"), at random
    ("random"), or from the two extremes ("edge"), then maps each value
    back to its nearest (trajectory key, frame) pair with a KDTree.
    """
    fixed_indices = list(trajs.keys())
    trajs = [trajs[k][:, [dimension]] for k in fixed_indices]
    txx = np.concatenate([traj[:, 0] for traj in trajs])
    if scheme == "linear":
        spaced_points = np.linspace(np.min(txx), np.max(txx), n_frames)
        spaced_points = spaced_points[:, np.newaxis]
    elif scheme == "random":
        spaced_points = np.sort(np.random.choice(txx, n_frames))
        spaced_points = spaced_points[:, np.newaxis]
    elif scheme == "edge":
        _cut_point = n_frames // 2
        txx = np.sort(txx)
        spaced_points = np.hstack((txx[:_cut_point], txx[-_cut_point:]))
        spaced_points = np.reshape(spaced_points, newshape=(len(spaced_points), 1))
    else:
        # FIX: corrected the garbled grammar of the error message.
        raise ValueError("Scheme has to be one of linear, random or edge")
    tree = KDTree(trajs)
    dists, inds = tree.query(spaced_points)
    return [(fixed_indices[i], j) for i, j in inds]
Sample a dimension of the data .
23,925
def fit(self, X, y=None):
    """Fit the grid to the data range.

    Computes per-feature bin edges between the lower and upper bounds
    (taken from the data when ``self.min`` / ``self.max`` are None,
    broadcast when scalar, validated when array-like).
    """
    X = array2d(X)
    self.n_features = X.shape[1]
    self.n_bins = self.n_bins_per_feature ** self.n_features
    # FIX: renamed locals to lo/hi so the builtins min/max are not shadowed.
    if self.min is None:
        lo = np.min(X, axis=0)
    elif isinstance(self.min, numbers.Number):
        lo = self.min * np.ones(self.n_features)
    else:
        lo = np.asarray(self.min)
        if not lo.shape == (self.n_features,):
            raise ValueError('min shape error')
    if self.max is None:
        hi = np.max(X, axis=0)
    elif isinstance(self.max, numbers.Number):
        hi = self.max * np.ones(self.n_features)
    else:
        hi = np.asarray(self.max)
        if not hi.shape == (self.n_features,):
            raise ValueError('max shape error')
    # EPS padding keeps boundary samples strictly inside the outer edges.
    self.grid = np.array([np.linspace(lo[i] - EPS, hi[i] + EPS, self.n_bins_per_feature + 1) for i in range(self.n_features)])
    return self
Fit the grid
23,926
def predict(self, X):
    """Get the index of the grid cell containing each sample in X.

    Raises ValueError for samples outside the fitted min/max bounds.
    """
    if np.any(X < self.grid[:, 0]) or np.any(X > self.grid[:, -1]):
        raise ValueError('data out of min/max bounds')
    binassign = np.zeros((self.n_features, len(X)), dtype=int)
    for i in range(self.n_features):
        binassign[i] = np.digitize(X[:, i], self.grid[i]) - 1
    # Mixed-radix encoding of per-feature bin indices into one flat label.
    labels = np.dot(self.n_bins_per_feature ** np.arange(self.n_features), binassign)
    assert np.max(labels) < self.n_bins
    return labels
Get the index of the grid cell containing each sample in X
23,927
def _check_same_length ( self , trajs_tuple ) : lens = [ len ( trajs ) for trajs in trajs_tuple ] if len ( set ( lens ) ) > 1 : err = "Each dataset must be the same length. You gave: {}" err = err . format ( lens ) raise ValueError ( err )
Check that the datasets are the same length
23,928
def transform(self, trajs_tuple, y=None):
    """Featurize several trajectories.

    Zips the parallel datasets together and applies partial_transform to
    each matched tuple of trajectories.
    """
    return [self.partial_transform(traj_zip) for traj_zip in zip(*trajs_tuple)]
Featurize several trajectories.
23,929
def backup(fn):
    """If fn exists, move it to the first free numbered .bak file and warn."""
    if not os.path.exists(fn):
        return
    backnum = 1
    while True:
        trial_fn = "{fn}.bak.{backnum}".format(fn=fn, backnum=backnum)
        if not os.path.exists(trial_fn):
            break
        backnum += 1
    warnings.warn("{fn} exists. Moving it to {newfn}".format(fn=fn, newfn=trial_fn), BackupWarning)
    shutil.move(fn, trial_fn)
If fn exists rename it and issue a warning
23,930
def default_key_to_path(key, dfmt="{}", ffmt="{}.npy"):
    """Turn an arbitrary key into a relative file path.

    Tuple keys become nested directories (all but the last element) plus a
    filename; scalar keys become a single filename.
    """
    if not isinstance(key, tuple):
        return ffmt.format(key)
    parts = [dfmt.format(part) for part in key[:-1]]
    parts.append(ffmt.format(key[-1]))
    return os.path.join(*parts)
Turn an arbitrary python object into a filename
23,931
def preload_tops(meta):
    """Load all topology files referenced by meta into memory.

    Returns a dict mapping topology filename -> loaded topology, so each
    unique file is parsed only once.
    """
    top_fns = set(meta['top_fn'])
    tops = {}
    for tfn in top_fns:
        tops[tfn] = md.load_topology(tfn)
    return tops
Load all topology files into memory .
23,932
def preload_top(meta):
    """Load the single topology file shared by every row of meta.

    Raises ValueError if the metadata references more than one topology.
    """
    unique_tops = set(meta['top_fn'])
    if len(unique_tops) != 1:
        raise ValueError("More than one topology is used in this project!")
    return md.load_topology(unique_tops.pop())
Load one topology file into memory .
23,933
def itertrajs(meta, stride=1):
    """Load one mdtraj trajectory at a time and yield (index, trajectory).

    Topologies are preloaded once; each trajectory file is read in chunks
    via md.iterload and re-joined, skipping overlap and topology checks
    for speed.
    """
    tops = preload_tops(meta)
    for i, row in meta.iterrows():
        yield i, md.join(md.iterload(row['traj_fn'], top=tops[row['top_fn']], stride=stride), discard_overlapping_frames=False, check_topology=False)
Load one mdtraj trajectory at a time and yield it .
23,934
def render_meta(meta, fn="meta.pandas.html", title="Project Metadata - MSMBuilder", pandas_kwargs=None):
    """Render a metadata dataframe as an HTML page for inspection.

    Any existing output file is backed up first; extra pandas ``to_html``
    keyword arguments may be supplied via ``pandas_kwargs``.
    """
    if pandas_kwargs is None:
        pandas_kwargs = {}
    kwargs_with_defaults = {
        'classes': ('table', 'table-condensed', 'table-hover'),
    }
    kwargs_with_defaults.update(**pandas_kwargs)
    env = Environment(loader=PackageLoader('msmbuilder', 'io_templates'))
    templ = env.get_template("twitter-bootstrap.html")
    rendered = templ.render(title=title, content=meta.to_html(**kwargs_with_defaults))
    # Strip the border attribute pandas hard-codes into its tables.
    rendered = re.sub(r' border="1"', '', rendered)
    backup(fn)
    with open(fn, 'w') as f:
        f.write(rendered)
Render a metadata dataframe as an html webpage for inspection .
23,935
def save_generic(obj, fn):
    """Pickle obj to fn, backing up any existing file first."""
    backup(fn)
    with open(fn, 'wb') as handle:
        pickle.dump(obj, handle)
Save Python objects including msmbuilder Estimators .
23,936
def save_trajs(trajs, fn, meta, key_to_path=None):
    """Save trajectory-like data, one .npy file per key under directory fn.

    The existing directory (if any) is backed up first; nested tuple keys
    become nested subdirectories.
    """
    if key_to_path is None:
        key_to_path = default_key_to_path
    validate_keys(meta.index, key_to_path)
    backup(fn)
    os.mkdir(fn)
    for k in meta.index:
        v = trajs[k]
        npy_fn = os.path.join(fn, key_to_path(k))
        os.makedirs(os.path.dirname(npy_fn), exist_ok=True)
        np.save(npy_fn, v)
Save trajectory - like data
23,937
def load_trajs(fn, meta='meta.pandas.pickl', key_to_path=None):
    """Load trajectory-like data saved by save_trajs.

    ``meta`` may be a metadata dataframe or the filename of a pickled one.
    Returns ``(meta, dict of key -> loaded array)``.
    """
    if key_to_path is None:
        key_to_path = default_key_to_path
    if isinstance(meta, str):
        meta = load_meta(meta_fn=meta)
    trajs = {}
    for k in meta.index:
        trajs[k] = np.load(os.path.join(fn, key_to_path(k)))
    return meta, trajs
Load trajectory - like data
23,938
def query(self, x, k=1, p=2, distance_upper_bound=np.inf):
    """Query the KD-tree for nearest neighbors of x.

    Returns (distances, indices) where flat concatenated-array indices are
    split back into per-trajectory (trajectory, frame) indices.
    """
    cdists, cinds = self._kdtree.query(x, k, p, distance_upper_bound)
    return cdists, self._split_indices(cinds)
Query the kd - tree for nearest neighbors
23,939
def transform(self, sequences):
    """Apply dimensionality reduction to each sequence in turn."""
    check_iter_of_sequences(sequences)
    return [self.partial_transform(X) for X in sequences]
Apply dimensionality reduction to sequences
23,940
def fit_transform(self, sequences, y=None):
    """Fit the model on sequences, then apply the dimensionality reduction."""
    self.fit(sequences)
    return self.transform(sequences)
Fit the model and apply dimensionality reduction
23,941
def fit(self, sequences, y=None):
    """Fit the wrapped clusterer on the concatenation of all sequences.

    If the underlying estimator produced ``labels_``, they are split back
    into per-sequence label arrays.
    """
    check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory)
    super(MultiSequenceClusterMixin, self).fit(self._concat(sequences))
    if hasattr(self, 'labels_'):
        self.labels_ = self._split(self.labels_)
    return self
Fit the clustering on the data
23,942
def predict(self, sequences, y=None):
    """Predict the closest cluster for each sample in each sequence."""
    check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory)
    return [self.partial_predict(X) for X in sequences]
Predict the closest cluster each sample in each sequence in sequences belongs to .
23,943
def fit_predict(self, sequences, y=None):
    """Perform clustering on sequences and return per-sequence cluster labels.

    Uses the parent's fit_predict when available, otherwise falls back to
    fit followed by predict; a flat label array is split per sequence.
    """
    if hasattr(super(MultiSequenceClusterMixin, self), 'fit_predict'):
        check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory)
        labels = super(MultiSequenceClusterMixin, self).fit_predict(sequences)
    else:
        self.fit(sequences)
        labels = self.predict(sequences)
    if not isinstance(labels, list):
        labels = self._split(labels)
    return labels
Performs clustering on X and returns cluster labels .
23,944
def plot(self, minx=-1.5, maxx=1.2, miny=-0.2, maxy=2, **kwargs):
    """Helper function to contour-plot the Muller potential.

    Draws onto kwargs['ax'] when given, otherwise directly via pyplot; the
    potential is clipped at 200 so the contours stay readable.
    """
    import matplotlib.pyplot as pp
    grid_width = max(maxx - minx, maxy - miny) / 200.0
    ax = kwargs.pop('ax', None)
    xx, yy = np.mgrid[minx:maxx:grid_width, miny:maxy:grid_width]
    V = self.potential(xx, yy)
    if ax is None:
        ax = pp
    ax.contourf(xx, yy, V.clip(max=200), 40, **kwargs)
Helper function to plot the Muller potential
23,945
def gather_metadata(fn_glob, parser):
    """Given a glob pattern and a parser object, build a metadata dataframe.

    Each matching filename is parsed into one row; the frame is indexed by
    ``parser.index`` and sorted.
    """
    meta = pd.DataFrame(parser.parse_fn(fn) for fn in glob.iglob(fn_glob))
    return meta.set_index(parser.index).sort_index()
Given a glob and a parser object create a metadata dataframe .
23,946
def eigtransform(self, sequences, right=True, mode='clip'):
    r"""Transform sequences by projecting onto the dynamical eigenvectors.

    Each state-index sequence is mapped through the (right or left)
    eigenvectors, skipping the stationary one; non-finite entries (from
    mode='fill') become NaN rows.

    FIX: removed a stray bare ``r`` expression (residue of a stripped raw
    docstring) and replaced the removed ``np.int`` alias with builtin int.
    """
    result = []
    for y in self.transform(sequences, mode=mode):
        if right:
            op = self.right_eigenvectors_[:, 1:]
        else:
            op = self.left_eigenvectors_[:, 1:]
        is_finite = np.isfinite(y)
        if not np.all(is_finite):
            value = np.empty((y.shape[0], op.shape[1]))
            value[is_finite, :] = np.take(op, y[is_finite].astype(int), axis=0)
            value[~is_finite, :] = np.nan
        else:
            value = np.take(op, y, axis=0)
        result.append(value)
    return result
Transform a list of sequences by projecting the sequences onto the first n_timescales dynamical eigenvectors.
23,947
def score_ll(self, sequences):
    r"""Log of the likelihood of sequences with respect to the model.

    Returns -inf if the sequences visit states the model has never seen.

    FIX: removed a stray bare ``r`` expression (residue of a stripped raw
    docstring) that made the original body invalid.
    """
    counts, mapping = _transition_counts(sequences)
    if not set(self.mapping_.keys()).issuperset(mapping.keys()):
        return -np.inf
    inverse_mapping = {v: k for k, v in mapping.items()}
    # Compose: new internal index -> label -> model internal index.
    m2 = _dict_compose(inverse_mapping, self.mapping_)
    indices = [e[1] for e in sorted(m2.items())]
    transmat_slice = self.transmat_[np.ix_(indices, indices)]
    return np.nansum(np.log(transmat_slice) * counts)
Log of the likelihood of sequences with respect to the model.
23,948
def summarize(self):
    """Return diagnostic summary statistics about this Markov model.

    NOTE(review): the original template string was lost during extraction
    (``doc = counts_nz = ...`` would crash on ``doc.format``); this
    template is reconstructed to consume every computed statistic --
    confirm the wording against project conventions.
    """
    doc = """Markov state model
------------------
Lag time         : {lag_time}
Reversible type  : {reversible_type}
Ergodic cutoff   : {ergodic_cutoff}
Prior counts     : {prior_counts}

Number of states : {n_states}
Number of nonzero entries in counts matrix : {counts_nz} ({percent_counts_nz:.2f}%)

Nonzero counts matrix entries:
    Min.   : {cnz_min}
    1st Qu.: {cnz_1st}
    Median : {cnz_med}
    Mean   : {cnz_mean}
    3rd Qu.: {cnz_3rd}
    Max.   : {cnz_max}
    Sum    : {cnz_sum} ({cnz_sum_per_lag} per lag time)

Timescales:
    [{ts}] units
"""
    counts_nz = np.count_nonzero(self.countsmat_)
    cnz = self.countsmat_[np.nonzero(self.countsmat_)]
    return doc.format(
        lag_time=self.lag_time,
        reversible_type=self.reversible_type,
        ergodic_cutoff=self.ergodic_cutoff,
        prior_counts=self.prior_counts,
        n_states=self.n_states_,
        counts_nz=counts_nz,
        percent_counts_nz=(100 * counts_nz / self.countsmat_.size),
        cnz_min=np.min(cnz),
        cnz_1st=np.percentile(cnz, 25),
        cnz_med=np.percentile(cnz, 50),
        cnz_mean=np.mean(cnz),
        cnz_3rd=np.percentile(cnz, 75),
        cnz_max=np.max(cnz),
        cnz_sum=np.sum(cnz),
        cnz_sum_per_lag=np.sum(cnz) / self.lag_time,
        ts=', '.join(['{:.2f}'.format(t) for t in self.timescales_]),
    )
Return some diagnostic summary statistics about this Markov model
23,949
def timescales_(self):
    """Implied relaxation timescales of the model.

    t_i = -lag_time / ln(lambda_i) for the non-stationary eigenvalues;
    invalid/divide warnings are suppressed (eigenvalues <= 0 yield nan/inf).
    """
    u, lv, rv = self._get_eigensystem()
    # The stationary eigenvalue (u[0] == 1) is excluded.
    with np.errstate(invalid='ignore', divide='ignore'):
        timescales = -self.lag_time / np.log(u[1:])
    return timescales
Implied relaxation timescales of the model .
23,950
def uncertainty_eigenvalues(self):
    """Estimate the element-wise asymptotic standard deviation of the
    model eigenvalues.

    First-order (delta-method) error propagation: the covariance of each
    column of the count matrix is pushed through the eigenvalue
    sensitivities dLambda/dT.
    """
    if self.reversible_type is None:
        raise NotImplementedError('reversible_type must be "mle" or "transpose"')
    n_timescales = min(self.n_timescales if self.n_timescales is not None else self.n_states_ - 1, self.n_states_ - 1)
    u, lv, rv = self._get_eigensystem()
    sigma2 = np.zeros(n_timescales + 1)
    for k in range(n_timescales + 1):
        # Sensitivity of eigenvalue k to each transition matrix element.
        dLambda_dT = np.outer(lv[:, k], rv[:, k])
        for i in range(self.n_states_):
            ui = self.countsmat_[:, i]  # counts column i
            wi = np.sum(ui)
            # Multinomial-style covariance of the counts (up to scaling).
            cov = wi * np.diag(ui) - np.outer(ui, ui)
            quad_form = dLambda_dT[i].dot(cov).dot(dLambda_dT[i])
            sigma2[k] += quad_form / (wi ** 2 * (wi + 1))
    return np.sqrt(sigma2)
Estimate of the element - wise asymptotic standard deviation in the model eigenvalues .
23,951
def uncertainty_timescales(self):
    """Element-wise asymptotic standard deviation of the implied timescales.

    Propagates the eigenvalue uncertainties through
    t = -lag/ln(lambda): sigma_t = sigma_lambda / (lambda * ln(lambda)^2).
    """
    eigs = self.eigenvalues_[1:]
    eig_sigmas = self.uncertainty_eigenvalues()[1:]
    return eig_sigmas / (eigs * np.log(eigs) ** 2)
Estimate of the element - wise asymptotic standard deviation in the model implied timescales .
23,952
def describe_features(self, traj):
    """Return feature-description dicts for traj.

    Follows the ordering of featurizers in ``self.which_feat``.
    """
    return [description
            for feat in self.which_feat
            for description in self.features[feat].describe_features(traj)]
Return a list of dictionaries describing the features . Follows the ordering of featurizers in self . which_feat .
23,953
def partial_transform(self, sequence):
    """Apply preprocessing to a single sequence.

    Delegates to the parent class's transform, which handles one sequence
    at a time.
    """
    s = super(MultiSequencePreprocessingMixin, self)
    return s.transform(sequence)
Apply preprocessing to single sequence
23,954
def retry(max_retries=1):
    """Decorator factory: retry the wrapped function on HTTPError.

    Retries up to ``max_retries`` extra attempts, sleeping 5 seconds
    between attempts, re-raising the final failure.
    """
    def retry_func(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            attempt = 0
            while attempt <= max_retries:
                try:
                    result = func(*args, **kwargs)
                    break
                except HTTPError:
                    if attempt == max_retries:
                        raise
                    attempt += 1
                    time.sleep(5)
            return result
        return wrapper
    return retry_func
Retry a function max_retries times .
23,955
def get_data_home(data_home=None):
    """Return the path of the msmbuilder data directory, creating it if needed.

    Resolution order: explicit argument, the msmb_data package location,
    the MSMBUILDER_DATA environment variable, then ~/msmbuilder_data.
    """
    if data_home is not None:
        return _expand_and_makedir(data_home)
    msmb_data = has_msmb_data()
    if msmb_data is not None:
        return _expand_and_makedir(msmb_data)
    data_home = environ.get('MSMBUILDER_DATA', join('~', 'msmbuilder_data'))
    return _expand_and_makedir(data_home)
Return the path of the msmbuilder data dir .
23,956
def description(cls):
    """Return the text following the 'Notes' heading in the class docstring.

    Skips the heading and its underline, then joins the remaining
    (whitespace-stripped) lines.
    """
    stripped = [line.strip() for line in cls.__doc__.splitlines()]
    start = stripped.index("Notes") + 2
    return "\n".join(stripped[start:])
Get a description from the Notes section of the docstring .
23,957
def wildcard_allowed_actions(self, pattern=None):
    """Find statements which allow wildcard actions."""
    return [
        statement for statement in self.statements
        if statement.wildcard_actions(pattern) and statement.effect == "Allow"
    ]
Find statements which allow wildcard actions .
23,958
def wildcard_allowed_principals(self, pattern=None):
    """Find statements which allow wildcard principals."""
    return [
        statement for statement in self.statements
        if statement.wildcard_principals(pattern) and statement.effect == "Allow"
    ]
Find statements which allow wildcard principals .
23,959
def nonwhitelisted_allowed_principals(self, whitelist=None):
    """Find Allow statements whose principals are not on the whitelist.

    An empty or missing whitelist yields an empty result.
    """
    if not whitelist:
        return []
    return [
        statement for statement in self.statements
        if statement.non_whitelisted_principals(whitelist) and statement.effect == "Allow"
    ]
Find non whitelisted allowed principals .
23,960
def allows_not_principal(self):
    """Find Allow statements that use NotPrincipal."""
    return [
        statement for statement in self.statements
        if statement.not_principal and statement.effect == "Allow"
    ]
Find allowed not - principals .
23,961
def parse_parameters(self, parameters):
    """Parse parameters into Parameter objects and store them on the model.

    Falsy Parameter objects are skipped.
    """
    self.parameters = []
    for name, value in parameters.items():
        parameter = Parameter(name, value)
        if parameter:
            self.parameters.append(parameter)
Parses and sets parameters in the model .
23,962
def parse_resources(self, resources):
    """Parse *resources* into the model using a factory.

    Resources are grouped in a dict keyed by resource_type.
    """
    self.resources = {}
    factory = ResourceFactory()
    for res_id, res_value in resources.items():
        resource = factory.create_resource(res_id, res_value)
        if resource:
            # Group resources under their resource_type key.
            self.resources.setdefault(resource.resource_type, []).append(resource)
Parses and sets resources in the model using a factory .
23,963
def session(self):
    """Get a session object to benefit from connection pooling."""
    if self._session is None:
        # Lazily create the session and apply the configured headers once.
        self._session = requests.Session()
        self._session.headers.update(self._headers)
    return self._session
Get session object to benefit from connection pooling .
23,964
def open_id_connect(self, client_id, client_secret):
    """Get an OpenID Connect client bound to this realm."""
    return KeycloakOpenidConnect(realm=self,
                                 client_id=client_id,
                                 client_secret=client_secret)
Get OpenID Connect client
23,965
def create(self, username, **kwargs):
    """Create a user in Keycloak.

    Recognized keyword arguments (USER_KWARGS) are converted to
    camelCase and included in the JSON payload.
    """
    from keycloak.admin.clientroles import to_camel_case
    payload = OrderedDict(username=username)
    payload.update((to_camel_case(key), kwargs[key])
                   for key in USER_KWARGS if key in kwargs)
    return self._client.post(
        url=self._client.get_full_url(
            self.get_path('collection', realm=self._realm_name)
        ),
        data=json.dumps(payload, sort_keys=True)
    )
Create a user in Keycloak
23,966
def all(self):
    """Return all registered users."""
    collection_path = self.get_path('collection', realm=self._realm_name)
    return self._client.get(url=self._client.get_full_url(collection_path))
Return all registered users
23,967
def get(self):
    """Return the registered user with the given user id."""
    single_path = self.get_path('single', realm=self._realm_name,
                                user_id=self._user_id)
    self._user = self._client.get(url=self._client.get_full_url(single_path))
    # Refresh the cached id from the server's representation.
    self._user_id = self.user["id"]
    return self._user
Return registered user with the given user id .
23,968
def update(self, **kwargs):
    """Update an existing user.

    Starts from the current server representation, overlays any
    recognized keyword arguments (camelCased), PUTs the result, and
    re-fetches so the cached user reflects the server state.
    """
    from keycloak.admin.clientroles import to_camel_case
    payload = dict(self.user)
    for key in USER_KWARGS:
        if key in kwargs:
            payload[to_camel_case(key)] = kwargs[key]
    result = self._client.put(
        url=self._client.get_full_url(
            self.get_path('single', realm=self._realm_name,
                          user_id=self._user_id)
        ),
        data=json.dumps(payload, sort_keys=True)
    )
    self.get()
    return result
Update existing user .
23,969
def resource_set_create(self, token, name, **kwargs):
    """Create a resource set."""
    endpoint = self.well_known['resource_registration_endpoint']
    return self._realm.client.post(endpoint,
                                   data=self._get_data(name=name, **kwargs),
                                   headers=self.get_headers(token))
Create a resource set .
23,970
def resource_set_update(self, token, id, name, **kwargs):
    """Update a resource set."""
    url = '{}/{}'.format(self.well_known['resource_registration_endpoint'], id)
    return self._realm.client.put(url,
                                  data=self._get_data(name=name, **kwargs),
                                  headers=self.get_headers(token))
Update a resource set .
23,971
def resource_set_read(self, token, id):
    """Read a resource set."""
    url = '{}/{}'.format(self.well_known['resource_registration_endpoint'], id)
    return self._realm.client.get(url, headers=self.get_headers(token))
Read a resource set .
23,972
def resource_create_ticket(self, token, id, scopes, **kwargs):
    """Create a ticket requesting permission on a resource.

    The permission endpoint expects a JSON list of permission requests;
    a single-element list is sent.
    """
    ticket_request = dict(resource_id=id, resource_scopes=scopes, **kwargs)
    return self._realm.client.post(self.well_known['permission_endpoint'],
                                   data=self._dumps([ticket_request]),
                                   headers=self.get_headers(token))
Create a ticket for permission to a resource .
23,973
def resource_associate_permission(self, token, id, name, scopes, **kwargs):
    """Associate a permission with a resource."""
    url = '{}/{}'.format(self.well_known['policy_endpoint'], id)
    return self._realm.client.post(
        url,
        data=self._get_data(name=name, scopes=scopes, **kwargs),
        headers=self.get_headers(token))
Associates a permission with a Resource .
23,974
def permission_update(self, token, id, **kwargs):
    """Update an existing permission."""
    url = '{}/{}'.format(self.well_known['policy_endpoint'], id)
    return self._realm.client.put(url,
                                  data=self._dumps(kwargs),
                                  headers=self.get_headers(token))
Update an existing permission .
23,975
def permission_delete(self, token, id):
    """Remove a permission."""
    url = '{}/{}'.format(self.well_known['policy_endpoint'], id)
    return self._realm.client.delete(url, headers=self.get_headers(token))
Remove a permission .
23,976
def _decode_token ( cls , token ) : missing_padding = len ( token ) % 4 if missing_padding != 0 : token += '=' * ( 4 - missing_padding ) return json . loads ( base64 . b64decode ( token ) . decode ( 'utf-8' ) )
Permission information is encoded in an authorization token .
23,977
def get_permissions(self, token, resource_scopes_tuples=None,
                    submit_request=False, ticket=None):
    """Request permissions for a user from the Keycloak server.

    Uses the UMA 2.0 ticket grant against the token endpoint. Either a
    list of (resource, scope) tuples or an existing ticket may be
    supplied. Returns the 'authorization' section of the decoded token,
    or an empty dict when the server reports an error or the client
    raises KeycloakClientError (both are logged, not raised).
    """
    headers = {
        "Authorization": "Bearer %s" % token,
        'Content-type': 'application/x-www-form-urlencoded',
    }
    data = [
        ('grant_type', 'urn:ietf:params:oauth:grant-type:uma-ticket'),
        ('audience', self._client_id),
        ('response_include_resource_name', True),
    ]
    if resource_scopes_tuples:
        data.extend(('permission', '#'.join(pair))
                    for pair in resource_scopes_tuples)
        data.append(('submit_request', submit_request))
    elif ticket:
        data.append(('ticket', ticket))

    authz_info = {}
    try:
        response = self._realm.client.post(
            self.well_known['token_endpoint'],
            data=urlencode(data),
            headers=headers,
        )
        error = response.get('error')
        if error:
            self.logger.warning('%s: %s', error,
                                response.get('error_description'))
        else:
            token = response.get('refresh_token')
            # Authorization claims live in the token's payload segment.
            decoded_token = self._decode_token(token.split('.')[1])
            authz_info = decoded_token.get('authorization', {})
    except KeycloakClientError as error:
        self.logger.warning(str(error))
    return authz_info
Request permissions for user from keycloak server .
23,978
def eval_permission(self, token, resource, scope, submit_request=False):
    """Evaluate whether the user has *scope* permission on *resource*.

    Thin wrapper around eval_permissions for a single pair.
    """
    return self.eval_permissions(token=token,
                                 resource_scopes_tuples=[(resource, scope)],
                                 submit_request=submit_request)
Evaluates whether the user has permission for the scope on the resource .
23,979
def eval_permissions(self, token, resource_scopes_tuples=None,
                     submit_request=False):
    """Evaluate whether the user holds every requested (resource, scope) pair.

    Returns True only when the granted pairs match the requested list
    exactly (including order).
    """
    permissions = self.get_permissions(
        token=token,
        resource_scopes_tuples=resource_scopes_tuples,
        submit_request=submit_request)
    granted = []
    for permission in permissions.get('permissions', []):
        for scope in permission.get('scopes', []):
            pair = (permission.get('rsname'), scope)
            if pair in resource_scopes_tuples:
                granted.append(pair)
    return granted == resource_scopes_tuples
Evaluates if user has permission for all the resource scope combinations .
23,980
def logout(self, refresh_token):
    """Log out the authenticated user by invalidating the refresh token."""
    return self._realm.client.post(
        self.get_url('end_session_endpoint'),
        data={
            'refresh_token': refresh_token,
            'client_id': self._client_id,
            'client_secret': self._client_secret,
        })
The logout endpoint logs out the authenticated user .
23,981
def userinfo(self, token):
    """Fetch claims about the authenticated end-user.

    Calls the OIDC UserInfo endpoint, an OAuth 2.0 protected resource,
    using the given access token as a Bearer credential.
    """
    endpoint = self.well_known['userinfo_endpoint']
    return self._realm.client.get(
        endpoint,
        headers={"Authorization": "Bearer {}".format(token)})
The UserInfo Endpoint is an OAuth 2 . 0 Protected Resource that returns Claims about the authenticated End - User . To obtain the requested Claims about the End - User the Client makes a request to the UserInfo Endpoint using an Access Token obtained through OpenID Connect Authentication . These Claims are normally represented by a JSON object that contains a collection of name and value pairs for the Claims .
23,982
def authorization_url(self, **kwargs):
    """Get the authorization URL to redirect the resource owner to."""
    payload = {'response_type': 'code', 'client_id': self._client_id}
    payload.update(kwargs)
    # Sort parameters by name so the generated URL is deterministic.
    params = urlencode(sorted(payload.items()))
    return '{}?{}'.format(self.get_url('authorization_endpoint'), params)
Get authorization URL to redirect the resource owner to .
23,983
def authorization_code(self, code, redirect_uri):
    """Retrieve an access token via the authorization_code grant."""
    return self._token_request(grant_type='authorization_code',
                               code=code, redirect_uri=redirect_uri)
Retrieve access token by authorization_code grant .
23,984
def password_credentials(self, username, password, **kwargs):
    """Retrieve an access token via the resource-owner password grant."""
    return self._token_request(grant_type='password', username=username,
                               password=password, **kwargs)
Retrieve access token by password credentials grant .
23,985
def refresh_token(self, refresh_token, **kwargs):
    """Refresh an access token using the refresh_token grant."""
    return self._token_request(grant_type='refresh_token',
                               refresh_token=refresh_token, **kwargs)
Refresh an access token
23,986
def _token_request ( self , grant_type , ** kwargs ) : payload = { 'grant_type' : grant_type , 'client_id' : self . _client_id , 'client_secret' : self . _client_secret } payload . update ( ** kwargs ) return self . _realm . client . post ( self . get_url ( 'token_endpoint' ) , data = payload )
Do the actual call to the token end - point .
23,987
def create(self, name, **kwargs):
    """Create a new role for this client.

    Recognized keyword arguments (ROLE_KWARGS) are converted to
    camelCase and included in the JSON payload.
    """
    payload = OrderedDict(name=name)
    payload.update((to_camel_case(key), kwargs[key])
                   for key in ROLE_KWARGS if key in kwargs)
    return self._client.post(
        url=self._client.get_full_url(
            self.get_path('collection', realm=self._realm_name,
                          id=self._client_id)
        ),
        data=json.dumps(payload, sort_keys=True)
    )
Create new role
23,988
def update(self, name, **kwargs):
    """Update an existing role.

    Recognized keyword arguments (ROLE_KWARGS) are converted to
    camelCase and included in the JSON payload.
    """
    payload = OrderedDict(name=name)
    payload.update((to_camel_case(key), kwargs[key])
                   for key in ROLE_KWARGS if key in kwargs)
    return self._client.put(
        url=self._client.get_full_url(
            self.get_path('single', realm=self._realm_name,
                          id=self._client_id, role_name=self._role_name)
        ),
        data=json.dumps(payload, sort_keys=True)
    )
Update existing role .
23,989
def extract_rpm(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract an RPM archive by piping rpm2cpio output through cpio.

    Returns (command list, runner kwargs); the command runs as a shell
    pipeline in *outdir*.
    """
    cpio = util.find_program("cpio")
    if not cpio:
        raise util.PatoolError(
            "cpio(1) is required for rpm2cpio extraction; please install it")
    path = util.shell_quote(os.path.abspath(archive))
    cmdlist = [
        util.shell_quote(cmd), path, "|", util.shell_quote(cpio),
        '--extract', '--make-directories', '--preserve-modification-time',
        '--no-absolute-filenames', '--force-local', '--nonmatching',
        r'"*\.\.*"',  # skip entries containing '..' path components
    ]
    if verbosity > 1:
        cmdlist.append('-v')
    return (cmdlist, {'cwd': outdir, 'shell': True})
Extract a RPM archive .
23,990
def list_tar(archive, compression, cmd, verbosity, interactive):
    """List a TAR archive with the tarfile Python module."""
    try:
        with tarfile.open(archive) as tfile:
            # Verbose listing when verbosity is above 1.
            tfile.list(verbose=verbosity > 1)
    except Exception as err:
        raise util.PatoolError("error listing %s: %s" % (archive, err))
    return None
List a TAR archive with the tarfile Python module .
23,991
def extract_tar(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a TAR archive with the tarfile Python module.

    NOTE(review): tarfile.extractall performs no filtering of member
    paths here — only use on trusted archives.
    """
    try:
        with tarfile.open(archive) as tfile:
            tfile.extractall(path=outdir)
    except Exception as err:
        raise util.PatoolError("error extracting %s: %s" % (archive, err))
    return None
Extract a TAR archive with the tarfile Python module .
23,992
def create_tar(archive, compression, cmd, verbosity, interactive, filenames):
    """Create a TAR archive with the tarfile Python module."""
    mode = get_tar_mode(compression)
    try:
        with tarfile.open(archive, mode) as tfile:
            for filename in filenames:
                tfile.add(filename)
    except Exception as err:
        raise util.PatoolError("error creating %s: %s" % (archive, err))
    return None
Create a TAR archive with the tarfile Python module .
23,993
def get_tar_mode(compression):
    """Determine the tarfile open mode for the given compression.

    Raises PatoolError for compressions tarfile cannot write.
    """
    simple_modes = {'gzip': 'w:gz', 'bzip2': 'w:bz2'}
    if compression in simple_modes:
        return simple_modes[compression]
    if compression == 'lzma' and py_lzma:
        return 'w:xz'
    if compression:
        raise util.PatoolError(
            'pytarfile does not support %s for tar compression' % compression)
    # No compression requested: plain write mode.
    return 'w'
Determine tarfile open mode according to the given compression .
23,994
def extract_arj(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract an ARJ archive."""
    cmdlist = [cmd, 'x', '-r']
    if not interactive:
        cmdlist.append('-y')  # assume "yes" on all queries
    cmdlist.extend([archive, outdir])
    return cmdlist
Extract an ARJ archive .
23,995
def list_arj(archive, compression, cmd, verbosity, interactive):
    """List an ARJ archive."""
    # 'v' is the verbose listing command, 'l' the short one.
    cmdlist = [cmd, 'v' if verbosity > 1 else 'l']
    if not interactive:
        cmdlist.append('-y')  # assume "yes" on all queries
    cmdlist.extend(['-r', archive])
    return cmdlist
List an ARJ archive .
23,996
def create_arj(archive, compression, cmd, verbosity, interactive, filenames):
    """Create an ARJ archive."""
    cmdlist = [cmd, 'a', '-r']
    if not interactive:
        cmdlist.append('-y')  # assume "yes" on all queries
    cmdlist.append(archive)
    cmdlist.extend(filenames)
    return cmdlist
Create an ARJ archive .
23,997
def create_zpaq(archive, compression, cmd, verbosity, interactive, filenames):
    """Create a ZPAQ archive (compression method 4)."""
    return [cmd, 'a', archive] + list(filenames) + ['-method', '4']
Create a ZPAQ archive .
23,998
def extract_alzip(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract an ALZip archive into *outdir*."""
    return [cmd, '-d', outdir, archive]
Extract an ALZIP archive .
23,999
def extract_tar(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a TAR archive with the tar command."""
    cmdlist = [cmd, '--extract']
    # Append compression and verbosity flags for the tar command.
    add_tar_opts(cmdlist, compression, verbosity)
    cmdlist.extend(["--file", archive, '--directory', outdir])
    return cmdlist
Extract a TAR archive .