idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
23,800
def toConsole(self):
    """Print the entire API being generated to the console (debugging aid)."""
    fmt_spec = {
        "class": utils.AnsiColors.BOLD_MAGENTA,
        "struct": utils.AnsiColors.BOLD_CYAN,
        "define": utils.AnsiColors.BOLD_YELLOW,
        "enum": utils.AnsiColors.BOLD_MAGENTA,
        "enumvalue": utils.AnsiColors.BOLD_RED,
        "function": utils.AnsiColors.BOLD_CYAN,
        "file": utils.AnsiColors.BOLD_YELLOW,
        "dir": utils.AnsiColors.BOLD_MAGENTA,
        "group": utils.AnsiColors.BOLD_RED,
        "namespace": utils.AnsiColors.BOLD_CYAN,
        "typedef": utils.AnsiColors.BOLD_YELLOW,
        "union": utils.AnsiColors.BOLD_MAGENTA,
        "variable": utils.AnsiColors.BOLD_CYAN
    }
    # The first section title combines two separately colored words.
    self.consoleFormat(
        "{0} and {1}".format(
            utils._use_color("Classes", fmt_spec["class"], sys.stderr),
            utils._use_color("Structs", fmt_spec["struct"], sys.stderr),
        ),
        self.class_like,
        fmt_spec
    )
    # (title, fmt_spec key, node collection), in the original display order.
    # Previously this was eleven near-identical consoleFormat calls.
    sections = [
        ("Defines", "define", self.defines),
        ("Enums", "enum", self.enums),
        ("Enum Values (unused)", "enumvalue", self.enum_values),
        ("Functions", "function", self.functions),
        ("Files", "file", self.files),
        ("Directories", "dir", self.dirs),
        ("Groups (unused)", "group", self.groups),
        ("Namespaces", "namespace", self.namespaces),
        ("Typedefs", "typedef", self.typedefs),
        ("Unions", "union", self.unions),
        ("Variables", "variable", self.variables),
    ]
    for title, kind, nodes in sections:
        self.consoleFormat(
            utils._use_color(title, fmt_spec[kind], sys.stderr),
            nodes,
            fmt_spec
        )
Convenience function for printing the entire API being generated to the console. Unused in release builds, but helpful for debugging.
23,801
def sanitize(name):
    """Sanitize *name* for use with breathe directives."""
    # Order matters: entities are decoded first, then stray spaces around
    # the decoded characters are collapsed.
    replacements = (
        ("&lt;", "<"),
        ("&gt;", ">"),
        ("&amp;", "&"),
        ("< ", "<"),
        (" >", ">"),
        (" &", "&"),
        ("& ", "&"),
    )
    for old, new in replacements:
        name = name.replace(old, new)
    return name
Sanitize the specified name for use with breathe directives .
23,802
def doxygenLanguageToPygmentsLexer(location, language):
    """Return the Pygments lexer name for a file location/language pair."""
    # User-supplied regex overrides take precedence over the builtin table.
    mapping = configs._compiled_lexer_mapping
    if mapping:
        for pattern in mapping:
            if pattern.match(location):
                return mapping[pattern]
    # Fall back to the builtin doxygen-language -> lexer table.
    if language in LANG_TO_LEX:
        return LANG_TO_LEX[language]
    return "none"
Given an input location and language specification acquire the Pygments lexer to use for this file .
23,803
def getBriefAndDetailedRST(textRoot, node):
    """Return ``(brief, detailed)`` reStructuredText descriptions for *node*.

    Either element is the empty string when the corresponding description is
    absent or whitespace-only.
    """
    node_xml_contents = utils.nodeCompoundXMLContents(node)
    if not node_xml_contents:
        return "", ""

    try:
        node_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
    except Exception:
        # Was a bare `except:`; never mask SystemExit / KeyboardInterrupt.
        utils.fancyError("Unable to parse [{0}] xml using BeautifulSoup".format(node.name))

    try:
        brief = node_soup.doxygen.compounddef.find_all("briefdescription", recursive=False)
        brief_desc = ""
        if len(brief) == 1:
            brief = brief[0]
            if not brief.get_text().isspace():
                brief_desc = convertDescriptionToRST(textRoot, node, brief, None)

        detailed = node_soup.doxygen.compounddef.find_all("detaileddescription", recursive=False)
        detailed_desc = ""
        if len(detailed) == 1:
            detailed = detailed[0]
            if not detailed.get_text().isspace():
                detailed_desc = convertDescriptionToRST(textRoot, node, detailed, "Detailed Description")

        return brief_desc, detailed_desc
    except Exception:
        # Was a bare `except:` as well.
        utils.fancyError("Could not acquire soup.doxygen.compounddef; likely not a doxygen xml file.")
Given an input node return a tuple of strings where the first element of the return is the brief description and the second is the detailed description .
23,804
def _build_url ( self , endpoint ) : try : path = self . endpoints [ endpoint ] except KeyError : msg = 'Unknown endpoint `{0}`' raise ValueError ( msg . format ( endpoint ) ) absolute_url = urljoin ( self . target , path ) return absolute_url
Builds the absolute URL using the target and desired endpoint .
23,805
def add_version(self, project, version, egg):
    """Upload a new project egg to Scrapyd; returns the list of spiders."""
    url = self._build_url(constants.ADD_VERSION_ENDPOINT)
    payload = {'project': project, 'version': version}
    response = self.client.post(url, data=payload, files={'egg': egg},
                                timeout=self.timeout)
    return response['spiders']
Adds a new project egg to the Scrapyd service. First-class method: maps to Scrapyd's add version endpoint.
23,806
def cancel(self, project, job, signal=None):
    """Cancel *job* in *project*; returns the job's previous state."""
    url = self._build_url(constants.CANCEL_ENDPOINT)
    payload = {'project': project, 'job': job}
    if signal is not None:
        # Scrapyd accepts an optional kill signal name/number.
        payload['signal'] = signal
    response = self.client.post(url, data=payload, timeout=self.timeout)
    return response['prevstate']
Cancels a job from a specific project. First-class method: maps to Scrapyd's cancel job endpoint.
23,807
def delete_project(self, project):
    """Delete every version of *project* on the Scrapyd service."""
    url = self._build_url(constants.DELETE_PROJECT_ENDPOINT)
    self.client.post(url, data={'project': project}, timeout=self.timeout)
    return True
Deletes all versions of a project . First class maps to Scrapyd s delete project endpoint .
23,808
def delete_version(self, project, version):
    """Delete one specific *version* of *project*."""
    url = self._build_url(constants.DELETE_VERSION_ENDPOINT)
    payload = {'project': project, 'version': version}
    self.client.post(url, data=payload, timeout=self.timeout)
    return True
Deletes a specific version of a project . First class maps to Scrapyd s delete version endpoint .
23,809
def job_status(self, project, job_id):
    """Return the state of *job_id* within *project*, or '' if unknown."""
    all_jobs = self.list_jobs(project)
    for state in constants.JOB_STATES:
        # Each state maps to a list of job dicts carrying an 'id' key.
        if any(job['id'] == job_id for job in all_jobs[state]):
            return state
    return ''
Retrieves the status of a specific job, specified by its id. Derived method: utilises Scrapyd's list jobs endpoint to provide the answer.
23,810
def list_jobs(self, project):
    """List all known jobs for *project*."""
    url = self._build_url(constants.LIST_JOBS_ENDPOINT)
    return self.client.get(url, params={'project': project},
                           timeout=self.timeout)
Lists all known jobs for a project . First class maps to Scrapyd s list jobs endpoint .
23,811
def list_projects(self):
    """Return the names of all deployed projects."""
    url = self._build_url(constants.LIST_PROJECTS_ENDPOINT)
    response = self.client.get(url, timeout=self.timeout)
    return response['projects']
Lists all deployed projects . First class maps to Scrapyd s list projects endpoint .
23,812
def list_spiders(self, project):
    """Return all known spider names for *project*."""
    url = self._build_url(constants.LIST_SPIDERS_ENDPOINT)
    response = self.client.get(url, params={'project': project},
                               timeout=self.timeout)
    return response['spiders']
Lists all known spiders for a specific project . First class maps to Scrapyd s list spiders endpoint .
23,813
def list_versions(self, project):
    """Return all deployed versions of *project*."""
    url = self._build_url(constants.LIST_VERSIONS_ENDPOINT)
    response = self.client.get(url, params={'project': project},
                               timeout=self.timeout)
    return response['versions']
Lists all deployed versions of a specific project . First class maps to Scrapyd s list versions endpoint .
23,814
def schedule(self, project, spider, settings=None, **kwargs):
    """Schedule *spider* from *project* to run; returns the new job id.

    Extra keyword arguments are forwarded verbatim as request data;
    *settings* is rendered as a list of ``name=value`` strings.
    """
    url = self._build_url(constants.SCHEDULE_ENDPOINT)
    payload = {'project': project, 'spider': spider}
    payload.update(kwargs)
    if settings:
        payload['setting'] = ['{0}={1}'.format(name, value)
                              for name, value in iteritems(settings)]
    response = self.client.post(url, data=payload, timeout=self.timeout)
    return response['jobid']
Schedules a spider from a specific project to run. First-class method: maps to Scrapyd's scheduling endpoint.
23,815
def _handle_response(self, response):
    """Validate a Scrapyd HTTP response and return its JSON payload.

    Raises ScrapydResponseError for HTTP errors, invalid JSON, an 'error'
    status, or (new) an unrecognized status value.
    """
    if not response.ok:
        raise ScrapydResponseError(
            "Scrapyd returned a {0} error: {1}".format(
                response.status_code,
                response.text
            )
        )
    try:
        json = response.json()
    except ValueError:
        raise ScrapydResponseError(
            "Scrapyd returned an invalid JSON "
            "response: {0}".format(response.text)
        )
    if json['status'] == 'ok':
        json.pop('status')
        return json
    elif json['status'] == 'error':
        raise ScrapydResponseError(json['message'])
    # Previously fell through and returned None, silently hiding the problem.
    raise ScrapydResponseError(
        "Scrapyd returned an unexpected status: {0}".format(json['status']))
Handles the response received from Scrapyd .
23,816
def all(self):
    r"""Yield every child of this node, whitespace included.

    TexExpr children are wrapped in TexNode with parent set to this node.
    """
    for child in self.expr.all:
        if not isinstance(child, TexExpr):
            yield child
            continue
        wrapped = TexNode(child)
        wrapped.parent = self
        yield wrapped
r Returns all content in this node regardless of whitespace or not . This includes all LaTeX needed to reconstruct the original source .
23,817
def children(self):
    r"""Yield the immediate valid-TeX children, wrapped as TexNode."""
    for child in self.expr.children:
        wrapped = TexNode(child)
        wrapped.parent = self
        yield wrapped
r Immediate children of this TeX element that are valid TeX objects .
23,818
def string(self):
    r"""Return the sole argument's value when wrapping a one-arg TexCmd.

    Implicitly returns None in every other case.
    """
    expr = self.expr
    if isinstance(expr, TexCmd) and len(expr.args) == 1:
        return expr.args[0].value
r This is valid if and only if
23,819
def text(self):
    r"""Yield all text found in descendant nodes."""
    for item in self.contents:
        if isinstance(item, TokenWithPosition):
            yield item
        elif hasattr(item, 'text'):
            # Recurse into child nodes exposing a `text` iterator.
            yield from item.text
r All text in descendant nodes .
23,820
def count(self, name=None, **attrs):
    r"""Return the number of descendants matching the given criteria."""
    return sum(1 for _ in self.find_all(name, **attrs))
r Number of descendants matching criteria .
23,821
def delete(self):
    r"""Remove this node from its parent in the parse tree."""
    parent = self.parent
    if parent.expr._supports_contents():
        parent.remove(self)
        return
    # Otherwise the node lives inside one of the parent's arguments.
    for arg in parent.args:
        if self.expr in arg.contents:
            arg.contents.remove(self.expr)
r Delete this node from the parse tree .
23,822
def find(self, name=None, **attrs):
    r"""Return the first descendant matching the criteria, or None."""
    matches = self.find_all(name, **attrs)
    try:
        return next(matches)
    except StopIteration:
        return None
r First descendant node matching criteria .
23,823
def find_all(self, name=None, **attrs):
    r"""Yield every descendant matching the criteria."""
    for candidate in self.__descendants():
        # Only objects implementing __match__ can satisfy search criteria.
        if hasattr(candidate, '__match__') and candidate.__match__(name, attrs):
            yield candidate
r Return all descendant nodes matching criteria .
23,824
def all(self):
    r"""Yield all content of this expression: argument contents first,
    then direct contents, whitespace included."""
    for arg in self.args:
        yield from arg
    yield from self._contents
r Returns all content in this expression regardless of whitespace or not . This includes all LaTeX needed to reconstruct the original source .
23,825
def contents(self):
    r"""Yield contents, skipping whitespace strings unless preservation is on."""
    for item in self.all:
        if self.preserve_whitespace or not (isinstance(item, str) and item.isspace()):
            yield item
r Returns all contents in this expression .
23,826
def tokens(self):
    """Yield contents broken down into words and other expressions."""
    for item in self.contents:
        if not isinstance(item, TokenWithPosition):
            yield item
        else:
            # Tokens are split into individual words.
            yield from item.split()
Further breaks down all tokens for a particular expression into words and other expressions .
23,827
def insert(self, i, *exprs):
    """Insert the given expressions at position *i*, in order."""
    self._assert_supports_contents()
    for offset, expr in enumerate(exprs):
        self._contents.insert(i + offset, expr)
Insert content at specified position into expression .
23,828
def remove(self, expr):
    """Remove *expr* from contents, returning the index it occupied."""
    self._assert_supports_contents()
    position = self._contents.index(expr)
    self._contents.remove(expr)
    return position
Remove a provided expression from its list of contents .
23,829
def parse(s):
    """Parse a string, list, or existing Arg into an argument object."""
    if isinstance(s, arg_type):
        return s
    if isinstance(s, (list, tuple)):
        # Match opening/closing delimiters against each known Arg type.
        for candidate in arg_type:
            if [s[0], s[-1]] == candidate.delims():
                return candidate(*s[1:-1])
        raise TypeError('Malformed argument. First and last elements must '
                        'match a valid argument format. In this case, TexSoup'
                        ' could not find matching punctuation for: %s.\n'
                        'Common issues include: Unescaped special characters,'
                        ' mistyped closing punctuation, misalignment.' % (str(s)))
    for candidate in arg_type:
        if candidate.__is__(s):
            return candidate(candidate.__strip__(s))
    raise TypeError('Malformed argument. Must be an Arg or a string in '
                    'either brackets or curly braces.')
Parse a string or list and return an Argument object
23,830
def insert(self, i, arg):
    r"""Insert whitespace, an unparsed argument string, or an Arg at *i*,
    keeping ``self.all`` in sync with the parsed-argument list."""
    arg = self.__coerce(arg)
    # Only genuine Arg objects join the parsed list; whitespace/strings
    # are tracked solely in `self.all`.
    if isinstance(arg, Arg):
        super().insert(i, arg)
    if len(self) <= 1:
        self.all.append(arg)
    else:
        if i > len(self):
            i = len(self) - 1
        # Anchor on the parsed argument preceding position i.
        before = self[i - 1]
        index_before = self.all.index(before)
        self.all.insert(index_before + 1, arg)
r Insert whitespace an unparsed argument string or an argument object .
23,831
def remove(self, item):
    """Remove an unparsed argument string or Arg from both views."""
    item = self.__coerce(item)
    self.all.remove(item)
    super().remove(item)
Remove either an unparsed argument string or an argument object .
23,832
def pop(self, i):
    """Pop the argument at index *i*, removing it from ``self.all`` too."""
    item = super().pop(i)
    return self.all.pop(self.all.index(item))
Pop argument object at provided index .
23,833
def forward(self, j=1):
    """Advance *j* steps and return the span passed over.

    Negative *j* delegates to ``backward``.
    """
    if j < 0:
        return self.backward(-j)
    self.__i += j
    return self[self.__i - j:self.__i]
Move forward by j steps .
23,834
def read(tex):
    """Read and parse LaTeX source.

    Returns a ``(TexEnv('[tex]', ...), source string)`` pair. *tex* may be a
    string or an iterable of token iterables.
    """
    if not isinstance(tex, str):
        # Flatten an iterable of iterables into a single source string.
        # (The original had a no-op `tex = tex` branch for the str case.)
        tex = ''.join(itertools.chain(*tex))
    buf, children = Buffer(tokenize(tex)), []
    while buf.hasNext():
        content = read_tex(buf)
        if content is not None:
            children.append(content)
    return TexEnv('[tex]', children), tex
Read and parse all LaTeX source
23,835
def resolve(tex):
    """Resolve all \\subimport, \\import and \\include directives in *tex*.

    Returns the updated parse tree. Files referenced by the directives are
    parsed recursively and spliced in place of the directive.
    """
    soup = TexSoup(tex)
    for subimport in soup.find_all('subimport'):
        # subimport takes a directory and a filename as separate arguments.
        path = subimport.args[0] + subimport.args[1]
        with open(path) as f:  # previously left unclosed (handle leak)
            subimport.replace_with(*resolve(f).contents)
    for _import in soup.find_all('import'):
        with open(_import.args[0]) as f:
            _import.replace_with(*resolve(f).contents)
    for include in soup.find_all('include'):
        with open(include.args[0]) as f:
            include.replace_with(*resolve(f).contents)
    return soup
Resolve all imports and update the parse tree .
23,836
def sollen(tex, command):
    r"""Measure total solution length: the summed string length over every
    occurrence of *command* in *tex*."""
    matches = TexSoup(tex).find_all(command)
    return sum(len(match.string) for match in matches)
r Measure solution length
23,837
def count(tex):
    """Map each \\label in *tex* to the \\ref occurrences referencing it."""
    soup = TexSoup(tex)
    labels = {label.string for label in soup.find_all('label')}
    # NOTE(review): despite the name, the values are the find_all results
    # themselves rather than integer counts — confirm whether len() was
    # intended by the caller.
    return dict((label, soup.find_all('\ref{%s}' % label)) for label in labels)
Extract all labels, then count the number of times each is referenced in the provided file. Does not follow \include directives.
23,838
def next_token(text):
    r"""Return the next token from *text*, advancing the iterator.

    Returns None once the input is exhausted.
    """
    while text.hasNext():
        # The first registered tokenizer that matches wins.
        for _, tokenizer in tokenizers:
            candidate = tokenizer(text)
            if candidate is not None:
                return candidate
r Returns the next possible token advancing the iterator to the next position to start processing from .
23,839
def tokenize(text):
    r"""Generate LaTeX tokens from *text* until none remain."""
    while True:
        current = next_token(text)
        if current is None:
            return
        yield current
r Generator for LaTeX tokens on text ignoring comments .
23,840
def token(name):
    """Decorator that registers a tokenizer function under *name*."""
    def register(f):
        tokenizers.append((name, f))
        return f
    return register
Marker for a token
23,841
def tokenize_punctuation_command(text):
    """Consume a punctuation-modifying command if one starts here.

    Returns None (no match) when the cursor is not on a backslash.
    """
    if text.peek() != '\\':
        return None
    for point in PUNCTUATION_COMMANDS:
        # Compare the characters following the backslash.
        if text.peek((1, len(point) + 1)) == point:
            return text.forward(len(point) + 1)
Process command that augments or modifies punctuation .
23,842
def tokenize_line_comment(text):
    r"""Consume a %-to-end-of-line comment, honoring escaped \%.

    Returns None (tokenizer contract) when no comment starts here.
    """
    result = TokenWithPosition('', text.position)
    if text.peek() == '%' and text.peek(-1) != '\\':
        result += text.forward(1)
        while text.peek() != '\n' and text.hasNext():
            result += text.forward(1)
        return result
r Process a line comment
23,843
def tokenize_argument(text):
    """Consume an argument delimiter token (optional or required) if present."""
    for delimiter in ARG_TOKENS:
        if text.startswith(delimiter):
            return text.forward(len(delimiter))
Process both optional and required arguments .
23,844
def tokenize_math(text):
    r"""Consume a math-mode delimiter ($ or $$) so it is not re-tokenized."""
    if not text.startswith('$'):
        return None
    # An escaped dollar sign is not a delimiter, unless the preceding
    # backslash itself ends an escaped \\ pair.
    if text.position != 0 and text.peek(-1) == '\\' and not text.endswith(r'\\'):
        return None
    starter = '$$' if text.startswith('$$') else '$'
    return TokenWithPosition(text.forward(len(starter)), text.position)
r Prevents math from being tokenized .
23,845
def tokenize_string(text, delimiters=None):
    r"""Consume a run of plain text, stopping before any delimiter.

    Escaped delimiters are absorbed into the string; a double backslash or
    a blank line is absorbed and (for the blank line) ends the token.
    """
    if delimiters is None:
        delimiters = ALL_TOKENS
    result = TokenWithPosition('', text.position)
    for c in text:
        if c == '\\' and str(text.peek()) in delimiters and \
                str(c + text.peek()) not in delimiters:
            # Escaped delimiter: keep the backslash plus the delimiter.
            c += next(text)
        elif str(c) in delimiters:
            # Unescaped delimiter: push it back and stop.
            text.backward(1)
            return result
        result += c
        if text.peek((0, 2)) == '\\\\':
            result += text.forward(2)
        if text.peek((0, 2)) == '\n\n':
            result += text.forward(2)
            return result
    return result
r Process a string of text
23,846
def read_tex(src):
    r"""Read the next expression (comment, math env, command, env, argument,
    or bare token) from the buffer *src*."""
    c = next(src)
    if c.startswith('%'):
        return c  # line comment: passed through untouched
    elif c.startswith('$'):
        name = '$$' if c.startswith('$$') else '$'
        expr = TexEnv(name, [], nobegin=True)
        return read_math_env(src, expr)
    elif c.startswith('\[') or c.startswith("\("):
        if c.startswith('\['):
            name, begin, end = 'displaymath', '\[', '\]'
        else:
            name, begin, end = "math", "\(", "\)"
        expr = TexEnv(name, [], nobegin=True, begin=begin, end=end)
        return read_math_env(src, expr)
    elif c.startswith('\\'):
        command = TokenWithPosition(c[1:], src.position)
        if command == 'item':
            contents, arg = read_item(src)
            mode, expr = 'command', TexCmd(command, contents, arg)
        elif command == 'begin':
            # Peek the environment name, then skip 'begin{name}' tokens.
            mode, expr, _ = 'begin', TexEnv(src.peek(1)), src.forward(3)
        else:
            mode, expr = 'command', TexCmd(command)
        expr.args = read_args(src, expr.args)
        if mode == 'begin':
            read_env(src, expr)
        return expr
    if c in ARG_START_TOKENS:
        return read_arg(src, c)
    return c
r Read next expression from buffer
23,847
def read_item(src):
    r"""Read the content of an \item, returning ``(extra, arg)``.

    *arg* holds an optional leading bracketed argument; *extra* collects
    everything up to the next \item, \end, or blank line.
    """
    def stringify(s):
        return TokenWithPosition.join(s.split(' '), glue=' ')

    def forward_until_new(s):
        # Consume whitespace until a newline has been absorbed.
        t = TokenWithPosition('', s.peek().position)
        while (s.hasNext() and
                any([s.peek().startswith(substr)
                     for substr in string.whitespace]) and
                not t.strip(" ").endswith('\n')):
            t += s.forward(1)
        return t

    arg = []
    extra = []
    if src.peek() in ARG_START_TOKENS:
        c = next(src)
        a = read_arg(src, c)
        arg.append(a)
    if not src.hasNext():
        return extra, arg
    last = stringify(forward_until_new(src))
    extra.append(last.lstrip(" "))
    while (src.hasNext() and
            not str(src).strip(" ").startswith('\n\n') and
            not src.startswith('\item') and
            not src.startswith('\end') and
            not (isinstance(last, TokenWithPosition) and
                 last.strip(" ").endswith('\n\n') and
                 len(extra) > 1)):
        last = read_tex(src)
        extra.append(last)
    return extra, arg
r Read the item content .
23,848
def read_args(src, args=None):
    r"""Read all arguments for the current command from *src*.

    Whitespace between arguments is tolerated across at most one line break.
    """
    args = args or TexArgs()
    # Remember how far to rewind if no arguments end up being found.
    candidate_index = src.num_forward_until(lambda s: not s.isspace())
    while src.peek().isspace():
        args.append(read_tex(src))
    line_breaks = 0
    while src.peek() in ARG_START_TOKENS or \
            (src.peek().isspace() and line_breaks == 0):
        space_index = src.num_forward_until(lambda s: not s.isspace())
        if space_index > 0:
            line_breaks += 1
            # Only continue across whitespace if at most one newline
            # separates us from another argument opener.
            if src.peek((0, space_index)).count("\n") <= 1 and \
                    src.peek(space_index) in ARG_START_TOKENS:
                args.append(read_tex(src))
        else:
            line_breaks = 0
            tex_text = read_tex(src)
            args.append(tex_text)
    if not args:
        src.backward(candidate_index)
    return args
r Read all arguments from buffer .
23,849
def read_arg(src, c):
    """Read a single argument from opening token *c* to its closing token."""
    content = [c]
    while src.hasNext():
        if src.peek() in ARG_END_TOKENS:
            content.append(next(src))
            break
        content.append(read_tex(src))
    return Arg.parse(content)
Read the argument from buffer .
23,850
def hub_scores(msm, waypoints=None):
    """Compute the hub score for one or more waypoint states of *msm*.

    Returns an ndarray with one score per waypoint; waypoints defaults to
    all states.
    """
    n_states = msm.n_states_
    # Normalize waypoints to an iterable of state indices.
    if isinstance(waypoints, int):
        waypoints = [waypoints]
    elif waypoints is None:
        waypoints = xrange(n_states)
    elif not isinstance(waypoints, (list, np.ndarray)):
        raise ValueError("waypoints (%s) must be an int, a list, or None"
                         % str(waypoints))

    scores = []
    for waypoint in waypoints:
        other_states = (i for i in xrange(n_states) if i != waypoint)
        score = 0.0
        # Average the fraction-visited over every ordered (source, sink) pair.
        for source, sink in itertools.permutations(other_states, 2):
            score += fraction_visited(source, sink, waypoint, msm)
        score /= float((n_states - 1) * (n_states - 2))
        scores.append(score)
    return np.array(scores)
Calculate the hub score for one or more waypoints
23,851
def fit(self, sequences, y=None):
    """Fit the BACE lumping model to a sequence of cluster assignments."""
    super(BACE, self).fit(sequences, y=y)
    # Guard clause: lumping requires a target macrostate count.
    if self.n_macrostates is None:
        raise RuntimeError('n_macrostates must not be None to fit')
    self._do_lumping()
    return self
Fit a BACE lumping model using a sequence of cluster assignments .
23,852
def _do_lumping(self):
    """Run the BACE lumping, merging states until n_macrostates remain."""
    c = copy.deepcopy(self.countsmat_)
    if self.sliding_window:
        c *= self.lag_time
    c, macro_map, statesKeep = self._filterFunc(c)

    w = np.array(c.sum(axis=1)).flatten()
    w[statesKeep] += 1
    unmerged = np.zeros(w.shape[0], dtype=np.int8)
    unmerged[statesKeep] = 1

    indRecalc = self._getInds(c, statesKeep)
    dMat = np.zeros(c.shape, dtype=np.float32)
    i = 0  # (unused in the original; kept for fidelity)

    nCurrentStates = statesKeep.shape[0]
    self.bayesFactors = {}
    dMat, minX, minY = self._calcDMat(c, w, indRecalc, dMat, statesKeep,
                                      unmerged)
    while nCurrentStates > self.n_macrostates:
        c, w, indRecalc, dMat, macro_map, statesKeep, unmerged, minX, minY = \
            self._mergeTwoClosestStates(c, w, indRecalc, dMat, macro_map,
                                        statesKeep, minX, minY, unmerged)
        nCurrentStates -= 1
        if self.save_all_maps:
            saved_map = copy.deepcopy(macro_map)
            self.map_dict[nCurrentStates] = saved_map
        # NOTE(review): the mapping is captured one merge before the loop
        # terminates — confirm this off-by-one is intended.
        if nCurrentStates - 1 == self.n_macrostates:
            self.microstate_mapping_ = macro_map
Do the BACE lumping .
23,853
def percentage(self):
    """Return progress as a percentage of maxval, capped at 100."""
    if self.currval >= self.maxval:
        return 100.0
    return 100.0 * self.currval / self.maxval
Returns the progress as a percentage .
23,854
def find_packages():
    """Find all of mdtraj's python packages.

    Adapted from IPython's setupbase.py; Copyright IPython contributors,
    licensed under the BSD license.
    """
    packages = ['mdtraj.scripts']
    # `subdir` was previously named `dir`, shadowing the builtin.
    for subdir, _subdirs, files in os.walk('MDTraj'):
        if '__init__.py' not in files:
            continue
        package = subdir.replace(os.path.sep, '.')
        packages.append(package.replace('MDTraj', 'mdtraj'))
    return packages
Find all of mdtraj s python packages . Adapted from IPython s setupbase . py . Copyright IPython contributors licensed under the BSD license .
23,855
def _detect_sse3 ( self ) : "Does this compiler support SSE3 intrinsics?" self . _print_support_start ( 'SSE3' ) result = self . hasfunction ( '__m128 v; _mm_hadd_ps(v,v)' , include = '<pmmintrin.h>' , extra_postargs = [ '-msse3' ] ) self . _print_support_end ( 'SSE3' , result ) return result
Does this compiler support SSE3 intrinsics?
23,856
def _detect_sse41 ( self ) : "Does this compiler support SSE4.1 intrinsics?" self . _print_support_start ( 'SSE4.1' ) result = self . hasfunction ( '__m128 v; _mm_round_ps(v,0x00)' , include = '<smmintrin.h>' , extra_postargs = [ '-msse4' ] ) self . _print_support_end ( 'SSE4.1' , result ) return result
Does this compiler support SSE4 . 1 intrinsics?
23,857
def uncertainty_K(self):
    """Element-wise asymptotic standard deviation of the rate matrix."""
    if self.information_ is None:
        self._build_information()
    return _ratematrix.sigma_K(self.information_, theta=self.theta_,
                               n=self.n_states_)
Estimate of the element - wise asymptotic standard deviation in the rate matrix
23,858
def uncertainty_pi(self):
    """Element-wise asymptotic standard deviation of the stationary
    distribution."""
    if self.information_ is None:
        self._build_information()
    return _ratematrix.sigma_pi(self.information_, theta=self.theta_,
                                n=self.n_states_)
Estimate of the element - wise asymptotic standard deviation in the stationary distribution .
23,859
def uncertainty_eigenvalues(self):
    """Element-wise asymptotic standard deviation of the model eigenvalues."""
    if self.information_ is None:
        self._build_information()
    sigma = _ratematrix.sigma_eigenvalues(self.information_,
                                          theta=self.theta_,
                                          n=self.n_states_)
    if self.n_timescales is None:
        return sigma
    # Keep the stationary eigenvalue plus n_timescales dynamical ones.
    return np.nan_to_num(sigma[:self.n_timescales + 1])
Estimate of the element - wise asymptotic standard deviation in the model eigenvalues
23,860
def uncertainty_timescales(self):
    """Element-wise asymptotic standard deviation of the relaxation
    timescales."""
    if self.information_ is None:
        self._build_information()
    sigma = _ratematrix.sigma_timescales(self.information_,
                                         theta=self.theta_,
                                         n=self.n_states_)
    if self.n_timescales is None:
        return sigma
    return sigma[:self.n_timescales]
Estimate of the element - wise asymptotic standard deviation in the model relaxation timescales .
23,861
def _initial_guess(self, countsmat):
    r"""Generate an initial guess for \theta from *countsmat*.

    Honors a precomputed ``self.theta_`` if present; otherwise builds the
    guess according to ``self.guess`` ('log', 'pseudo', or an explicit rate
    matrix ndarray).
    """
    if self.theta_ is not None:
        return self.theta_

    if self.guess == 'log':
        transmat, pi = _transmat_mle_prinz(countsmat)
        K = np.real(scipy.linalg.logm(transmat)) / self.lag_time
    elif self.guess == 'pseudo':
        transmat, pi = _transmat_mle_prinz(countsmat)
        K = (transmat - np.eye(self.n_states_)) / self.lag_time
    elif isinstance(self.guess, np.ndarray):
        pi = _solve_ratemat_eigensystem(self.guess)[1][:, 0]
        K = self.guess
    else:
        # Previously fell through with K and pi undefined -> NameError.
        raise ValueError("guess must be 'log', 'pseudo', or an ndarray, "
                         "got %r" % (self.guess,))

    S = np.multiply(np.sqrt(np.outer(pi, 1 / pi)), K)
    sflat = np.maximum(S[np.triu_indices_from(countsmat, k=1)], 0)
    theta0 = np.concatenate((sflat, np.log(pi)))
    return theta0
Generate an initial guess for \ theta .
23,862
def _build_information(self):
    """Build the pseudo-inverse of the negative log-likelihood Hessian at
    theta_, storing it in ``self.information_``."""
    lag_time = float(self.lag_time)
    # Only nonzero theta entries participate in the Hessian.
    inds = np.where(self.theta_ != 0)[0]
    hessian = _ratematrix.hessian(self.theta_, self.countsmat_,
                                  t=lag_time, inds=inds)
    size = len(self.theta_)
    self.information_ = np.zeros((size, size))
    self.information_[np.ix_(inds, inds)] = scipy.linalg.pinv(-hessian)
Build the inverse of hessian of the log likelihood at theta_
23,863
def _log_posterior(theta, counts, alpha, beta, n):
    """Return the log posterior probability and its gradient.

    The gradient is accumulated in-place into the likelihood gradient via
    the `grad=` views passed to the prior terms.
    """
    logp1, grad = loglikelihood(theta, counts)
    logp2 = lexponential(theta[:-n], beta, grad=grad[:-n])
    logp3 = ldirichlet_softmax(theta[-n:], alpha=alpha, grad=grad[-n:])
    return logp1 + logp2 + logp3, grad
Log of the posterior probability and gradient
23,864
def categorical(pvals, size=None, random_state=None):
    """Draw random integers from a categorical distribution over *pvals*.

    *size* may be None (single draw), an int, or a tuple of ints; the error
    message previously advertised int support but an int actually raised
    TypeError, so int handling is now implemented (backward compatible).
    """
    cumsum = np.cumsum(pvals)
    if size is None:
        size = (1,)
        axis = 0
    elif isinstance(size, int):
        size = (size, 1)
        axis = 1
    elif isinstance(size, tuple):
        # Append a broadcast axis so comparison against cumsum works.
        size = size + (1,)
        axis = len(size) - 1
    else:
        raise TypeError('size must be an int or tuple of ints')
    random_state = check_random_state(random_state)
    return np.sum(cumsum < random_state.random_sample(size), axis=axis)
Return random integer from a categorical distribution
23,865
def metzner_mcmc_slow(Z, n_samples, n_thin=1, random_state=None):
    """Metropolis MCMC sampler for reversible transition matrices.

    Yields a transition matrix every *n_thin* accepted-or-rejected steps,
    over *n_samples* total steps.
    """
    K_MINUS = 0.9
    K_PLUS = 1.1
    Z = np.asarray(Z)
    n_states = Z.shape[0]
    # BUG FIX: the original test `not Z.ndim == 2 and Z.shape[1] == n_states`
    # parsed as `(not Z.ndim == 2) and ...`, so non-square 2-D input was
    # never rejected.
    if Z.ndim != 2 or Z.shape[1] != n_states:
        raise ValueError("Z must be square. Z.shape=%s" % str(Z.shape))
    K = 0.5 * (Z + Z.T) / np.sum(Z, dtype=float)
    random = check_random_state(random_state)
    n_accept = 0
    for t in range(n_samples):
        # np.int was removed in NumPy 1.24; the builtin is equivalent here.
        i, j = (random.rand(2) * n_states).astype(int)
        sc = np.sum(K)
        if i == j:
            a, b = max(-K[i, j], K_MINUS - sc), K_PLUS - sc
        else:
            a, b = max(-K[i, j], 0.5 * (K_MINUS - sc)), 0.5 * (K_PLUS - sc)
        epsilon = random.uniform(a, b)
        K_proposal = np.copy(K)
        K_proposal[i, j] += epsilon
        if i != j:
            # Keep the proposal symmetric.
            K_proposal[j, i] += epsilon
        cutoff = np.exp(_logprob_T(_K_to_T(K_proposal), Z) -
                        _logprob_T(_K_to_T(K), Z))
        r = random.rand()
        if r < cutoff:
            n_accept += 1
            K = K_proposal
        if (t + 1) % n_thin == 0:
            yield _K_to_T(K)
Metropolis Markov chain Monte Carlo sampler for reversible transition matrices
23,866
def get_layout():
    """Specify the hierarchy of our templates."""
    # The tica -> cluster -> msm chain is shared by several feature dirs.
    msm = TemplateDir(
        'msm',
        [
            'msm/timescales.py',
            'msm/timescales-plot.py',
            'msm/microstate.py',
            'msm/microstate-plot.py',
            'msm/microstate-traj.py',
        ],
        [],
    )
    cluster = TemplateDir(
        'cluster',
        [
            'cluster/cluster.py',
            'cluster/cluster-plot.py',
            'cluster/sample-clusters.py',
            'cluster/sample-clusters-plot.py',
        ],
        [msm],
    )
    tica_msm = TemplateDir(
        'tica',
        [
            'tica/tica.py',
            'tica/tica-plot.py',
            'tica/tica-sample-coordinate.py',
            'tica/tica-sample-coordinate-plot.py',
        ],
        [cluster],
    )
    analysis = TemplateDir(
        'analysis',
        [
            'analysis/gather-metadata.py',
            'analysis/gather-metadata-plot.py',
        ],
        [
            TemplateDir(
                'rmsd',
                ['rmsd/rmsd.py', 'rmsd/rmsd-plot.py'],
                [],
            ),
            TemplateDir(
                'landmarks',
                [
                    'landmarks/find-landmarks.py',
                    'landmarks/featurize.py',
                    'landmarks/featurize-plot.py',
                ],
                [tica_msm],
            ),
            TemplateDir(
                'dihedrals',
                ['dihedrals/featurize.py', 'dihedrals/featurize-plot.py'],
                [tica_msm],
            ),
        ],
    )
    return TemplateDir(
        '',
        [
            '0-test-install.py',
            '1-get-example-data.py',
            'README.md',
        ],
        [analysis],
    )
Specify a hierarchy of our templates .
23,867
def find(self, name, limit=None):
    """Locate the named TemplateDir in this hierarchy, or return None."""
    if name == self.name:
        if limit is not None:
            # NOTE(review): assert is stripped under -O; consider raising.
            assert limit == 1
            self.subdirs = []  # limit==1 truncates children of the match
        return self
    for subdir in self.subdirs:
        found = subdir.find(name, limit)
        if found is not None:
            return found
    return None
Find the named TemplateDir in the hierarchy
23,868
def implied_timescales(sequences, lag_times, n_timescales=10, msm=None,
                       n_jobs=1, verbose=0):
    """Calculate the implied timescales of an MSM over several lag times."""
    if msm is None:
        msm = MarkovStateModel()
    models = param_sweep(msm, sequences, {'lag_time': lag_times},
                         n_jobs=n_jobs, verbose=verbose)
    all_timescales = [m.timescales_ for m in models]
    # Truncate to the shortest spectrum available across all models.
    n_timescales = min(n_timescales, min(len(ts) for ts in all_timescales))
    return np.array([ts[:n_timescales] for ts in all_timescales])
Calculate the implied timescales for a given MSM .
23,869
def experimental(name=None):
    """Decorator marking a function or method as experimental.

    *name* overrides the warned-about name; defaults to the wrapped
    function's __name__.
    """
    def inner(func):
        @functools.wraps(func)
        def wrapper(*fargs, **kw):
            label = name if name is not None else func.__name__
            warnings.warn("%s" % label, category=ExperimentalWarning,
                          stacklevel=2)
            return func(*fargs, **kw)
        return wrapper
    return inner
A simple decorator to mark functions and methods as experimental .
23,870
def _replace_labels ( doc ) : lines = doc . splitlines ( ) labelstart , labelend = None , None foundattributes = False for i , line in enumerate ( lines ) : stripped = line . strip ( ) if stripped == 'Attributes' : foundattributes = True if foundattributes and not labelstart and stripped . startswith ( 'labels_' ) : labelstart = len ( '\n' . join ( lines [ : i ] ) ) + 1 if labelstart and not labelend and stripped == '' : labelend = len ( '\n' . join ( lines [ : i + 1 ] ) ) if labelstart is None or labelend is None : return doc replace = '\n' . join ( [ ' labels_ : list of arrays, each of shape [sequence_length, ]' , ' The label of each point is an integer in [0, n_clusters).' , '' , ] ) return doc [ : labelstart ] + replace + doc [ labelend : ]
Really hacky find - and - replace method that modifies one of the sklearn docstrings to change the semantics of labels_ for the subclasses
23,871
def dump(value, filename, compress=None, cache_size=None):
    """Save an arbitrary Python object to *filename* using pickle.

    Parameters
    ----------
    value : object
        Any picklable object.
    filename : str
        Destination path; the file is opened in binary write mode.
    compress, cache_size : deprecated
        Retained only for backward compatibility; passing either emits a
        warning and has no effect.
    """
    if not (compress is None and cache_size is None):
        warnings.warn("compress and cache_size are no longer valid options")
    with open(filename, 'wb') as handle:
        pickle.dump(value, handle)
Save an arbitrary python object using pickle .
23,872
def load(filename):
    """Load an object saved with :func:`dump` (pickle) or with joblib.

    Tries the pickle protocol first and falls back to joblib; if both
    fail, raises ``IOError`` carrying both underlying error messages.
    """
    try:
        with open(filename, 'rb') as handle:
            return pickle.load(handle)
    except Exception as pickle_err:
        try:
            return jl_load(filename)
        except Exception as joblib_err:
            raise IOError(
                "Unable to load {} using the pickle or joblib protocol.\n"
                "Pickle: {}\n"
                "Joblib: {}".format(filename, pickle_err, joblib_err))
Load an object that has been saved with dump .
23,873
def verbosedump(value, fn, compress=None):
    """Verbose wrapper around :func:`dump`.

    Prints the destination path and the type of *value* to stdout, then
    delegates to ``dump`` (the deprecated ``compress`` argument is passed
    through and triggers ``dump``'s deprecation warning when not None).
    """
    print('Saving "%s"... (%s)' % (fn, type(value)))
    dump(value, fn, compress=compress)
Verbose wrapper around dump
23,874
def net_fluxes(sources, sinks, msm, for_committors=None):
    """Compute the transition path theory net flux matrix.

    The gross flux matrix from :func:`fluxes` is antisymmetrized and
    negative entries are clamped to zero, leaving only the net forward
    flux on each edge.
    """
    gross = fluxes(sources, sinks, msm, for_committors=for_committors)
    net = gross - gross.T
    net[net < 0] = 0.0
    return net
Computes the transition path theory net flux matrix .
23,875
def featurize_all(filenames, featurizer, topology, chunk=1000, stride=1):
    """Load and featurize many trajectory files.

    Parameters
    ----------
    filenames : list of str
        Trajectory file paths.  ``.h5`` files carry their own topology;
        all other formats are loaded with ``top=topology``.
    featurizer : Featurizer
        Object whose ``partial_transform`` maps a trajectory chunk to a
        2-D feature array.
    topology : md.Topology or str
        Topology used for non-HDF5 trajectories.
    chunk : int, default=1000
        Frames per iteration of ``md.iterload``.
    stride : int, default=1
        Keep every ``stride``-th frame.

    Returns
    -------
    data : np.ndarray
        Concatenated feature matrix over all files.
    indices : np.ndarray
        Original (unstrided) frame index of each row.
    fns : np.ndarray of str
        Source filename of each row.

    Raises
    ------
    ValueError
        If no frames were loaded from any file.
    """
    data = []
    indices = []
    fns = []
    for file in filenames:
        # .h5 trajectories embed their topology; others need one supplied
        kwargs = {} if file.endswith('.h5') else {'top': topology}
        count = 0
        for t in md.iterload(file, chunk=chunk, stride=stride, **kwargs):
            x = featurizer.partial_transform(t)
            n_frames = len(x)
            data.append(x)
            # map each kept frame back to its index in the raw trajectory
            indices.append(count + (stride * np.arange(n_frames)))
            fns.extend([file] * n_frames)
            count += (stride * n_frames)
    if len(data) == 0:
        # was: ValueError("None!") -- an uninformative placeholder message
        raise ValueError("no frames were loaded from any of the "
                         "trajectory files: %s" % str(filenames))
    return np.concatenate(data), np.concatenate(indices), np.array(fns)
Load and featurize many trajectory files .
23,876
def describe_features(self, traj):
    """Generic feature description: one placeholder entry per feature.

    Runs ``partial_transform`` on *traj* to discover the feature count,
    then builds "N/A" descriptors (no per-feature metadata is available
    for this featurizer).
    """
    n_features = self.partial_transform(traj).shape[1]
    na = lambda: itertools.repeat("N/A", n_features)
    rows = zip(na(), na(), na(),
               itertools.repeat(("N/A", "N/A", "N/A", "N/A"), n_features))
    return dict_maker(rows)
Generic method for describing features .
23,877
def describe_features(self, traj):
    """Return a list of dictionaries describing the landmark-RMSD features.

    One entry per landmark frame of the sliced reference trajectory; every
    entry shares the featurizer's ``atom_indices`` and ``sigma``.
    """
    # ensure the reference data is prepared before reading its shape
    self.partial_transform(traj[0])
    top = traj.topology
    n_landmarks = self.sliced_reference_traj.n_frames
    aind_tuples = [self.atom_indices] * n_landmarks
    rows = zippy_maker(aind_tuples, top)
    rows = itertools.product(["LandMarkFeaturizer"], ["RMSD"],
                             [self.sigma], rows)
    return list(dict_maker(rows))
Return a list of dictionaries describing the LandmarkRMSD features .
23,878
def partial_transform(self, traj):
    """Featurize an MD trajectory via pairwise atom-atom distances.

    Computes the distance for each pair in ``self.pair_indices`` (with
    periodic boundary conditions per ``self.periodic``) and raises each
    distance to ``self.exponent``.
    """
    distances = md.geometry.compute_distances(
        traj, self.pair_indices, periodic=self.periodic)
    return distances ** self.exponent
Featurize an MD trajectory into a vector space via pairwise atom - atom distances
23,879
def describe_features(self, traj):
    """Return a list of dictionaries describing the atom-pair features.

    For each atom pair, records the atom indices, the residue sequence
    numbers, residue indices, and residue names of both endpoints.
    """
    top = traj.topology
    residue_indices = [
        [top.atom(pair[0]).residue.index, top.atom(pair[1]).residue.index]
        for pair in self.atom_indices
    ]
    aind = [list(pair) for pair in self.atom_indices]
    resseqs = [[top.residue(ri).resSeq for ri in rids]
               for rids in residue_indices]
    resnames = [[top.residue(ri).name for ri in rids]
                for rids in residue_indices]
    rows = itertools.product(
        ["AtomPairs"], ["Distance"],
        ["Exponent {}".format(self.exponent)],
        zip(aind, resseqs, residue_indices, resnames))
    return list(dict_maker(rows))
Return a list of dictionaries describing the atom pair features .
23,880
def partial_transform(self, traj):
    """Featurize an MD trajectory via soft bins over dihedral angle space.

    For each dihedral type in ``self.types`` (e.g. 'phi', 'psi'), the
    angles are evaluated against a von Mises density at ``self.n_bins``
    bin centers (``self.loc``, concentration ``self.kappa``), producing a
    soft-histogram feature per angle.
    """
    x = []
    for a in self.types:
        # resolve e.g. 'phi' -> md.compute_phi
        func = getattr(md, 'compute_%s' % a)
        _, y = func(traj)  # y: angles, presumably (n_frames, n_angles) -- TODO confirm
        # broadcast every angle against all bin centers at once
        res = vm.pdf(y[..., np.newaxis], loc=self.loc, kappa=self.kappa)
        # NOTE(review): order='F' interleaves bins and angles in a specific
        # column order -- do not change without checking downstream indexing
        x.extend(np.reshape(res, (1, -1, self.n_bins * y.shape[1]), order='F'))
    return np.hstack(x)
Featurize an MD trajectory into a vector space via calculation of soft - bins over dihdral angle space .
23,881
def describe_features(self, traj):
    """Return a list of dictionaries describing the SASA features.

    In ``"residue"`` mode there is one feature per residue (all its atom
    indices are listed); otherwise one feature per atom, annotated with
    that atom's residue.
    """
    _, mapping = md.geometry.sasa.shrake_rupley(
        traj, mode=self.mode, get_mapping=True)
    top = traj.topology
    if self.mode == "residue":
        resids = np.unique(mapping)
        resseqs = [top.residue(ri).resSeq for ri in resids]
        resnames = [top.residue(ri).name for ri in resids]
        aind_tuples = [[atom.index for atom in res.atoms]
                       for res in top.residues]
        rows = zip(aind_tuples, resseqs, resids, resnames)
    else:
        resids = [top.atom(ai).residue.index for ai in mapping]
        resseqs = [top.atom(ai).residue.resSeq for ai in mapping]
        resnames = [top.atom(ai).residue.name for ai in mapping]
        rows = zip(mapping, resseqs, resids, resnames)
    zippy = itertools.product(['SASA'], ['N/A'], [self.mode], rows)
    return list(dict_maker(zippy))
Return a list of dictionaries describing the SASA features .
23,882
def partial_transform(self, traj):
    """Featurize an MD trajectory via solvent fingerprints.

    For each solute atom, sums a kernel of its distances to every solvent
    atom over each frame, giving one feature per solute atom.
    """
    fingerprints = np.zeros((traj.n_frames, self.n_features))
    # reusable (n_solvent, 2) pair buffer: column 0 = solute, column 1 = solvent
    atom_pairs = np.zeros((len(self.solvent_indices), 2))
    sigma = self.sigma
    for i, solute_i in enumerate(self.solute_indices):
        atom_pairs[:, 0] = solute_i
        atom_pairs[:, 1] = self.solvent_indices
        distances = md.compute_distances(traj, atom_pairs, periodic=True)
        # NOTE(review): exponent is -d / (2*sigma^2), not -d**2 / (2*sigma^2);
        # a Gaussian kernel would square the distance -- confirm this is the
        # intended (Laplacian-like) weighting before changing it.
        distances = np.exp(-distances / (2 * sigma * sigma))
        fingerprints[:, i] = np.sum(distances, axis=1)
    return fingerprints
Featurize an MD trajectory into a vector space via calculation of solvent fingerprints
23,883
def partial_transform(self, traj):
    """Featurize an MD trajectory as raw Cartesian coordinates.

    Optionally restricts to ``self.atom_indices`` and superposes onto
    ``self.ref_traj`` before flattening each frame's xyz coordinates into
    a single feature vector.
    """
    if self.atom_indices is None:
        working = traj
    else:
        working = traj.atom_slice(self.atom_indices)
    if self.ref_traj is not None:
        # align in place; parallel=False for deterministic behavior
        working.superpose(self.ref_traj, parallel=False)
    return working.xyz.reshape(len(working), -1)
Featurize an MD trajectory into a vector space with the raw cartesian coordinates .
23,884
def partial_transform(self, traj):
    """Select a subset of feature columns from a single 2-D input array.

    When ``self.index`` is set, those columns are selected; otherwise the
    first ``self.first`` columns are kept.
    """
    if self.index is None:
        return traj[:, :self.first]
    return traj[:, self.index]
Slice a single input array along to select a subset of features .
23,885
def param_sweep(model, sequences, param_grid, n_jobs=1, verbose=0):
    """Fit clones of *model* over every parameter combination in the grid.

    Parameters
    ----------
    model : estimator
        Template estimator; it is cloned for each parameter setting.
    sequences : list of array-like
        Training sequences passed to each clone's fit helper.
    param_grid : dict or sklearn.model_selection.ParameterGrid
        Parameter settings to sweep.
    n_jobs, verbose : int
        Passed to ``joblib.Parallel``.

    Returns
    -------
    models : list
        One fitted model per parameter combination.

    Raises
    ------
    ValueError
        If *param_grid* is neither a dict nor a ParameterGrid.
    """
    if isinstance(param_grid, dict):
        param_grid = ParameterGrid(param_grid)
    elif not isinstance(param_grid, ParameterGrid):
        # fixed typo in the error message: "ParamaterGrid" -> "ParameterGrid"
        raise ValueError(
            "param_grid must be a dict or ParameterGrid instance")
    iter_args = ((clone(model).set_params(**params), sequences)
                 for params in param_grid)
    models = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_param_sweep_helper)(args) for args in iter_args)
    return models
Fit a series of models over a range of parameters .
23,886
def dataset(path, mode='r', fmt=None, verbose=False, **kwargs):
    """Open a dataset object.

    When reading without an explicit *fmt*, the format is guessed from
    *path*; writing or appending requires *fmt* to be given.  Dispatches
    to the dataset class matching *fmt*.
    """
    if fmt is None:
        if mode == 'r':
            fmt = _guess_format(path)
        elif mode in 'wa':
            raise ValueError('mode="%s", but no fmt. fmt=%s' % (mode, fmt))
    if fmt == 'dir-npy':
        return NumpyDirDataset(path, mode=mode, verbose=verbose)
    if fmt == 'mdtraj':
        return MDTrajDataset(path, mode=mode, verbose=verbose, **kwargs)
    if fmt == 'hdf5':
        return HDF5Dataset(path, mode=mode, verbose=verbose)
    if fmt.endswith("-union"):
        raise ValueError("union datasets have been removed. "
                         "Please use msmbuilder.featurizer.FeatureUnion")
    raise NotImplementedError("Unknown format fmt='%s'" % fmt)
Open a dataset object
23,887
def transform_with(self, estimator, out_ds, fmt=None):
    """Apply ``estimator.partial_transform`` to every item of this dataset,
    writing results into *out_ds*.

    *out_ds* may be a path (a derived dataset is created with *fmt*) or an
    already-open dataset, which must be in write or append mode.
    Returns the output dataset.
    """
    if isinstance(out_ds, str):
        out_ds = self.create_derived(out_ds, fmt=fmt)
    elif isinstance(out_ds, _BaseDataset):
        assert out_ds.mode in ('w', 'a'), \
            "Dataset must be opened in write mode."
    else:
        raise ValueError(
            "Please specify a dataset path or an existing dataset.")
    for key in self.keys():
        out_ds[key] = estimator.partial_transform(self[key])
    return out_ds
Call the partial_transform method of the estimator on this dataset
23,888
def fit_transform_with(self, estimator, out_ds, fmt=None):
    """Fit *estimator* on this dataset, then transform into a new dataset.

    Convenience wrapper: calls ``fit_with`` followed by
    ``transform_with`` and returns the resulting output dataset.
    """
    self.fit_with(estimator)
    return self.transform_with(estimator, out_ds, fmt=fmt)
Create a new dataset with the given estimator .
23,889
def _mapped_populations ( mdl1 , mdl2 ) : return_vect = np . zeros ( mdl1 . n_states_ ) for i in range ( mdl1 . n_states_ ) : try : mdl1_unmapped = mdl1 . inverse_transform ( [ i ] ) [ 0 ] [ 0 ] mdl2_mapped = mdl2 . mapping_ [ mdl1_unmapped ] return_vect [ i ] = mdl2 . populations_ [ mdl2_mapped ] except : pass return return_vect
Method to get the populations for states in mdl 1 from populations inferred in mdl 2 . Resorts to 0 if population is not present .
23,890
def top_path(sources, sinks, net_flux):
    """Find the single highest-flux path from *sources* to *sinks* with a
    Dijkstra-style search, where a path's flux is the minimum (bottleneck)
    edge flux along it.

    Parameters
    ----------
    sources, sinks : array-like of int
        Source and sink state indices.
    net_flux : np.ndarray, shape (n_states, n_states)
        Net flux matrix; only strictly positive entries are edges.

    Returns
    -------
    path : np.ndarray of int
        State indices from a source to the best sink.
    flux : float
        Bottleneck flux of that path.
    """
    # np.int / np.bool aliases were removed from NumPy (>= 1.24); use the
    # builtin types instead.
    sources = np.array(sources, dtype=int).reshape((-1,))
    sinks = np.array(sinks, dtype=int).reshape((-1,))
    n_states = net_flux.shape[0]
    queue = list(sources)
    visited = np.zeros(n_states).astype(bool)
    previous_node = np.ones(n_states).astype(int) * -1
    min_fluxes = np.ones(n_states) * -1 * np.inf
    min_fluxes[sources] = np.inf
    while len(queue) > 0:
        # visit the queued node whose current bottleneck flux is largest
        test_node = queue.pop(min_fluxes[queue].argmax())
        visited[test_node] = True
        if np.all(visited[sinks]):
            break
        neighbors = np.where(net_flux[test_node, :] > 0)[0]
        if len(neighbors) == 0:
            continue
        new_fluxes = net_flux[test_node, neighbors].flatten()
        # cap each candidate at the bottleneck of the path into test_node
        new_fluxes[np.where(new_fluxes > min_fluxes[test_node])] = \
            min_fluxes[test_node]
        ind = np.where((1 - visited[neighbors]) &
                       (new_fluxes > min_fluxes[neighbors]))
        min_fluxes[neighbors[ind]] = new_fluxes[ind]
        previous_node[neighbors[ind]] = test_node
        queue.extend(neighbors[ind])
    top_path = []
    # start from the sink with the best bottleneck flux, walk predecessors
    top_path.append(int(sinks[min_fluxes[sinks].argmax()]))
    while previous_node[top_path[-1]] != -1:
        top_path.append(previous_node[top_path[-1]])
    return np.array(top_path[::-1]), min_fluxes[top_path[0]]
Use the Dijkstra algorithm to find the highest-flux path connecting a set of source states to a set of sink states.
23,891
def _remove_bottleneck ( net_flux , path ) : net_flux = copy . copy ( net_flux ) bottleneck_ind = net_flux [ path [ : - 1 ] , path [ 1 : ] ] . argmin ( ) net_flux [ path [ bottleneck_ind ] , path [ bottleneck_ind + 1 ] ] = 0.0 return net_flux
Internal function for modifying the net flux matrix by removing a particular edge corresponding to the bottleneck of a particular path .
23,892
def _subtract_path_flux ( net_flux , path ) : net_flux = copy . copy ( net_flux ) net_flux [ path [ : - 1 ] , path [ 1 : ] ] -= net_flux [ path [ : - 1 ] , path [ 1 : ] ] . min ( ) bottleneck_ind = net_flux [ path [ : - 1 ] , path [ 1 : ] ] . argmin ( ) net_flux [ path [ bottleneck_ind ] , path [ bottleneck_ind + 1 ] ] = 0.0 return net_flux
Internal function for modifying the net flux matrix by subtracting a path's flux from every edge in the path.
23,893
def paths(sources, sinks, net_flux, remove_path='subtract',
          num_paths=np.inf, flux_cutoff=(1 - 1E-10)):
    """Get the top N paths by iteratively running Dijkstra's algorithm.

    After each best path is found, its flux is removed from the network
    via *remove_path* ('subtract', 'bottleneck', or a custom callable) and
    the search repeats.  Stops after *num_paths* paths, once *flux_cutoff*
    of the total source flux is explained, or when no finite-flux path
    remains.

    Returns
    -------
    paths : list of np.ndarray
    fluxes : np.ndarray
    """
    if not callable(remove_path):
        if remove_path == 'subtract':
            remove_path = _subtract_path_flux
        elif remove_path == 'bottleneck':
            remove_path = _remove_bottleneck
        else:
            raise ValueError("remove_path_func (%s) must be a callable or one of ['subtract', 'bottleneck']" % str(remove_path))
    net_flux = copy.copy(net_flux)
    paths = []
    fluxes = []
    # total outgoing flux from the sources, used to normalize progress
    total_flux = net_flux[sources, :].sum()
    not_done = True
    counter = 0
    expl_flux = 0.0
    while not_done:
        path, flux = top_path(sources, sinks, net_flux)
        # an infinite flux means no usable path was found (sources only)
        if np.isinf(flux):
            break
        paths.append(path)
        fluxes.append(flux)
        expl_flux += flux / total_flux
        counter += 1
        if counter >= num_paths or expl_flux >= flux_cutoff:
            break
        # deflate the network before searching for the next-best path
        net_flux = remove_path(net_flux, path)
    fluxes = np.array(fluxes)
    return paths, fluxes
Get the top N paths by iteratively performing Dijkstra's algorithm.
23,894
def rao_blackwell_ledoit_wolf(S, n):
    """Rao-Blackwellized Ledoit-Wolf shrinkage estimator of a covariance
    matrix.

    Parameters
    ----------
    S : np.ndarray, shape (p, p)
        Sample covariance matrix.
    n : int
        Number of samples used to estimate ``S``.

    Returns
    -------
    sigma : np.ndarray, shape (p, p)
        Shrunk covariance estimate ``(1 - rho) * S + rho * F`` where F is
        the scaled identity target.
    rho : float
        Shrinkage intensity, clipped at 1.
    """
    p = len(S)
    assert S.shape == (p, p)
    denom = n * (n + 2)
    alpha = (n - 2) / denom
    beta = ((p + 1) * n - 2) / denom
    trace_S = np.trace(S)
    U = p * np.sum(S * S) / trace_S ** 2 - 1
    rho = min(alpha + beta / U, 1)
    target = (trace_S / p) * np.eye(p)
    return (1 - rho) * S + rho * target, rho
Rao - Blackwellized Ledoit - Wolf shrinkaged estimator of the covariance matrix .
23,895
def fit(self, sequences, y=None):
    """Fit the model with a collection of sequences.

    Each sequence is folded into the running estimate via ``self._fit``.
    Raises ``ValueError`` when no sequence was long enough to contribute
    (i.e. ``n_sequences_`` stayed 0).  Returns ``self``.
    """
    self._initialized = False
    check_iter_of_sequences(sequences, max_iter=3)
    for sequence in sequences:
        self._fit(sequence)
    if self.n_sequences_ == 0:
        raise ValueError('All sequences were shorter than '
                         'the lag time, %d' % self.lag_time)
    return self
Fit the model with a collection of sequences .
23,896
def transform(self, sequences):
    """Apply the learned dimensionality reduction to each sequence.

    Each 2-D sequence is mean-centered (when means are available) and
    projected onto ``self.components_``; the projection is then optionally
    rescaled by the kinetic or commute mapping.  Returns a list of
    transformed arrays, one per input sequence.
    """
    check_iter_of_sequences(sequences, max_iter=3)
    sequences_new = []
    for X in sequences:
        X = array2d(X)
        if self.means_ is not None:
            # center with the means learned during fit
            X = X - self.means_
        # project onto the estimated components (rows of components_)
        X_transformed = np.dot(X, self.components_.T)
        if self.kinetic_mapping:
            # kinetic mapping: weight each coordinate by its eigenvalue
            X_transformed *= self.eigenvalues_
        if self.commute_mapping:
            # commute mapping: scale by sqrt(regularized_timescale / 2);
            # the tanh term smoothly damps timescales near the lag time,
            # and nan_to_num guards against invalid sqrt results
            regularized_timescales = 0.5 * self.timescales_ * np.tanh(np.pi * ((self.timescales_ - self.lag_time) / self.lag_time) + 1)
            X_transformed *= np.sqrt(regularized_timescales / 2)
            X_transformed = np.nan_to_num(X_transformed)
        sequences_new.append(X_transformed)
    return sequences_new
Apply the dimensionality reduction on X .
23,897
def transform(self, X_all, y=None):
    """Subsample several time series by the configured lag time.

    With sliding-window mode enabled, every offset phase of each series
    is returned (lag_time output series per input series); otherwise each
    series is simply strided from its start.
    """
    lag = self._lag_time
    if self._sliding_window:
        return [series[offset::lag]
                for offset in range(lag)
                for series in X_all]
    return [series[::lag] for series in X_all]
Subsample several time series .
23,898
def iterate_tracker(maxiter, max_nc, verbose=False):
    """Generator that stops after *maxiter* iterations, or after the same
    array has been sent in *max_nc* times in a row.

    Usage: prime with ``next(gen)``, then repeatedly ``gen.send(arr)``;
    each send yields the iteration counter.  Arrays are compared by the
    SHA-1 of their raw bytes.

    Parameters
    ----------
    maxiter : int
        Hard cap on iterations.
    max_nc : int
        Number of consecutive identical arrays that triggers termination.
    verbose : bool
        Print a message when terminating due to no change.
    """
    last_hash = None
    last_hash_count = 0
    arr = yield  # priming yield: the first next()/send() lands here
    # was ``xrange`` -- a Python 2 builtin that raises NameError on Python 3
    for i in range(maxiter):
        arr = yield i
        if arr is not None:
            hsh = hashlib.sha1(arr.view(np.uint8)).hexdigest()
            if last_hash == hsh:
                last_hash_count += 1
            else:
                last_hash = hsh
                last_hash_count = 1
            if last_hash_count >= max_nc:
                if verbose:
                    print('Termination. Over %d iterations without '
                          'change.' % max_nc)
                break
Generator that breaks after maxiter iterations, or after the same array has been sent in max_nc or more times in a row.
23,899
def fit(self, sequences, y=None):
    """Fit a PCCA lumping model from sequences of cluster assignments.

    First fits the parent MSM on the sequences, then performs the PCCA
    lumping step on the resulting model.  Returns ``self``.
    """
    super(PCCA, self).fit(sequences, y=y)
    self._do_lumping()
    return self
Fit a PCCA lumping model using a sequence of cluster assignments .