idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
240,400
def get_lexer_for_mimetype ( _mime , * * options ) : for modname , name , _ , _ , mimetypes in itervalues ( LEXERS ) : if _mime in mimetypes : if name not in _lexer_cache : _load_lexers ( modname ) return _lexer_cache [ name ] ( * * options ) for cls in find_plugin_lexers ( ) : if _mime in cls . mimetypes : return cls ( * * options ) raise ClassNotFound ( 'no lexer for mimetype %r found' % _mime )
Get a lexer for a mimetype .
143
10
240,401
def _iter_lexerclasses ( plugins = True ) : for key in sorted ( LEXERS ) : module_name , name = LEXERS [ key ] [ : 2 ] if name not in _lexer_cache : _load_lexers ( module_name ) yield _lexer_cache [ name ] if plugins : for lexer in find_plugin_lexers ( ) : yield lexer
Return an iterator over all lexer classes .
87
9
240,402
def get_file_stats ( file_name , entity_type = 'file' , lineno = None , cursorpos = None , plugin = None , language = None , local_file = None ) : language = standardize_language ( language , plugin ) stats = { 'language' : language , 'dependencies' : [ ] , 'lines' : None , 'lineno' : lineno , 'cursorpos' : cursorpos , } if entity_type == 'file' : lexer = get_lexer ( language ) if not language : language , lexer = guess_language ( file_name , local_file ) parser = DependencyParser ( local_file or file_name , lexer ) stats . update ( { 'language' : use_root_language ( language , lexer ) , 'dependencies' : parser . parse ( ) , 'lines' : number_lines_in_file ( local_file or file_name ) , } ) return stats
Returns a hash of information about the entity .
210
9
240,403
def guess_language ( file_name , local_file ) : lexer = None language = get_language_from_extension ( file_name ) if language : lexer = get_lexer ( language ) else : lexer = smart_guess_lexer ( file_name , local_file ) if lexer : language = u ( lexer . name ) return language , lexer
Guess lexer and language for a file .
85
10
240,404
def smart_guess_lexer ( file_name , local_file ) : lexer = None text = get_file_head ( file_name ) lexer1 , accuracy1 = guess_lexer_using_filename ( local_file or file_name , text ) lexer2 , accuracy2 = guess_lexer_using_modeline ( text ) if lexer1 : lexer = lexer1 if ( lexer2 and accuracy2 and ( not accuracy1 or accuracy2 > accuracy1 ) ) : lexer = lexer2 return lexer
Guess Pygments lexer for a file .
121
10
240,405
def guess_lexer_using_filename ( file_name , text ) : lexer , accuracy = None , None try : lexer = custom_pygments_guess_lexer_for_filename ( file_name , text ) except SkipHeartbeat as ex : raise SkipHeartbeat ( u ( ex ) ) except : log . traceback ( logging . DEBUG ) if lexer is not None : try : accuracy = lexer . analyse_text ( text ) except : log . traceback ( logging . DEBUG ) return lexer , accuracy
Guess lexer for given text limited to lexers for this file s extension .
115
17
240,406
def guess_lexer_using_modeline ( text ) : lexer , accuracy = None , None file_type = None try : file_type = get_filetype_from_buffer ( text ) except : # pragma: nocover log . traceback ( logging . DEBUG ) if file_type is not None : try : lexer = get_lexer_by_name ( file_type ) except ClassNotFound : log . traceback ( logging . DEBUG ) if lexer is not None : try : accuracy = lexer . analyse_text ( text ) except : # pragma: nocover log . traceback ( logging . DEBUG ) return lexer , accuracy
Guess lexer for given text using Vim modeline .
145
12
240,407
def get_language_from_extension ( file_name ) : filepart , extension = os . path . splitext ( file_name ) pathpart , filename = os . path . split ( file_name ) if filename == 'go.mod' : return 'Go' if re . match ( r'\.h.*$' , extension , re . IGNORECASE ) or re . match ( r'\.c.*$' , extension , re . IGNORECASE ) : if os . path . exists ( u ( '{0}{1}' ) . format ( u ( filepart ) , u ( '.c' ) ) ) or os . path . exists ( u ( '{0}{1}' ) . format ( u ( filepart ) , u ( '.C' ) ) ) : return 'C' if os . path . exists ( u ( '{0}{1}' ) . format ( u ( filepart ) , u ( '.m' ) ) ) or os . path . exists ( u ( '{0}{1}' ) . format ( u ( filepart ) , u ( '.M' ) ) ) : return 'Objective-C' if os . path . exists ( u ( '{0}{1}' ) . format ( u ( filepart ) , u ( '.mm' ) ) ) or os . path . exists ( u ( '{0}{1}' ) . format ( u ( filepart ) , u ( '.MM' ) ) ) : return 'Objective-C++' available_extensions = extensions_in_same_folder ( file_name ) for ext in CppLexer . filenames : ext = ext . lstrip ( '*' ) if ext in available_extensions : return 'C++' if '.c' in available_extensions : return 'C' if re . match ( r'\.m$' , extension , re . IGNORECASE ) and ( os . path . exists ( u ( '{0}{1}' ) . format ( u ( filepart ) , u ( '.h' ) ) ) or os . path . exists ( u ( '{0}{1}' ) . format ( u ( filepart ) , u ( '.H' ) ) ) ) : return 'Objective-C' if re . match ( r'\.mm$' , extension , re . IGNORECASE ) and ( os . path . exists ( u ( '{0}{1}' ) . format ( u ( filepart ) , u ( '.h' ) ) ) or os . path . exists ( u ( '{0}{1}' ) . format ( u ( filepart ) , u ( '.H' ) ) ) ) : return 'Objective-C++' return None
Returns a matching language for the given file extension .
599
10
240,408
def standardize_language ( language , plugin ) : if not language : return None # standardize language for this plugin if plugin : plugin = plugin . split ( ' ' ) [ - 1 ] . split ( '/' ) [ 0 ] . split ( '-' ) [ 0 ] standardized = get_language_from_json ( language , plugin ) if standardized is not None : return standardized # standardize language against default languages return get_language_from_json ( language , 'default' )
Maps a string to the equivalent Pygments language .
102
10
240,409
def get_language_from_json ( language , key ) : file_name = os . path . join ( os . path . dirname ( __file__ ) , 'languages' , '{0}.json' ) . format ( key . lower ( ) ) if os . path . exists ( file_name ) : try : with open ( file_name , 'r' , encoding = 'utf-8' ) as fh : languages = json . loads ( fh . read ( ) ) if languages . get ( language . lower ( ) ) : return languages [ language . lower ( ) ] except : log . traceback ( logging . DEBUG ) return None
Finds the given language in a json file .
141
10
240,410
def get_file_head ( file_name ) : text = None try : with open ( file_name , 'r' , encoding = 'utf-8' ) as fh : text = fh . read ( 512000 ) except : try : with open ( file_name , 'r' , encoding = sys . getfilesystemencoding ( ) ) as fh : text = fh . read ( 512000 ) # pragma: nocover except : log . traceback ( logging . DEBUG ) return text
Returns the first 512000 bytes of the file s contents .
111
12
240,411
def custom_pygments_guess_lexer_for_filename ( _fn , _text , * * options ) : fn = basename ( _fn ) primary = { } matching_lexers = set ( ) for lexer in _iter_lexerclasses ( ) : for filename in lexer . filenames : if _fn_matches ( fn , filename ) : matching_lexers . add ( lexer ) primary [ lexer ] = True for filename in lexer . alias_filenames : if _fn_matches ( fn , filename ) : matching_lexers . add ( lexer ) primary [ lexer ] = False if not matching_lexers : raise ClassNotFound ( 'no lexer for filename %r found' % fn ) if len ( matching_lexers ) == 1 : return matching_lexers . pop ( ) ( * * options ) result = [ ] for lexer in matching_lexers : rv = lexer . analyse_text ( _text ) if rv == 1.0 : return lexer ( * * options ) result . append ( customize_lexer_priority ( _fn , rv , lexer ) ) matlab = list ( filter ( lambda x : x [ 2 ] . name . lower ( ) == 'matlab' , result ) ) if len ( matlab ) > 0 : objc = list ( filter ( lambda x : x [ 2 ] . name . lower ( ) == 'objective-c' , result ) ) if objc and objc [ 0 ] [ 0 ] == matlab [ 0 ] [ 0 ] : raise SkipHeartbeat ( 'Skipping because not enough language accuracy.' ) def type_sort ( t ) : # sort by: # - analyse score # - is primary filename pattern? # - priority # - last resort: class name return ( t [ 0 ] , primary [ t [ 2 ] ] , t [ 1 ] , t [ 2 ] . __name__ ) result . sort ( key = type_sort ) return result [ - 1 ] [ 2 ] ( * * options )
Overwrite pygments . lexers . guess_lexer_for_filename to customize the priority of different lexers based on popularity of languages .
446
30
240,412
def customize_lexer_priority ( file_name , accuracy , lexer ) : priority = lexer . priority lexer_name = lexer . name . lower ( ) . replace ( 'sharp' , '#' ) if lexer_name in LANGUAGES : priority = LANGUAGES [ lexer_name ] elif lexer_name == 'matlab' : available_extensions = extensions_in_same_folder ( file_name ) if '.mat' in available_extensions : accuracy += 0.01 if '.h' not in available_extensions : accuracy += 0.01 elif lexer_name == 'objective-c' : available_extensions = extensions_in_same_folder ( file_name ) if '.mat' in available_extensions : accuracy -= 0.01 else : accuracy += 0.01 if '.h' in available_extensions : accuracy += 0.01 return ( accuracy , priority , lexer )
Customize lexer priority
208
5
240,413
def extensions_in_same_folder ( file_name ) : directory = os . path . dirname ( file_name ) files = os . listdir ( directory ) extensions = list ( zip ( * map ( os . path . splitext , files ) ) ) [ 1 ] extensions = set ( [ ext . lower ( ) for ext in extensions ] ) return extensions
Returns a list of file extensions from the same folder as file_name .
78
15
240,414
def analyse_text ( text ) : if re . search ( r'/\*\**\s*rexx' , text , re . IGNORECASE ) : # Header matches MVS Rexx requirements, this is certainly a Rexx # script. return 1.0 elif text . startswith ( '/*' ) : # Header matches general Rexx requirements; the source code might # still be any language using C comments such as C++, C# or Java. lowerText = text . lower ( ) result = sum ( weight for ( pattern , weight ) in RexxLexer . PATTERNS_AND_WEIGHTS if pattern . search ( lowerText ) ) + 0.01 return min ( result , 1.0 )
Check for inital comment and patterns that distinguish Rexx from other C - like languages .
156
18
240,415
def analyse_text ( text ) : result = 0.0 lines = text . split ( '\n' ) hasEndProc = False hasHeaderComment = False hasFile = False hasJob = False hasProc = False hasParm = False hasReport = False def isCommentLine ( line ) : return EasytrieveLexer . _COMMENT_LINE_REGEX . match ( lines [ 0 ] ) is not None def isEmptyLine ( line ) : return not bool ( line . strip ( ) ) # Remove possible empty lines and header comments. while lines and ( isEmptyLine ( lines [ 0 ] ) or isCommentLine ( lines [ 0 ] ) ) : if not isEmptyLine ( lines [ 0 ] ) : hasHeaderComment = True del lines [ 0 ] if EasytrieveLexer . _MACRO_HEADER_REGEX . match ( lines [ 0 ] ) : # Looks like an Easytrieve macro. result = 0.4 if hasHeaderComment : result += 0.4 else : # Scan the source for lines starting with indicators. for line in lines : words = line . split ( ) if ( len ( words ) >= 2 ) : firstWord = words [ 0 ] if not hasReport : if not hasJob : if not hasFile : if not hasParm : if firstWord == 'PARM' : hasParm = True if firstWord == 'FILE' : hasFile = True if firstWord == 'JOB' : hasJob = True elif firstWord == 'PROC' : hasProc = True elif firstWord == 'END-PROC' : hasEndProc = True elif firstWord == 'REPORT' : hasReport = True # Weight the findings. if hasJob and ( hasProc == hasEndProc ) : if hasHeaderComment : result += 0.1 if hasParm : if hasProc : # Found PARM, JOB and PROC/END-PROC: # pretty sure this is Easytrieve. result += 0.8 else : # Found PARAM and JOB: probably this is Easytrieve result += 0.5 else : # Found JOB and possibly other keywords: might be Easytrieve result += 0.11 if hasParm : # Note: PARAM is not a proper English word, so this is # regarded a much better indicator for Easytrieve than # the other words. result += 0.2 if hasFile : result += 0.01 if hasReport : result += 0.01 assert 0.0 <= result <= 1.0 return result
Perform a structural analysis for basic Easytrieve constructs .
544
12
240,416
def analyse_text ( text ) : result = 0.0 lines = text . split ( '\n' ) if len ( lines ) > 0 : if JclLexer . _JOB_HEADER_PATTERN . match ( lines [ 0 ] ) : result = 1.0 assert 0.0 <= result <= 1.0 return result
Recognize JCL job by header .
74
9
240,417
def _objdump_lexer_tokens ( asm_lexer ) : hex_re = r'[0-9A-Za-z]' return { 'root' : [ # File name & format: ( '(.*?)(:)( +file format )(.*?)$' , bygroups ( Name . Label , Punctuation , Text , String ) ) , # Section header ( '(Disassembly of section )(.*?)(:)$' , bygroups ( Text , Name . Label , Punctuation ) ) , # Function labels # (With offset) ( '(' + hex_re + '+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$' , bygroups ( Number . Hex , Text , Punctuation , Name . Function , Punctuation , Number . Hex , Punctuation ) ) , # (Without offset) ( '(' + hex_re + '+)( )(<)(.*?)(>:)$' , bygroups ( Number . Hex , Text , Punctuation , Name . Function , Punctuation ) ) , # Code line with disassembled instructions ( '( *)(' + hex_re + r'+:)(\t)((?:' + hex_re + hex_re + ' )+)( *\t)([a-zA-Z].*?)$' , bygroups ( Text , Name . Label , Text , Number . Hex , Text , using ( asm_lexer ) ) ) , # Code line with ascii ( '( *)(' + hex_re + r'+:)(\t)((?:' + hex_re + hex_re + ' )+)( *)(.*?)$' , bygroups ( Text , Name . Label , Text , Number . Hex , Text , String ) ) , # Continued code line, only raw opcodes without disassembled # instruction ( '( *)(' + hex_re + r'+:)(\t)((?:' + hex_re + hex_re + ' )+)$' , bygroups ( Text , Name . Label , Text , Number . Hex ) ) , # Skipped a few bytes ( r'\t\.\.\.$' , Text ) , # Relocation line # (With offset) ( r'(\t\t\t)(' + hex_re + r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x' + hex_re + '+)$' , bygroups ( Text , Name . Label , Text , Name . Property , Text , Name . Constant , Punctuation , Number . Hex ) ) , # (Without offset) ( r'(\t\t\t)(' + hex_re + r'+:)( )([^\t]+)(\t)(.*?)$' , bygroups ( Text , Name . Label , Text , Name . Property , Text , Name . Constant ) ) , ( r'[^\n]+\n' , Other ) ] }
Common objdump lexer tokens to wrap an ASM lexer .
663
14
240,418
def get_key ( self , fileobj ) : mapping = self . get_map ( ) if mapping is None : raise RuntimeError ( "Selector is closed" ) try : return mapping [ fileobj ] except KeyError : raise KeyError ( "{0!r} is not registered" . format ( fileobj ) )
Return the key associated with a registered file object .
68
10
240,419
def get_filetype_from_buffer ( buf , max_lines = 5 ) : lines = buf . splitlines ( ) for l in lines [ - 1 : - max_lines - 1 : - 1 ] : ret = get_filetype_from_line ( l ) if ret : return ret for i in range ( max_lines , - 1 , - 1 ) : if i < len ( lines ) : ret = get_filetype_from_line ( lines [ i ] ) if ret : return ret return None
Scan the buffer for modelines and return filetype if one is found .
111
15
240,420
def get_font ( self , bold , oblique ) : if bold and oblique : return self . fonts [ 'BOLDITALIC' ] elif bold : return self . fonts [ 'BOLD' ] elif oblique : return self . fonts [ 'ITALIC' ] else : return self . fonts [ 'NORMAL' ]
Get the font based on bold and italic flags .
72
11
240,421
def _get_char_x ( self , charno ) : return charno * self . fontw + self . image_pad + self . line_number_width
Get the X coordinate of a character position .
36
9
240,422
def _get_text_pos ( self , charno , lineno ) : return self . _get_char_x ( charno ) , self . _get_line_y ( lineno )
Get the actual position for a character and line position .
43
11
240,423
def _get_image_size ( self , maxcharno , maxlineno ) : return ( self . _get_char_x ( maxcharno ) + self . image_pad , self . _get_line_y ( maxlineno + 0 ) + self . image_pad )
Get the required image size .
65
6
240,424
def _draw_linenumber ( self , posno , lineno ) : self . _draw_text ( self . _get_linenumber_pos ( posno ) , str ( lineno ) . rjust ( self . line_number_chars ) , font = self . fonts . get_font ( self . line_number_bold , self . line_number_italic ) , fill = self . line_number_fg , )
Remember a line number drawable to paint later .
97
10
240,425
def _draw_text ( self , pos , text , font , * * kw ) : self . drawables . append ( ( pos , text , font , kw ) )
Remember a single drawable tuple to paint later .
38
10
240,426
def _create_drawables ( self , tokensource ) : lineno = charno = maxcharno = 0 for ttype , value in tokensource : while ttype not in self . styles : ttype = ttype . parent style = self . styles [ ttype ] # TODO: make sure tab expansion happens earlier in the chain. It # really ought to be done on the input, as to do it right here is # quite complex. value = value . expandtabs ( 4 ) lines = value . splitlines ( True ) # print lines for i , line in enumerate ( lines ) : temp = line . rstrip ( '\n' ) if temp : self . _draw_text ( self . _get_text_pos ( charno , lineno ) , temp , font = self . _get_style_font ( style ) , fill = self . _get_text_color ( style ) ) charno += len ( temp ) maxcharno = max ( maxcharno , charno ) if line . endswith ( '\n' ) : # add a line for each extra line in the value charno = 0 lineno += 1 self . maxcharno = maxcharno self . maxlineno = lineno
Create drawables for the token content .
269
8
240,427
def _draw_line_numbers ( self ) : if not self . line_numbers : return for p in xrange ( self . maxlineno ) : n = p + self . line_number_start if ( n % self . line_number_step ) == 0 : self . _draw_linenumber ( p , n )
Create drawables for the line numbers .
74
8
240,428
def _paint_line_number_bg ( self , im ) : if not self . line_numbers : return if self . line_number_fg is None : return draw = ImageDraw . Draw ( im ) recth = im . size [ - 1 ] rectw = self . image_pad + self . line_number_width - self . line_number_pad draw . rectangle ( [ ( 0 , 0 ) , ( rectw , recth ) ] , fill = self . line_number_bg ) draw . line ( [ ( rectw , 0 ) , ( rectw , recth ) ] , fill = self . line_number_fg ) del draw
Paint the line number background on the image .
144
10
240,429
def update ( self , attrs ) : data = self . dict ( ) data . update ( attrs ) heartbeat = Heartbeat ( data , self . args , self . configs , _clone = True ) return heartbeat
Return a copy of the current Heartbeat with updated attributes .
46
12
240,430
def sanitize ( self ) : if not self . args . hide_file_names : return self if self . entity is None : return self if self . type != 'file' : return self if self . should_obfuscate_filename ( ) : self . _sanitize_metadata ( ) extension = u ( os . path . splitext ( self . entity ) [ 1 ] ) self . entity = u ( 'HIDDEN{0}' ) . format ( extension ) elif self . should_obfuscate_project ( ) : self . _sanitize_metadata ( ) return self
Removes sensitive data including file names and dependencies .
131
10
240,431
def should_obfuscate_filename ( self ) : for pattern in self . args . hide_file_names : try : compiled = re . compile ( pattern , re . IGNORECASE ) if compiled . search ( self . entity ) : return True except re . error as ex : log . warning ( u ( 'Regex error ({msg}) for hide_file_names pattern: {pattern}' ) . format ( msg = u ( ex ) , pattern = u ( pattern ) , ) ) return False
Returns True if hide_file_names is true or the entity file path matches one in the list of obfuscated file paths .
108
26
240,432
def _format_local_file ( self ) : if self . type != 'file' : return if not self . entity : return if not is_win : return if self . _file_exists ( ) : return self . args . local_file = self . _to_unc_path ( self . entity )
When args . local_file empty on Windows tries to map args . entity to a unc path .
68
20
240,433
def create_negotiate_message ( self , domain_name = None , workstation = None ) : self . negotiate_message = NegotiateMessage ( self . negotiate_flags , domain_name , workstation ) return base64 . b64encode ( self . negotiate_message . get_data ( ) )
Create an NTLM NEGOTIATE_MESSAGE
68
15
240,434
def parse_challenge_message ( self , msg2 ) : msg2 = base64 . b64decode ( msg2 ) self . challenge_message = ChallengeMessage ( msg2 )
Parse the NTLM CHALLENGE_MESSAGE from the server and add it to the Ntlm context fields
40
28
240,435
def create_authenticate_message ( self , user_name , password , domain_name = None , workstation = None , server_certificate_hash = None ) : self . authenticate_message = AuthenticateMessage ( user_name , password , domain_name , workstation , self . challenge_message , self . ntlm_compatibility , server_certificate_hash ) self . authenticate_message . add_mic ( self . negotiate_message , self . challenge_message ) # Setups up the session_security context used to sign and seal messages if wanted if self . negotiate_flags & NegotiateFlags . NTLMSSP_NEGOTIATE_SEAL or self . negotiate_flags & NegotiateFlags . NTLMSSP_NEGOTIATE_SIGN : self . session_security = SessionSecurity ( struct . unpack ( "<I" , self . authenticate_message . negotiate_flags ) [ 0 ] , self . authenticate_message . exported_session_key ) return base64 . b64encode ( self . authenticate_message . get_data ( ) )
Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context and the previous messages sent and received
241
28
240,436
def lex ( code , lexer ) : try : return lexer . get_tokens ( code ) except TypeError as err : if ( isinstance ( err . args [ 0 ] , str ) and ( 'unbound method get_tokens' in err . args [ 0 ] or 'missing 1 required positional argument' in err . args [ 0 ] ) ) : raise TypeError ( 'lex() argument must be a lexer instance, ' 'not a class' ) raise
Lex code with lexer and return an iterable of tokens .
103
13
240,437
def format ( tokens , formatter , outfile = None ) : # pylint: disable=redefined-builtin try : if not outfile : realoutfile = getattr ( formatter , 'encoding' , None ) and BytesIO ( ) or StringIO ( ) formatter . format ( tokens , realoutfile ) return realoutfile . getvalue ( ) else : formatter . format ( tokens , outfile ) except TypeError as err : if ( isinstance ( err . args [ 0 ] , str ) and ( 'unbound method format' in err . args [ 0 ] or 'missing 1 required positional argument' in err . args [ 0 ] ) ) : raise TypeError ( 'format() argument must be a formatter instance, ' 'not a class' ) raise
Format a tokenlist tokens with the formatter formatter .
169
12
240,438
def highlight ( code , lexer , formatter , outfile = None ) : return format ( lex ( code , lexer ) , formatter , outfile )
Lex code with lexer and format it with the formatter formatter .
34
15
240,439
def getConfigFile ( ) : fileName = '.wakatime.cfg' home = os . environ . get ( 'WAKATIME_HOME' ) if home : return os . path . join ( os . path . expanduser ( home ) , fileName ) return os . path . join ( os . path . expanduser ( '~' ) , fileName )
Returns the config file location .
81
6
240,440
def find_filter_class ( filtername ) : if filtername in FILTERS : return FILTERS [ filtername ] for name , cls in find_plugin_filters ( ) : if name == filtername : return cls return None
Lookup a filter by name . Return None if not found .
57
13
240,441
def get_filter_by_name ( filtername , * * options ) : cls = find_filter_class ( filtername ) if cls : return cls ( * * options ) else : raise ClassNotFound ( 'filter %r not found' % filtername )
Return an instantiated filter .
62
6
240,442
def get_tokens_unprocessed ( self , text ) : tokens = self . _block_re . split ( text ) tokens . reverse ( ) state = idx = 0 try : while True : # text if state == 0 : val = tokens . pop ( ) yield idx , Other , val idx += len ( val ) state = 1 # block starts elif state == 1 : tag = tokens . pop ( ) # literals if tag in ( '<%%' , '%%>' ) : yield idx , Other , tag idx += 3 state = 0 # comment elif tag == '<%#' : yield idx , Comment . Preproc , tag val = tokens . pop ( ) yield idx + 3 , Comment , val idx += 3 + len ( val ) state = 2 # blocks or output elif tag in ( '<%' , '<%=' , '<%-' ) : yield idx , Comment . Preproc , tag idx += len ( tag ) data = tokens . pop ( ) r_idx = 0 for r_idx , r_token , r_value in self . ruby_lexer . get_tokens_unprocessed ( data ) : yield r_idx + idx , r_token , r_value idx += len ( data ) state = 2 elif tag in ( '%>' , '-%>' ) : yield idx , Error , tag idx += len ( tag ) state = 0 # % raw ruby statements else : yield idx , Comment . Preproc , tag [ 0 ] r_idx = 0 for r_idx , r_token , r_value in self . ruby_lexer . get_tokens_unprocessed ( tag [ 1 : ] ) : yield idx + 1 + r_idx , r_token , r_value idx += len ( tag ) state = 0 # block ends elif state == 2 : tag = tokens . pop ( ) if tag not in ( '%>' , '-%>' ) : yield idx , Other , tag else : yield idx , Comment . Preproc , tag idx += len ( tag ) state = 0 except IndexError : return
Since ERB doesn t allow <% and other tags inside of ruby blocks we have to use a split approach here that fails for that too .
482
29
240,443
def format_file_path ( filepath ) : try : is_windows_network_mount = WINDOWS_NETWORK_MOUNT_PATTERN . match ( filepath ) filepath = os . path . realpath ( os . path . abspath ( filepath ) ) filepath = re . sub ( BACKSLASH_REPLACE_PATTERN , '/' , filepath ) is_windows_drive = WINDOWS_DRIVE_PATTERN . match ( filepath ) if is_windows_drive : filepath = filepath . capitalize ( ) if is_windows_network_mount : # Add back a / to the front, since the previous modifications # will have replaced any double slashes with single filepath = '/' + filepath except : pass return filepath
Formats a path as absolute and with the correct platform separator .
170
14
240,444
def close ( self ) : # Disable access to the pool old_pool , self . pool = self . pool , None try : while True : conn = old_pool . get ( block = False ) if conn : conn . close ( ) except queue . Empty : pass
Close all pooled connections and disable the pool .
56
9
240,445
def is_same_host ( self , url ) : if url . startswith ( '/' ) : return True # TODO: Add optional support for socket.gethostbyname checking. scheme , host , port = get_host ( url ) host = _ipv6_host ( host ) . lower ( ) # Use explicit default port for comparison when none is given if self . port and not port : port = port_by_scheme . get ( scheme ) elif not self . port and port == port_by_scheme . get ( scheme ) : port = None return ( scheme , host , port ) == ( self . scheme , self . host , self . port )
Check if the given url is a member of the same host as this connection pool .
146
17
240,446
def filename ( self , value ) : warnings . warn ( "The 'filename' attribute will be removed in future versions. " "Use 'source' instead." , DeprecationWarning , stacklevel = 2 ) self . source = value
Deprecated user source .
49
5
240,447
def options ( self , section ) : try : opts = self . _sections [ section ] . copy ( ) except KeyError : raise from_none ( NoSectionError ( section ) ) opts . update ( self . _defaults ) return list ( opts . keys ( ) )
Return a list of option names for the given section name .
61
12
240,448
def read_string ( self , string , source = '<string>' ) : sfile = io . StringIO ( string ) self . read_file ( sfile , source )
Read configuration from a given string .
39
7
240,449
def read_dict ( self , dictionary , source = '<dict>' ) : elements_added = set ( ) for section , keys in dictionary . items ( ) : section = str ( section ) try : self . add_section ( section ) except ( DuplicateSectionError , ValueError ) : if self . _strict and section in elements_added : raise elements_added . add ( section ) for key , value in keys . items ( ) : key = self . optionxform ( str ( key ) ) if value is not None : value = str ( value ) if self . _strict and ( section , key ) in elements_added : raise DuplicateOptionError ( section , key , source ) elements_added . add ( ( section , key ) ) self . set ( section , key , value )
Read configuration from a dictionary .
172
6
240,450
def readfp ( self , fp , filename = None ) : warnings . warn ( "This method will be removed in future versions. " "Use 'parser.read_file()' instead." , DeprecationWarning , stacklevel = 2 ) self . read_file ( fp , source = filename )
Deprecated use read_file instead .
65
8
240,451
def has_option ( self , section , option ) : if not section or section == self . default_section : option = self . optionxform ( option ) return option in self . _defaults elif section not in self . _sections : return False else : option = self . optionxform ( option ) return ( option in self . _sections [ section ] or option in self . _defaults )
Check for the existence of a given option in a given section . If the specified section is None or an empty string DEFAULT is assumed . If the specified section does not exist returns False .
86
38
240,452
def _write_section ( self , fp , section_name , section_items , delimiter ) : fp . write ( "[{0}]\n" . format ( section_name ) ) for key , value in section_items : value = self . _interpolation . before_write ( self , section_name , key , value ) if value is not None or not self . _allow_no_value : value = delimiter + str ( value ) . replace ( '\n' , '\n\t' ) else : value = "" fp . write ( "{0}{1}\n" . format ( key , value ) ) fp . write ( "\n" )
Write a single section to the specified fp .
150
10
240,453
def _unify_values ( self , section , vars ) : sectiondict = { } try : sectiondict = self . _sections [ section ] except KeyError : if section != self . default_section : raise NoSectionError ( section ) # Update with the entry specific variables vardict = { } if vars : for key , value in vars . items ( ) : if value is not None : value = str ( value ) vardict [ self . optionxform ( key ) ] = value return _ChainMap ( vardict , sectiondict , self . _defaults )
Create a sequence of lookups with vars taking priority over the section which takes priority over the DEFAULTSECT .
126
24
240,454
def _convert_to_boolean ( self , value ) : if value . lower ( ) not in self . BOOLEAN_STATES : raise ValueError ( 'Not a boolean: %s' % value ) return self . BOOLEAN_STATES [ value . lower ( ) ]
Return a boolean value translating from other types if necessary .
65
11
240,455
def _validate_value_types ( self , * * kwargs ) : # keyword-only arguments section = kwargs . get ( 'section' , "" ) option = kwargs . get ( 'option' , "" ) value = kwargs . get ( 'value' , "" ) if PY2 and bytes in ( type ( section ) , type ( option ) , type ( value ) ) : # we allow for a little unholy magic for Python 2 so that # people not using unicode_literals can still use the library # conveniently warnings . warn ( "You passed a bytestring. Implicitly decoding as UTF-8 string." " This will not work on Python 3. Please switch to using" " Unicode strings across the board." , DeprecationWarning , stacklevel = 2 , ) if isinstance ( section , bytes ) : section = section . decode ( 'utf8' ) if isinstance ( option , bytes ) : option = option . decode ( 'utf8' ) if isinstance ( value , bytes ) : value = value . decode ( 'utf8' ) if not isinstance ( section , str ) : raise TypeError ( "section names must be strings" ) if not isinstance ( option , str ) : raise TypeError ( "option keys must be strings" ) if not self . _allow_no_value or value : if not isinstance ( value , str ) : raise TypeError ( "option values must be strings" ) return section , option , value
Raises a TypeError for non - string values .
319
11
240,456
def set ( self , section , option , value = None ) : _ , option , value = self . _validate_value_types ( option = option , value = value ) super ( ConfigParser , self ) . set ( section , option , value )
Set an option . Extends RawConfigParser . set by validating type and interpolation syntax on the value .
54
23
240,457
def add_section ( self , section ) : section , _ , _ = self . _validate_value_types ( section = section ) super ( ConfigParser , self ) . add_section ( section )
Create a new section in the configuration . Extends RawConfigParser . add_section by validating if the section name is a string .
44
28
240,458
def get ( self , option , fallback = None , * * kwargs ) : # keyword-only arguments kwargs . setdefault ( 'raw' , False ) kwargs . setdefault ( 'vars' , None ) _impl = kwargs . pop ( '_impl' , None ) # If `_impl` is provided, it should be a getter method on the parser # object that provides the desired type conversion. if not _impl : _impl = self . _parser . get return _impl ( self . _name , option , fallback = fallback , * * kwargs )
Get an option value .
132
5
240,459
def analyse_text ( text ) : if re . match ( r'^\s*REBOL\s*\[' , text , re . IGNORECASE ) : # The code starts with REBOL header return 1.0 elif re . search ( r'\s*REBOL\s*[' , text , re . IGNORECASE ) : # The code contains REBOL header but also some text before it return 0.5
Check if code contains REBOL header and so it probably not R code
98
15
240,460
def get_sign_key ( exported_session_key , magic_constant ) : sign_key = hashlib . md5 ( exported_session_key + magic_constant ) . digest ( ) return sign_key
3 . 4 . 5 . 2 SIGNKEY
48
9
240,461
def _wait_for_io_events ( socks , events , timeout = None ) : if not HAS_SELECT : raise ValueError ( 'Platform does not have a selector' ) if not isinstance ( socks , list ) : # Probably just a single socket. if hasattr ( socks , "fileno" ) : socks = [ socks ] # Otherwise it might be a non-list iterable. else : socks = list ( socks ) with DefaultSelector ( ) as selector : for sock in socks : selector . register ( sock , events ) return [ key [ 0 ] . fileobj for key in selector . select ( timeout ) if key [ 1 ] & events ]
Waits for IO events to be available from a list of sockets or optionally a single socket if passed in . Returns a list of sockets that can be interacted with immediately .
140
34
240,462
def make_analysator ( f ) : def text_analyse ( text ) : try : rv = f ( text ) except Exception : return 0.0 if not rv : return 0.0 try : return min ( 1.0 , max ( 0.0 , float ( rv ) ) ) except ( ValueError , TypeError ) : return 0.0 text_analyse . __doc__ = f . __doc__ return staticmethod ( text_analyse )
Return a static text analyser function that returns float values .
102
12
240,463
def shebang_matches ( text , regex ) : index = text . find ( '\n' ) if index >= 0 : first_line = text [ : index ] . lower ( ) else : first_line = text . lower ( ) if first_line . startswith ( '#!' ) : try : found = [ x for x in split_path_re . split ( first_line [ 2 : ] . strip ( ) ) if x and not x . startswith ( '-' ) ] [ - 1 ] except IndexError : return False regex = re . compile ( r'^%s(\.(exe|cmd|bat|bin))?$' % regex , re . IGNORECASE ) if regex . search ( found ) is not None : return True return False
r Check if the given regular expression matches the last part of the shebang if one exists .
168
19
240,464
def looks_like_xml ( text ) : if xml_decl_re . match ( text ) : return True key = hash ( text ) try : return _looks_like_xml_cache [ key ] except KeyError : m = doctype_lookup_re . match ( text ) if m is not None : return True rv = tag_re . search ( text [ : 1000 ] ) is not None _looks_like_xml_cache [ key ] = rv return rv
Check if a doctype exists or if we have some tags .
107
13
240,465
def unirange ( a , b ) : if b < a : raise ValueError ( "Bad character range" ) if a < 0x10000 or b < 0x10000 : raise ValueError ( "unirange is only defined for non-BMP ranges" ) if sys . maxunicode > 0xffff : # wide build return u'[%s-%s]' % ( unichr ( a ) , unichr ( b ) ) else : # narrow build stores surrogates, and the 're' module handles them # (incorrectly) as characters. Since there is still ordering among # these characters, expand the range to one that it understands. Some # background in http://bugs.python.org/issue3665 and # http://bugs.python.org/issue12749 # # Additionally, the lower constants are using unichr rather than # literals because jython [which uses the wide path] can't load this # file if they are literals. ah , al = _surrogatepair ( a ) bh , bl = _surrogatepair ( b ) if ah == bh : return u'(?:%s[%s-%s])' % ( unichr ( ah ) , unichr ( al ) , unichr ( bl ) ) else : buf = [ ] buf . append ( u'%s[%s-%s]' % ( unichr ( ah ) , unichr ( al ) , ah == bh and unichr ( bl ) or unichr ( 0xdfff ) ) ) if ah - bh > 1 : buf . append ( u'[%s-%s][%s-%s]' % unichr ( ah + 1 ) , unichr ( bh - 1 ) , unichr ( 0xdc00 ) , unichr ( 0xdfff ) ) if ah != bh : buf . append ( u'%s[%s-%s]' % ( unichr ( bh ) , unichr ( 0xdc00 ) , unichr ( bl ) ) ) return u'(?:' + u'|' . join ( buf ) + u')'
Returns a regular expression string to match the given non - BMP range .
472
15
240,466
def format_lines ( var_name , seq , raw = False , indent_level = 0 ) : lines = [ ] base_indent = ' ' * indent_level * 4 inner_indent = ' ' * ( indent_level + 1 ) * 4 lines . append ( base_indent + var_name + ' = (' ) if raw : # These should be preformatted reprs of, say, tuples. for i in seq : lines . append ( inner_indent + i + ',' ) else : for i in seq : # Force use of single quotes r = repr ( i + '"' ) lines . append ( inner_indent + r [ : - 2 ] + r [ - 1 ] + ',' ) lines . append ( base_indent + ')' ) return '\n' . join ( lines )
Formats a sequence of strings for output .
180
9
240,467
def duplicates_removed ( it , already_seen = ( ) ) : lst = [ ] seen = set ( ) for i in it : if i in seen or i in already_seen : continue lst . append ( i ) seen . add ( i ) return lst
Returns a list with duplicates removed from the iterable it .
60
13
240,468
def _lex_fortran ( self , match , ctx = None ) : lexer = FortranLexer ( ) text = match . group ( 0 ) + "\n" for index , token , value in lexer . get_tokens_unprocessed ( text ) : value = value . replace ( '\n' , '' ) if value != '' : yield index , token , value
Lex a line just as free form fortran without line break .
85
14
240,469
def get_project_info ( configs , heartbeat , data ) : project_name , branch_name = heartbeat . project , heartbeat . branch if heartbeat . type != 'file' : project_name = project_name or heartbeat . args . project or heartbeat . args . alternate_project return project_name , branch_name if project_name is None or branch_name is None : for plugin_cls in CONFIG_PLUGINS : plugin_name = plugin_cls . __name__ . lower ( ) plugin_configs = get_configs_for_plugin ( plugin_name , configs ) project = plugin_cls ( heartbeat . entity , configs = plugin_configs ) if project . process ( ) : project_name = project_name or project . name ( ) branch_name = project . branch ( ) break if project_name is None : project_name = data . get ( 'project' ) or heartbeat . args . project hide_project = heartbeat . should_obfuscate_project ( ) if hide_project and project_name is not None : return project_name , None if project_name is None or branch_name is None : for plugin_cls in REV_CONTROL_PLUGINS : plugin_name = plugin_cls . __name__ . lower ( ) plugin_configs = get_configs_for_plugin ( plugin_name , configs ) project = plugin_cls ( heartbeat . entity , configs = plugin_configs ) if project . process ( ) : project_name = project_name or project . name ( ) branch_name = branch_name or project . branch ( ) if hide_project : branch_name = None project_name = generate_project_name ( ) project_file = os . path . join ( project . folder ( ) , '.wakatime-project' ) try : with open ( project_file , 'w' ) as fh : fh . write ( project_name ) except IOError : project_name = None break if project_name is None and not hide_project : project_name = data . get ( 'alternate_project' ) or heartbeat . args . alternate_project return project_name , branch_name
Find the current project and branch .
481
7
240,470
def save ( self , session ) : if not HAS_SQL : # pragma: nocover return try : conn , c = self . connect ( ) c . execute ( 'DELETE FROM {0}' . format ( self . table_name ) ) values = { 'value' : sqlite3 . Binary ( pickle . dumps ( session , protocol = 2 ) ) , } c . execute ( 'INSERT INTO {0} VALUES (:value)' . format ( self . table_name ) , values ) conn . commit ( ) conn . close ( ) except : # pragma: nocover log . traceback ( logging . DEBUG )
Saves a requests . Session object for the next heartbeat process .
140
13
240,471
def get ( self ) : if not HAS_SQL : # pragma: nocover return requests . session ( ) try : conn , c = self . connect ( ) except : log . traceback ( logging . DEBUG ) return requests . session ( ) session = None try : c . execute ( 'BEGIN IMMEDIATE' ) c . execute ( 'SELECT value FROM {0} LIMIT 1' . format ( self . table_name ) ) row = c . fetchone ( ) if row is not None : session = pickle . loads ( row [ 0 ] ) except : # pragma: nocover log . traceback ( logging . DEBUG ) try : conn . close ( ) except : # pragma: nocover log . traceback ( logging . DEBUG ) return session if session is not None else requests . session ( )
Returns a requests . Session object .
178
7
240,472
def delete ( self ) : if not HAS_SQL : # pragma: nocover return try : conn , c = self . connect ( ) c . execute ( 'DELETE FROM {0}' . format ( self . table_name ) ) conn . commit ( ) conn . close ( ) except : log . traceback ( logging . DEBUG )
Clears all cached Session objects .
75
7
240,473
def rebuild_method ( self , prepared_request , response ) : method = prepared_request . method # http://tools.ietf.org/html/rfc7231#section-6.4.4 if response . status_code == codes . see_other and method != 'HEAD' : method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. if response . status_code == codes . found and method != 'HEAD' : method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. if response . status_code == codes . moved and method == 'POST' : method = 'GET' prepared_request . method = method
When being redirected we may want to change the method of the request based on certain specs or browser behavior .
170
21
240,474
def apply_filters ( stream , filters , lexer = None ) : def _apply ( filter_ , stream ) : for token in filter_ . filter ( lexer , stream ) : yield token for filter_ in filters : stream = _apply ( filter_ , stream ) return stream
Use this method to apply an iterable of filters to a stream . If lexer is given it s forwarded to the filter otherwise the filter receives None .
60
31
240,475
def reset_indent ( token_class ) : def callback ( lexer , match , context ) : text = match . group ( ) context . indent_stack = [ ] context . indent = - 1 context . next_indent = 0 context . block_scalar_indent = None yield match . start ( ) , token_class , text context . pos = match . end ( ) return callback
Reset the indentation levels .
86
7
240,476
def save_indent ( token_class , start = False ) : def callback ( lexer , match , context ) : text = match . group ( ) extra = '' if start : context . next_indent = len ( text ) if context . next_indent < context . indent : while context . next_indent < context . indent : context . indent = context . indent_stack . pop ( ) if context . next_indent > context . indent : extra = text [ context . indent : ] text = text [ : context . indent ] else : context . next_indent += len ( text ) if text : yield match . start ( ) , token_class , text if extra : yield match . start ( ) + len ( text ) , token_class . Error , extra context . pos = match . end ( ) return callback
Save a possible indentation level .
178
7
240,477
def set_block_scalar_indent ( token_class ) : def callback ( lexer , match , context ) : text = match . group ( ) context . block_scalar_indent = None if not text : return increment = match . group ( 1 ) if increment : current_indent = max ( context . indent , 0 ) increment = int ( increment ) context . block_scalar_indent = current_indent + increment if text : yield match . start ( ) , token_class , text context . pos = match . end ( ) return callback
Set an explicit indentation level for a block scalar .
125
12
240,478
def parse_block_scalar_indent ( token_class ) : def callback ( lexer , match , context ) : text = match . group ( ) if context . block_scalar_indent is None : if len ( text ) <= max ( context . indent , 0 ) : context . stack . pop ( ) context . stack . pop ( ) return context . block_scalar_indent = len ( text ) else : if len ( text ) < context . block_scalar_indent : context . stack . pop ( ) context . stack . pop ( ) return if text : yield match . start ( ) , token_class , text context . pos = match . end ( ) return callback
Process indentation spaces in a block scalar .
154
10
240,479
def content ( self ) : if self . _content is False : # Read the contents. if self . _content_consumed : raise RuntimeError ( 'The content for this response was already consumed' ) if self . status_code == 0 or self . raw is None : self . _content = None else : self . _content = bytes ( ) . join ( self . iter_content ( CONTENT_CHUNK_SIZE ) ) or bytes ( ) self . _content_consumed = True # don't need to release the connection; that's been handled by urllib3 # since we exhausted the data. return self . _content
Content of the response in bytes .
136
7
240,480
def py_scanstring ( s , end , encoding = None , strict = True , _b = BACKSLASH , _m = STRINGCHUNK . match , _join = u ( '' ) . join , _PY3 = PY3 , _maxunicode = sys . maxunicode ) : if encoding is None : encoding = DEFAULT_ENCODING chunks = [ ] _append = chunks . append begin = end - 1 while 1 : chunk = _m ( s , end ) if chunk is None : raise JSONDecodeError ( "Unterminated string starting at" , s , begin ) end = chunk . end ( ) content , terminator = chunk . groups ( ) # Content is contains zero or more unescaped string characters if content : if not _PY3 and not isinstance ( content , text_type ) : content = text_type ( content , encoding ) _append ( content ) # Terminator is the end of string, a literal control character, # or a backslash denoting that an escape sequence follows if terminator == '"' : break elif terminator != '\\' : if strict : msg = "Invalid control character %r at" raise JSONDecodeError ( msg , s , end ) else : _append ( terminator ) continue try : esc = s [ end ] except IndexError : raise JSONDecodeError ( "Unterminated string starting at" , s , begin ) # If not a unicode escape sequence, must be in the lookup table if esc != 'u' : try : char = _b [ esc ] except KeyError : msg = "Invalid \\X escape sequence %r" raise JSONDecodeError ( msg , s , end ) end += 1 else : # Unicode escape sequence msg = "Invalid \\uXXXX escape sequence" esc = s [ end + 1 : end + 5 ] escX = esc [ 1 : 2 ] if len ( esc ) != 4 or escX == 'x' or escX == 'X' : raise JSONDecodeError ( msg , s , end - 1 ) try : uni = int ( esc , 16 ) except ValueError : raise JSONDecodeError ( msg , s , end - 1 ) end += 5 # Check for surrogate pair on UCS-4 systems # Note that this will join high/low surrogate pairs # but will also pass unpaired surrogates through if ( _maxunicode > 65535 and uni & 0xfc00 == 0xd800 and s [ end : end + 2 ] == '\\u' ) : esc2 = s [ end + 2 : end + 6 ] escX = esc2 [ 1 : 2 ] if len ( esc2 ) == 4 and not ( escX == 'x' or escX == 'X' ) : try : uni2 = int ( esc2 , 16 ) except ValueError : raise JSONDecodeError ( msg , s , end ) if uni2 & 0xfc00 == 0xdc00 : uni = 0x10000 + ( ( ( uni - 0xd800 ) << 10 ) | ( uni2 - 0xdc00 ) ) end += 6 char = unichr ( uni ) # Append the unescaped character _append ( char ) return _join ( chunks ) , end
Scan the string s for a JSON string . End is the index of the character in s after the quote that started the JSON string . Unescapes all valid JSON string escape sequences and raises ValueError on attempt to decode an invalid string . If strict is False then literal control characters are allowed in the string .
696
62
240,481
def _get_css_class ( self , ttype ) : ttypeclass = _get_ttype_class ( ttype ) if ttypeclass : return self . classprefix + ttypeclass return ''
Return the css class of this token type prefixed with the classprefix option .
45
17
240,482
def _get_css_classes ( self , ttype ) : cls = self . _get_css_class ( ttype ) while ttype not in STANDARD_TYPES : ttype = ttype . parent cls = self . _get_css_class ( ttype ) + ' ' + cls return cls
Return the css classes of this token type prefixed with the classprefix option .
72
17
240,483
def get_style_defs ( self , arg = None ) : if arg is None : arg = ( 'cssclass' in self . options and '.' + self . cssclass or '' ) if isinstance ( arg , string_types ) : args = [ arg ] else : args = list ( arg ) def prefix ( cls ) : if cls : cls = '.' + cls tmp = [ ] for arg in args : tmp . append ( ( arg and arg + ' ' or '' ) + cls ) return ', ' . join ( tmp ) styles = [ ( level , ttype , cls , style ) for cls , ( style , ttype , level ) in iteritems ( self . class2style ) if cls and style ] styles . sort ( ) lines = [ '%s { %s } /* %s */' % ( prefix ( cls ) , style , repr ( ttype ) [ 6 : ] ) for ( level , ttype , cls , style ) in styles ] if arg and not self . nobackground and self . style . background_color is not None : text_style = '' if Text in self . ttype2class : text_style = ' ' + self . class2style [ self . ttype2class [ Text ] ] [ 0 ] lines . insert ( 0 , '%s { background: %s;%s }' % ( prefix ( '' ) , self . style . background_color , text_style ) ) if self . style . highlight_color is not None : lines . insert ( 0 , '%s.hll { background-color: %s }' % ( prefix ( '' ) , self . style . highlight_color ) ) return '\n' . join ( lines )
Return CSS style definitions for the classes produced by the current highlighting style . arg can be a string or list of selectors to insert before the token type classes .
377
32
240,484
def _format_lines ( self , tokensource ) : nocls = self . noclasses lsep = self . lineseparator # for <span style=""> lookup only getcls = self . ttype2class . get c2s = self . class2style escape_table = _escape_html_table tagsfile = self . tagsfile lspan = '' line = [ ] for ttype , value in tokensource : if nocls : cclass = getcls ( ttype ) while cclass is None : ttype = ttype . parent cclass = getcls ( ttype ) cspan = cclass and '<span style="%s">' % c2s [ cclass ] [ 0 ] or '' else : cls = self . _get_css_classes ( ttype ) cspan = cls and '<span class="%s">' % cls or '' parts = value . translate ( escape_table ) . split ( '\n' ) if tagsfile and ttype in Token . Name : filename , linenumber = self . _lookup_ctag ( value ) if linenumber : base , filename = os . path . split ( filename ) if base : base += '/' filename , extension = os . path . splitext ( filename ) url = self . tagurlformat % { 'path' : base , 'fname' : filename , 'fext' : extension } parts [ 0 ] = "<a href=\"%s#%s-%d\">%s" % ( url , self . lineanchors , linenumber , parts [ 0 ] ) parts [ - 1 ] = parts [ - 1 ] + "</a>" # for all but the last line for part in parts [ : - 1 ] : if line : if lspan != cspan : line . extend ( ( ( lspan and '</span>' ) , cspan , part , ( cspan and '</span>' ) , lsep ) ) else : # both are the same line . extend ( ( part , ( lspan and '</span>' ) , lsep ) ) yield 1 , '' . join ( line ) line = [ ] elif part : yield 1 , '' . join ( ( cspan , part , ( cspan and '</span>' ) , lsep ) ) else : yield 1 , lsep # for the last line if line and parts [ - 1 ] : if lspan != cspan : line . extend ( ( ( lspan and '</span>' ) , cspan , parts [ - 1 ] ) ) lspan = cspan else : line . append ( parts [ - 1 ] ) elif parts [ - 1 ] : line = [ cspan , parts [ - 1 ] ] lspan = cspan # else we neither have to open a new span nor set lspan if line : line . extend ( ( ( lspan and '</span>' ) , lsep ) ) yield 1 , '' . join ( line )
Just format the tokens without any wrapping tags . Yield individual lines .
648
14
240,485
def _highlight_lines ( self , tokensource ) : hls = self . hl_lines for i , ( t , value ) in enumerate ( tokensource ) : if t != 1 : yield t , value if i + 1 in hls : # i + 1 because Python indexes start at 0 if self . noclasses : style = '' if self . style . highlight_color is not None : style = ( ' style="background-color: %s"' % ( self . style . highlight_color , ) ) yield 1 , '<span%s>%s</span>' % ( style , value ) else : yield 1 , '<span class="hll">%s</span>' % value else : yield 1 , value
Highlighted the lines specified in the hl_lines option by post - processing the token stream coming from _format_lines .
160
27
240,486
def format_unencoded ( self , tokensource , outfile ) : source = self . _format_lines ( tokensource ) if self . hl_lines : source = self . _highlight_lines ( source ) if not self . nowrap : if self . linenos == 2 : source = self . _wrap_inlinelinenos ( source ) if self . lineanchors : source = self . _wrap_lineanchors ( source ) if self . linespans : source = self . _wrap_linespans ( source ) source = self . wrap ( source , outfile ) if self . linenos == 1 : source = self . _wrap_tablelinenos ( source ) if self . full : source = self . _wrap_full ( source , outfile ) for t , piece in source : outfile . write ( piece )
The formatting process uses several nested generators ; which of them are used is determined by the user s options .
182
21
240,487
def bygroups ( * args ) : def callback ( lexer , match , ctx = None ) : for i , action in enumerate ( args ) : if action is None : continue elif type ( action ) is _TokenType : data = match . group ( i + 1 ) if data : yield match . start ( i + 1 ) , action , data else : data = match . group ( i + 1 ) if data is not None : if ctx : ctx . pos = match . start ( i + 1 ) for item in action ( lexer , _PseudoMatch ( match . start ( i + 1 ) , data ) , ctx ) : if item : yield item if ctx : ctx . pos = match . end ( ) return callback
Callback that yields multiple actions for each group in the match .
162
12
240,488
def using ( _other , * * kwargs ) : gt_kwargs = { } if 'state' in kwargs : s = kwargs . pop ( 'state' ) if isinstance ( s , ( list , tuple ) ) : gt_kwargs [ 'stack' ] = s else : gt_kwargs [ 'stack' ] = ( 'root' , s ) if _other is this : def callback ( lexer , match , ctx = None ) : # if keyword arguments are given the callback # function has to create a new lexer instance if kwargs : # XXX: cache that somehow kwargs . update ( lexer . options ) lx = lexer . __class__ ( * * kwargs ) else : lx = lexer s = match . start ( ) for i , t , v in lx . get_tokens_unprocessed ( match . group ( ) , * * gt_kwargs ) : yield i + s , t , v if ctx : ctx . pos = match . end ( ) else : def callback ( lexer , match , ctx = None ) : # XXX: cache that somehow kwargs . update ( lexer . options ) lx = _other ( * * kwargs ) s = match . start ( ) for i , t , v in lx . get_tokens_unprocessed ( match . group ( ) , * * gt_kwargs ) : yield i + s , t , v if ctx : ctx . pos = match . end ( ) return callback
Callback that processes the match with a different lexer .
345
11
240,489
def do_insertions ( insertions , tokens ) : insertions = iter ( insertions ) try : index , itokens = next ( insertions ) except StopIteration : # no insertions for item in tokens : yield item return realpos = None insleft = True # iterate over the token stream where we want to insert # the tokens from the insertion list. for i , t , v in tokens : # first iteration. store the postition of first item if realpos is None : realpos = i oldi = 0 while insleft and i + len ( v ) >= index : tmpval = v [ oldi : index - i ] yield realpos , t , tmpval realpos += len ( tmpval ) for it_index , it_token , it_value in itokens : yield realpos , it_token , it_value realpos += len ( it_value ) oldi = index - i try : index , itokens = next ( insertions ) except StopIteration : insleft = False break # not strictly necessary yield realpos , t , v [ oldi : ] realpos += len ( v ) - oldi # leftover tokens while insleft : # no normal tokens, set realpos to zero realpos = realpos or 0 for p , t , v in itokens : yield realpos , t , v realpos += len ( v ) try : index , itokens = next ( insertions ) except StopIteration : insleft = False break
Helper for lexers which must combine the results of several sublexers .
317
15
240,490
def _process_regex ( cls , regex , rflags , state ) : if isinstance ( regex , Future ) : regex = regex . get ( ) return re . compile ( regex , rflags ) . match
Preprocess the regular expression component of a token definition .
46
11
240,491
def _process_token ( cls , token ) : assert type ( token ) is _TokenType or callable ( token ) , 'token type must be simple type or callable, not %r' % ( token , ) return token
Preprocess the token component of a token definition .
50
10
240,492
def _process_new_state ( cls , new_state , unprocessed , processed ) : if isinstance ( new_state , str ) : # an existing state if new_state == '#pop' : return - 1 elif new_state in unprocessed : return ( new_state , ) elif new_state == '#push' : return new_state elif new_state [ : 5 ] == '#pop:' : return - int ( new_state [ 5 : ] ) else : assert False , 'unknown new state %r' % new_state elif isinstance ( new_state , combined ) : # combine a new state from existing ones tmp_state = '_tmp_%d' % cls . _tmpname cls . _tmpname += 1 itokens = [ ] for istate in new_state : assert istate != new_state , 'circular state ref %r' % istate itokens . extend ( cls . _process_state ( unprocessed , processed , istate ) ) processed [ tmp_state ] = itokens return ( tmp_state , ) elif isinstance ( new_state , tuple ) : # push more than one state for istate in new_state : assert ( istate in unprocessed or istate in ( '#pop' , '#push' ) ) , 'unknown new state ' + istate return new_state else : assert False , 'unknown new state def %r' % new_state
Preprocess the state transition action of a token definition .
333
11
240,493
def _process_state ( cls , unprocessed , processed , state ) : assert type ( state ) is str , "wrong state name %r" % state assert state [ 0 ] != '#' , "invalid state name %r" % state if state in processed : return processed [ state ] tokens = processed [ state ] = [ ] rflags = cls . flags for tdef in unprocessed [ state ] : if isinstance ( tdef , include ) : # it's a state reference assert tdef != state , "circular state reference %r" % state tokens . extend ( cls . _process_state ( unprocessed , processed , str ( tdef ) ) ) continue if isinstance ( tdef , _inherit ) : # should be processed already, but may not in the case of: # 1. the state has no counterpart in any parent # 2. the state includes more than one 'inherit' continue if isinstance ( tdef , default ) : new_state = cls . _process_new_state ( tdef . state , unprocessed , processed ) tokens . append ( ( re . compile ( '' ) . match , None , new_state ) ) continue assert type ( tdef ) is tuple , "wrong rule def %r" % tdef try : rex = cls . _process_regex ( tdef [ 0 ] , rflags , state ) except Exception as err : raise ValueError ( "uncompilable regex %r in state %r of %r: %s" % ( tdef [ 0 ] , state , cls , err ) ) token = cls . _process_token ( tdef [ 1 ] ) if len ( tdef ) == 2 : new_state = None else : new_state = cls . _process_new_state ( tdef [ 2 ] , unprocessed , processed ) tokens . append ( ( rex , token , new_state ) ) return tokens
Preprocess a single state definition .
423
7
240,494
def process_tokendef ( cls , name , tokendefs = None ) : processed = cls . _all_tokens [ name ] = { } tokendefs = tokendefs or cls . tokens [ name ] for state in list ( tokendefs ) : cls . _process_state ( tokendefs , processed , state ) return processed
Preprocess a dictionary of token definitions .
90
8
240,495
def get_tokendefs ( cls ) : tokens = { } inheritable = { } for c in cls . __mro__ : toks = c . __dict__ . get ( 'tokens' , { } ) for state , items in iteritems ( toks ) : curitems = tokens . get ( state ) if curitems is None : # N.b. because this is assigned by reference, sufficiently # deep hierarchies are processed incrementally (e.g. for # A(B), B(C), C(RegexLexer), B will be premodified so X(B) # will not see any inherits in B). tokens [ state ] = items try : inherit_ndx = items . index ( inherit ) except ValueError : continue inheritable [ state ] = inherit_ndx continue inherit_ndx = inheritable . pop ( state , None ) if inherit_ndx is None : continue # Replace the "inherit" value with the items curitems [ inherit_ndx : inherit_ndx + 1 ] = items try : # N.b. this is the index in items (that is, the superclass # copy), so offset required when storing below. new_inh_ndx = items . index ( inherit ) except ValueError : pass else : inheritable [ state ] = inherit_ndx + new_inh_ndx return tokens
Merge tokens from superclasses in MRO order returning a single tokendef dictionary .
302
19
240,496
def memorized_timedelta ( seconds ) : try : return _timedelta_cache [ seconds ] except KeyError : delta = timedelta ( seconds = seconds ) _timedelta_cache [ seconds ] = delta return delta
Create only one instance of each distinct timedelta
49
9
240,497
def memorized_datetime ( seconds ) : try : return _datetime_cache [ seconds ] except KeyError : # NB. We can't just do datetime.utcfromtimestamp(seconds) as this # fails with negative values under Windows (Bug #90096) dt = _epoch + timedelta ( seconds = seconds ) _datetime_cache [ seconds ] = dt return dt
Create only one instance of each distinct datetime
87
9
240,498
def memorized_ttinfo ( * args ) : try : return _ttinfo_cache [ args ] except KeyError : ttinfo = ( memorized_timedelta ( args [ 0 ] ) , memorized_timedelta ( args [ 1 ] ) , args [ 2 ] ) _ttinfo_cache [ args ] = ttinfo return ttinfo
Create only one instance of each distinct tuple
78
8
240,499
def unpickler ( zone , utcoffset = None , dstoffset = None , tzname = None ) : # Raises a KeyError if zone no longer exists, which should never happen # and would be a bug. tz = pytz . timezone ( zone ) # A StaticTzInfo - just return it if utcoffset is None : return tz # This pickle was created from a DstTzInfo. We need to # determine which of the list of tzinfo instances for this zone # to use in order to restore the state of any datetime instances using # it correctly. utcoffset = memorized_timedelta ( utcoffset ) dstoffset = memorized_timedelta ( dstoffset ) try : return tz . _tzinfos [ ( utcoffset , dstoffset , tzname ) ] except KeyError : # The particular state requested in this timezone no longer exists. # This indicates a corrupt pickle, or the timezone database has been # corrected violently enough to make this particular # (utcoffset,dstoffset) no longer exist in the zone, or the # abbreviation has been changed. pass # See if we can find an entry differing only by tzname. Abbreviations # get changed from the initial guess by the database maintainers to # match reality when this information is discovered. for localized_tz in tz . _tzinfos . values ( ) : if ( localized_tz . _utcoffset == utcoffset and localized_tz . _dst == dstoffset ) : return localized_tz # This (utcoffset, dstoffset) information has been removed from the # zone. Add it back. This might occur when the database maintainers have # corrected incorrect information. datetime instances using this # incorrect information will continue to do so, exactly as they were # before being pickled. This is purely an overly paranoid safety net - I # doubt this will ever been needed in real life. inf = ( utcoffset , dstoffset , tzname ) tz . _tzinfos [ inf ] = tz . __class__ ( inf , tz . _tzinfos ) return tz . _tzinfos [ inf ]
Factory function for unpickling pytz tzinfo instances .
482
13