idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
37,900
def fix_encoding_and_explain(text):
    """Repeatedly re-decode mis-decoded text, returning (best_text, plan).

    The plan is a list of (operation, argument, cost) steps.  After each
    single-step fix, the candidate is scored as its text cost plus the sum
    of all step costs so far; the cheapest version seen wins.  Iteration
    stops when a step leaves the text unchanged.
    """
    best_version = text
    best_cost = text_cost(text)
    best_plan = []
    plan_so_far = []
    while True:
        prevtext = text
        text, plan = fix_one_step_and_explain(text)
        plan_so_far.extend(plan)
        # Total cost = intrinsic cost of the text + cost of every step taken.
        cost = text_cost(text) + sum(step_cost for _, _, step_cost in plan_so_far)
        if cost < best_cost:
            best_cost = cost
            best_version = text
            best_plan = list(plan_so_far)
        if text == prevtext:
            return best_version, best_plan
Re-decodes text that has been decoded incorrectly, and also returns a plan indicating all the steps required to fix it.
37,901
def fix_one_step_and_explain(text):
    """Perform a single step of re-decoding text that was decoded incorrectly.

    Returns (fixed_text, steps), where steps is a list of
    (operation, argument, cost) triples.  When no fix applies, the text is
    returned unchanged with an empty plan.
    """
    if isinstance(text, bytes):
        raise UnicodeError(BYTES_ERROR_TEXT)
    if len(text) == 0:
        return text, []

    # Pure ASCII can't be mojibake.
    if possible_encoding(text, 'ascii'):
        return text, []

    possible_1byte_encodings = []
    for encoding in CHARMAP_ENCODINGS:
        if possible_encoding(text, encoding):
            encoded_bytes = text.encode(encoding)
            encode_step = ('encode', encoding, ENCODING_COSTS.get(encoding, 0))
            transcode_steps = []
            try:
                decoding = 'utf-8'
                if ALTERED_UTF8_RE.search(encoded_bytes):
                    encoded_bytes = restore_byte_a0(encoded_bytes)
                    cost = encoded_bytes.count(0xa0) * 2
                    transcode_steps.append(('transcode', 'restore_byte_a0', cost))
                if encoding.startswith('sloppy') and 0x1a in encoded_bytes:
                    encoded_bytes = replace_lossy_sequences(encoded_bytes)
                    transcode_steps.append(('transcode', 'replace_lossy_sequences', 0))
                if 0xed in encoded_bytes or 0xc0 in encoded_bytes:
                    # Bytes that only appear in CESU-8 / overlong encodings.
                    decoding = 'utf-8-variants'
                decode_step = ('decode', decoding, 0)
                steps = [encode_step] + transcode_steps + [decode_step]
                fixed = encoded_bytes.decode(decoding)
                return fixed, steps
            except UnicodeDecodeError:
                possible_1byte_encodings.append(encoding)

    if PARTIAL_UTF8_PUNCT_RE.search(text):
        steps = [('transcode', 'fix_partial_utf8_punct_in_1252', 1)]
        fixed = fix_partial_utf8_punct_in_1252(text)
        return fixed, steps

    if 'latin-1' in possible_1byte_encodings:
        if 'windows-1252' in possible_1byte_encodings:
            # Ambiguous between the two; leave it alone.
            return text, []
        else:
            encoded = text.encode('latin-1')
            try:
                fixed = encoded.decode('windows-1252')
                steps = []
                if fixed != text:
                    steps = [('encode', 'latin-1', 0),
                             ('decode', 'windows-1252', 1)]
                return fixed, steps
            except UnicodeDecodeError:
                pass
    return text, []
Performs a single step of re-decoding text that's been decoded incorrectly.
37,902
def apply_plan(text, plan):
    """Apply a plan for fixing the encoding of ``text``.

    Each plan step is (operation, encoding_or_name, cost); cost is ignored
    here.  'encode'/'decode' use the named codec; 'transcode' looks up a
    named function in TRANSCODERS.  Raises ValueError on an unknown step.
    """
    obj = text
    for operation, encoding, _ in plan:
        if operation == 'encode':
            obj = obj.encode(encoding)
        elif operation == 'decode':
            obj = obj.decode(encoding)
        elif operation == 'transcode':
            if encoding in TRANSCODERS:
                obj = TRANSCODERS[encoding](obj)
            else:
                raise ValueError("Unknown transcode operation: %s" % encoding)
        else:
            raise ValueError("Unknown plan step: %s" % operation)
    return obj
Apply a plan for fixing the encoding of text .
37,903
def _unescape_fixup ( match ) : text = match . group ( 0 ) if text [ : 2 ] == "&#" : try : if text [ : 3 ] == "&#x" : codept = int ( text [ 3 : - 1 ] , 16 ) else : codept = int ( text [ 2 : - 1 ] ) if 0x80 <= codept < 0xa0 : return bytes ( [ codept ] ) . decode ( 'sloppy-windows-1252' ) else : return chr ( codept ) except ValueError : return text else : try : return entities . html5 [ text [ 1 : ] ] except KeyError : return text
Replace one matched HTML entity with the character it represents if possible .
37,904
def convert_surrogate_pair(match):
    """Convert a matched UTF-16 surrogate pair into the single codepoint
    it represents."""
    pair = match.group(0)
    high = ord(pair[0]) - 0xd800
    low = ord(pair[1]) - 0xdc00
    return chr(0x10000 + high * 0x400 + low)
Convert a surrogate pair to the single codepoint it represents .
37,905
def restore_byte_a0(byts):
    """Undo a mangling that replaced byte 0xA0 with an ASCII space.

    Within every region matched by ALTERED_UTF8_RE, turn 0x20 back into
    0xA0 so the sequence can decode as UTF-8 again.
    """
    def replacement(match):
        "Swap the spaces back to A0 bytes inside one matched region."
        return match.group(0).replace(b'\x20', b'\xa0')

    return ALTERED_UTF8_RE.sub(replacement, byts)
Some mojibake has been additionally altered by a process that said "hmm, byte A0, that's basically a space!" and replaced it with an ASCII space. When the A0 is part of a sequence that we intend to decode as UTF-8, changing byte A0 to 20 would make it fail to decode.
37,906
def fix_partial_utf8_punct_in_1252(text):
    """Fix punctuation characters found in the wild that were encoded as
    UTF-8 and decoded as Latin-1 or Windows-1252, even when this fix can't
    be applied to the whole string consistently."""
    def latin1_to_w1252(match):
        "Reinterpret a C1 control character as Windows-1252."
        return match.group(0).encode('latin-1').decode('sloppy-windows-1252')

    def w1252_to_utf8(match):
        "Redecode a Windows-1252 mojibake run as UTF-8."
        return match.group(0).encode('sloppy-windows-1252').decode('utf-8')

    # First normalize Latin-1 control characters to their 1252 equivalents,
    # then fix the UTF-8 punctuation sequences.
    text = C1_CONTROL_RE.sub(latin1_to_w1252, text)
    return PARTIAL_UTF8_PUNCT_RE.sub(w1252_to_utf8, text)
Fix particular characters that seem to be found in the wild encoded in UTF-8 and decoded in Latin-1 or Windows-1252, even when this fix can't be consistently applied.
37,907
def display_ljust(text, width, fillchar=' '):
    """Left-justify ``text`` to at least ``width`` terminal display cells.

    ``fillchar`` must be a display-width-1 character.  If the text's
    monospaced width can't be determined (-1), the text is returned as is.
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")
    text_width = monospaced_width(text)
    if text_width == -1:
        return text
    padding = max(0, width - text_width)
    return text + fillchar * padding
Return text left - justified in a Unicode string whose display width in a monospaced terminal should be at least width character cells . The rest of the string will be padded with fillchar which must be a width - 1 character .
37,908
def display_center(text, width, fillchar=' '):
    """Center ``text`` in at least ``width`` terminal display cells.

    ``fillchar`` must be a display-width-1 character.  If the text's
    monospaced width can't be determined (-1), the text is returned as is.
    Extra padding (odd remainder) goes on the right.
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")
    text_width = monospaced_width(text)
    if text_width == -1:
        return text
    padding = max(0, width - text_width)
    left_padding = padding // 2
    right_padding = padding - left_padding
    return fillchar * left_padding + text + fillchar * right_padding
Return text centered in a Unicode string whose display width in a monospaced terminal should be at least width character cells . The rest of the string will be padded with fillchar which must be a width - 1 character .
37,909
def make_sloppy_codec(encoding):
    """Build a 'sloppy' variant of a single-byte codec.

    The sloppy codec maps the bytes the real codec leaves unassigned via
    Latin-1, so every byte 0x00-0xFF round-trips.  Byte 0x1A is reserved
    as the replacement character.
    """
    all_bytes = bytes(range(256))
    # Start from Latin-1, then overlay the real codec's assignments.
    sloppy_chars = list(all_bytes.decode('latin-1'))
    if PY26:
        # Python 2.6's bytes.decode doesn't take 'errors' as a keyword.
        decoded_chars = all_bytes.decode(encoding, 'replace')
    else:
        decoded_chars = all_bytes.decode(encoding, errors='replace')
    for i, char in enumerate(decoded_chars):
        if char != REPLACEMENT_CHAR:
            sloppy_chars[i] = char
    # Byte 0x1A (SUB) stands in for the replacement character.
    sloppy_chars[0x1a] = REPLACEMENT_CHAR

    decoding_table = ''.join(sloppy_chars)
    encoding_table = codecs.charmap_build(decoding_table)

    class Codec(codecs.Codec):
        def encode(self, input, errors='strict'):
            return codecs.charmap_encode(input, errors, encoding_table)

        def decode(self, input, errors='strict'):
            return codecs.charmap_decode(input, errors, decoding_table)

    class IncrementalEncoder(codecs.IncrementalEncoder):
        def encode(self, input, final=False):
            return codecs.charmap_encode(input, self.errors, encoding_table)[0]

    class IncrementalDecoder(codecs.IncrementalDecoder):
        def decode(self, input, final=False):
            return codecs.charmap_decode(input, self.errors, decoding_table)[0]

    class StreamWriter(Codec, codecs.StreamWriter):
        pass

    class StreamReader(Codec, codecs.StreamReader):
        pass

    return codecs.CodecInfo(
        name='sloppy-' + encoding,
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
Take a codec name and return a sloppy version of that codec that can encode and decode the unassigned bytes in that encoding .
37,910
def _make_weirdness_regex ( ) : groups = [ ] groups . append ( '[^CM]M' ) groups . append ( '[Ll][AaC]' ) groups . append ( '[AaC][Ll]' ) groups . append ( '[LA]i' ) groups . append ( 'i[LA]' ) groups . append ( '2' ) groups . append ( 'X' ) groups . append ( 'P' ) groups . append ( '_' ) exclusive_categories = 'MmN13' for cat1 in exclusive_categories : others_range = '' . join ( c for c in exclusive_categories if c != cat1 ) groups . append ( '{cat1}[{others_range}]' . format ( cat1 = cat1 , others_range = others_range ) ) regex = '|' . join ( groups ) return re . compile ( regex )
Creates a list of regexes that match weird character sequences . The more matches there are the weirder the text is .
37,911
def sequence_weirdness(text):
    """Score how often ``text`` contains unexpected characters or sequences.

    Used to decide whether text should be re-decoded or left as is.
    Mojibake-looking symbols raise the score; common symbols lower it.
    """
    text2 = unicodedata.normalize('NFC', text)
    weirdness = len(WEIRDNESS_RE.findall(chars_to_classes(text2)))
    adjustment = (len(MOJIBAKE_SYMBOL_RE.findall(text2)) * 2
                  - len(COMMON_SYMBOL_RE.findall(text2)))
    return weirdness * 2 + adjustment
Determine how often a text has unexpected characters or sequences of characters . This metric is used to disambiguate when text should be re - decoded or left as is .
37,912
def search_function(encoding):
    """codecs-API search function: return our codec for ``encoding``, or
    None if we don't know one.  Found codecs are cached."""
    if encoding in _CACHE:
        return _CACHE[encoding]
    norm_encoding = normalize_encoding(encoding)
    codec = None
    if norm_encoding in UTF8_VAR_NAMES:
        # Imported lazily so the codec module only loads when needed.
        from ftfy.bad_codecs.utf8_variants import CODEC_INFO
        codec = CODEC_INFO
    elif norm_encoding.startswith('sloppy_'):
        from ftfy.bad_codecs.sloppy import CODECS
        codec = CODECS.get(norm_encoding)
    if codec is not None:
        _CACHE[encoding] = codec
    return codec
Register our bad codecs with Python's codecs API. This involves adding a search function that takes in an encoding name and returns a codec for that encoding if it knows one, or None if it doesn't.
37,913
def _buffer_decode ( self , input , errors , final ) : decoded_segments = [ ] position = 0 while True : decoded , consumed = self . _buffer_decode_step ( input [ position : ] , errors , final ) if consumed == 0 : break decoded_segments . append ( decoded ) position += consumed if final : assert position == len ( input ) return '' . join ( decoded_segments ) , position
Decode bytes that may be arriving in a stream following the Codecs API .
37,914
def _buffer_decode_surrogates(sup, input, errors, final):
    """Decode improperly-encoded surrogates (CESU-8 style) when present.

    A full six-byte surrogate sequence is reassembled into its intended
    codepoint; anything else is deferred to ``sup``, the superclass
    decoding step.
    """
    if len(input) < 6:
        # Not enough bytes for a surrogate pair; only decode if the stream
        # is ending, otherwise wait for more input.
        if final:
            return sup(input, errors, final)
        else:
            return '', 0
    else:
        if CESU8_RE.match(input):
            # Extract the payload bits of both three-byte sequences.
            codepoint = (
                ((input[1] & 0x0f) << 16)
                + ((input[2] & 0x3f) << 10)
                + ((input[4] & 0x0f) << 6)
                + (input[5] & 0x3f)
                + 0x10000
            )
            return chr(codepoint), 6
        else:
            return sup(input[:3], errors, False)
When we have improperly encoded surrogates we can still see the bits that they were meant to represent .
37,915
def fix_text(text, *,
             fix_entities='auto',
             remove_terminal_escapes=True,
             fix_encoding=True,
             fix_latin_ligatures=True,
             fix_character_width=True,
             uncurl_quotes=True,
             fix_line_breaks=True,
             fix_surrogates=True,
             remove_control_chars=True,
             remove_bom=True,
             normalization='NFC',
             max_decode_length=10 ** 6):
    r"""Given Unicode text as input, fix inconsistencies and glitches in it,
    such as mojibake.

    The text is processed one newline-terminated segment at a time; each
    keyword toggles one of the fixes applied per segment.
    """
    if isinstance(text, bytes):
        raise UnicodeError(fixes.BYTES_ERROR_TEXT)
    out = []
    pos = 0
    while pos < len(text):
        textbreak = text.find('\n', pos) + 1
        fix_encoding_this_time = fix_encoding
        if textbreak == 0:
            # No newline left; take everything to the end.
            textbreak = len(text)
        if (textbreak - pos) > max_decode_length:
            # Segment too long to safely attempt encoding fixes.
            fix_encoding_this_time = False
        substring = text[pos:textbreak]
        if fix_entities == 'auto' and '<' in substring and '>' in substring:
            # Looks like HTML markup; stop unescaping entities from here on.
            fix_entities = False
        out.append(
            fix_text_segment(
                substring,
                fix_entities=fix_entities,
                remove_terminal_escapes=remove_terminal_escapes,
                fix_encoding=fix_encoding_this_time,
                uncurl_quotes=uncurl_quotes,
                fix_latin_ligatures=fix_latin_ligatures,
                fix_character_width=fix_character_width,
                fix_line_breaks=fix_line_breaks,
                fix_surrogates=fix_surrogates,
                remove_control_chars=remove_control_chars,
                remove_bom=remove_bom,
                normalization=normalization,
            )
        )
        pos = textbreak
    return ''.join(out)
Given Unicode text as input, fix inconsistencies and glitches in it, such as mojibake.
37,916
def fix_file(input_file, encoding=None, *,
             fix_entities='auto',
             remove_terminal_escapes=True,
             fix_encoding=True,
             fix_latin_ligatures=True,
             fix_character_width=True,
             uncurl_quotes=True,
             fix_line_breaks=True,
             fix_surrogates=True,
             remove_control_chars=True,
             remove_bom=True,
             normalization='NFC'):
    """Fix text found in a file, yielding one fixed segment per input line.

    Byte lines are decoded with ``encoding``, or have their encoding
    guessed per line when it is None.
    """
    entities = fix_entities
    for line in input_file:
        if isinstance(line, bytes):
            if encoding is None:
                line, encoding = guess_bytes(line)
            else:
                line = line.decode(encoding)
        if fix_entities == 'auto' and '<' in line and '>' in line:
            # Looks like HTML; leave entities alone on this line.
            entities = False
        yield fix_text_segment(
            line,
            fix_entities=entities,
            remove_terminal_escapes=remove_terminal_escapes,
            fix_encoding=fix_encoding,
            fix_latin_ligatures=fix_latin_ligatures,
            fix_character_width=fix_character_width,
            uncurl_quotes=uncurl_quotes,
            fix_line_breaks=fix_line_breaks,
            fix_surrogates=fix_surrogates,
            remove_control_chars=remove_control_chars,
            remove_bom=remove_bom,
            normalization=normalization,
        )
Fix text that is found in a file .
37,917
def fix_text_segment(text, *,
                     fix_entities='auto',
                     remove_terminal_escapes=True,
                     fix_encoding=True,
                     fix_latin_ligatures=True,
                     fix_character_width=True,
                     uncurl_quotes=True,
                     fix_line_breaks=True,
                     fix_surrogates=True,
                     remove_control_chars=True,
                     remove_bom=True,
                     normalization='NFC'):
    """Apply the requested fixes to one chunk of text.

    The fixes are repeated until the text stops changing (a fixed point),
    since one fix can expose work for another.
    """
    if isinstance(text, bytes):
        raise UnicodeError(fixes.BYTES_ERROR_TEXT)
    if fix_entities == 'auto' and '<' in text and '>' in text:
        # Looks like HTML; don't unescape entities.
        fix_entities = False
    while True:
        origtext = text
        if remove_terminal_escapes:
            text = fixes.remove_terminal_escapes(text)
        if fix_encoding:
            text = fixes.fix_encoding(text)
        if fix_entities:
            text = fixes.unescape_html(text)
        if fix_latin_ligatures:
            text = fixes.fix_latin_ligatures(text)
        if fix_character_width:
            text = fixes.fix_character_width(text)
        if uncurl_quotes:
            text = fixes.uncurl_quotes(text)
        if fix_line_breaks:
            text = fixes.fix_line_breaks(text)
        if fix_surrogates:
            text = fixes.fix_surrogates(text)
        if remove_control_chars:
            text = fixes.remove_control_chars(text)
        if remove_bom and not remove_control_chars:
            # remove_control_chars already strips the BOM when enabled.
            text = fixes.remove_bom(text)
        if normalization is not None:
            text = unicodedata.normalize(normalization, text)
        if text == origtext:
            return text
Apply fixes to text in a single chunk . This could be a line of text within a larger run of fix_text or it could be a larger amount of text that you are certain is in a consistent encoding .
37,918
def explain_unicode(text):
    """Print one line per character — codepoint, display form, category,
    and name — useful for debugging mysterious Unicode."""
    for char in text:
        if char.isprintable():
            display = char
        else:
            # Show unprintable characters as escape sequences.
            display = char.encode('unicode-escape').decode('ascii')
        print('U+{code:04X} {display} [{category}] {name}'.format(
            display=display_ljust(display, 7),
            code=ord(char),
            category=unicodedata.category(char),
            name=unicodedata.name(char, '<unknown>')))
A utility method that's useful for debugging mysterious Unicode.
37,919
def _build_regexes():
    """Build {encoding: regex} where each regex quickly tests whether a
    string is representable in that encoding.

    The ASCII case just checks that all characters are <= U+007F; the
    charmap encodings match ASCII (minus 0x1A) plus the characters their
    high bytes decode to.
    """
    encoding_regexes = {'ascii': re.compile('^[\x00-\x7f]*$')}
    for encoding in CHARMAP_ENCODINGS:
        # All high bytes, plus 0x1A which sloppy codecs use as U+FFFD.
        byte_range = bytes(list(range(0x80, 0x100)) + [0x1a])
        charlist = byte_range.decode(encoding)
        regex = '^[\x00-\x19\x1b-\x7f{0}]*$'.format(charlist)
        encoding_regexes[encoding] = re.compile(regex)
    return encoding_regexes
ENCODING_REGEXES contain reasonably fast ways to detect if we could represent a given string in a given encoding . The simplest one is the ascii detector which of course just determines if all characters are between U + 0000 and U + 007F .
37,920
def _build_width_map ( ) : width_map = { 0x3000 : ' ' } for i in range ( 0xff01 , 0xfff0 ) : char = chr ( i ) alternate = unicodedata . normalize ( 'NFKC' , char ) if alternate != char : width_map [ i ] = alternate return width_map
Build a translate mapping that replaces halfwidth and fullwidth forms with their standard - width forms .
37,921
def set_vml_accuracy_mode(mode):
    """Set the accuracy mode for VML operations.

    Returns the name of the mode actually in effect, or None when VML is
    unavailable.  Raises ValueError for an unrecognized mode.
    """
    if use_vml:
        acc_dict = {None: 0, 'low': 1, 'high': 2, 'fast': 3}
        acc_reverse_dict = {1: 'low', 2: 'high', 3: 'fast'}
        if mode not in acc_dict.keys():
            raise ValueError(
                "mode argument must be one of: None, 'high', 'low', 'fast'")
        retval = _set_vml_accuracy_mode(acc_dict.get(mode, 0))
        return acc_reverse_dict.get(retval)
    else:
        return None
Set the accuracy mode for VML operations .
37,922
def _init_num_threads():
    """Initialize the thread pool from the environment.

    NUMEXPR_MAX_THREADS sizes the pool; NUMEXPR_NUM_THREADS or
    OMP_NUM_THREADS set the initial number of threads actually used.
    Returns the number of threads requested.
    """
    # Threading problems have been reported on sparc; stay single-threaded.
    if 'sparc' in platform.machine():
        log.warning('The number of threads have been set to 1 because problems related '
                    'to threading have been reported on some sparc machine. '
                    'The number of threads can be changed using the "set_num_threads" '
                    'function.')
        set_num_threads(1)
        return 1

    env_configured = False
    n_cores = detect_number_of_cores()
    if 'NUMEXPR_MAX_THREADS' in os.environ:
        # MAX_THREADS was already validated from this variable at import.
        env_configured = True
        n_cores = MAX_THREADS
    else:
        if n_cores > MAX_THREADS:
            log.info('Note: detected %d virtual cores but NumExpr set to maximum of %d, check "NUMEXPR_MAX_THREADS" environment variable.' % (n_cores, MAX_THREADS))
        if n_cores > 8:
            # Without explicit configuration, cap at a safe default.
            log.info('Note: NumExpr detected %d cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.' % n_cores)
            n_cores = 8

    if 'NUMEXPR_NUM_THREADS' in os.environ:
        requested_threads = int(os.environ['NUMEXPR_NUM_THREADS'])
    elif 'OMP_NUM_THREADS' in os.environ:
        requested_threads = int(os.environ['OMP_NUM_THREADS'])
    else:
        requested_threads = n_cores
        if not env_configured:
            log.info('NumExpr defaulting to %d threads.' % n_cores)

    set_num_threads(requested_threads)
    return requested_threads
Detects the environment variable NUMEXPR_MAX_THREADS to set the threadpool size and if necessary the slightly redundant NUMEXPR_NUM_THREADS or OMP_NUM_THREADS env vars to set the initial number of threads used by the virtual machine .
37,923
def detect_number_of_cores():
    """Detect the number of cores on this system (cribbed from pp).

    Tries POSIX sysconf, then the BSD ``sysctl`` tool, then the Windows
    NUMBER_OF_PROCESSORS environment variable; falls back to 1.
    """
    if hasattr(os, "sysconf"):
        if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
            # Linux and most Unixes.
            ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
            if isinstance(ncpus, int) and ncpus > 0:
                return ncpus
        else:
            # BSD-style systems without that sysconf name.
            return int(subprocess.check_output(["sysctl", "-n", "hw.ncpu"]))
    # Windows.
    try:
        ncpus = int(os.environ.get("NUMBER_OF_PROCESSORS", ""))
        if ncpus > 0:
            return ncpus
    except ValueError:
        pass
    return 1
Detects the number of cores on a system . Cribbed from pp .
37,924
def chunkify(chunksize):
    """Decorator factory: vectorize a function over flat chunks of its
    array arguments to keep memory use down.

    All arguments must have the same number of elements (no broadcasting).

    Bug fix: the final partial chunk previously computed
    ``stop = nelements - start`` — an index *before* ``start`` — so the
    tail of the output array was left uninitialized.  The slice end is now
    clamped to ``nelements``.
    """
    def chunkifier(func):
        def wrap(*args):
            assert len(args) > 0
            assert all(len(a.flat) == len(args[0].flat) for a in args)
            nelements = len(args[0].flat)
            out = np.ndarray(args[0].shape)
            for start in range(0, nelements, chunksize):
                # Clamp the last chunk instead of running past the end.
                stop = min(start + chunksize, nelements)
                iargs = tuple(a.flat[start:stop] for a in args)
                out.flat[start:stop] = func(*iargs)
            return out
        return wrap
    return chunkifier
Very stupid chunk vectorizer which keeps memory use down. This version requires all inputs to have the same number of elements, although it shouldn't be that hard to implement simple broadcasting.
37,925
def expressionToAST(ex):
    """Recursively convert an expressions.ExpressionNode tree into an
    ASTNode tree."""
    converted_children = [expressionToAST(c) for c in ex.children]
    return ASTNode(ex.astType, ex.astKind, ex.value, converted_children)
Take an expression tree made out of expressions.ExpressionNode and convert it to an AST tree.
37,926
def sigPerms(s):
    """Yield every signature derivable from ``s`` by upcasting typecodes.

    Each character in 'bilfdc' may be replaced by any later (wider) code;
    's' (string) is passed through; anything else ends the expansion.
    """
    codes = 'bilfdc'
    if not s:
        yield ''
    elif s[0] in codes:
        # Try the original code and every wider one, combined with all
        # permutations of the rest of the signature.
        start = codes.index(s[0])
        for upcast in codes[start:]:
            for rest in sigPerms(s[1:]):
                yield upcast + rest
    elif s[0] == 's':
        for rest in sigPerms(s[1:]):
            yield 's' + rest
    else:
        yield s
Generate all possible signatures derived by upcasting the given signature .
37,927
def typeCompileAst(ast):
    """Assign appropriate types to each node in the AST.

    Resolves each 'op' node against the interpreter's opcode table (or the
    function table as a fallback), and inserts cast nodes where the chosen
    signature upcasts an argument.
    """
    children = list(ast.children)
    if ast.astType == 'op':
        retsig = ast.typecode()
        basesig = ''.join(x.typecode() for x in list(ast.children))
        # Look for an opcode matching some upcast of the base signature.
        for sig in sigPerms(basesig):
            value = (ast.value + '_' + retsig + sig).encode('ascii')
            if value in interpreter.opcodes:
                break
        else:
            # No opcode found; try the function table.
            for sig in sigPerms(basesig):
                funcname = (ast.value + '_' + retsig + sig).encode('ascii')
                if funcname in interpreter.funccodes:
                    value = ('func_%sn' % (retsig + sig)).encode('ascii')
                    children += [ASTNode('raw', 'none',
                                         interpreter.funccodes[funcname])]
                    break
            else:
                raise NotImplementedError(
                    "couldn't find matching opcode for '%s'"
                    % (ast.value + '_' + retsig + basesig))
        # Add casts wherever the matched signature widened an argument.
        for i, (have, want) in enumerate(zip(basesig, sig)):
            if have != want:
                kind = typecode_to_kind[want]
                if children[i].astType == 'constant':
                    # Constants can simply be re-kinded in place.
                    children[i] = ASTNode('constant', kind, children[i].value)
                else:
                    opname = "cast"
                    children[i] = ASTNode('op', kind, opname, [children[i]])
    else:
        value = ast.value
        children = ast.children
    return ASTNode(ast.astType, ast.astKind, value,
                   [typeCompileAst(c) for c in children])
Assign appropriate types to each node in the AST.
37,928
def stringToExpression(s, types, context):
    """Given a string, convert it to a tree of ExpressionNodes.

    The string is compiled and then evaluated with every referenced name
    bound to a VariableNode (or a literal / known function), so the result
    of evaluation is the expression tree itself.
    """
    old_ctx = expressions._context.get_current_context()
    try:
        expressions._context.set_new_context(context)
        if context.get('truediv', False):
            flags = __future__.division.compiler_flag
        else:
            flags = 0
        c = compile(s, '<expr>', 'eval', flags)
        # Build the namespace: literal constants, then variable placeholders,
        # then the supported functions.
        names = {}
        for name in c.co_names:
            if name == "None":
                names[name] = None
            elif name == "True":
                names[name] = True
            elif name == "False":
                names[name] = False
            else:
                t = types.get(name, default_type)
                names[name] = expressions.VariableNode(name, type_to_kind[t])
        names.update(expressions.functions)
        # NOTE(review): eval of a user-supplied expression string is this
        # function's purpose — callers must not pass untrusted input.
        ex = eval(c, names)
        if expressions.isConstant(ex):
            ex = expressions.ConstantNode(ex, expressions.getKind(ex))
        elif not isinstance(ex, expressions.ExpressionNode):
            raise TypeError("unsupported expression type: %s" % type(ex))
    finally:
        expressions._context.set_new_context(old_ctx)
    return ex
Given a string, convert it to a tree of ExpressionNodes.
37,929
def getInputOrder(ast, input_order=None):
    """Return the expression's variable nodes in input order.

    Uses ``input_order`` when given (raising ValueError if its names don't
    match the expression's variables), otherwise sorts names alphabetically.
    """
    variables = {}
    for a in ast.allOf('variable'):
        variables[a.value] = a
    variable_names = set(variables.keys())
    if input_order:
        if variable_names != set(input_order):
            raise ValueError(
                "input names (%s) don't match those found in expression (%s)"
                % (input_order, variable_names))
        ordered_names = input_order
    else:
        ordered_names = sorted(variable_names)
    return [variables[v] for v in ordered_names]
Derive the input order of the variables in an expression .
37,930
def assignLeafRegisters(inodes, registerMaker):
    """Assign registers to leaf nodes, sharing a single register among all
    leaves with the same key."""
    leafRegisters = {}
    for node in inodes:
        key = node.key()
        if key not in leafRegisters:
            # First time we see this key: make a fresh register for it.
            leafRegisters[key] = registerMaker(node)
        node.reg = leafRegisters[key]
Assign new registers to each of the leaf nodes .
37,931
def assignBranchRegisters(inodes, registerMaker):
    """Give every branch node its own temporary register."""
    for node in inodes:
        node.reg = registerMaker(node, temporary=True)
Assign temporary registers to each of the branch nodes .
37,932
def collapseDuplicateSubtrees(ast):
    """Common subexpression elimination.

    Every 'op' subtree equal to one already seen is rewritten in place as
    an 'alias' of the first occurrence.  Alias chains are then collapsed
    so each alias points at a real node.  Returns the alias nodes.
    """
    seen = {}
    aliases = []
    for a in ast.allOf('op'):
        if a in seen:
            # Duplicate subtree: turn it into an alias of the original.
            target = seen[a]
            a.astType = 'alias'
            a.value = target
            a.children = ()
            aliases.append(a)
        else:
            seen[a] = a
    # Flatten alias-of-alias chains.
    for a in aliases:
        while a.value.astType == 'alias':
            a.value = a.value.value
    return aliases
Common subexpression elimination .
37,933
def optimizeTemporariesAllocation(ast):
    """Minimize the number of temporaries needed by reusing old ones.

    Walks the AST in postorder tracking, for each temporary register, the
    set of nodes still waiting to consume it.  Once a register's last
    consumer has been processed it is recycled for later nodes of the same
    kind.

    Fix: removed the dead ``node_regs`` dict, which was computed but never
    read.
    """
    nodes = [n for n in ast.postorderWalk() if n.reg.temporary]
    users_of = dict((n.reg, set()) for n in nodes)

    # The root may consume temporaries too, even if its own register isn't
    # temporary; include it when it isn't already in the list.
    if nodes and nodes[-1] is not ast:
        nodes_to_check = nodes + [ast]
    else:
        nodes_to_check = nodes
    for n in nodes_to_check:
        for c in n.children:
            if c.reg.temporary:
                users_of[c.reg].add(n)

    # Pools of recyclable registers, one per scalar kind.
    unused = dict([(tc, set()) for tc in scalar_constant_kinds])
    for n in nodes:
        for c in n.children:
            reg = c.reg
            if reg.temporary:
                users = users_of[reg]
                users.discard(n)
                if not users:
                    # This register is dead after n: make it reusable.
                    unused[reg.node.astKind].add(reg)
        if unused[n.astKind]:
            # Recycle a dead register of the right kind for n's output.
            reg = unused[n.astKind].pop()
            users_of[reg] = users_of[n.reg]
            n.reg = reg
Attempt to minimize the number of temporaries needed by reusing old ones .
37,934
def setOrderedRegisterNumbers(order, start):
    """Number the registers of the nodes in ``order`` sequentially from
    ``start``; return the next free register number."""
    for offset, node in enumerate(order):
        node.reg.n = start + offset
    return start + len(order)
Given an order of nodes assign register numbers .
37,935
def setRegisterNumbersForTemporaries(ast, start):
    """Assign register numbers to temporaries, tracking aliases and
    handling immediate operands.

    Returns (next_free_number, signature) where signature is the string of
    typecodes of the numbered temporaries.
    """
    seen = 0
    signature = ''
    aliases = []
    for node in ast.postorderWalk():
        if node.astType == 'alias':
            # Number the alias target; remember the alias for fix-up below.
            aliases.append(node)
            node = node.value
        if node.reg.immediate:
            # Immediates encode their value directly as the register number.
            node.reg.n = node.value
            continue
        reg = node.reg
        if reg.n is None:
            reg.n = start + seen
            seen += 1
            signature += reg.node.typecode()
    # Aliases share their target's register.
    for node in aliases:
        node.reg = node.value.reg
    return start + seen, signature
Assign register numbers for temporary registers keeping track of aliases and handling immediate operands .
37,936
def convertASTtoThreeAddrForm(ast):
    """Convert an AST to three-address form: one (op_name, dest_register,
    *argument_registers) tuple per 'op' node."""
    return [
        (node.value, node.reg) + tuple(child.reg for child in node.children)
        for node in ast.allOf('op')
    ]
Convert an AST to a three address form .
37,937
def compileThreeAddrForm(program):
    """Compile a three-address-form program into the byte string the VM
    understands.

    Each instruction becomes one or more 4-byte quadruples
    (opcode, store, arg1, arg2); extra arguments spill into 'noop'
    quadruples.
    """
    def nToChr(reg):
        """Encode a register (or None) as one byte."""
        if reg is None:
            return b'\xff'
        elif reg.n < 0:
            raise ValueError("negative value for register number %s" % reg.n)
        else:
            if sys.version_info[0] < 3:
                return chr(reg.n)
            else:
                return bytes([reg.n])

    def quadrupleToString(opcode, store, a1=None, a2=None):
        """Pack one opcode plus up to three registers into 4 bytes."""
        cop = chr(interpreter.opcodes[opcode]).encode('ascii')
        return cop + nToChr(store) + nToChr(a1) + nToChr(a2)

    def toString(args):
        # Pad short instructions out to the 4-slot quadruple.
        while len(args) < 4:
            args += (None,)
        opcode, store, a1, a2 = args[:4]
        pieces = [quadrupleToString(opcode, store, a1, a2)]
        args = args[4:]
        while args:
            # Remaining arguments ride along in noop quadruples.
            pieces.append(quadrupleToString(b'noop', *args[:3]))
            args = args[3:]
        return b''.join(pieces)

    return b''.join([toString(t) for t in program])
Given a three-address form of the program, compile it to a string that the VM understands.
37,938
def precompile(ex, signature=(), context={}):
    """Compile an expression to an intermediate three-address form.

    Returns (program, signature, tempsig, constants, input_names).
    """
    types = dict(signature)
    input_order = [name for (name, type_) in signature]
    if isinstance(ex, (str, unicode)):
        ex = stringToExpression(ex, types, context)

    ast = expressionToAST(ex)
    if ex.astType != 'op':
        # Ensure the root is always an op by wrapping leaves in a copy.
        ast = ASTNode('op', value='copy', astKind=ex.astKind, children=(ast,))
    ast = typeCompileAst(ast)

    aliases = collapseDuplicateSubtrees(ast)
    assignLeafRegisters(ast.allOf('raw'), Immediate)
    assignLeafRegisters(ast.allOf('variable', 'constant'), Register)
    assignBranchRegisters(ast.allOf('op'), Register)
    # Aliases share their target's register.
    for a in aliases:
        a.reg = a.value.reg

    input_order = getInputOrder(ast, input_order)
    constants_order, constants = getConstants(ast)

    if isReduction(ast):
        ast.reg.temporary = False
    optimizeTemporariesAllocation(ast)

    # Register 0 is always the output.
    ast.reg.temporary = False
    r_output = 0
    ast.reg.n = 0
    r_inputs = r_output + 1
    r_constants = setOrderedRegisterNumbers(input_order, r_inputs)
    r_temps = setOrderedRegisterNumbers(constants_order, r_constants)
    r_end, tempsig = setRegisterNumbersForTemporaries(ast, r_temps)

    threeAddrProgram = convertASTtoThreeAddrForm(ast)
    input_names = tuple([a.value for a in input_order])
    signature = ''.join(type_to_typecode[types.get(x, default_type)]
                        for x in input_names)
    return threeAddrProgram, signature, tempsig, constants, input_names
Compile the expression to an intermediate form .
37,939
def disassemble(nex):
    """Disassemble a NumExpr program into a list of (op, dest, arg1, arg2)
    tuples with human-readable register labels."""
    rev_opcodes = {}
    for op in interpreter.opcodes:
        rev_opcodes[interpreter.opcodes[op]] = op
    # Register layout: r0 = output, then inputs, then constants, then temps.
    r_constants = 1 + len(nex.signature)
    r_temps = r_constants + len(nex.constants)

    def getArg(pc, offset):
        """Decode the argument byte at pc+offset into a labelled register."""
        if sys.version_info[0] < 3:
            arg = ord(nex.program[pc + offset])
            op = rev_opcodes.get(ord(nex.program[pc]))
        else:
            arg = nex.program[pc + offset]
            op = rev_opcodes.get(nex.program[pc])
        try:
            code = op.split(b'_')[1][offset - 1]
        except IndexError:
            return None
        if sys.version_info[0] > 2:
            # Indexing bytes yields an int on Python 3; rewrap as bytes.
            code = bytes([code])
        if arg == 255:
            # 0xFF means "no argument".
            return None
        if code != b'n':
            if arg == 0:
                return b'r0'
            elif arg < r_constants:
                return ('r%d[%s]' % (arg, nex.input_names[arg - 1])).encode('ascii')
            elif arg < r_temps:
                return ('c%d[%s]' % (arg, nex.constants[arg - r_constants])).encode('ascii')
            else:
                return ('t%d' % (arg,)).encode('ascii')
        else:
            return arg

    source = []
    for pc in range(0, len(nex.program), 4):
        if sys.version_info[0] < 3:
            op = rev_opcodes.get(ord(nex.program[pc]))
        else:
            op = rev_opcodes.get(nex.program[pc])
        dest = getArg(pc, 1)
        arg1 = getArg(pc, 2)
        arg2 = getArg(pc, 3)
        source.append((op, dest, arg1, arg2))
    return source
Given a NumExpr object return a list which is the program disassembled .
37,940
def getArguments(names, local_dict=None, global_dict=None):
    """Look up each name in the given dicts (or the caller's frame) and
    return the values as numpy arrays."""
    call_frame = sys._getframe(2)
    clear_local_dict = False
    if local_dict is None:
        local_dict = call_frame.f_locals
        # We borrowed the frame's locals proxy; clear it afterwards so we
        # don't keep the caller's objects alive.
        clear_local_dict = True
    try:
        frame_globals = call_frame.f_globals
        if global_dict is None:
            global_dict = frame_globals
        # Never clear when the locals mapping IS the globals mapping
        # (module-level calls).
        clear_local_dict = clear_local_dict and frame_globals is not local_dict
        arguments = []
        for name in names:
            try:
                a = local_dict[name]
            except KeyError:
                a = global_dict[name]
            arguments.append(numpy.asarray(a))
    finally:
        if clear_local_dict:
            local_dict.clear()
    return arguments
Get the arguments based on the names .
37,941
def evaluate(ex, local_dict=None, global_dict=None, out=None, order='K',
             casting='safe', **kwargs):
    """Evaluate a simple array expression element-wise.

    Compiled expressions are cached by (expression, context) and then by
    the argument type signature; the last execution is remembered for
    re_evaluate().
    """
    global _numexpr_last
    if not isinstance(ex, (str, unicode)):
        raise ValueError("must specify expression as a string")
    context = getContext(kwargs, frame_depth=1)
    expr_key = (ex, tuple(sorted(context.items())))
    if expr_key not in _names_cache:
        _names_cache[expr_key] = getExprNames(ex, context)
    names, ex_uses_vml = _names_cache[expr_key]
    arguments = getArguments(names, local_dict, global_dict)
    # The compiled program depends on the argument types as well.
    signature = [(name, getType(arg)) for (name, arg) in zip(names, arguments)]
    numexpr_key = expr_key + (tuple(signature),)
    try:
        compiled_ex = _numexpr_cache[numexpr_key]
    except KeyError:
        compiled_ex = _numexpr_cache[numexpr_key] = NumExpr(ex, signature, **context)
    kwargs = {'out': out, 'order': order, 'casting': casting,
              'ex_uses_vml': ex_uses_vml}
    _numexpr_last = dict(ex=compiled_ex, argnames=names, kwargs=kwargs)
    with evaluate_lock:
        return compiled_ex(*arguments, **kwargs)
Evaluate a simple array expression element - wise using the new iterator .
37,942
def re_evaluate(local_dict=None):
    """Re-run the most recent evaluate() with fresh argument values,
    skipping all parsing and compilation checks."""
    try:
        compiled_ex = _numexpr_last['ex']
    except KeyError:
        raise RuntimeError("not a previous evaluate() execution found")
    argnames = _numexpr_last['argnames']
    args = getArguments(argnames, local_dict)
    kwargs = _numexpr_last['kwargs']
    with evaluate_lock:
        return compiled_ex(*args, **kwargs)
Re - evaluate the previous executed array expression without any check .
37,943
def compute():
    """Compute the polynomial ``expr`` with either numpy or numexpr
    (selected by the module-level ``what``); return the element count."""
    if what == "numpy":
        y = eval(expr)
    else:
        y = ne.evaluate(expr)
    return len(y)
Compute the polynomial .
37,944
def partial_row_coordinates(self, X):
    """Returns the row coordinates for each group.

    The result is a dataframe whose columns form a (group name, component)
    pair structure.
    """
    utils.validation.check_is_fitted(self, 's_')
    if self.check_input:
        utils.check_array(X, dtype=[str, np.number])
    X = self._prepare_input(X)
    # Global projection axes scaled by the singular values.
    P = len(X) ** 0.5 * self.U_ / self.s_
    coords = {}
    for name, cols in sorted(self.groups.items()):
        X_partial = X.loc[:, cols]
        # One-hot encode the group if it holds categorical columns.
        if not self.all_nums_[name]:
            X_partial = self.cat_one_hots_[name].transform(X_partial)
        # MFA group weighting: divide by the group's first singular value.
        Z_partial = X_partial / self.partial_factor_analysis_[name].s_[0]
        coords[name] = len(self.groups) * (Z_partial @ Z_partial.T) @ P
    # Flatten into one dataframe with (group, component) column keys.
    coords = pd.DataFrame({(name, i): group_coords.loc[:, i] for name, group_coords in coords.items() for i in range(group_coords.shape[1])})
    return coords
Returns the row coordinates for each group .
37,945
def column_correlations(self, X):
    """Returns the column correlations.

    Pearson correlation between each column of the global matrix and each
    row principal component.
    """
    utils.validation.check_is_fitted(self, 's_')
    X_global = self._build_X_global(X)
    row_pc = self._row_coordinates_from_global(X_global)
    # NOTE(review): ``to_dense()`` suggests X_global columns are sparse
    # series -- confirm against _build_X_global.
    return pd.DataFrame({component: {feature: row_pc[component].corr(X_global[feature].to_dense()) for feature in X_global.columns} for component in row_pc.columns})
Returns the column correlations .
37,946
def eigenvalues_(self):
    """The eigenvalues associated with each principal component."""
    utils.validation.check_is_fitted(self, 's_')
    # Eigenvalues are simply the squared singular values.
    return (self.s_ ** 2).tolist()
The eigenvalues associated with each principal component .
37,947
def explained_inertia_(self):
    """The percentage of explained inertia per principal component."""
    utils.validation.check_is_fitted(self, 'total_inertia_')
    total = self.total_inertia_
    return [eigenvalue / total for eigenvalue in self.eigenvalues_]
The percentage of explained inertia per principal component .
37,948
def row_coordinates(self, X):
    """The row principal coordinates."""
    utils.validation.check_is_fitted(self, 'V_')
    _, row_names, _, _ = util.make_labels_and_names(X)
    # Normalise the input to either a scipy COO matrix or an ndarray.
    if isinstance(X, pd.SparseDataFrame):
        X = X.to_coo().astype(float)
    elif isinstance(X, pd.DataFrame):
        X = X.to_numpy()
    if self.copy:
        X = X.copy()
    # Turn rows into profiles: each row sums to 1.
    if isinstance(X, np.ndarray):
        X = X / X.sum(axis=1)[:, None]
    else:
        X = X / X.sum(axis=1)
    # Project onto the principal axes, weighting by inverse sqrt column masses.
    return pd.DataFrame(data=X @ sparse.diags(self.col_masses_.to_numpy() ** -0.5) @ self.V_.T, index=row_names)
The row principal coordinates .
37,949
def column_coordinates(self, X):
    """The column principal coordinates."""
    utils.validation.check_is_fitted(self, 'V_')
    _, _, _, col_names = util.make_labels_and_names(X)
    # Normalise the input to either a scipy COO matrix or an ndarray.
    if isinstance(X, pd.SparseDataFrame):
        X = X.to_coo()
    elif isinstance(X, pd.DataFrame):
        X = X.to_numpy()
    if self.copy:
        X = X.copy()
    # Transpose, then turn columns into profiles summing to 1.
    if isinstance(X, np.ndarray):
        X = X.T / X.T.sum(axis=1)[:, None]
    else:
        X = X.T / X.T.sum(axis=1)
    # Project onto the left singular vectors, weighted by inverse sqrt row masses.
    return pd.DataFrame(data=X @ sparse.diags(self.row_masses_.to_numpy() ** -0.5) @ self.U_, index=col_names)
The column principal coordinates .
37,950
def plot_coordinates(self, X, ax=None, figsize=(6, 6), x_component=0, y_component=1, show_row_labels=True, show_col_labels=True, **kwargs):
    """Plot the principal coordinates.

    Scatters both row and column coordinates on the same axes, optionally
    annotating each point with its label; returns the matplotlib axis.
    """
    utils.validation.check_is_fitted(self, 's_')
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    ax = plot.stylize_axis(ax)
    row_label, row_names, col_label, col_names = util.make_labels_and_names(X)
    # Plot row principal coordinates.
    row_coords = self.row_coordinates(X)
    ax.scatter(row_coords[x_component], row_coords[y_component], **kwargs, label=row_label)
    # Plot column principal coordinates.
    col_coords = self.column_coordinates(X)
    ax.scatter(col_coords[x_component], col_coords[y_component], **kwargs, label=col_label)
    # Add labels next to the points when requested.
    if show_row_labels:
        x = row_coords[x_component]
        y = row_coords[y_component]
        for i, label in enumerate(row_names):
            ax.annotate(label, (x[i], y[i]))
    if show_col_labels:
        x = col_coords[x_component]
        y = col_coords[y_component]
        for i, label in enumerate(col_names):
            ax.annotate(label, (x[i], y[i]))
    # Legend and axis titles showing explained inertia percentages.
    ax.legend()
    ax.set_title('Principal coordinates')
    ei = self.explained_inertia_
    ax.set_xlabel('Component {} ({:.2f}% inertia)'.format(x_component, 100 * ei[x_component]))
    ax.set_ylabel('Component {} ({:.2f}% inertia)'.format(y_component, 100 * ei[y_component]))
    return ax
Plot the principal coordinates .
37,951
def plot_coordinates(self, X, ax=None, figsize=(6, 6), x_component=0, y_component=1, show_row_points=True, row_points_size=10, show_row_labels=False, show_column_points=True, column_points_size=30, show_column_labels=False, legend_n_cols=1):
    """Plot row and column principal coordinates.

    Rows are drawn as gray dots; columns are grouped and colored by the
    prefix before the first underscore in their label (i.e. the original
    variable name of a one-hot column).  Returns the matplotlib axis.
    """
    utils.validation.check_is_fitted(self, 'total_inertia_')
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    ax = plot.stylize_axis(ax)
    # Rows: a single gray scatter, optionally annotated.
    if show_row_points or show_row_labels:
        row_coords = self.row_coordinates(X)
        if show_row_points:
            ax.scatter(row_coords.iloc[:, x_component], row_coords.iloc[:, y_component], s=row_points_size, label=None, color=plot.GRAY['dark'], alpha=0.6)
        if show_row_labels:
            for _, row in row_coords.iterrows():
                ax.annotate(row.name, (row[x_component], row[y_component]))
    # Columns: one scatter (and legend entry) per variable prefix.
    if show_column_points or show_column_labels:
        col_coords = self.column_coordinates(X)
        x = col_coords[x_component]
        y = col_coords[y_component]
        prefixes = col_coords.index.str.split('_').map(lambda x: x[0])
        for prefix in prefixes.unique():
            mask = prefixes == prefix
            if show_column_points:
                ax.scatter(x[mask], y[mask], s=column_points_size, label=prefix)
            if show_column_labels:
                for i, label in enumerate(col_coords[mask].index):
                    ax.annotate(label, (x[mask][i], y[mask][i]))
        ax.legend(ncol=legend_n_cols)
    # Axis titles with explained inertia percentages.
    ax.set_title('Row and column principal coordinates')
    ei = self.explained_inertia_
    ax.set_xlabel('Component {} ({:.2f}% inertia)'.format(x_component, 100 * ei[x_component]))
    ax.set_ylabel('Component {} ({:.2f}% inertia)'.format(y_component, 100 * ei[y_component]))
    return ax
Plot row and column principal coordinates .
37,952
def compute_svd(X, n_components, n_iter, random_state, engine):
    """Computes a truncated randomized SVD with ``n_components`` components.

    ``engine`` selects the backend: 'fbpca' (if installed) or 'sklearn';
    'auto' falls back to 'sklearn'.
    """
    if engine == 'auto':
        engine = 'sklearn'
    if engine == 'fbpca':
        if not FBPCA_INSTALLED:
            raise ValueError('fbpca is not installed; please install it if you want to use it')
        U, s, V = fbpca.pca(X, k=n_components, n_iter=n_iter)
    elif engine == 'sklearn':
        U, s, V = extmath.randomized_svd(
            X,
            n_components=n_components,
            n_iter=n_iter,
            random_state=random_state
        )
    else:
        raise ValueError("engine has to be one of ('auto', 'fbpca', 'sklearn')")
    # Flip signs deterministically so results are reproducible.
    U, V = extmath.svd_flip(U, V)
    return U, s, V
Computes an SVD with k components .
37,953
def row_standard_coordinates(self, X):
    """Returns the row standard coordinates."""
    utils.validation.check_is_fitted(self, 's_')
    # Standard coordinates: principal coordinates rescaled per component.
    principal = self.row_coordinates(X)
    return principal.div(self.eigenvalues_, axis='columns')
Returns the row standard coordinates .
37,954
def row_cosine_similarities(self, X):
    """Returns the cosine similarities between the rows and their principal components."""
    utils.validation.check_is_fitted(self, 's_')
    squared = np.square(self.row_coordinates(X))
    # Each squared coordinate divided by the row's total squared distance.
    return squared.div(squared.sum(axis='columns'), axis='rows')
Returns the cosine similarities between the rows and their principal components .
37,955
def column_correlations ( self , X ) : utils . validation . check_is_fitted ( self , 's_' ) if isinstance ( X , np . ndarray ) : X = pd . DataFrame ( X ) row_pc = self . row_coordinates ( X ) return pd . DataFrame ( { component : { feature : row_pc [ component ] . corr ( X [ feature ] ) for feature in X . columns } for component in row_pc . columns } )
Returns the column correlations with each principal component .
37,956
def build_ellipse(X, Y):
    """Construct ellipse coordinates from two arrays of numbers.

    Returns (x_mean, y_mean, width, height, angle) describing a 95%%
    confidence ellipse of the (X, Y) point cloud.
    """
    x_mean, y_mean = np.mean(X), np.mean(Y)
    cov_matrix = np.cov(np.vstack((X, Y)))
    U, s, V = linalg.svd(cov_matrix, full_matrices=False)
    # 95% confidence radius for a chi-squared distribution with 2 dof.
    chi_95 = np.sqrt(4.61)
    width = 2 * chi_95 * np.sqrt(cov_matrix[0][0])
    height = 2 * chi_95 * np.sqrt(cov_matrix[1][1])
    # Orientation follows the first right singular vector.
    eigenvector = V.T[0]
    angle = np.arctan(eigenvector[1] / eigenvector[0])
    return x_mean, y_mean, width, height, angle
Construct ellipse coordinates from two arrays of numbers .
37,957
def set_start_time(self, start_time):
    """Set the start time, truncated to minute granularity.

    Accepts a ``datetime.time``, a ``datetime.datetime`` or a falsy value
    (which means midnight).
    """
    start_time = start_time or dt.time()
    if isinstance(start_time, dt.time):
        # Drop seconds/microseconds; the widget works at minute resolution.
        self.start_time = dt.time(start_time.hour, start_time.minute)
    else:
        # A datetime was given; keep only its time-of-day component.
        moment = start_time.time()
        self.start_time = dt.time(moment.hour, moment.minute)
Set the start time. When the start time is set, the drop-down list will start from it and durations will be displayed in brackets.
37,958
def extract_time(match):
    """Extract a ``datetime.time`` from a ``time_re`` match object."""
    return dt.time(int(match.group('hour')), int(match.group('minute')))
extract time from a time_re match .
37,959
def default_logger(name):
    """Return a toplevel logger with a stderr handler attached."""
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger(name)
    logger.addHandler(handler)
    return logger
Return a toplevel logger .
37,960
def serialized(self, prepend_date=True):
    """Return a string fully representing the fact: '<time> <name>'."""
    return "%s %s" % (self.serialized_time(prepend_date), self.serialized_name())
Return a string fully representing the fact .
37,961
def _with_rotation ( self , w , h ) : res_w = abs ( w * math . cos ( self . rotation ) + h * math . sin ( self . rotation ) ) res_h = abs ( h * math . cos ( self . rotation ) + w * math . sin ( self . rotation ) ) return res_w , res_h
calculate the actual dimensions after rotation
37,962
def queue_resize(self):
    """Request the element to re-check its child sprite sizes."""
    self._children_resize_queued = True
    parent = getattr(self, "parent", None)
    if not parent:
        return
    # Bubble the request up, but only through actual sprite containers.
    if isinstance(parent, graphics.Sprite) and hasattr(parent, "queue_resize"):
        parent.queue_resize()
request the element to re - check it s child sprite sizes
37,963
def get_min_size(self):
    """Return the (width, height) required by the widget, or (0, 0) if hidden."""
    if self.visible == False:
        return 0, 0
    width = (self.min_width or 0) + self.horizontal_padding + self.margin_left + self.margin_right
    height = (self.min_height or 0) + self.vertical_padding + self.margin_top + self.margin_bottom
    return width, height
returns size required by the widget
37,964
def insert(self, index=0, *widgets):
    """Insert widgets into the sprites list at the given index (prepend by default)."""
    position = index
    for widget in widgets:
        self._add(widget, position)
        position += 1
    self._sort()
insert widget in the sprites list at the given index . by default will prepend .
37,965
def insert_before(self, target):
    """Insert this widget into the target's parent, just before the target."""
    parent = target.parent
    if not parent:
        return
    parent.insert(parent.sprites.index(target), self)
Insert this widget into the target's parent, before the target.
37,966
def insert_after(self, target):
    """Insert this widget into the target's parent container, just after the target."""
    parent = target.parent
    if not parent:
        return
    parent.insert(parent.sprites.index(target) + 1, self)
Insert this widget into the target's parent container, after the target.
37,967
def width(self):
    """width in pixels (allocated width minus margins, never below the minimum)"""
    alloc_w = self.alloc_w
    if self.parent and isinstance(self.parent, graphics.Scene):
        # Top-level widget: follow the scene width and re-layout on resize.
        alloc_w = self.parent.width

        def res(scene, event):
            if self.parent:
                self.queue_resize()
            else:
                # Detached from the scene -- drop our resize handler.
                scene.disconnect(self._scene_resize_handler)
                self._scene_resize_handler = None
        # Connect lazily, exactly once.
        if not self._scene_resize_handler:
            self._scene_resize_handler = self.parent.connect("on-resize", res)
    min_width = (self.min_width or 0) + self.margin_left + self.margin_right
    # Use the full allocation only when the widget is set to fill.
    w = alloc_w if alloc_w is not None and self.fill else min_width
    w = max(w or 0, self.get_min_size()[0])
    return w - self.margin_left - self.margin_right
width in pixels
37,968
def height(self):
    """height in pixels (allocated height minus margins, never below the minimum)"""
    alloc_h = self.alloc_h
    # When attached straight to the scene, the scene dictates the allocation.
    if self.parent and isinstance(self.parent, graphics.Scene):
        alloc_h = self.parent.height
    min_height = (self.min_height or 0) + self.margin_top + self.margin_bottom
    if alloc_h is not None and self.fill:
        h = alloc_h
    else:
        h = min_height
    h = max(h or 0, self.get_min_size()[1])
    return h - self.margin_top - self.margin_bottom
height in pixels
37,969
def enabled(self):
    """Whether the user may interact with the widget.

    An item counts as enabled only if it and every Widget ancestor are
    enabled.
    """
    if not self._enabled:
        return False
    parent = self.parent
    if parent and isinstance(parent, Widget) and parent.enabled == False:
        return False
    return True
Whether the user is allowed to interact with the widget. An item is enabled only if all its parent elements are.
37,970
def resize_children(self):
    """Default container alignment: pile children up, respecting only
    padding, margins and each sprite's alignment properties."""
    width = self.width - self.horizontal_padding
    height = self.height - self.vertical_padding
    for sprite, props in (get_props(sprite) for sprite in self.sprites if sprite.visible):
        # Give the child the full padded allocation.
        sprite.alloc_w = width
        sprite.alloc_h = height
        w, h = getattr(sprite, "width", 0), getattr(sprite, "height", 0)
        if hasattr(sprite, "get_height_for_width_size"):
            w2, h2 = sprite.get_height_for_width_size()
            w, h = max(w, w2), max(h, h2)
        # Scale and add margins before aligning inside the allocation.
        w = w * sprite.scale_x + props["margin_left"] + props["margin_right"]
        h = h * sprite.scale_y + props["margin_top"] + props["margin_bottom"]
        # x_align / y_align of 0 pins to the start, 1 to the end.
        sprite.x = self.padding_left + props["margin_left"] + (max(sprite.alloc_w * sprite.scale_x, w) - w) * getattr(sprite, "x_align", 0)
        sprite.y = self.padding_top + props["margin_top"] + (max(sprite.alloc_h * sprite.scale_y, h) - h) * getattr(sprite, "y_align", 0)
    # Write through __dict__ to avoid re-triggering a property setter.
    self.__dict__['_children_resize_queued'] = False
default container alignment is to pile stuff just up respecting only padding margin and element s alignment properties
37,971
def check_hamster(self):
    """Refresh hamster every x secs - load today, check last activity etc."""
    try:
        # Private accessor reached via its name-mangled form.
        todays_facts = self.storage._Storage__get_todays_facts()
        self.check_user(todays_facts)
    except Exception as e:
        logger.error("Error while refreshing: %s" % e)
    finally:
        # Always return True so the recurring timeout keeps firing.
        # NOTE(review): a ``return`` inside ``finally`` also swallows any
        # exception not caught above -- presumably intentional; confirm.
        return True
refresh hamster every x secs - load today check last activity etc .
37,972
def check_user(self, todays_facts):
    """Check whether we need to notify the user."""
    interval = self.conf_notify_interval
    # An interval outside (0, 121) minutes means notifications are off.
    if interval <= 0 or interval >= 121:
        return
    now = dt.datetime.now()
    message = None
    last_activity = todays_facts[-1] if todays_facts else None
    if last_activity and not last_activity['end_time']:
        # An activity is running -- remind every ``interval`` minutes.
        delta = now - last_activity['start_time']
        duration = delta.seconds / 60
        # NOTE(review): relies on Python 2 integer division; under Python 3
        # this is a float and the modulo test would almost never hit zero.
        if duration and duration % interval == 0:
            message = _("Working on %s") % last_activity['name']
            self.notify_user(message)
    elif self.conf_notify_on_idle:
        # Nothing is tracked -- poke on wall-clock interval boundaries.
        if (now.minute + now.hour * 60) % interval == 0:
            self.notify_user(_("No activity"))
check if we need to notify user perhaps
37,973
def stop_tracking(self, end_time):
    """Stop tracking the current activity.

    Only acts when today's last fact is still open (has no end time).
    """
    facts = self.__get_todays_facts()
    if facts and not facts[-1]['end_time']:
        # Close the open fact at ``end_time`` and broadcast the change.
        self.__touch_fact(facts[-1], end_time)
        self.facts_changed()
Stops tracking the current activity
37,974
def remove_fact(self, fact_id):
    """Remove a fact from storage by its ID, inside a transaction."""
    self.start_transaction()
    # Only delete (and broadcast) if the fact actually exists.
    if self.__get_fact(fact_id):
        self.__remove_fact(fact_id)
        self.facts_changed()
    self.end_transaction()
Remove a fact from storage by its ID.
37,975
def load_ui_file(name):
    """Load an interface from the glade file; sorts out the path business."""
    builder = gtk.Builder()
    builder.add_from_file(os.path.join(runtime.data_dir, name))
    return builder
loads interface from the glade file ; sorts out the path business
37,976
def _fix_key ( self , key ) : if not key . startswith ( self . GCONF_DIR ) : return self . GCONF_DIR + key else : return key
Appends the GCONF_PREFIX to the key if needed
37,977
def _key_changed ( self , client , cnxn_id , entry , data = None ) : key = self . _fix_key ( entry . key ) [ len ( self . GCONF_DIR ) : ] value = self . _get_value ( entry . value , self . DEFAULTS [ key ] ) self . emit ( 'conf-changed' , key , value )
Callback when a gconf key changes
37,978
def _get_value ( self , value , default ) : vtype = type ( default ) if vtype is bool : return value . get_bool ( ) elif vtype is str : return value . get_string ( ) elif vtype is int : return value . get_int ( ) elif vtype in ( list , tuple ) : l = [ ] for i in value . get_list ( ) : l . append ( i . get_string ( ) ) return l return None
calls appropriate gconf function by the default value
37,979
def get(self, key, default=None):
    """Return the value of the key, or the default value if the key is
    not yet in gconf.

    The default (explicit or from DEFAULTS) determines the expected type.
    """
    if default is None:
        default = self.DEFAULTS.get(key, None)
    vtype = type(default)
    if default is None:
        logger.warn("Unknown key: %s, must specify default value" % key)
        return None
    if vtype not in self.VALID_KEY_TYPES:
        logger.warn("Invalid key type: %s" % vtype)
        return None
    key = self._fix_key(key)
    # Lazily subscribe so future changes emit 'conf-changed'.
    if key not in self._notifications:
        self._client.notify_add(key, self._key_changed, None)
        self._notifications.append(key)
    value = self._client.get(key)
    if value is None:
        # Key missing in gconf: seed it with the default.
        self.set(key, default)
        return default
    value = self._get_value(value, default)
    if value is not None:
        return value
    logger.warn("Unknown gconf key: %s" % key)
    return None
Returns the value of the key or the default value if the key is not yet in gconf
37,980
def set(self, key, value):
    """Set the key's value in gconf.

    Returns True on success, False when the value type is unsupported.
    """
    logger.debug("Settings %s -> %s" % (key, value))
    # The registered default (if any) dictates the expected type.
    if key in self.DEFAULTS:
        vtype = type(self.DEFAULTS[key])
    else:
        vtype = type(value)
    if vtype not in self.VALID_KEY_TYPES:
        logger.warn("Invalid key type: %s" % vtype)
        return False
    key = self._fix_key(key)
    if vtype is bool:
        self._client.set_bool(key, value)
    elif vtype is str:
        self._client.set_string(key, value)
    elif vtype is int:
        self._client.set_int(key, value)
    elif vtype in (list, tuple):
        strvalues = [str(i) for i in value]
        # NOTE(review): ``strvalues`` is built but never written to gconf --
        # a ``self._client.set_list(...)`` call appears to be missing here.
    return True
Sets the key value in gconf and connects adds a signal which is fired if the key changes
37,981
def day_start(self):
    """Start of the hamster day, as a ``datetime.time``."""
    minutes = self.get("day_start_minutes")
    # Stored as minutes-since-midnight; split into hours and minutes.
    return dt.time(*divmod(minutes, 60))
Start of the hamster day .
37,982
def localized_fact(self):
    """Make sure the fact has the correct start_time."""
    fact = Fact(self.activity.get_text())
    if fact.start_time:
        # A time was typed into the activity entry; anchor it to our date.
        fact.date = self.date
    else:
        fact.start_time = dt.datetime.now()
    return fact
Make sure fact has the correct start_time .
37,983
def set_row_positions(self):
    """Create a list of row y-positions for simpler manipulation, and
    request the matching total height."""
    self.row_positions = [self.row_height * i for i in range(len(self.rows))]
    if self.row_positions:
        total_height = self.row_positions[-1] + self.row_height
    else:
        total_height = 0
    self.set_size_request(0, total_height)
creates a list of row positions for simpler manipulation
37,984
def from_dbus_fact(fact):
    """Unpack the D-Bus struct into a proper Fact."""
    start_time = dt.datetime.utcfromtimestamp(fact[1])
    end_time = dt.datetime.utcfromtimestamp(fact[2]) if fact[2] else None
    return Fact(fact[4],
                start_time=start_time,
                end_time=end_time,
                description=fact[3],
                activity_id=fact[5],
                category=fact[6],
                tags=fact[7],
                date=dt.datetime.utcfromtimestamp(fact[8]).date(),
                id=fact[0])
unpack the struct into a proper dict
37,985
def get_tags(self, only_autocomplete=False):
    """Return the list of all tags; by default only those flagged for autocomplete."""
    rows = self.conn.GetTags(only_autocomplete)
    return self._to_dict(('id', 'name', 'autocomplete'), rows)
returns list of all tags . by default only those that have been set for autocomplete
37,986
def stop_tracking(self, end_time=None):
    """Stop tracking the current activity.

    ``end_time`` may be passed if the activity should end at a moment
    other than now; it is sent over the wire as a UTC epoch timestamp.
    """
    moment = end_time or dt.datetime.now()
    return self.conn.StopTracking(timegm(moment.timetuple()))
Stop tracking current activity . end_time can be passed in if the activity should have other end time than the current moment
37,987
def get_category_activities(self, category_id=None):
    """Return activities for a category.

    Without a category, returns activities that have none (sentinel -1).
    """
    rows = self.conn.GetCategoryActivities(category_id or -1)
    return self._to_dict(('id', 'name', 'category_id', 'category'), rows)
Return activities for category . If category is not specified will return activities that have no category
37,988
def get_activity_by_name(self, activity, category_id=None, resurrect=True):
    """Return an activity dict by name, optionally filtered by category.

    A match marked as deleted is resurrected unless ``resurrect`` is False.
    """
    return self.conn.GetActivityByName(activity, category_id or 0, resurrect)
returns activity dict by name and optionally filtering by category . if activity is found but is marked as deleted it will be resurrected unless told otherwise in the resurrect param
37,989
def bus_inspector(self, bus, message):
    """Inspect the bus for screensaver messages of interest."""
    if message.get_interface() != self.screensaver_uri:
        return True
    member = message.get_member()
    if member in ("SessionIdleChanged", "ActiveChanged"):
        logger.debug("%s -> %s" % (member, message.get_args_list()))
        idle_state = message.get_args_list()[0]
        if idle_state:
            self.idle_from = dt.datetime.now()
            # Pick the idle-delay gconf key matching the signal source.
            if member == "SessionIdleChanged":
                delay_key = "/apps/gnome-screensaver/idle_delay"
            else:
                delay_key = "/desktop/gnome/session/idle_delay"
            client = gconf.Client.get_default()
            self.timeout_minutes = client.get_int(delay_key)
        else:
            self.screen_locked = False
            self.idle_from = None
        if member == "ActiveChanged":
            # ActiveChanged often duplicates SessionIdleChanged; wait a second
            # and emit only if SessionIdleChanged did not already fire.
            def dispatch_active_changed(idle_state):
                if not self.idle_was_there:
                    self.emit('idle-changed', idle_state)
                self.idle_was_there = False
            gobject.timeout_add_seconds(1, dispatch_active_changed, idle_state)
        else:
            # Remember that SessionIdleChanged came through, and emit now.
            self.idle_was_there = True
            self.emit('idle-changed', idle_state)
    elif member == "Lock":
        logger.debug("Screen Lock Requested")
        self.screen_locked = True
    return
Inspect the bus for screensaver messages of interest
37,990
def C_(ctx, s):
    """Provide qualified translatable strings via context. Taken from gnome-games."""
    translated = gettext.gettext('%s\x04%s' % (ctx, s))
    # If the lookup failed the separator survives -- fall back to the bare string.
    if '\x04' in translated:
        return s
    return translated
Provide qualified translatable strings via context . Taken from gnome - games .
37,991
def clean(bld):
    """Removes the build files."""
    try:
        proj = Environment.Environment(Options.lockfile)
    except IOError:
        raise Utils.WafError('Nothing to clean (project not configured)')
    bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
    bld.load_envs()
    bld.is_install = 0
    # Recurse from the project root so every script's targets are known.
    bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])
    try:
        bld.clean()
    finally:
        # Persist the build state even if cleaning failed partway.
        bld.save()
removes the build files
37,992
def install(bld):
    """Installs the build files."""
    bld = check_configured(bld)
    # Flip the global command flags so tasks know an install is in progress.
    Options.commands['install'] = True
    Options.commands['uninstall'] = False
    Options.is_install = True
    bld.is_install = INSTALL
    # Build first, then copy the products to their install locations.
    build_impl(bld)
    bld.install()
installs the build files
37,993
def uninstall(bld):
    """Removes the installed files."""
    Options.commands['install'] = False
    Options.commands['uninstall'] = True
    Options.is_install = True
    bld.is_install = UNINSTALL
    try:
        # Monkey-patch runnable_status so no task actually executes; we only
        # need the task graph traversal for the uninstall bookkeeping.
        def runnable_status(self):
            return SKIP_ME
        setattr(Task.Task, 'runnable_status_back', Task.Task.runnable_status)
        setattr(Task.Task, 'runnable_status', runnable_status)
        build_impl(bld)
        bld.install()
    finally:
        # Always restore the original method.
        setattr(Task.Task, 'runnable_status', Task.Task.runnable_status_back)
removes the installed files
37,994
def distclean(ctx=None):
    """Removes the build directory.

    NOTE: uses Python 2 ``except E, e`` syntax; this module targets py2.
    """
    global commands
    lst = os.listdir('.')
    for f in lst:
        if f == Options.lockfile:
            try:
                proj = Environment.Environment(f)
            except:
                # NOTE(review): bare except -- any parse failure is ignored.
                Logs.warn('could not read %r' % f)
                continue
            # Remove the project's build directory, tolerating its absence.
            try:
                shutil.rmtree(proj[BLDDIR])
            except IOError:
                pass
            except OSError, e:
                if e.errno != errno.ENOENT:
                    Logs.warn('project %r cannot be removed' % proj[BLDDIR])
            # Remove the lockfile itself.
            try:
                os.remove(f)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    Logs.warn('file %r cannot be removed' % f)
        # With no pending commands, also wipe the waf cache directories.
        if not commands and f.startswith('.waf'):
            shutil.rmtree(f, ignore_errors=True)
removes the build directory
37,995
def dist(appname='', version=''):
    """Makes a tarball for redistributing the sources; returns the archive name."""
    import tarfile
    if not appname:
        appname = Utils.g_module.APPNAME
    if not version:
        version = Utils.g_module.VERSION
    tmp_folder = appname + '-' + version
    if g_gz in ['gz', 'bz2']:
        arch_name = tmp_folder + '.tar.' + g_gz
    else:
        arch_name = tmp_folder + '.' + 'zip'
    # Remove the stale staging folder and archive from a previous run.
    try:
        shutil.rmtree(tmp_folder)
    except (OSError, IOError):
        pass
    try:
        os.remove(arch_name)
    except (OSError, IOError):
        pass
    # Copy the sources to the staging folder, excluding the build directory.
    blddir = getattr(Utils.g_module, BLDDIR, None)
    if not blddir:
        blddir = getattr(Utils.g_module, 'out', None)
    copytree('.', tmp_folder, blddir)
    # Let the project post-process the staged tree (e.g. version stamping).
    dist_hook = getattr(Utils.g_module, 'dist_hook', None)
    if dist_hook:
        back = os.getcwd()
        os.chdir(tmp_folder)
        try:
            dist_hook()
        finally:
            os.chdir(back)
    if g_gz in ['gz', 'bz2']:
        tar = tarfile.open(arch_name, 'w:' + g_gz)
        tar.add(tmp_folder)
        tar.close()
    else:
        Utils.zip_folder(tmp_folder, arch_name, tmp_folder)
    # Report a sha1 digest of the archive when hashing is available.
    try:
        from hashlib import sha1 as sha
    except ImportError:
        from sha import sha
    try:
        digest = " (sha=%r)" % sha(Utils.readf(arch_name)).hexdigest()
    except:
        digest = ''
    info('New archive created: %s%s' % (arch_name, digest))
    # Clean up the staging folder.
    if os.path.exists(tmp_folder):
        shutil.rmtree(tmp_folder)
    return arch_name
makes a tarball for redistributing the sources
37,996
def show(self, start_date, end_date):
    """Run the save dialog with a readable suggested file name.

    Slashes in the localized dates are replaced with dots so the name is
    valid on Linux filesystems.
    """
    vars = {"title": _("Time track"),
            "start": start_date.strftime("%x").replace("/", "."),
            "end": end_date.strftime("%x").replace("/", ".")}
    # Single-day reports omit the end date from the name.
    if start_date != end_date:
        filename = "%(title)s, %(start)s - %(end)s.html" % vars
    else:
        filename = "%(title)s, %(start)s.html" % vars
    self.dialog.set_current_name(filename)
    response = self.dialog.run()
    if response != gtk.ResponseType.OK:
        # Cancelled: notify listeners and tear the dialog down.
        self.emit("report-chooser-closed")
        self.dialog.destroy()
        self.dialog = None
    else:
        self.on_save_button_clicked()
Set the suggested file name to something readable; replace slashes with dots so the name is valid on Linux.
37,997
def kill_tweens(self, obj=None):
    """Stop tweening an object without completing the motion or firing
    its on_complete callback.

    With no object given, all current tweens are dropped.
    """
    if obj is not None:
        try:
            del self.current_tweens[obj]
        except KeyError:
            # Fixed: was a bare ``except: pass`` which also hid real errors
            # (e.g. TypeError for unhashable objects). Only a missing key
            # is expected here.
            pass
    else:
        self.current_tweens = collections.defaultdict(set)
Stop tweening an object without completing the motion or firing the on_complete
37,998
def remove_tween(self, tween):
    """Remove the given tween without completing the motion or firing on_complete."""
    target_tweens = self.current_tweens.get(tween.target)
    if not target_tweens or tween not in target_tweens:
        return
    target_tweens.remove(tween)
    # Drop the target entry entirely once its last tween is gone.
    if not target_tweens:
        del self.current_tweens[tween.target]
remove given tween without completing the motion or firing the on_complete
37,999
def finish(self):
    """Jump to the last frame of all tweens, then drop them."""
    for target in self.current_tweens:
        for tween in self.current_tweens[target]:
            tween.finish()
    self.current_tweens = {}
jump the the last frame of all tweens