idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
230,100 | def finalize_options ( self ) : assert bool ( self . fa_version ) , 'FA version is mandatory for this command.' if self . zip_path : assert os . path . exists ( self . zip_path ) , ( 'Local zipfile does not exist: %s' % self . zip_path ) | Validate the command options . | 68 | 6 |
230,101 | def __print ( self , msg ) : self . announce ( msg , level = distutils . log . INFO ) | Shortcut for printing with the distutils logger . | 24 | 10 |
230,102 | def __zip_file ( self ) : if self . zip_path : # If using a local file, just open it: self . __print ( 'Opening local zipfile: %s' % self . zip_path ) return open ( self . zip_path , 'rb' ) # Otherwise, download it and make a file object in-memory: url = self . __release_url self . __print ( 'Downloading from URL: %s' % url ) response = urlopen ( url ) return io . BytesIO ( response . read ( ) ) | Get a file object of the FA zip file . | 120 | 10 |
230,103 | def __zipped_files_data ( self ) : files = { } with zipfile . ZipFile ( self . __zip_file ) as thezip : for zipinfo in thezip . infolist ( ) : if zipinfo . filename . endswith ( 'metadata/icons.json' ) : with thezip . open ( zipinfo ) as compressed_file : files [ 'icons.json' ] = compressed_file . read ( ) elif zipinfo . filename . endswith ( '.ttf' ) : # For the record, the paths usually look like this: # webfonts/fa-brands-400.ttf # webfonts/fa-regular-400.ttf # webfonts/fa-solid-900.ttf name = os . path . basename ( zipinfo . filename ) tokens = name . split ( '-' ) style = tokens [ 1 ] if style in self . FA_STYLES : with thezip . open ( zipinfo ) as compressed_file : files [ style ] = compressed_file . read ( ) # Safety checks: assert all ( style in files for style in self . FA_STYLES ) , 'Not all FA styles found! Update code is broken.' assert 'icons.json' in files , 'icons.json not found! Update code is broken.' return files | Get a dict of all files of interest from the FA release zipfile . | 288 | 15 |
230,104 | def from_string ( proto_str ) : _ , proto_file = tempfile . mkstemp ( suffix = '.proto' ) with open ( proto_file , 'w+' ) as proto_f : proto_f . write ( proto_str ) return from_file ( proto_file ) | Produce a Protobuf module from a string description . Return the module if successfully compiled otherwise raise a BadProtobuf exception . | 66 | 27 |
230,105 | def _load_module ( path ) : module_name = os . path . splitext ( os . path . basename ( path ) ) [ 0 ] module = None if sys . version_info . minor < 5 : loader = importlib . machinery . SourceFileLoader ( module_name , path ) module = loader . load_module ( ) else : spec = importlib . util . spec_from_file_location ( module_name , path ) module = importlib . util . module_from_spec ( spec ) spec . loader . exec_module ( module ) return module | Helper to load a Python file at path and return as a module | 124 | 13 |
230,106 | def _compile_proto ( full_path , dest ) : proto_path = os . path . dirname ( full_path ) protoc_args = [ find_protoc ( ) , '--python_out={}' . format ( dest ) , '--proto_path={}' . format ( proto_path ) , full_path ] proc = subprocess . Popen ( protoc_args , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) try : outs , errs = proc . communicate ( timeout = 5 ) except subprocess . TimeoutExpired : proc . kill ( ) outs , errs = proc . communicate ( ) return False if proc . returncode != 0 : msg = 'Failed compiling "{}": \n\nstderr: {}\nstdout: {}' . format ( full_path , errs . decode ( 'utf-8' ) , outs . decode ( 'utf-8' ) ) raise BadProtobuf ( msg ) return True | Helper to compile protobuf files | 226 | 7 |
230,107 | def from_file ( proto_file ) : if not proto_file . endswith ( '.proto' ) : raise BadProtobuf ( ) dest = tempfile . mkdtemp ( ) full_path = os . path . abspath ( proto_file ) _compile_proto ( full_path , dest ) filename = os . path . split ( full_path ) [ - 1 ] name = re . search ( r'^(.*)\.proto$' , filename ) . group ( 1 ) target = os . path . join ( dest , name + '_pb2.py' ) return _load_module ( target ) | Take a filename |protoc_file| compile it via the Protobuf compiler and import the module . Return the module if successfully compiled otherwise raise either a ProtocNotFound or BadProtobuf exception . | 140 | 43 |
230,108 | def types_from_module ( pb_module ) : types = pb_module . DESCRIPTOR . message_types_by_name return [ getattr ( pb_module , name ) for name in types ] | Return protobuf class types from an imported generated module . | 48 | 12 |
230,109 | def _resolve_child ( self , path ) : obj = self for component in path . split ( '.' ) : ptr = obj if not isinstance ( ptr , Permuter ) : raise self . MessageNotFound ( "Bad element path [wrong type]" ) # pylint: disable=protected-access found_gen = ( _ for _ in ptr . _generators if _ . name ( ) == component ) obj = next ( found_gen , None ) if not obj : raise self . MessageNotFound ( "Path '{}' unresolved to member." . format ( path ) ) return ptr , obj | Return a member generator by a dot - delimited path | 131 | 11 |
230,110 | def make_dependent ( self , source , target , action ) : if not self . _generators : return src_permuter , src = self . _resolve_child ( source ) dest = self . _resolve_child ( target ) [ 1 ] # pylint: disable=protected-access container = src_permuter . _generators idx = container . index ( src ) container [ idx ] = DependentValueGenerator ( src . name ( ) , dest , action ) self . _update_independent_generators ( ) | Create a dependency between path source and path target via the callable action . | 117 | 15 |
230,111 | def get ( self ) : # If you attempt to use a generator comprehension below, it will # consume the StopIteration exception and just return an empty tuple, # instead of stopping iteration normally return tuple ( [ ( x . name ( ) , x . get ( ) ) for x in self . _generators ] ) | Retrieve the most recent value generated | 66 | 7 |
230,112 | def _fuzzdb_integers ( limit = 0 ) : path = os . path . join ( BASE_PATH , 'integer-overflow/integer-overflows.txt' ) stream = _open_fuzzdb_file ( path ) for line in _limit_helper ( stream , limit ) : yield int ( line . decode ( 'utf-8' ) , 0 ) | Helper to grab some integers from fuzzdb | 83 | 8 |
230,113 | def _fuzzdb_get_strings ( max_len = 0 ) : ignored = [ 'integer-overflow' ] for subdir in pkg_resources . resource_listdir ( 'protofuzz' , BASE_PATH ) : if subdir in ignored : continue path = '{}/{}' . format ( BASE_PATH , subdir ) listing = pkg_resources . resource_listdir ( 'protofuzz' , path ) for filename in listing : if not filename . endswith ( '.txt' ) : continue path = '{}/{}/{}' . format ( BASE_PATH , subdir , filename ) source = _open_fuzzdb_file ( path ) for line in source : string = line . decode ( 'utf-8' ) . strip ( ) if not string or string . startswith ( '#' ) : continue if max_len != 0 and len ( line ) > max_len : continue yield string | Helper to get all the strings from fuzzdb | 212 | 9 |
230,114 | def get_integers ( bitwidth , unsigned , limit = 0 ) : if unsigned : start , stop = 0 , ( ( 1 << bitwidth ) - 1 ) else : start , stop = ( - ( 1 << bitwidth - 1 ) ) , ( 1 << ( bitwidth - 1 ) - 1 ) for num in _fuzzdb_integers ( limit ) : if num >= start and num <= stop : yield num | Get integers from fuzzdb database | 90 | 6 |
230,115 | def get_floats ( bitwidth , limit = 0 ) : assert bitwidth in ( 32 , 64 , 80 ) values = [ 0.0 , - 1.0 , 1.0 , - 1231231231231.0123 , 123123123123123.123 ] for val in _limit_helper ( values , limit ) : yield val | Return a number of interesting floating point values | 75 | 8 |
230,116 | def _int_generator ( descriptor , bitwidth , unsigned ) : vals = list ( values . get_integers ( bitwidth , unsigned ) ) return gen . IterValueGenerator ( descriptor . name , vals ) | Helper to create a basic integer value generator | 48 | 8 |
230,117 | def _string_generator ( descriptor , max_length = 0 , limit = 0 ) : vals = list ( values . get_strings ( max_length , limit ) ) return gen . IterValueGenerator ( descriptor . name , vals ) | Helper to create a string generator | 53 | 6 |
230,118 | def _float_generator ( descriptor , bitwidth ) : return gen . IterValueGenerator ( descriptor . name , values . get_floats ( bitwidth ) ) | Helper to create floating point values | 36 | 6 |
230,119 | def _enum_generator ( descriptor ) : vals = descriptor . enum_type . values_by_number . keys ( ) return gen . IterValueGenerator ( descriptor . name , vals ) | Helper to create protobuf enums | 43 | 8 |
230,120 | def _prototype_to_generator ( descriptor , cls ) : _fd = D . FieldDescriptor generator = None ints32 = [ _fd . TYPE_INT32 , _fd . TYPE_UINT32 , _fd . TYPE_FIXED32 , _fd . TYPE_SFIXED32 , _fd . TYPE_SINT32 ] ints64 = [ _fd . TYPE_INT64 , _fd . TYPE_UINT64 , _fd . TYPE_FIXED64 , _fd . TYPE_SFIXED64 , _fd . TYPE_SINT64 ] ints_signed = [ _fd . TYPE_INT32 , _fd . TYPE_SFIXED32 , _fd . TYPE_SINT32 , _fd . TYPE_INT64 , _fd . TYPE_SFIXED64 , _fd . TYPE_SINT64 ] if descriptor . type in ints32 + ints64 : bitwidth = [ 32 , 64 ] [ descriptor . type in ints64 ] unsigned = descriptor . type not in ints_signed generator = _int_generator ( descriptor , bitwidth , unsigned ) elif descriptor . type == _fd . TYPE_DOUBLE : generator = _float_generator ( descriptor , 64 ) elif descriptor . type == _fd . TYPE_FLOAT : generator = _float_generator ( descriptor , 32 ) elif descriptor . type == _fd . TYPE_STRING : generator = _string_generator ( descriptor ) elif descriptor . type == _fd . TYPE_BYTES : generator = _bytes_generator ( descriptor ) elif descriptor . type == _fd . TYPE_BOOL : generator = gen . IterValueGenerator ( descriptor . name , [ True , False ] ) elif descriptor . type == _fd . TYPE_ENUM : generator = _enum_generator ( descriptor ) elif descriptor . type == _fd . TYPE_MESSAGE : generator = descriptor_to_generator ( descriptor . message_type , cls ) generator . set_name ( descriptor . name ) else : raise RuntimeError ( "type {} unsupported" . format ( descriptor . type ) ) return generator | Helper to map a descriptor to a protofuzz generator | 468 | 11 |
230,121 | def descriptor_to_generator ( cls_descriptor , cls , limit = 0 ) : generators = [ ] for descriptor in cls_descriptor . fields_by_name . values ( ) : generator = _prototype_to_generator ( descriptor , cls ) if limit != 0 : generator . set_limit ( limit ) generators . append ( generator ) obj = cls ( cls_descriptor . name , * generators ) return obj | Convert a protobuf descriptor to a protofuzz generator for same type | 100 | 16 |
230,122 | def _assign_to_field ( obj , name , val ) : target = getattr ( obj , name ) if isinstance ( target , containers . RepeatedScalarFieldContainer ) : target . append ( val ) elif isinstance ( target , containers . RepeatedCompositeFieldContainer ) : target = target . add ( ) target . CopyFrom ( val ) elif isinstance ( target , ( int , float , bool , str , bytes ) ) : setattr ( obj , name , val ) elif isinstance ( target , message . Message ) : target . CopyFrom ( val ) else : raise RuntimeError ( "Unsupported type: {}" . format ( type ( target ) ) ) | Helper to assign an arbitrary value to a protobuf field | 150 | 12 |
230,123 | def _fields_to_object ( descriptor , fields ) : # pylint: disable=protected-access obj = descriptor . _concrete_class ( ) for name , value in fields : if isinstance ( value , tuple ) : subtype = descriptor . fields_by_name [ name ] . message_type value = _fields_to_object ( subtype , value ) _assign_to_field ( obj , name , value ) return obj | Helper to convert a descriptor and a set of fields to a Protobuf instance | 97 | 16 |
230,124 | def _module_to_generators ( pb_module ) : if not pb_module : return None message_types = pb_module . DESCRIPTOR . message_types_by_name return { k : ProtobufGenerator ( v ) for k , v in message_types . items ( ) } | Convert a protobuf module to a dict of generators . | 69 | 13 |
230,125 | def add_dependency ( self , source , target , action ) : self . _dependencies . append ( ( source , target , action ) ) | Create a dependency between fields source and target via callable action . | 31 | 13 |
230,126 | def print_report ( self ) : report = compare_report_print ( self . sorted , self . scores , self . best_name ) print ( report ) | Print Compare report . | 34 | 4 |
230,127 | def F_calc ( TP , FP , FN , beta ) : try : result = ( ( 1 + ( beta ) ** 2 ) * TP ) / ( ( 1 + ( beta ) ** 2 ) * TP + FP + ( beta ** 2 ) * FN ) return result except ZeroDivisionError : return "None" | Calculate F - score . | 68 | 7 |
230,128 | def G_calc ( item1 , item2 ) : try : result = math . sqrt ( item1 * item2 ) return result except Exception : return "None" | Calculate G - measure & G - mean . | 37 | 11 |
230,129 | def RACC_calc ( TOP , P , POP ) : try : result = ( TOP * P ) / ( ( POP ) ** 2 ) return result except Exception : return "None" | Calculate random accuracy . | 40 | 6 |
230,130 | def CEN_misclassification_calc ( table , TOP , P , i , j , subject_class , modified = False ) : try : result = TOP + P if modified : result -= table [ subject_class ] [ subject_class ] result = table [ i ] [ j ] / result return result except Exception : return "None" | Calculate misclassification probability of classifying . | 73 | 11 |
230,131 | def html_init ( name ) : result = "" result += "<html>\n" result += "<head>\n" result += "<title>" + str ( name ) + "</title>\n" result += "</head>\n" result += "<body>\n" result += '<h1 style="border-bottom:1px solid ' 'black;text-align:center;">PyCM Report</h1>' return result | Return HTML report file first lines . | 98 | 7 |
230,132 | def html_dataset_type ( is_binary , is_imbalanced ) : result = "<h2>Dataset Type : </h2>\n" balance_type = "Balanced" class_type = "Binary Classification" if is_imbalanced : balance_type = "Imbalanced" if not is_binary : class_type = "Multi-Class Classification" result += "<ul>\n\n<li>{0}</li>\n\n<li>{1}</li>\n</ul>\n" . format ( class_type , balance_type ) result += "<p>{0}</p>\n" . format ( RECOMMEND_HTML_MESSAGE ) result += "<p>{0}</p>\n" . format ( RECOMMEND_HTML_MESSAGE2 ) return result | Return HTML report file dataset type . | 197 | 7 |
230,133 | def color_check ( color ) : if isinstance ( color , ( tuple , list ) ) : if all ( map ( lambda x : isinstance ( x , int ) , color ) ) : if all ( map ( lambda x : x < 256 , color ) ) : return list ( color ) if isinstance ( color , str ) : color_lower = color . lower ( ) if color_lower in TABLE_COLOR . keys ( ) : return TABLE_COLOR [ color_lower ] return [ 0 , 0 , 0 ] | Check input color format . | 110 | 5 |
230,134 | def html_table_color ( row , item , color = ( 0 , 0 , 0 ) ) : result = [ 0 , 0 , 0 ] color_list = color_check ( color ) max_color = max ( color_list ) back_color_index = 255 - int ( ( item / ( sum ( list ( row . values ( ) ) ) + 1 ) ) * 255 ) for i in range ( 3 ) : result [ i ] = back_color_index - ( max_color - color_list [ i ] ) if result [ i ] < 0 : result [ i ] = 0 return result | Return background color of each cell of table . | 130 | 9 |
230,135 | def html_table ( classes , table , rgb_color , normalize = False ) : result = "" result += "<h2>Confusion Matrix " if normalize : result += "(Normalized)" result += ": </h2>\n" result += '<table>\n' result += '<tr align="center">' + "\n" result += '<td>Actual</td>\n' result += '<td>Predict\n' table_size = str ( ( len ( classes ) + 1 ) * 7 ) + "em" result += '<table style="border:1px solid black;border-collapse: collapse;height:{0};width:{0};">\n' . format ( table_size ) classes . sort ( ) result += '<tr align="center">\n<td></td>\n' part_2 = "" for i in classes : class_name = str ( i ) if len ( class_name ) > 6 : class_name = class_name [ : 4 ] + "..." result += '<td style="border:1px solid ' 'black;padding:10px;height:7em;width:7em;">' + class_name + '</td>\n' part_2 += '<tr align="center">\n' part_2 += '<td style="border:1px solid ' 'black;padding:10px;height:7em;width:7em;">' + class_name + '</td>\n' for j in classes : item = table [ i ] [ j ] color = "black;" back_color = html_table_color ( table [ i ] , item , rgb_color ) if min ( back_color ) < 128 : color = "white" part_2 += '<td style="background-color: rgb({0},{1},{2});color:{3};padding:10px;height:7em;width:7em;">' . format ( str ( back_color [ 0 ] ) , str ( back_color [ 1 ] ) , str ( back_color [ 2 ] ) , color ) + str ( item ) + '</td>\n' part_2 += "</tr>\n" result += '</tr>\n' part_2 += "</table>\n</td>\n</tr>\n</table>\n" result += part_2 return result | Return HTML report file confusion matrix . | 534 | 7 |
230,136 | def html_overall_stat ( overall_stat , digit = 5 , overall_param = None , recommended_list = ( ) ) : result = "" result += "<h2>Overall Statistics : </h2>\n" result += '<table style="border:1px solid black;border-collapse: collapse;">\n' overall_stat_keys = sorted ( overall_stat . keys ( ) ) if isinstance ( overall_param , list ) : if set ( overall_param ) <= set ( overall_stat_keys ) : overall_stat_keys = sorted ( overall_param ) if len ( overall_stat_keys ) < 1 : return "" for i in overall_stat_keys : background_color = DEFAULT_BACKGROUND_COLOR if i in recommended_list : background_color = RECOMMEND_BACKGROUND_COLOR result += '<tr align="center">\n' result += '<td style="border:1px solid black;padding:4px;text-align:left;background-color:{};"><a href="' . format ( background_color ) + DOCUMENT_ADR + PARAMS_LINK [ i ] + '" style="text-decoration:None;">' + str ( i ) + '</a></td>\n' if i in BENCHMARK_LIST : background_color = BENCHMARK_COLOR [ overall_stat [ i ] ] result += '<td style="border:1px solid black;padding:4px;background-color:{};">' . format ( background_color ) else : result += '<td style="border:1px solid black;padding:4px;">' result += rounder ( overall_stat [ i ] , digit ) + '</td>\n' result += "</tr>\n" result += "</table>\n" return result | Return HTML report file overall stat . | 409 | 7 |
230,137 | def html_class_stat ( classes , class_stat , digit = 5 , class_param = None , recommended_list = ( ) ) : result = "" result += "<h2>Class Statistics : </h2>\n" result += '<table style="border:1px solid black;border-collapse: collapse;">\n' result += '<tr align="center">\n<td>Class</td>\n' for i in classes : result += '<td style="border:1px solid black;padding:4px;border-collapse: collapse;">' + str ( i ) + '</td>\n' result += '<td>Description</td>\n' result += '</tr>\n' class_stat_keys = sorted ( class_stat . keys ( ) ) if isinstance ( class_param , list ) : if set ( class_param ) <= set ( class_stat_keys ) : class_stat_keys = class_param classes . sort ( ) if len ( classes ) < 1 or len ( class_stat_keys ) < 1 : return "" for i in class_stat_keys : background_color = DEFAULT_BACKGROUND_COLOR if i in recommended_list : background_color = RECOMMEND_BACKGROUND_COLOR result += '<tr align="center" style="border:1px solid black;border-collapse: collapse;">\n' result += '<td style="border:1px solid black;padding:4px;border-collapse: collapse;background-color:{};"><a href="' . format ( background_color ) + DOCUMENT_ADR + PARAMS_LINK [ i ] + '" style="text-decoration:None;">' + str ( i ) + '</a></td>\n' for j in classes : if i in BENCHMARK_LIST : background_color = BENCHMARK_COLOR [ class_stat [ i ] [ j ] ] result += '<td style="border:1px solid black;padding:4px;border-collapse: collapse;background-color:{};">' . format ( background_color ) else : result += '<td style="border:1px solid black;padding:4px;border-collapse: collapse;">' result += rounder ( class_stat [ i ] [ j ] , digit ) + '</td>\n' params_text = PARAMS_DESCRIPTION [ i ] if i not in CAPITALIZE_FILTER : params_text = params_text . capitalize ( ) result += '<td style="border:1px solid black;padding:4px;border-collapse: collapse;text-align:left;">' + params_text + '</td>\n' result += "</tr>\n" result += "</table>\n" return result | Return HTML report file class_stat . | 631 | 8 |
230,138 | def table_print ( classes , table ) : classes_len = len ( classes ) table_list = [ ] for key in classes : table_list . extend ( list ( table [ key ] . values ( ) ) ) table_list . extend ( classes ) table_max_length = max ( map ( len , map ( str , table_list ) ) ) shift = "%-" + str ( 7 + table_max_length ) + "s" result = shift % "Predict" + shift * classes_len % tuple ( map ( str , classes ) ) + "\n" result = result + "Actual\n" classes . sort ( ) for key in classes : row = [ table [ key ] [ i ] for i in classes ] result += shift % str ( key ) + shift * classes_len % tuple ( map ( str , row ) ) + "\n\n" if classes_len >= CLASS_NUMBER_THRESHOLD : result += "\n" + "Warning : " + CLASS_NUMBER_WARNING + "\n" return result | Return printable confusion matrix . | 227 | 6 |
230,139 | def csv_matrix_print ( classes , table ) : result = "" classes . sort ( ) for i in classes : for j in classes : result += str ( table [ i ] [ j ] ) + "," result = result [ : - 1 ] + "\n" return result [ : - 1 ] | Return matrix as csv data . | 66 | 7 |
230,140 | def csv_print ( classes , class_stat , digit = 5 , class_param = None ) : result = "Class" classes . sort ( ) for item in classes : result += ',"' + str ( item ) + '"' result += "\n" class_stat_keys = sorted ( class_stat . keys ( ) ) if isinstance ( class_param , list ) : if set ( class_param ) <= set ( class_stat_keys ) : class_stat_keys = class_param if len ( class_stat_keys ) < 1 or len ( classes ) < 1 : return "" for key in class_stat_keys : row = [ rounder ( class_stat [ key ] [ i ] , digit ) for i in classes ] result += key + "," + "," . join ( row ) result += "\n" return result | Return csv file data . | 183 | 6 |
230,141 | def stat_print ( classes , class_stat , overall_stat , digit = 5 , overall_param = None , class_param = None ) : shift = max ( map ( len , PARAMS_DESCRIPTION . values ( ) ) ) + 5 classes_len = len ( classes ) overall_stat_keys = sorted ( overall_stat . keys ( ) ) result = "" if isinstance ( overall_param , list ) : if set ( overall_param ) <= set ( overall_stat_keys ) : overall_stat_keys = sorted ( overall_param ) if len ( overall_stat_keys ) > 0 : result = "Overall Statistics : " + "\n\n" for key in overall_stat_keys : result += key + " " * ( shift - len ( key ) + 7 ) + rounder ( overall_stat [ key ] , digit ) + "\n" class_stat_keys = sorted ( class_stat . keys ( ) ) if isinstance ( class_param , list ) : if set ( class_param ) <= set ( class_stat_keys ) : class_stat_keys = sorted ( class_param ) classes . sort ( ) if len ( class_stat_keys ) > 0 and len ( classes ) > 0 : class_shift = max ( max ( map ( lambda x : len ( str ( x ) ) , classes ) ) + 5 , digit + 6 , 14 ) class_shift_format = "%-" + str ( class_shift ) + "s" result += "\nClass Statistics :\n\n" result += "Classes" + shift * " " + class_shift_format * classes_len % tuple ( map ( str , classes ) ) + "\n" rounder_map = partial ( rounder , digit = digit ) for key in class_stat_keys : row = [ class_stat [ key ] [ i ] for i in classes ] params_text = PARAMS_DESCRIPTION [ key ] if key not in CAPITALIZE_FILTER : params_text = params_text . capitalize ( ) result += key + "(" + params_text + ")" + " " * ( shift - len ( key ) - len ( PARAMS_DESCRIPTION [ key ] ) + 5 ) + class_shift_format * classes_len % tuple ( map ( rounder_map , row ) ) + "\n" if classes_len >= CLASS_NUMBER_THRESHOLD : result += "\n" + "Warning : " + CLASS_NUMBER_WARNING + "\n" return result | Return printable statistics table . | 545 | 6 |
230,142 | def compare_report_print ( sorted_list , scores , best_name ) : title_items = [ "Rank" , "Name" , "Class-Score" , "Overall-Score" ] class_scores_len = map ( lambda x : len ( str ( x [ "class" ] ) ) , list ( scores . values ( ) ) ) shifts = [ "%-" + str ( len ( sorted_list ) + 4 ) + "s" , "%-" + str ( max ( map ( lambda x : len ( str ( x ) ) , sorted_list ) ) + 4 ) + "s" , "%-" + str ( max ( class_scores_len ) + 11 ) + "s" ] result = "" result += "Best : " + str ( best_name ) + "\n\n" result += ( "" . join ( shifts ) ) % tuple ( title_items [ : - 1 ] ) + title_items [ - 1 ] + "\n" prev_rank = 0 for index , cm in enumerate ( sorted_list ) : rank = index if scores [ sorted_list [ rank ] ] == scores [ sorted_list [ prev_rank ] ] : rank = prev_rank result += ( "" . join ( shifts ) ) % ( str ( rank + 1 ) , str ( cm ) , str ( scores [ cm ] [ "class" ] ) ) + str ( scores [ cm ] [ "overall" ] ) + "\n" prev_rank = rank if best_name is None : result += "\nWarning: " + COMPARE_RESULT_WARNING return result | Return compare report . | 343 | 4 |
230,143 | def online_help ( param = None ) : try : PARAMS_LINK_KEYS = sorted ( PARAMS_LINK . keys ( ) ) if param in PARAMS_LINK_KEYS : webbrowser . open_new_tab ( DOCUMENT_ADR + PARAMS_LINK [ param ] ) elif param in range ( 1 , len ( PARAMS_LINK_KEYS ) + 1 ) : webbrowser . open_new_tab ( DOCUMENT_ADR + PARAMS_LINK [ PARAMS_LINK_KEYS [ param - 1 ] ] ) else : print ( "Please choose one parameter : \n" ) print ( 'Example : online_help("J") or online_help(2)\n' ) for index , item in enumerate ( PARAMS_LINK_KEYS ) : print ( str ( index + 1 ) + "-" + item ) except Exception : print ( "Error in online help" ) | Open online document in web browser . | 209 | 7 |
230,144 | def rounder ( input_number , digit = 5 ) : if isinstance ( input_number , tuple ) : tuple_list = list ( input_number ) tuple_str = [ ] for i in tuple_list : if isfloat ( i ) : tuple_str . append ( str ( numpy . around ( i , digit ) ) ) else : tuple_str . append ( str ( i ) ) return "(" + "," . join ( tuple_str ) + ")" if isfloat ( input_number ) : return str ( numpy . around ( input_number , digit ) ) return str ( input_number ) | Round input number and convert to str . | 133 | 8 |
230,145 | def class_filter ( classes , class_name ) : result_classes = classes if isinstance ( class_name , list ) : if set ( class_name ) <= set ( classes ) : result_classes = class_name return result_classes | Filter classes by comparing two lists . | 52 | 7 |
230,146 | def vector_check ( vector ) : for i in vector : if isinstance ( i , int ) is False : return False if i < 0 : return False return True | Check input vector items type . | 35 | 6 |
230,147 | def matrix_check ( table ) : try : if len ( table . keys ( ) ) == 0 : return False for i in table . keys ( ) : if table . keys ( ) != table [ i ] . keys ( ) or vector_check ( list ( table [ i ] . values ( ) ) ) is False : return False return True except Exception : return False | Check input matrix format . | 77 | 5 |
230,148 | def vector_filter ( actual_vector , predict_vector ) : temp = [ ] temp . extend ( actual_vector ) temp . extend ( predict_vector ) types = set ( map ( type , temp ) ) if len ( types ) > 1 : return [ list ( map ( str , actual_vector ) ) , list ( map ( str , predict_vector ) ) ] return [ actual_vector , predict_vector ] | Convert different type of items in vectors to str . | 89 | 11 |
230,149 | def class_check ( vector ) : for i in vector : if not isinstance ( i , type ( vector [ 0 ] ) ) : return False return True | Check different items in matrix classes . | 33 | 7 |
230,150 | def one_vs_all_func ( classes , table , TP , TN , FP , FN , class_name ) : try : report_classes = [ str ( class_name ) , "~" ] report_table = { str ( class_name ) : { str ( class_name ) : TP [ class_name ] , "~" : FN [ class_name ] } , "~" : { str ( class_name ) : FP [ class_name ] , "~" : TN [ class_name ] } } return [ report_classes , report_table ] except Exception : return [ classes , table ] | One - Vs - All mode handler . | 133 | 8 |
230,151 | def normalized_table_calc ( classes , table ) : map_dict = { k : 0 for k in classes } new_table = { k : map_dict . copy ( ) for k in classes } for key in classes : div = sum ( table [ key ] . values ( ) ) if div == 0 : div = 1 for item in classes : new_table [ key ] [ item ] = numpy . around ( table [ key ] [ item ] / div , 5 ) return new_table | Return normalized confusion matrix . | 107 | 5 |
230,152 | def transpose_func ( classes , table ) : transposed_table = table for i , item1 in enumerate ( classes ) : for j , item2 in enumerate ( classes ) : if i > j : temp = transposed_table [ item1 ] [ item2 ] transposed_table [ item1 ] [ item2 ] = transposed_table [ item2 ] [ item1 ] transposed_table [ item2 ] [ item1 ] = temp return transposed_table | Transpose table . | 104 | 4 |
230,153 | def matrix_params_from_table ( table , transpose = False ) : classes = sorted ( table . keys ( ) ) map_dict = { k : 0 for k in classes } TP_dict = map_dict . copy ( ) TN_dict = map_dict . copy ( ) FP_dict = map_dict . copy ( ) FN_dict = map_dict . copy ( ) for i in classes : TP_dict [ i ] = table [ i ] [ i ] sum_row = sum ( list ( table [ i ] . values ( ) ) ) for j in classes : if j != i : FN_dict [ i ] += table [ i ] [ j ] FP_dict [ j ] += table [ i ] [ j ] TN_dict [ j ] += sum_row - table [ i ] [ j ] if transpose : temp = FN_dict FN_dict = FP_dict FP_dict = temp table = transpose_func ( classes , table ) return [ classes , table , TP_dict , TN_dict , FP_dict , FN_dict ] | Calculate TP TN FP FN from confusion matrix . | 231 | 11 |
230,154 | def matrix_params_calc ( actual_vector , predict_vector , sample_weight ) : if isinstance ( actual_vector , numpy . ndarray ) : actual_vector = actual_vector . tolist ( ) if isinstance ( predict_vector , numpy . ndarray ) : predict_vector = predict_vector . tolist ( ) classes = set ( actual_vector ) . union ( set ( predict_vector ) ) classes = sorted ( classes ) map_dict = { k : 0 for k in classes } table = { k : map_dict . copy ( ) for k in classes } weight_vector = [ 1 ] * len ( actual_vector ) if isinstance ( sample_weight , ( list , numpy . ndarray ) ) : if len ( sample_weight ) == len ( actual_vector ) : weight_vector = sample_weight for index , item in enumerate ( actual_vector ) : table [ item ] [ predict_vector [ index ] ] += 1 * weight_vector [ index ] [ classes , table , TP_dict , TN_dict , FP_dict , FN_dict ] = matrix_params_from_table ( table ) return [ classes , table , TP_dict , TN_dict , FP_dict , FN_dict ] | Calculate TP TN FP FN for each class . | 275 | 11 |
230,155 | def imbalance_check ( P ) : p_list = list ( P . values ( ) ) max_value = max ( p_list ) min_value = min ( p_list ) if min_value > 0 : balance_ratio = max_value / min_value else : balance_ratio = max_value is_imbalanced = False if balance_ratio > BALANCE_RATIO_THRESHOLD : is_imbalanced = True return is_imbalanced | Check if the dataset is imbalanced . | 104 | 8 |
230,156 | def binary_check ( classes ) : num_classes = len ( classes ) is_binary = False if num_classes == 2 : is_binary = True return is_binary | Check if the problem is a binary classification . | 37 | 9 |
230,157 | def statistic_recommend ( classes , P ) : if imbalance_check ( P ) : return IMBALANCED_RECOMMEND if binary_check ( classes ) : return BINARY_RECOMMEND return MULTICLASS_RECOMMEND | Return recommend parameters which are more suitable due to the input dataset characteristics . | 58 | 14 |
230,158 | def print_result ( failed = False ) : message = "Version tag tests " if not failed : print ( "\n" + message + "passed!" ) else : print ( "\n" + message + "failed!" ) print ( "Passed : " + str ( TEST_NUMBER - Failed ) + "/" + str ( TEST_NUMBER ) ) | Print final result . | 77 | 4 |
230,159 | def AUNP_calc ( classes , P , POP , AUC_dict ) : try : result = 0 for i in classes : result += ( P [ i ] / POP [ i ] ) * AUC_dict [ i ] return result except Exception : return "None" | Calculate AUNP . | 60 | 7 |
230,160 | def overall_MCC_calc ( classes , table , TOP , P ) : try : cov_x_y = 0 cov_x_x = 0 cov_y_y = 0 matrix_sum = sum ( list ( TOP . values ( ) ) ) for i in classes : cov_x_x += TOP [ i ] * ( matrix_sum - TOP [ i ] ) cov_y_y += P [ i ] * ( matrix_sum - P [ i ] ) cov_x_y += ( table [ i ] [ i ] * matrix_sum - P [ i ] * TOP [ i ] ) return cov_x_y / ( math . sqrt ( cov_y_y * cov_x_x ) ) except Exception : return "None" | Calculate Overall_MCC . | 165 | 8 |
230,161 | def convex_combination ( classes , TP , TOP , P , class_name , modified = False ) : try : class_number = len ( classes ) alpha = 1 if class_number == 2 : alpha = 0 matrix_sum = sum ( list ( TOP . values ( ) ) ) TP_sum = sum ( list ( TP . values ( ) ) ) up = TOP [ class_name ] + P [ class_name ] down = 2 * matrix_sum if modified : down -= ( alpha * TP_sum ) up -= TP [ class_name ] return up / down except Exception : return "None" | Calculate Overall_CEN coefficient . | 130 | 9 |
def ncr(n, r):
    """
    Calculate the binomial coefficient C(n, r) ("n choose r").

    :param n: total number of items
    :param r: number of chosen items
    :return: C(n, r) as an int
    """
    # Symmetry keeps the loops as short as possible.
    r = min(r, n - r)
    numerator = 1
    for k in range(n, n - r, -1):
        numerator *= k
    denominator = 1
    for k in range(1, r + 1):
        denominator *= k
    return numerator // denominator
def p_value_calc(TP, POP, NIR):
    """
    Calculate the binomial-test p-value (accuracy vs. no-information rate).

    :param TP: true positives per class (dict)
    :param POP: total population (int)
    :param NIR: no-information rate (float)
    :return: P(X >= observed correct) under Binomial(POP, NIR),
        or "None" on failure
    """
    try:
        n = POP
        successes = sum(TP.values())
        # 1 - P(X < successes) gives the upper-tail probability.
        lower_tail = 0
        for j in range(successes):
            lower_tail += ncr(n, j) * (NIR ** j) * ((1 - NIR) ** (n - j))
        return 1 - lower_tail
    except Exception:
        return "None"
def hamming_calc(TP, POP):
    """
    Calculate the hamming loss.

    :param TP: true positives per class (dict)
    :param POP: total population (int)
    :return: hamming loss, or "None" on failure
    """
    try:
        correct = sum(TP.values())
        return (1 / POP) * (POP - correct)
    except Exception:
        return "None"
def zero_one_loss_calc(TP, POP):
    """
    Calculate the zero-one loss (count of misclassified items).

    :param TP: true positives per class (dict)
    :param POP: total population (int)
    :return: number of misclassified items, or "None" on failure
    """
    try:
        return POP - sum(TP.values())
    except Exception:
        return "None"
def entropy_calc(item, POP):
    """
    Calculate the entropy of the reference/response likelihood distribution.

    :param item: per-class counts (dict)
    :param POP: population per class (dict)
    :return: entropy in bits, or "None" on failure
    """
    try:
        accumulator = 0
        for cls in item.keys():
            likelihood = item[cls] / POP[cls]
            # Zero-likelihood terms contribute nothing (0 * log 0 := 0).
            if likelihood != 0:
                accumulator += likelihood * math.log(likelihood, 2)
        return -accumulator
    except Exception:
        return "None"
def cross_entropy_calc(TOP, P, POP):
    """
    Calculate the cross entropy of reference vs. response distributions.

    :param TOP: test outcome positives per class (dict)
    :param P: condition positives per class (dict)
    :param POP: population per class (dict)
    :return: cross entropy in bits, or "None" on failure
    """
    try:
        accumulator = 0
        for cls in TOP.keys():
            reference = P[cls] / POP[cls]
            response = TOP[cls] / POP[cls]
            # Skip terms where either likelihood is zero.
            if reference != 0 and response != 0:
                accumulator += reference * math.log(response, 2)
        return -accumulator
    except Exception:
        return "None"
def joint_entropy_calc(classes, table, POP):
    """
    Calculate the joint entropy of the confusion matrix.

    :param classes: list of class names
    :param table: confusion matrix (dict of dicts)
    :param POP: population per class (dict)
    :return: joint entropy in bits, or "None" on failure
    """
    try:
        accumulator = 0
        for actual in classes:
            for predicted in classes:
                joint_prob = table[actual][predicted] / POP[actual]
                # Empty cells contribute nothing (0 * log 0 := 0).
                if joint_prob != 0:
                    accumulator += joint_prob * math.log(joint_prob, 2)
        return -accumulator
    except Exception:
        return "None"
def conditional_entropy_calc(classes, table, P, POP):
    """
    Calculate the conditional entropy of the response given the reference.

    :param classes: list of class names
    :param table: confusion matrix (dict of dicts)
    :param P: condition positives per class (dict)
    :param POP: population per class (dict)
    :return: conditional entropy in bits, or "None" on failure
    """
    try:
        accumulator = 0
        for actual in classes:
            row_entropy = 0
            for predicted in classes:
                conditional_prob = 0
                # An empty reference row has a zero conditional probability.
                if P[actual] != 0:
                    conditional_prob = table[actual][predicted] / P[actual]
                if conditional_prob != 0:
                    row_entropy += conditional_prob * math.log(conditional_prob, 2)
            # Weight each row's entropy by the class prevalence.
            accumulator += row_entropy * (P[actual] / POP[actual])
        return -accumulator
    except Exception:
        return "None"
def lambda_B_calc(classes, table, TOP, POP):
    """
    Calculate Goodman and Kruskal's lambda B.

    :param classes: list of class names
    :param table: confusion matrix (dict of dicts)
    :param TOP: test outcome positives per class (dict)
    :param POP: total population (int)
    :return: lambda B, or "None" on failure
    """
    try:
        row_maxima = sum(max(table[c].values()) for c in classes)
        max_response = max(TOP.values())
        return (row_maxima - max_response) / (POP - max_response)
    except Exception:
        return "None"
def lambda_A_calc(classes, table, P, POP):
    """
    Calculate Goodman and Kruskal's lambda A.

    :param classes: list of class names
    :param table: confusion matrix (dict of dicts)
    :param P: condition positives per class (dict)
    :param POP: total population (int)
    :return: lambda A, or "None" on failure
    """
    try:
        max_reference = max(P.values())
        # Column maxima: the best response count within each predicted class.
        column_maxima = 0
        for cls in classes:
            column_maxima += max(row[cls] for row in table.values())
        return (column_maxima - max_reference) / (POP - max_reference)
    except Exception:
        return "None"
def chi_square_calc(classes, table, TOP, P, POP):
    """
    Calculate the chi-squared statistic of the confusion matrix.

    :param classes: list of class names
    :param table: confusion matrix (dict of dicts)
    :param TOP: test outcome positives per class (dict)
    :param P: condition positives per class (dict)
    :param POP: population per class (dict)
    :return: chi-squared value, or "None" on failure
    """
    try:
        total = 0
        for actual in classes:
            for predicted in classes:
                expected = (TOP[predicted] * P[actual]) / POP[actual]
                observed = table[actual][predicted]
                total += ((observed - expected) ** 2) / expected
        return total
    except Exception:
        return "None"
def kappa_se_calc(PA, PE, POP):
    """
    Calculate the standard error of Cohen's kappa.

    :param PA: observed agreement (float)
    :param PE: expected (chance) agreement (float)
    :param POP: total population (int)
    :return: kappa standard error, or "None" on failure (e.g. PE == 1)
    """
    try:
        variance = (PA * (1 - PA)) / (POP * ((1 - PE) ** 2))
        return math.sqrt(variance)
    except Exception:
        return "None"
def micro_calc(TP, item):
    """
    Calculate a micro-averaged metric (PPV_Micro and TPR_Micro).

    :param TP: true positives per class (dict)
    :param item: FP per class (for PPV) or FN per class (for TPR) (dict)
    :return: micro-averaged value, or "None" on failure
    """
    try:
        tp_total = sum(TP.values())
        return tp_total / (tp_total + sum(item.values()))
    except Exception:
        return "None"
def macro_calc(item):
    """
    Calculate a macro-averaged metric (PPV_Macro and TPR_Macro).

    :param item: per-class metric values (dict)
    :return: arithmetic mean of the values, or "None" on failure
        (e.g. an empty dict)
    """
    try:
        # len(item) is the number of classes directly; materializing
        # item.values() just to take its length was redundant.
        return sum(item.values()) / len(item)
    except Exception:
        return "None"
def PC_PI_calc(P, TOP, POP):
    """
    Calculate the percent chance agreement for Scott's Pi.

    :param P: condition positives per class (dict)
    :param TOP: test outcome positives per class (dict)
    :param POP: population per class (dict)
    :return: percent chance agreement, or "None" on failure
    """
    try:
        return sum(((P[c] + TOP[c]) / (2 * POP[c])) ** 2 for c in P.keys())
    except Exception:
        return "None"
def PC_AC1_calc(P, TOP, POP):
    """
    Calculate the percent chance agreement for Gwet's AC1.

    :param P: condition positives per class (dict)
    :param TOP: test outcome positives per class (dict)
    :param POP: population per class (dict)
    :return: percent chance agreement, or "None" on failure
        (e.g. a single class, which makes the denominator zero)
    """
    try:
        class_list = list(P.keys())
        accumulator = 0
        for cls in class_list:
            pi = (P[cls] + TOP[cls]) / (2 * POP[cls])
            accumulator += pi * (1 - pi)
        return accumulator / (len(class_list) - 1)
    except Exception:
        return "None"
def overall_jaccard_index_calc(jaccard_list):
    """
    Calculate the overall Jaccard index.

    :param jaccard_list: per-class Jaccard indices (list of floats)
    :return: (sum, mean) tuple, or "None" on failure (e.g. empty list)
    """
    try:
        total = sum(jaccard_list)
        return (total, total / len(jaccard_list))
    except Exception:
        return "None"
def overall_accuracy_calc(TP, POP):
    """
    Calculate the overall accuracy.

    :param TP: true positives per class (dict)
    :param POP: total population (int)
    :return: overall accuracy, or "None" on failure (e.g. POP == 0)
    """
    try:
        return sum(TP.values()) / POP
    except Exception:
        return "None"
def AUC_analysis(AUC):
    """
    Interpret an AUC value with the standard benchmark table.

    :param AUC: area under the ROC curve, or the string "None"
    :return: verdict string ("Poor" .. "Excellent"), or "None"
    """
    try:
        if AUC == "None":
            return "None"
        # Ordered upper bounds; the first one AUC falls below wins.
        for bound, verdict in ((0.6, "Poor"), (0.7, "Fair"),
                               (0.8, "Good"), (0.9, "Very Good")):
            if AUC < bound:
                return verdict
        return "Excellent"
    except Exception:  # pragma: no cover
        return "None"
def kappa_analysis_cicchetti(kappa):
    """
    Interpret a kappa value with the Cicchetti benchmark.

    :param kappa: kappa value
    :return: verdict string ("Poor" .. "Excellent"), or "None" if out of range
    """
    try:
        # Guard chain: each test implies the previous bounds failed.
        if kappa < 0.4:
            return "Poor"
        if kappa < 0.59:
            return "Fair"
        if kappa < 0.74:
            return "Good"
        if kappa <= 1:
            return "Excellent"
        return "None"
    except Exception:  # pragma: no cover
        return "None"
def kappa_analysis_koch(kappa):
    """
    Interpret a kappa value with the Landis-Koch benchmark.

    :param kappa: kappa value
    :return: verdict string ("Poor" .. "Almost Perfect"),
        or "None" if out of range (> 1)
    """
    try:
        # Guard chain: each test implies the previous bounds failed.
        if kappa < 0:
            return "Poor"
        if kappa < 0.2:
            return "Slight"
        if kappa < 0.4:
            return "Fair"
        if kappa < 0.6:
            return "Moderate"
        if kappa < 0.8:
            return "Substantial"
        if kappa <= 1:
            return "Almost Perfect"
        return "None"
    except Exception:  # pragma: no cover
        return "None"
def kappa_analysis_altman(kappa):
    """
    Interpret a kappa value with the Altman benchmark.

    :param kappa: kappa value
    :return: verdict string ("Poor" .. "Very Good"),
        or "None" if out of range (> 1)
    """
    try:
        # Guard chain: each test implies the previous bounds failed.
        if kappa < 0.2:
            return "Poor"
        if kappa < 0.4:
            return "Fair"
        if kappa < 0.6:
            return "Moderate"
        if kappa < 0.8:
            return "Good"
        if kappa <= 1:
            return "Very Good"
        return "None"
    except Exception:  # pragma: no cover
        return "None"
def get_requires():
    """
    Read requirements.txt and return the requirement strings.

    :return: list of non-empty requirement tokens
    """
    # "with" guarantees the handle is closed even if read() raises;
    # the original leaked the handle. str.split() with no argument never
    # yields empty strings, so the old filter(...) pass was redundant.
    with open("requirements.txt", "r") as req_file:
        return req_file.read().split()
def read_description():
    """
    Build the package long description from README.md and CHANGELOG.md.

    :return: concatenated description, or a hard-coded fallback text when
        either file cannot be read
    """
    try:
        with open("README.md") as readme:
            text = "\n" + readme.read()
        with open("CHANGELOG.md") as changelog:
            text += "\n" + changelog.read()
        return text
    except Exception:
        return ''' PyCM is a multi-class confusion matrix library written in Python that supports both input data vectors and direct matrix, and a proper tool for post-classification model evaluation that supports most classes and overall statistics parameters. PyCM is the swiss-army knife of confusion matrices, targeted mainly at data scientists that need a broad array of metrics for predictive models and an accurate evaluation of large variety of classifiers.'''
def print_matrix(self, one_vs_all=False, class_name=None):
    """
    Print the confusion matrix.

    :param one_vs_all: if True, collapse to a one-vs-all matrix for class_name
    :param class_name: target class for one-vs-all mode
    :return: None
    """
    classes, table = self.classes, self.table
    if one_vs_all:
        classes, table = one_vs_all_func(classes, table, self.TP, self.TN,
                                         self.FP, self.FN, class_name)
    print(table_print(classes, table))
def stat(self, overall_param=None, class_param=None, class_name=None):
    """
    Print the statistical measures table.

    :param overall_param: overall-statistic names to display (None = all)
    :param class_param: class-statistic names to display (None = all)
    :param class_name: classes to display (None = all)
    :return: None
    """
    filtered_classes = class_filter(self.classes, class_name)
    print(stat_print(filtered_classes, self.class_stat, self.overall_stat,
                     self.digit, overall_param, class_param))
def save_html(self, name, address=True, overall_param=None, class_param=None,
              class_name=None, color=(0, 0, 0), normalize=False):
    """
    Save the ConfusionMatrix as an HTML report.

    :param name: output file name (without the .html extension)
    :param address: if True, return the absolute path of the saved file
    :param overall_param: overall-statistic names to include (None = all)
    :param class_param: class-statistic names to include (None = all)
    :param class_name: classes to include in the class-stat table (None = all)
    :param color: RGB background color tuple for the matrix table
    :param normalize: if True, render the normalized matrix
    :return: {"Status": bool, "Message": path/None or error string}
    """
    try:
        message = None
        table = self.normalized_table if normalize else self.table
        # "with" closes the handle even when a section writer raises;
        # the original leaked the handle on any exception.
        with open(name + ".html", "w") as html_file:
            html_file.write(html_init(name))
            html_file.write(html_dataset_type(self.binary, self.imbalance))
            html_file.write(html_table(self.classes, table, color, normalize))
            html_file.write(html_overall_stat(
                self.overall_stat, self.digit, overall_param,
                self.recommended_list))
            class_stat_classes = class_filter(self.classes, class_name)
            html_file.write(html_class_stat(
                class_stat_classes, self.class_stat, self.digit,
                class_param, self.recommended_list))
            html_file.write(html_end(VERSION))
        if address:
            message = os.path.join(os.getcwd(), name + ".html")
        return {"Status": True, "Message": message}
    except Exception as e:
        return {"Status": False, "Message": str(e)}
def save_csv(self, name, address=True, class_param=None, class_name=None,
             matrix_save=True, normalize=False):
    """
    Save the ConfusionMatrix class statistics (and optionally the matrix) as CSV.

    :param name: output file name (without the .csv extension)
    :param address: if True, return the absolute path of the saved file
    :param class_param: class-statistic names to include (None = all)
    :param class_name: classes to include (None = all)
    :param matrix_save: if True, also write the matrix to <name>_matrix.csv
    :param normalize: if True, write the normalized matrix
    :return: {"Status": bool, "Message": path/None or error string}
    """
    try:
        message = None
        classes = class_filter(self.classes, class_name)
        # "with" closes each handle even when a formatter raises; the
        # original leaked both handles on any exception.
        with open(name + ".csv", "w") as csv_file:
            csv_file.write(csv_print(classes, self.class_stat, self.digit,
                                     class_param))
        if matrix_save:
            matrix = self.normalized_table if normalize else self.table
            with open(name + "_matrix" + ".csv", "w") as csv_matrix_file:
                csv_matrix_file.write(csv_matrix_print(self.classes, matrix))
        if address:
            message = os.path.join(os.getcwd(), name + ".csv")
        return {"Status": True, "Message": message}
    except Exception as e:
        return {"Status": False, "Message": str(e)}
def save_obj(self, name, address=True):
    """
    Save the ConfusionMatrix as a JSON-encoded .obj file.

    :param name: output file name (without the .obj extension)
    :param address: if True, return the absolute path of the saved file
    :return: {"Status": bool, "Message": path/None or error string}
    """
    try:
        message = None
        actual_vector_temp = self.actual_vector
        predict_vector_temp = self.predict_vector
        # Copy matrix rows so serialization cannot alias the live table.
        matrix_temp = {k: self.table[k].copy() for k in self.classes}
        matrix_items = [(i, list(matrix_temp[i].items()))
                        for i in self.classes]
        # numpy arrays are not JSON-serializable; convert to plain lists.
        if isinstance(actual_vector_temp, numpy.ndarray):
            actual_vector_temp = actual_vector_temp.tolist()
        if isinstance(predict_vector_temp, numpy.ndarray):
            predict_vector_temp = predict_vector_temp.tolist()
        # "with" closes the handle even if json.dump raises; the original
        # leaked the handle on any exception.
        with open(name + ".obj", "w") as obj_file:
            json.dump({"Actual-Vector": actual_vector_temp,
                       "Predict-Vector": predict_vector_temp,
                       "Matrix": matrix_items,
                       "Digit": self.digit,
                       "Sample-Weight": self.weights,
                       "Transpose": self.transpose}, obj_file)
        if address:
            message = os.path.join(os.getcwd(), name + ".obj")
        return {"Status": True, "Message": message}
    except Exception as e:
        return {"Status": False, "Message": str(e)}
def F_beta(self, beta):
    """
    Calculate the F-beta score for every class.

    :param beta: beta coefficient weighting recall against precision
    :return: dict mapping class -> F-beta score, or {} on failure
    """
    try:
        return {c: F_calc(TP=self.TP[c], FP=self.FP[c], FN=self.FN[c],
                          beta=beta)
                for c in self.TP.keys()}
    except Exception:
        return {}
def IBA_alpha(self, alpha):
    """
    Calculate the index of balanced accuracy (IBA) for every class.

    :param alpha: alpha weighting coefficient
    :return: dict mapping class -> IBA score, or {} on failure
    """
    try:
        return {c: IBA_calc(self.TPR[c], self.TNR[c], alpha=alpha)
                for c in self.classes}
    except Exception:
        return {}
def relabel(self, mapping):
    """
    Rename ConfusionMatrix classes in place.

    :param mapping: dict of old class name -> new class name; its keys must
        exactly match (and be ordered like) self.classes
    :return: None
    :raises pycmMatrixError: if mapping is not a dict, or its keys do not
        equal the current class list
    """
    if not isinstance(mapping, dict):
        raise pycmMatrixError(MAPPING_FORMAT_ERROR)
    if self.classes != list(mapping.keys()):
        raise pycmMatrixError(MAPPING_CLASS_NAME_ERROR)
    # Rebuild each matrix row under the new column names, then re-key the
    # row itself. NOTE(review): deleting old rows while inserting mapped
    # ones assumes new names never collide with not-yet-renamed old names
    # — confirm upstream validation guarantees this.
    for row in self.classes:
        temp_dict = {}
        temp_dict_normalized = {}
        for col in self.classes:
            temp_dict[mapping[col]] = self.table[row][col]
            temp_dict_normalized[mapping[col]] = self.normalized_table[row][col]
        del self.table[row]
        self.table[mapping[row]] = temp_dict
        del self.normalized_table[row]
        self.normalized_table[mapping[row]] = temp_dict_normalized
    self.matrix = self.table
    self.normalized_matrix = self.normalized_table
    # Re-key every per-class statistic under the new class names.
    for param in self.class_stat.keys():
        temp_dict = {}
        for classname in self.classes:
            temp_dict[mapping[classname]] = self.class_stat[param][classname]
        self.class_stat[param] = temp_dict
    self.classes = list(mapping.values())
    # Refresh the aliases that point into class_stat, then recompute the
    # derived class statistics.
    self.TP = self.class_stat["TP"]
    self.TN = self.class_stat["TN"]
    self.FP = self.class_stat["FP"]
    self.FN = self.class_stat["FN"]
    __class_stat_init__(self)
def add_tools(self):
    """
    Add tools from self.toolbardata to this toolbar.

    Each entry's first element selects the tool type: "T" simple tool,
    "S" separator, "C" control, "O" check tool / option button.

    :raises ValueError: on an unknown tool-type code
    """
    for data in self.toolbardata:
        # tool type is in data[0]
        if data[0] == "T":
            # Simple tool
            _, msg_type, label, tool_tip = data
            icon = icons[label]
            self.label2id[label] = tool_id = wx.NewId()
            self.AddSimpleTool(tool_id, label, icon,
                               short_help_string=tool_tip)
            # Remember which message each tool id dispatches (used by OnTool).
            self.ids_msgs[tool_id] = msg_type
            self.parent.Bind(wx.EVT_TOOL, self.OnTool, id=tool_id)
        elif data[0] == "S":
            # Separator
            self.AddSeparator()
        elif data[0] == "C":
            # Control
            _, control, tool_tip = data
            self.AddControl(control, label=tool_tip)
        elif data[0] == "O":
            # Check tool / option button
            _, label, tool_tip = data
            icon = icons[label]
            self.label2id[label] = tool_id = wx.NewId()
            self.AddCheckTool(tool_id, label, icon, icon, tool_tip)
        else:
            raise ValueError("Unknown tooltype " + str(data[0]))
    self.SetCustomOverflowItems([], [])
    self.Realize()
    # Adjust Toolbar size
    self.SetSize(self.DoGetBestSize())
def OnTool(self, event):
    """Toolbar event handler: dispatch the message bound to the tool id."""
    post_command_event(self, self.ids_msgs[event.GetId()])
def OnToggleTool(self, event):
    """Tool event handler: toggle the spell-checking option.

    Persists the new state in the application config, mirrors it on the
    View menu item, and forces a grid redraw so highlighting updates.
    """
    # Config stores the flag as a string ("True"/"False").
    config["check_spelling"] = str(event.IsChecked())
    toggle_id = self.parent.menubar.FindMenuItem(_("View"),
                                                 _("Check spelling"))
    if toggle_id != -1:
        # Check may fail if translation is incomplete
        toggle_item = self.parent.menubar.FindItemById(toggle_id)
        toggle_item.Check(event.IsChecked())
    # Invalidate cached cell renders so spelling marks (dis)appear.
    self.parent.grid.grid_renderer.cell_cache.clear()
    self.parent.grid.ForceRefresh()
    event.Skip()
def _get_button_label(self):
    """Ask the user for a button label; return "" if the dialog is cancelled."""
    dlg = wx.TextEntryDialog(self, _('Button label:'))
    label = dlg.GetValue() if dlg.ShowModal() == wx.ID_OK else ""
    dlg.Destroy()
    return label
def OnButtonCell(self, event):
    """Event handler for the cell-button toggle tool."""
    if self.button_cell_button_id == event.GetId():
        # A label is requested only when switching the button on;
        # False signals removal of the cell button.
        text = self._get_button_label() if event.IsChecked() else False
        post_command_event(self, self.ButtonCellMsg, text=text)
    event.Skip()
def OnVideoCell(self, event):
    """Event handler for the video-cell toggle tool."""
    if self.video_cell_button_id == event.GetId():
        # False signals removal of the video cell; otherwise ask the user
        # for a media file to embed.
        videofile = False
        if event.IsChecked():
            wildcard = _("Media files") + " (*.*)|*.*"
            videofile, __ = self.get_filepath_findex_from_user(
                wildcard, "Choose video or audio file", wx.OPEN)
        post_command_event(self, self.VideoCellMsg, videofile=videofile)
    event.Skip()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.