idx int64 0 252k | question stringlengths 48 5.28k | target stringlengths 5 1.23k |
|---|---|---|
242,500 | def gen_experiment_ref ( experiment_tag , n = 10 ) : experiment_id = gen_experiment_id ( n = n ) return '{0}_{1}' . format ( experiment_tag , experiment_id ) | Generate a random string for naming . |
242,501 | def add ( self , datapoint ) : if not self . is_full : self . set_datapoint ( self . cur_index , datapoint ) self . cur_index += 1 | Adds the datapoint to the tensor if room is available . |
242,502 | def add_batch ( self , datapoints ) : num_datapoints_to_add = datapoints . shape [ 0 ] end_index = self . cur_index + num_datapoints_to_add if end_index <= self . num_datapoints : self . data [ self . cur_index : end_index , ... ] = datapoints self . cur_index = end_index | Adds a batch of datapoints to the tensor if room is available . |
242,503 | def datapoint ( self , ind ) : if self . height is None : return self . data [ ind ] return self . data [ ind , ... ] . copy ( ) | Returns the datapoint at the given index . |
242,504 | def set_datapoint ( self , ind , datapoint ) : if ind >= self . num_datapoints : raise ValueError ( 'Index %d out of bounds! Tensor has %d datapoints' % ( ind , self . num_datapoints ) ) self . data [ ind , ... ] = np . array ( datapoint ) . astype ( self . dtype ) | Sets the value of the datapoint at the given index . |
242,505 | def data_slice ( self , slice_ind ) : if self . height is None : return self . data [ slice_ind ] return self . data [ slice_ind , ... ] | Returns a slice of datapoints |
242,506 | def save ( self , filename , compressed = True ) : if not self . has_data : return False _ , file_ext = os . path . splitext ( filename ) if compressed : if file_ext != COMPRESSED_TENSOR_EXT : raise ValueError ( 'Can only save compressed tensor with %s extension' % ( COMPRESSED_TENSOR_EXT ) ) np . savez_compressed ( fi... | Save a tensor to disk . |
242,507 | def load ( filename , compressed = True , prealloc = None ) : _ , file_ext = os . path . splitext ( filename ) if compressed : if file_ext != COMPRESSED_TENSOR_EXT : raise ValueError ( 'Can only load compressed tensor with %s extension' % ( COMPRESSED_TENSOR_EXT ) ) data = np . load ( filename ) [ 'arr_0' ] else : if f... | Loads a tensor from disk . |
242,508 | def datapoint_indices_for_tensor ( self , tensor_index ) : if tensor_index >= self . _num_tensors : raise ValueError ( 'Tensor index %d is greater than the number of tensors (%d)' % ( tensor_index , self . _num_tensors ) ) return self . _file_num_to_indices [ tensor_index ] | Returns the indices for all datapoints in the given tensor . |
242,509 | def tensor_index ( self , datapoint_index ) : if datapoint_index >= self . _num_datapoints : raise ValueError ( 'Datapoint index %d is greater than the number of datapoints (%d)' % ( datapoint_index , self . _num_datapoints ) ) return self . _index_to_file_num [ datapoint_index ] | Returns the index of the tensor containing the referenced datapoint . |
242,510 | def generate_tensor_filename ( self , field_name , file_num , compressed = True ) : file_ext = TENSOR_EXT if compressed : file_ext = COMPRESSED_TENSOR_EXT filename = os . path . join ( self . filename , 'tensors' , '%s_%05d%s' % ( field_name , file_num , file_ext ) ) return filename | Generate a filename for a tensor . |
242,511 | def _allocate_tensors ( self ) : self . _tensors = { } for field_name , field_spec in self . _config [ 'fields' ] . items ( ) : field_dtype = np . dtype ( field_spec [ 'dtype' ] ) field_shape = [ self . _datapoints_per_file ] if 'height' in field_spec . keys ( ) : field_shape . append ( field_spec [ 'height' ] ) if 'wi... | Allocates the tensors in the dataset . |
242,512 | def add ( self , datapoint ) : if self . _access_mode == READ_ONLY_ACCESS : raise ValueError ( 'Cannot add datapoints with read-only access' ) tensor_ind = self . _num_datapoints // self . _datapoints_per_file for field_name in datapoint . keys ( ) : if field_name not in self . field_names : raise ValueError ( 'Field %... | Adds a datapoint to the file . |
242,513 | def datapoint ( self , ind , field_names = None ) : if self . _has_unsaved_data : self . flush ( ) if ind >= self . _num_datapoints : raise ValueError ( 'Index %d larger than the number of datapoints in the dataset (%d)' % ( ind , self . _num_datapoints ) ) if field_names is None : field_names = self . field_names data... | Loads a tensor datapoint for a given global index . |
242,514 | def tensor ( self , field_name , tensor_ind ) : if tensor_ind == self . _tensor_cache_file_num [ field_name ] : return self . _tensors [ field_name ] filename = self . generate_tensor_filename ( field_name , tensor_ind , compressed = True ) Tensor . load ( filename , compressed = True , prealloc = self . _tensors [ fie... | Returns the tensor for a given field and tensor index . |
242,515 | def delete_last ( self , num_to_delete = 1 ) : if self . _access_mode == READ_ONLY_ACCESS : raise ValueError ( 'Cannot delete datapoints with read-only access' ) if num_to_delete > self . _num_datapoints : raise ValueError ( 'Cannot remove more than the number of datapoints in the dataset' ) last_datapoint_ind = self .... | Deletes the last N datapoints from the dataset . |
242,516 | def write ( self ) : for field_name in self . field_names : filename = self . generate_tensor_filename ( field_name , self . _num_tensors - 1 ) self . _tensors [ field_name ] . save ( filename , compressed = True ) json . dump ( self . _metadata , open ( self . metadata_filename , 'w' ) , indent = JSON_INDENT , sort_ke... | Writes all tensors to the next file number . |
242,517 | def open ( dataset_dir , access_mode = READ_ONLY_ACCESS ) : if access_mode == WRITE_ACCESS : raise ValueError ( 'Cannot open a dataset with write-only access' ) try : config_filename = os . path . join ( dataset_dir , 'config.json' ) config = json . load ( open ( config_filename , 'r' ) ) except : config_filename = os ... | Opens a tensor dataset . |
242,518 | def split ( self , split_name ) : if not self . has_split ( split_name ) : raise ValueError ( 'Split %s does not exist!' % ( split_name ) ) metadata_filename = self . split_metadata_filename ( split_name ) train_filename = self . train_indices_filename ( split_name ) val_filename = self . val_indices_filename ( split_n... | Return the training and validation indices for the requested split . |
242,519 | def delete_split ( self , split_name ) : if self . has_split ( split_name ) : shutil . rmtree ( os . path . join ( self . split_dir , split_name ) ) | Delete a split of the dataset . |
242,520 | def _load_config ( self , filename ) : fh = open ( filename , 'r' ) self . file_contents = fh . read ( ) config_dir = os . path . split ( filename ) [ 0 ] include_re = re . compile ( '^(.*)!include\s+(.*)$' , re . MULTILINE ) def recursive_load ( matchobj , path ) : first_spacing = matchobj . group ( 1 ) other_spacing ... | Loads a yaml configuration file from the given filename . |
242,521 | def __convert_key ( expression ) : if type ( expression ) is str and len ( expression ) > 2 and expression [ 1 ] == '!' : expression = eval ( expression [ 2 : - 1 ] ) return expression | Converts keys in YAML that reference other keys . |
242,522 | def make_summary_table ( train_result , val_result , plot = True , save_dir = None , prepend = "" , save = False ) : table_key_list = [ 'error_rate' , 'recall_at_99_precision' , 'average_precision' , 'precision' , 'recall' ] num_fields = len ( table_key_list ) import matplotlib . pyplot as plt ax = plt . subplot ( 111 ... | Makes a matplotlib table object with relevant data . Thanks to Lucas Manuelli for the contribution . |
242,523 | def app_score ( self ) : precisions , pct_pred_pos , taus = self . precision_pct_pred_pos_curve ( interval = False ) app = 0 total = 0 for k in range ( len ( precisions ) - 1 ) : cur_prec = precisions [ k ] cur_pp = pct_pred_pos [ k ] cur_tau = taus [ k ] next_prec = precisions [ k + 1 ] next_pp = pct_pred_pos [ k + 1 ... | Computes the area under the app curve . |
242,524 | def accuracy_curve ( self , delta_tau = 0.01 ) : orig_thresh = self . threshold sorted_labels , sorted_probs = self . sorted_values scores = [ ] taus = [ ] tau = 0 for k in range ( len ( sorted_labels ) ) : self . threshold = tau scores . append ( self . accuracy ) taus . append ( tau ) tau = sorted_probs [ k ] tau = 1... | Computes the relationship between probability threshold and classification accuracy . |
242,525 | def f1_curve ( self , delta_tau = 0.01 ) : orig_thresh = self . threshold sorted_labels , sorted_probs = self . sorted_values scores = [ ] taus = [ ] tau = 0 for k in range ( len ( sorted_labels ) ) : self . threshold = tau scores . append ( self . f1_score ) taus . append ( tau ) tau = sorted_probs [ k ] tau = 1.0 sel... | Computes the relationship between probability threshold and classification F1 score . |
242,526 | def phi_coef_curve ( self , delta_tau = 0.01 ) : orig_thresh = self . threshold sorted_labels , sorted_probs = self . sorted_values scores = [ ] taus = [ ] tau = 0 for k in range ( len ( sorted_labels ) ) : self . threshold = tau scores . append ( self . phi_coef ) taus . append ( tau ) tau = sorted_probs [ k ] tau = 1... | Computes the relationship between probability threshold and classification phi coefficient . |
242,527 | def precision_pct_pred_pos_curve ( self , interval = False , delta_tau = 0.001 ) : orig_thresh = self . threshold sorted_labels , sorted_probs = self . sorted_values precisions = [ ] pct_pred_pos = [ ] taus = [ ] tau = 0 if not interval : for k in range ( len ( sorted_labels ) ) : self . threshold = tau precisions . ap... | Computes the relationship between precision and the percent of positively classified datapoints . |
242,528 | def gen_experiment_id ( n = 10 ) : chrs = 'abcdefghijklmnopqrstuvwxyz' inds = np . random . randint ( 0 , len ( chrs ) , size = n ) return '' . join ( [ chrs [ i ] for i in inds ] ) | Generate a random string with n characters . |
242,529 | def histogram ( values , num_bins , bounds , normalized = True , plot = False , color = 'b' ) : hist , bins = np . histogram ( values , bins = num_bins , range = bounds ) width = ( bins [ 1 ] - bins [ 0 ] ) if normalized : if np . sum ( hist ) > 0 : hist = hist . astype ( np . float32 ) / np . sum ( hist ) if plot : im... | Generate a histogram plot . |
242,530 | def skew ( xi ) : S = np . array ( [ [ 0 , - xi [ 2 ] , xi [ 1 ] ] , [ xi [ 2 ] , 0 , - xi [ 0 ] ] , [ - xi [ 1 ] , xi [ 0 ] , 0 ] ] ) return S | Return the skew - symmetric matrix that can be used to calculate cross - products with vector xi . |
242,531 | def deskew ( S ) : x = np . zeros ( 3 ) x [ 0 ] = S [ 2 , 1 ] x [ 1 ] = S [ 0 , 2 ] x [ 2 ] = S [ 1 , 0 ] return x | Converts a skew - symmetric cross - product matrix to its corresponding vector . Only works for 3x3 matrices . |
242,532 | def reverse_dictionary ( d ) : rev_d = { } [ rev_d . update ( { v : k } ) for k , v in d . items ( ) ] return rev_d | Reverses the key value pairs for a given dictionary . |
242,533 | def filenames ( directory , tag = '' , sorted = False , recursive = False ) : if recursive : f = [ os . path . join ( directory , f ) for directory , _ , filename in os . walk ( directory ) for f in filename if f . find ( tag ) > - 1 ] else : f = [ os . path . join ( directory , f ) for f in os . listdir ( directory ) ... | Reads in all filenames from a directory that contain a specified substring . |
242,534 | def sph2cart ( r , az , elev ) : x = r * np . cos ( az ) * np . sin ( elev ) y = r * np . sin ( az ) * np . sin ( elev ) z = r * np . cos ( elev ) return x , y , z | Convert spherical to cartesian coordinates . |
242,535 | def cart2sph ( x , y , z ) : r = np . sqrt ( x ** 2 + y ** 2 + z ** 2 ) if x > 0 and y > 0 : az = np . arctan ( y / x ) elif x > 0 and y < 0 : az = 2 * np . pi - np . arctan ( - y / x ) elif x < 0 and y > 0 : az = np . pi - np . arctan ( - y / x ) elif x < 0 and y < 0 : az = np . pi + np . arctan ( y / x ) elif x == 0 ... | Convert cartesian to spherical coordinates . |
242,536 | def keyboard_input ( message , yesno = False ) : message += ' ' if yesno : message += '[y/n] ' human_input = input ( message ) if yesno : while human_input . lower ( ) != 'n' and human_input . lower ( ) != 'y' : logging . info ( 'Did not understand input. Please answer \'y\' or \'n\'' ) human_input = input ( message ) ... | Get keyboard input from a human optionally reasking for valid yes or no input . |
242,537 | def interpolate ( dq0 , dq1 , t ) : if not 0 <= t <= 1 : raise ValueError ( "Interpolation step must be between 0 and 1! Got {0}" . format ( t ) ) dqt = dq0 * ( 1 - t ) + dq1 * t return dqt . normalized | Return the interpolation of two DualQuaternions . |
242,538 | def _save ( self ) : if os . path . isfile ( self . _full_filename ) : shutil . copyfile ( self . _full_filename , self . _full_backup_filename ) with open ( self . _full_filename , 'w' ) as file : writer = csv . DictWriter ( file , fieldnames = self . _headers ) writer . writeheader ( ) for row in self . _table : writ... | Save the model to a . csv file |
242,539 | def insert ( self , data ) : row = { key : self . _default_entry for key in self . _headers } row [ '_uid' ] = self . _get_new_uid ( ) for key , val in data . items ( ) : if key in ( '_uid' , '_default' ) : logging . warn ( "Cannot manually set columns _uid or _default of a row! Given data: {0}" . format ( data ) ) con... | Insert a row into the . csv file . |
242,540 | def update_by_uid ( self , uid , data ) : row = self . _table [ uid + 1 ] for key , val in data . items ( ) : if key == '_uid' or key == '_default' : continue if key not in self . _headers : logging . warn ( "Unknown column name: {0}" . format ( key ) ) continue if not isinstance ( val , CSVModel . _KNOWN_TYPES_MAP [ s... | Update a row with the given data . |
242,541 | def get_col ( self , col_name , filter = lambda _ : True ) : if col_name not in self . _headers : raise ValueError ( "{} not found! Model has headers: {}" . format ( col_name , self . _headers ) ) col = [ ] for i in range ( self . num_rows ) : row = self . _table [ i + 1 ] val = row [ col_name ] if filter ( val ) : col... | Return all values in the column corresponding to col_name that satisfy filter which is a function that takes in a value of the column's type and returns True or False |
242,542 | def get_by_cols ( self , cols , direction = 1 ) : if direction == 1 : iterator = range ( self . num_rows ) elif direction == - 1 : iterator = range ( self . num_rows - 1 , - 1 , - 1 ) else : raise ValueError ( "Direction can only be 1 (first) or -1 (last). Got: {0}" . format ( direction ) ) for i in iterator : row = se... | Return the first or last row that satisfies the given col value constraints or None if no row contains the given value . |
242,543 | def get_rows_by_cols ( self , matching_dict ) : result = [ ] for i in range ( self . num_rows ) : row = self . _table [ i + 1 ] matching = True for key , val in matching_dict . items ( ) : if row [ key ] != val : matching = False break if matching : result . append ( row ) return result | Return all rows where the cols match the elements given in the matching_dict |
242,544 | def next ( self ) : if self . _cur_row >= len ( self . _table ) : raise StopIteration data = self . _table [ self . _cur_row ] . copy ( ) self . _cur_row += 1 return data | Returns the next row in the CSV for iteration |
242,545 | def load ( full_filename ) : with open ( full_filename , 'r' ) as file : reader = csv . DictReader ( file ) headers = reader . fieldnames if '_uid' not in headers or '_default' not in headers : raise Exception ( "Malformed CSVModel file!" ) all_rows = [ row for row in reader ] types = all_rows [ 0 ] table = [ types ] d... | Load a . csv file into a CSVModel . |
242,546 | def get_or_create ( full_filename , headers_types = None , default_entry = '' ) : if isinstance ( headers_types , dict ) : headers_types_list = [ ( k , v ) for k , v in headers_types . items ( ) ] headers_types = headers_types_list if os . path . isfile ( full_filename ) : return CSVModel . load ( full_filename ) else ... | Load a . csv file into a CSVModel if the file exists or create a new CSVModel with the given filename if the file does not exist . |
242,547 | def projection_matrix ( point , normal , direction = None , perspective = None , pseudo = False ) : M = numpy . identity ( 4 ) point = numpy . array ( point [ : 3 ] , dtype = numpy . float64 , copy = False ) normal = unit_vector ( normal [ : 3 ] ) if perspective is not None : perspective = numpy . array ( perspective [... | Return matrix to project onto plane defined by point and normal . |
242,548 | def projection_from_matrix ( matrix , pseudo = False ) : M = numpy . array ( matrix , dtype = numpy . float64 , copy = False ) M33 = M [ : 3 , : 3 ] l , V = numpy . linalg . eig ( M ) i = numpy . where ( abs ( numpy . real ( l ) - 1.0 ) < 1e-8 ) [ 0 ] if not pseudo and len ( i ) : point = numpy . real ( V [ : , i [ - 1... | Return projection plane and perspective point from projection matrix . |
242,549 | def unit_vector ( data , axis = None , out = None ) : if out is None : data = numpy . array ( data , dtype = numpy . float64 , copy = True ) if data . ndim == 1 : data /= math . sqrt ( numpy . dot ( data , data ) ) return data else : if out is not data : out [ : ] = numpy . array ( data , copy = False ) data = out leng... | Return ndarray normalized by length i . e . euclidean norm along axis . |
242,550 | def json_numpy_obj_hook ( dct ) : if isinstance ( dct , dict ) and '__ndarray__' in dct : data = np . asarray ( dct [ '__ndarray__' ] , dtype = dct [ 'dtype' ] ) return data . reshape ( dct [ 'shape' ] ) return dct | Decodes a previously encoded numpy ndarray with proper shape and dtype . |
242,551 | def dump ( * args , ** kwargs ) : kwargs . update ( dict ( cls = NumpyEncoder , sort_keys = True , indent = 4 , separators = ( ',' , ': ' ) ) ) return _json . dump ( * args , ** kwargs ) | Dump a numpy . ndarray to file stream . |
242,552 | def load ( * args , ** kwargs ) : kwargs . update ( dict ( object_hook = json_numpy_obj_hook ) ) return _json . load ( * args , ** kwargs ) | Load a numpy . ndarray from a file stream . |
242,553 | def default ( self , obj ) : if isinstance ( obj , np . ndarray ) : return dict ( __ndarray__ = obj . tolist ( ) , dtype = str ( obj . dtype ) , shape = obj . shape ) return _json . JSONEncoder ( self , obj ) | Converts an ndarray into a dictionary for efficient serialization . |
242,554 | def _preallocate_samples ( self ) : self . prealloc_samples_ = [ ] for i in range ( self . num_prealloc_samples_ ) : self . prealloc_samples_ . append ( self . sample ( ) ) | Preallocate samples for faster adaptive sampling . |
242,555 | def rvs ( self , size = 1 , iteration = 1 ) : if self . num_prealloc_samples_ > 0 : samples = [ ] for i in range ( size ) : samples . append ( self . prealloc_samples_ [ ( iteration + i ) % self . num_prealloc_samples_ ] ) if size == 1 : return samples [ 0 ] return samples return self . sample ( size = size ) | Sample the random variable using the preallocated samples if possible . |
242,556 | def sample ( self , size = 1 ) : samples = [ ] for i in range ( size ) : xi = self . _r_xi_rv . rvs ( size = 1 ) S_xi = skew ( xi ) R_sample = scipy . linalg . expm ( S_xi ) t_sample = self . _t_rv . rvs ( size = 1 ) samples . append ( RigidTransform ( rotation = R_sample , translation = t_sample , from_frame = self . ... | Sample rigid transform random variables . |
242,557 | def _flush ( self ) : if self . _recording : raise Exception ( "Cannot flush data queue while recording!" ) if self . _saving_cache : logging . warn ( "Flush when using cache means unsaved data will be lost and not returned!" ) self . _cmds_q . put ( ( "reset_data_segment" , ) ) else : data = self . _extract_q ( 0 ) re... | Returns a list of all current data |
242,558 | def _stop ( self ) : self . _pause ( ) self . _cmds_q . put ( ( "stop" , ) ) try : self . _recorder . terminate ( ) except Exception : pass self . _recording = False | Stops recording . Returns all recorded data and their timestamps . Destroys recorder process . |
242,559 | def _listdir ( self , root ) : "List directory 'root' appending the path separator to subdirs." res = [ ] for name in os . listdir ( root ) : path = os . path . join ( root , name ) if os . path . isdir ( path ) : name += os . sep res . append ( name ) return res | List directory root appending the path separator to subdirs . |
242,560 | def complete_extra ( self , args ) : "Completions for the 'extra' command." if len ( args ) == 0 : return self . _listdir ( './' ) return self . _complete_path ( args [ - 1 ] ) | Completions for the extra command . |
242,561 | def complete ( self , text , state ) : "Generic readline completion entry point." results = [ w for w in self . words if w . startswith ( text ) ] + [ None ] if results != [ None ] : return results [ state ] buffer = readline . get_line_buffer ( ) line = readline . get_line_buffer ( ) . split ( ) results = [ w for w in... | Generic readline completion entry point . |
242,562 | def stop ( self ) : self . _cmds_q . put ( ( "stop" , ) ) for recorder in self . _data_stream_recorders : recorder . _stop ( ) try : self . _syncer . terminate ( ) except Exception : pass | Stops syncer operations . Destroys syncer process . |
242,563 | def configure_root ( ) : root_logger = logging . getLogger ( ) for hdlr in root_logger . handlers : if isinstance ( hdlr , logging . StreamHandler ) : root_logger . removeHandler ( hdlr ) root_logger . setLevel ( ROOT_LOG_LEVEL ) hdlr = logging . StreamHandler ( ROOT_LOG_STREAM ) formatter = colorlog . ColoredFormatter... | Configure the root logger . |
242,564 | def add_root_log_file ( log_file ) : root_logger = logging . getLogger ( ) hdlr = logging . FileHandler ( log_file ) formatter = logging . Formatter ( '%(asctime)s %(name)-10s %(levelname)-8s %(message)s' , datefmt = '%m-%d %H:%M:%S' ) hdlr . setFormatter ( formatter ) root_logger . addHandler ( hdlr ) root_logger . in... | Add a log file to the root logger . |
242,565 | def add_log_file ( logger , log_file , global_log_file = False ) : if global_log_file : add_root_log_file ( log_file ) else : hdlr = logging . FileHandler ( log_file ) formatter = logging . Formatter ( '%(asctime)s %(name)-10s %(levelname)-8s %(message)s' , datefmt = '%m-%d %H:%M:%S' ) hdlr . setFormatter ( formatter )... | Add a log file to this logger . If global_log_file is true log_file will be handed the root logger otherwise it will only be used by this particular logger . |
242,566 | def get_module_profile ( module , name = None ) : try : return module . profile except AttributeError : if 'profile_factory' not in module . __dict__ : return None default_section = Section ( name or module . __name__ ) profile = module . profile_factory ( default_section = default_section ) profile . auto_register ( m... | Get or create a profile from a module and return it . |
242,567 | def iterargs ( self ) : iterargs = OrderedDict ( ) for name in self . _iterargs : plural = self . _profile . iterargs [ name ] iterargs [ name ] = tuple ( self . _values [ plural ] ) return iterargs | uses the singular name as key |
242,568 | def _exec_check ( self , check : FontbakeryCallable , args : Dict [ str , Any ] ) : try : result = check ( ** args ) if isinstance ( result , types . GeneratorType ) : for sub_result in result : yield self . _check_result ( sub_result ) return except Exception as e : error = FailedCheckError ( e ) result = ( ERROR , er... | Yields check sub results . |
242,569 | def check_order ( self , order ) : own_order = self . order for item in order : if item not in own_order : raise ValueError ( f'Order item {item} not found.' ) return order | order must be a subset of self . order |
242,570 | def add_check ( self , check ) : if self . _add_check_callback is not None : if not self . _add_check_callback ( self , check ) : return False self . _checkid2index [ check . id ] = len ( self . _checks ) self . _checks . append ( check ) return True | Please use rather register_check as a decorator . |
242,571 | def merge_section ( self , section , filter_func = None ) : for check in section . checks : if filter_func and not filter_func ( check ) : continue self . add_check ( check ) | Add section . checks to self if not skipped by self . _add_check_callback . order description etc . are not updated . |
242,572 | def validate_values ( self , values ) : format_message = '{}: {} (value: {})' . format messages = [ ] for name , value in values . items ( ) : if name not in self . expected_values : continue valid , message = self . expected_values [ name ] . validate ( value ) if valid : continue messages . append ( format_message ( ... | Validate values if they are registered as expected_values and present . |
242,573 | def _get_aggregate_args ( self , item , key ) : if not key in ( 'args' , 'mandatoryArgs' ) : raise TypeError ( 'key must be "args" or "mandatoryArgs", got {}' ) . format ( key ) dependencies = list ( getattr ( item , key ) ) if hasattr ( item , 'conditions' ) : dependencies += [ name for negated , name in map ( is_nega... | Get all arguments or mandatory arguments of the item . |
242,574 | def get_iterargs ( self , item ) : args = self . _get_aggregate_args ( item , 'mandatoryArgs' ) return tuple ( sorted ( [ arg for arg in args if arg in self . iterargs ] ) ) | Returns a tuple of all iterags for item sorted by name . |
242,575 | def auto_register ( self , symbol_table , filter_func = None , profile_imports = None ) : if profile_imports : symbol_table = symbol_table . copy ( ) symbol_table [ 'profile_imports' ] = profile_imports all_items = list ( symbol_table . values ( ) ) + self . _load_profile_imports ( symbol_table ) namespace_types = ( Fo... | Register items from symbol_table in the profile . |
242,576 | def merge_profile ( self , profile , filter_func = None ) : for ns_type in self . _valid_namespace_types : ns_dict = getattr ( profile , ns_type ) if filter_func : ns_type_singular = self . _valid_namespace_types [ ns_type ] ns_dict = { name : item for name , item in ns_dict . items ( ) if filter_func ( ns_type_singula... | Copy all namespace items from profile to self . |
242,577 | def serialize_identity ( self , identity ) : section , check , iterargs = identity values = map ( partial ( json . dumps , separators = ( ',' , ':' ) ) , [ str ( section ) , check . id , sorted ( iterargs ) ] ) return '{{"section":{},"check":{},"iterargs":{}}}' . format ( * values ) | Return a json string that can also be used as a key . |
242,578 | def get_profile ( ) : argument_parser = ThrowingArgumentParser ( add_help = False ) argument_parser . add_argument ( 'profile' ) try : args , _ = argument_parser . parse_known_args ( ) except ArgumentParserError : return Profile ( ) imported = get_module ( args . profile ) profile = get_module_profile ( imported ) if n... | Prefetch the profile module to fill some holes in the help text . |
242,579 | def collate_fonts_data ( fonts_data ) : glyphs = { } for family in fonts_data : for glyph in family : if glyph [ 'unicode' ] not in glyphs : glyphs [ glyph [ 'unicode' ] ] = glyph else : c = glyphs [ glyph [ 'unicode' ] ] [ 'contours' ] glyphs [ glyph [ 'unicode' ] ] [ 'contours' ] = c | glyph [ 'contours' ] return gly... | Collate individual fonts data into a single glyph data list . |
242,580 | def com_adobe_fonts_check_family_consistent_upm ( ttFonts ) : upm_set = set ( ) for ttFont in ttFonts : upm_set . add ( ttFont [ 'head' ] . unitsPerEm ) if len ( upm_set ) > 1 : yield FAIL , ( "Fonts have different units per em: {}." ) . format ( sorted ( upm_set ) ) else : yield PASS , "Fonts have consistent units per... | Fonts have consistent Units Per Em? |
242,581 | def com_adobe_fonts_check_find_empty_letters ( ttFont ) : cmap = ttFont . getBestCmap ( ) passed = True letter_categories = { 'Ll' , 'Lm' , 'Lo' , 'Lt' , 'Lu' , } invisible_letters = { 0x115F , 0x1160 , 0x3164 , 0xFFA0 , } for unicode_val , glyph_name in cmap . items ( ) : category = unicodedata . category ( chr ( unic... | Letters in font have glyphs that are not empty? |
242,582 | def com_adobe_fonts_check_name_empty_records ( ttFont ) : failed = False for name_record in ttFont [ 'name' ] . names : name_string = name_record . toUnicode ( ) . strip ( ) if len ( name_string ) == 0 : failed = True name_key = tuple ( [ name_record . platformID , name_record . platEncID , name_record . langID , name_... | Check name table for empty records . |
242,583 | def com_google_fonts_check_name_no_copyright_on_description ( ttFont ) : failed = False for name in ttFont [ 'name' ] . names : if 'opyright' in name . string . decode ( name . getEncoding ( ) ) and name . nameID == NameID . DESCRIPTION : failed = True if failed : yield FAIL , ( "Namerecords with ID={} (NameID.DESCRIPT... | Description strings in the name table must not contain copyright info . |
242,584 | def com_google_fonts_check_monospace ( ttFont , glyph_metrics_stats ) : from fontbakery . constants import ( IsFixedWidth , PANOSE_Proportion ) failed = False seems_monospaced = glyph_metrics_stats [ "seems_monospaced" ] most_common_width = glyph_metrics_stats [ "most_common_width" ] width_max = glyph_metrics_stats [ '... | Checking correctness of monospaced metadata . |
242,585 | def com_google_fonts_check_name_line_breaks ( ttFont ) : failed = False for name in ttFont [ "name" ] . names : string = name . string . decode ( name . getEncoding ( ) ) if "\n" in string : failed = True yield FAIL , ( "Name entry {} on platform {} contains" " a line-break." ) . format ( NameID ( name . nameID ) . nam... | Name table entries should not contain line - breaks . |
242,586 | def com_google_fonts_check_name_match_familyname_fullfont ( ttFont ) : from fontbakery . utils import get_name_entry_strings familyname = get_name_entry_strings ( ttFont , NameID . FONT_FAMILY_NAME ) fullfontname = get_name_entry_strings ( ttFont , NameID . FULL_FONT_NAME ) if len ( familyname ) == 0 : yield FAIL , Mes... | Does full font name begin with the font family name? |
242,587 | def com_google_fonts_check_family_naming_recommendations ( ttFont ) : import re from fontbakery . utils import get_name_entry_strings bad_entries = [ ] bad_psname = re . compile ( "[^A-Za-z0-9-]" ) for string in get_name_entry_strings ( ttFont , NameID . POSTSCRIPT_NAME ) : if bad_psname . search ( string ) : bad_entri... | Font follows the family naming recommendations? |
242,588 | def com_google_fonts_check_name_rfn ( ttFont ) : failed = False for entry in ttFont [ "name" ] . names : string = entry . toUnicode ( ) if "reserved font name" in string . lower ( ) : yield WARN , ( "Name table entry (\"{}\")" " contains \"Reserved Font Name\"." " This is an error except in a few specific" " rare cases... | Name table strings must not contain the string Reserved Font Name . |
242,589 | def com_adobe_fonts_check_family_max_4_fonts_per_family_name ( ttFonts ) : from collections import Counter from fontbakery . utils import get_name_entry_strings failed = False family_names = list ( ) for ttFont in ttFonts : names_list = get_name_entry_strings ( ttFont , NameID . FONT_FAMILY_NAME ) names_set = set ( nam... | Verify that each group of fonts with the same nameID 1 has a maximum of 4 fonts |
242,590 | def com_google_fonts_check_family_equal_unicode_encodings ( ttFonts ) : encoding = None failed = False for ttFont in ttFonts : cmap = None for table in ttFont [ 'cmap' ] . tables : if table . format == 4 : cmap = table break if not encoding : encoding = cmap . platEncID if encoding != cmap . platEncID : failed = True i... | Fonts have equal unicode encodings? |
242,591 | def com_google_fonts_check_all_glyphs_have_codepoints ( ttFont ) : failed = False for subtable in ttFont [ 'cmap' ] . tables : if subtable . isUnicode ( ) : for item in subtable . cmap . items ( ) : codepoint = item [ 0 ] if codepoint is None : failed = True yield FAIL , ( "Glyph {} lacks a unicode" " codepoint assignm... | Check all glyphs have codepoints assigned . |
242,592 | def run ( self , order = None ) : for event in self . runner . run ( order = order ) : self . receive ( event ) | self . runner must be present |
242,593 | def com_google_fonts_check_name_trailing_spaces ( ttFont ) : failed = False for name_record in ttFont [ 'name' ] . names : name_string = name_record . toUnicode ( ) if name_string != name_string . strip ( ) : failed = True name_key = tuple ( [ name_record . platformID , name_record . platEncID , name_record . langID , ... | Name table records must not have trailing spaces . |
242,594 | def com_google_fonts_check_family_single_directory ( fonts ) : directories = [ ] for target_file in fonts : directory = os . path . dirname ( target_file ) if directory not in directories : directories . append ( directory ) if len ( directories ) == 1 : yield PASS , "All files are in the same directory." else : yield ... | Checking all files are in the same directory . |
242,595 | def com_google_fonts_check_ftxvalidator ( font ) : import plistlib try : import subprocess ftx_cmd = [ "ftxvalidator" , "-t" , "all" , font ] ftx_output = subprocess . check_output ( ftx_cmd , stderr = subprocess . STDOUT ) ftx_data = plistlib . loads ( ftx_output ) if 'kATSFontTestSeverityFatalError' not in ftx_data [... | Checking with ftxvalidator . |
242,596 | def com_google_fonts_check_ots ( font ) : import ots try : process = ots . sanitize ( font , check = True , capture_output = True ) except ots . CalledProcessError as e : yield FAIL , ( "ots-sanitize returned an error code ({}). Output follows:\n\n{}{}" ) . format ( e . returncode , e . stderr . decode ( ) , e . stdout... | Checking with ots - sanitize . |
242,597 | def com_google_fonts_check_fontbakery_version ( ) : try : import subprocess installed_str = None latest_str = None is_latest = False failed = False pip_cmd = [ "pip" , "search" , "fontbakery" ] pip_output = subprocess . check_output ( pip_cmd , stderr = subprocess . STDOUT ) for line in pip_output . decode ( ) . split ... | Do we have the latest version of FontBakery installed? |
242,598 | def com_google_fonts_check_fontforge_stderr ( font , fontforge_check_results ) : if "skip" in fontforge_check_results : yield SKIP , fontforge_check_results [ "skip" ] return filtered_err_msgs = "" for line in fontforge_check_results [ "ff_err_messages" ] . split ( '\n' ) : if ( 'The following table(s) in the font' ' h... | FontForge validation outputs error messages? |
242,599 | def com_google_fonts_check_mandatory_glyphs ( ttFont ) : from fontbakery . utils import glyph_has_ink if ( ttFont . getGlyphOrder ( ) [ 0 ] == ".notdef" and ".notdef" not in ttFont . getBestCmap ( ) . values ( ) and glyph_has_ink ( ttFont , ".notdef" ) ) : yield PASS , ( "Font contains the .notdef glyph as the first gl... | Font contains . notdef as first glyph? |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.