| idx (int64, 0–251k) | question (string, length 53–3.53k) | target (string, length 5–1.23k) | len_question (int64, 20–893) | len_target (int64, 3–238) |
|---|---|---|---|---|
241,500 | def orthogonal_basis ( self ) : if self . dim == 3 : x_arr = np . array ( [ - self . data [ 1 ] , self . data [ 0 ] , 0 ] ) if np . linalg . norm ( x_arr ) == 0 : x_arr = np . array ( [ self . data [ 2 ] , 0 , 0 ] ) x_arr = x_arr / np . linalg . norm ( x_arr ) y_arr = np . cross ( self . data , x_arr ) return Direction ( x_arr , frame = self . frame ) , Direction ( y_arr , frame = self . frame ) raise NotImplementedError ( 'Orthogonal basis only supported for 3 dimensions' ) | Return an orthogonal basis to this direction . | 164 | 10 |
241,501 | def open ( filename , frame = 'unspecified' ) : data = BagOfPoints . load_data ( filename ) return Direction ( data , frame ) | Create a Direction from data saved in a file . | 32 | 10 |
241,502 | def split_points ( self , point_cloud ) : if not isinstance ( point_cloud , PointCloud ) : raise ValueError ( 'Can only split point clouds' ) # compute indices above and below above_plane = point_cloud . _data - np . tile ( self . _x0 . data , [ 1 , point_cloud . num_points ] ) . T . dot ( self . _n ) > 0 above_plane = point_cloud . z_coords > 0 & above_plane below_plane = point_cloud . _data - np . tile ( self . _x0 . data , [ 1 , point_cloud . num_points ] ) . T . dot ( self . _n ) <= 0 below_plane = point_cloud . z_coords > 0 & below_plane # split data above_data = point_cloud . data [ : , above_plane ] below_data = point_cloud . data [ : , below_plane ] return PointCloud ( above_data , point_cloud . frame ) , PointCloud ( below_data , point_cloud . frame ) | Split a point cloud into two along this plane . | 236 | 10 |
241,503 | def mean ( self ) : mean_point_data = np . mean ( self . _data , axis = 1 ) return Point ( mean_point_data , self . _frame ) | Returns the average point in the cloud . | 39 | 8 |
241,504 | def subsample ( self , rate , random = False ) : if type ( rate ) != int and rate < 1 : raise ValueError ( 'Can only subsample with strictly positive integer rate' ) indices = np . arange ( self . num_points ) if random : np . random . shuffle ( indices ) subsample_inds = indices [ : : rate ] subsampled_data = self . _data [ : , subsample_inds ] return PointCloud ( subsampled_data , self . _frame ) , subsample_inds | Returns a subsampled version of the PointCloud . | 117 | 11 |
241,505 | def box_mask ( self , box ) : if not isinstance ( box , Box ) : raise ValueError ( 'Must provide Box object' ) if box . frame != self . frame : raise ValueError ( 'Box must be in same frame as PointCloud' ) all_points = self . data . T cond1 = np . all ( box . min_pt <= all_points , axis = 1 ) cond2 = np . all ( all_points <= box . max_pt , axis = 1 ) valid_point_indices = np . where ( np . logical_and ( cond1 , cond2 ) ) [ 0 ] valid_points = all_points [ valid_point_indices ] return PointCloud ( valid_points . T , self . frame ) , valid_point_indices | Return a PointCloud containing only points within the given Box . | 170 | 12 |
241,506 | def best_fit_plane ( self ) : X = np . c_ [ self . x_coords , self . y_coords , np . ones ( self . num_points ) ] y = self . z_coords A = X . T . dot ( X ) b = X . T . dot ( y ) w = np . linalg . inv ( A ) . dot ( b ) n = np . array ( [ w [ 0 ] , w [ 1 ] , - 1 ] ) n = n / np . linalg . norm ( n ) n = Direction ( n , self . _frame ) x0 = self . mean ( ) return n , x0 | Fits a plane to the point cloud using least squares . | 146 | 12 |
241,507 | def remove_zero_points ( self ) : points_of_interest = np . where ( self . z_coords != 0.0 ) [ 0 ] self . _data = self . data [ : , points_of_interest ] | Removes points with a zero in the z - axis . | 51 | 12 |
241,508 | def remove_infinite_points ( self ) : points_of_interest = np . where ( np . all ( np . isfinite ( self . data ) , axis = 0 ) ) [ 0 ] self . _data = self . data [ : , points_of_interest ] | Removes infinite points . | 61 | 5 |
241,509 | def open ( filename , frame = 'unspecified' ) : data = BagOfPoints . load_data ( filename ) return PointCloud ( data , frame ) | Create a PointCloud from data saved in a file . | 33 | 11 |
241,510 | def subsample ( self , rate ) : if type ( rate ) != int and rate < 1 : raise ValueError ( 'Can only subsample with strictly positive integer rate' ) subsample_inds = np . arange ( self . num_points ) [ : : rate ] subsampled_data = self . _data [ : , subsample_inds ] return NormalCloud ( subsampled_data , self . _frame ) | Returns a subsampled version of the NormalCloud . | 93 | 11 |
241,511 | def remove_zero_normals ( self ) : points_of_interest = np . where ( np . linalg . norm ( self . _data , axis = 0 ) != 0.0 ) [ 0 ] self . _data = self . _data [ : , points_of_interest ] | Removes normal vectors with a zero magnitude . | 64 | 9 |
241,512 | def remove_nan_normals ( self ) : points_of_interest = np . where ( np . isfinite ( np . linalg . norm ( self . _data , axis = 0 ) ) ) [ 0 ] self . _data = self . _data [ : , points_of_interest ] | Removes normal vectors with nan magnitude . | 67 | 8 |
241,513 | def open ( filename , frame = 'unspecified' ) : data = BagOfPoints . load_data ( filename ) return NormalCloud ( data , frame ) | Create a NormalCloud from data saved in a file . | 33 | 11 |
241,514 | def open ( filename , frame = 'unspecified' ) : data = BagOfPoints . load_data ( filename ) return ImageCoords ( data , frame ) | Create an ImageCoords from data saved in a file . | 34 | 12 |
241,515 | def open ( filename , frame = 'unspecified' ) : data = BagOfPoints . load_data ( filename ) return RgbCloud ( data , frame ) | Create a RgbCloud from data saved in a file . | 34 | 12 |
241,516 | def remove_zero_points ( self ) : points_of_interest = np . where ( ( np . linalg . norm ( self . point_cloud . data , axis = 0 ) != 0.0 ) & ( np . linalg . norm ( self . normal_cloud . data , axis = 0 ) != 0.0 ) & ( np . isfinite ( self . normal_cloud . data [ 0 , : ] ) ) ) [ 0 ] self . point_cloud . _data = self . point_cloud . data [ : , points_of_interest ] self . normal_cloud . _data = self . normal_cloud . data [ : , points_of_interest ] | Remove all elements where the norms and points are zero . | 149 | 11 |
241,517 | def gen_experiment_ref ( experiment_tag , n = 10 ) : experiment_id = gen_experiment_id ( n = n ) return '{0}_{1}' . format ( experiment_tag , experiment_id ) | Generate a random string for naming . | 52 | 8 |
241,518 | def add ( self , datapoint ) : if not self . is_full : self . set_datapoint ( self . cur_index , datapoint ) self . cur_index += 1 | Adds the datapoint to the tensor if room is available . | 43 | 14 |
241,519 | def add_batch ( self , datapoints ) : num_datapoints_to_add = datapoints . shape [ 0 ] end_index = self . cur_index + num_datapoints_to_add if end_index <= self . num_datapoints : self . data [ self . cur_index : end_index , ... ] = datapoints self . cur_index = end_index | Adds a batch of datapoints to the tensor if room is available . | 97 | 17 |
241,520 | def datapoint ( self , ind ) : if self . height is None : return self . data [ ind ] return self . data [ ind , ... ] . copy ( ) | Returns the datapoint at the given index . | 37 | 10 |
241,521 | def set_datapoint ( self , ind , datapoint ) : if ind >= self . num_datapoints : raise ValueError ( 'Index %d out of bounds! Tensor has %d datapoints' % ( ind , self . num_datapoints ) ) self . data [ ind , ... ] = np . array ( datapoint ) . astype ( self . dtype ) | Sets the value of the datapoint at the given index . | 90 | 14 |
241,522 | def data_slice ( self , slice_ind ) : if self . height is None : return self . data [ slice_ind ] return self . data [ slice_ind , ... ] | Returns a slice of datapoints | 39 | 8 |
241,523 | def save ( self , filename , compressed = True ) : # check for data if not self . has_data : return False # read ext and save accordingly _ , file_ext = os . path . splitext ( filename ) if compressed : if file_ext != COMPRESSED_TENSOR_EXT : raise ValueError ( 'Can only save compressed tensor with %s extension' % ( COMPRESSED_TENSOR_EXT ) ) np . savez_compressed ( filename , self . data [ : self . cur_index , ... ] ) else : if file_ext != TENSOR_EXT : raise ValueError ( 'Can only save tensor with .npy extension' ) np . save ( filename , self . data [ : self . cur_index , ... ] ) return True | Save a tensor to disk . | 171 | 7 |
241,524 | def load ( filename , compressed = True , prealloc = None ) : # switch load based on file ext _ , file_ext = os . path . splitext ( filename ) if compressed : if file_ext != COMPRESSED_TENSOR_EXT : raise ValueError ( 'Can only load compressed tensor with %s extension' % ( COMPRESSED_TENSOR_EXT ) ) data = np . load ( filename ) [ 'arr_0' ] else : if file_ext != TENSOR_EXT : raise ValueError ( 'Can only load tensor with .npy extension' ) data = np . load ( filename ) # fill prealloc tensor if prealloc is not None : prealloc . reset ( ) prealloc . add_batch ( data ) return prealloc # init new tensor tensor = Tensor ( data . shape , data . dtype , data = data ) return tensor | Loads a tensor from disk . | 196 | 8 |
241,525 | def datapoint_indices_for_tensor ( self , tensor_index ) : if tensor_index >= self . _num_tensors : raise ValueError ( 'Tensor index %d is greater than the number of tensors (%d)' % ( tensor_index , self . _num_tensors ) ) return self . _file_num_to_indices [ tensor_index ] | Returns the indices for all datapoints in the given tensor . | 92 | 15 |
241,526 | def tensor_index ( self , datapoint_index ) : if datapoint_index >= self . _num_datapoints : raise ValueError ( 'Datapoint index %d is greater than the number of datapoints (%d)' % ( datapoint_index , self . _num_datapoints ) ) return self . _index_to_file_num [ datapoint_index ] | Returns the index of the tensor containing the referenced datapoint . | 93 | 14 |
241,527 | def generate_tensor_filename ( self , field_name , file_num , compressed = True ) : file_ext = TENSOR_EXT if compressed : file_ext = COMPRESSED_TENSOR_EXT filename = os . path . join ( self . filename , 'tensors' , '%s_%05d%s' % ( field_name , file_num , file_ext ) ) return filename | Generate a filename for a tensor . | 93 | 9 |
241,528 | def _allocate_tensors ( self ) : # init tensors dict self . _tensors = { } # allocate tensor for each data field for field_name , field_spec in self . _config [ 'fields' ] . items ( ) : # parse attributes field_dtype = np . dtype ( field_spec [ 'dtype' ] ) # parse shape field_shape = [ self . _datapoints_per_file ] if 'height' in field_spec . keys ( ) : field_shape . append ( field_spec [ 'height' ] ) if 'width' in field_spec . keys ( ) : field_shape . append ( field_spec [ 'width' ] ) if 'channels' in field_spec . keys ( ) : field_shape . append ( field_spec [ 'channels' ] ) # create tensor self . _tensors [ field_name ] = Tensor ( field_shape , field_dtype ) | Allocates the tensors in the dataset . | 214 | 10 |
241,529 | def add ( self , datapoint ) : # check access level if self . _access_mode == READ_ONLY_ACCESS : raise ValueError ( 'Cannot add datapoints with read-only access' ) # read tensor datapoint ind tensor_ind = self . _num_datapoints // self . _datapoints_per_file # check datapoint fields for field_name in datapoint . keys ( ) : if field_name not in self . field_names : raise ValueError ( 'Field %s not specified in dataset' % ( field_name ) ) # store data in tensor cur_num_tensors = self . _num_tensors new_num_tensors = cur_num_tensors for field_name in self . field_names : if tensor_ind < cur_num_tensors : # load tensor if it was previously allocated self . _tensors [ field_name ] = self . tensor ( field_name , tensor_ind ) else : # clear tensor if this is a new tensor self . _tensors [ field_name ] . reset ( ) self . _tensor_cache_file_num [ field_name ] = tensor_ind new_num_tensors = cur_num_tensors + 1 self . _has_unsaved_data = True self . _tensors [ field_name ] . add ( datapoint [ field_name ] ) cur_size = self . _tensors [ field_name ] . size # update num tensors if new_num_tensors > cur_num_tensors : self . _num_tensors = new_num_tensors # update file indices self . _index_to_file_num [ self . _num_datapoints ] = tensor_ind self . _file_num_to_indices [ tensor_ind ] = tensor_ind * self . _datapoints_per_file + np . arange ( cur_size ) # save if tensors are full field_name = self . field_names [ 0 ] if self . _tensors [ field_name ] . is_full : # save next tensors to file logging . info ( 'Dataset %s: Writing tensor %d to disk' % ( self . filename , tensor_ind ) ) self . write ( ) # increment num datapoints self . _num_datapoints += 1 | Adds a datapoint to the file . | 557 | 9 |
241,530 | def datapoint ( self , ind , field_names = None ) : # flush if necessary if self . _has_unsaved_data : self . flush ( ) # check valid input if ind >= self . _num_datapoints : raise ValueError ( 'Index %d larger than the number of datapoints in the dataset (%d)' % ( ind , self . _num_datapoints ) ) # load the field names if field_names is None : field_names = self . field_names # return the datapoint datapoint = TensorDatapoint ( field_names ) file_num = self . _index_to_file_num [ ind ] for field_name in field_names : tensor = self . tensor ( field_name , file_num ) tensor_index = ind % self . _datapoints_per_file datapoint [ field_name ] = tensor . datapoint ( tensor_index ) return datapoint | Loads a tensor datapoint for a given global index . | 217 | 14 |
241,531 | def tensor ( self , field_name , tensor_ind ) : if tensor_ind == self . _tensor_cache_file_num [ field_name ] : return self . _tensors [ field_name ] filename = self . generate_tensor_filename ( field_name , tensor_ind , compressed = True ) Tensor . load ( filename , compressed = True , prealloc = self . _tensors [ field_name ] ) self . _tensor_cache_file_num [ field_name ] = tensor_ind return self . _tensors [ field_name ] | Returns the tensor for a given field and tensor index . | 134 | 13 |
241,532 | def delete_last ( self , num_to_delete = 1 ) : # check access level if self . _access_mode == READ_ONLY_ACCESS : raise ValueError ( 'Cannot delete datapoints with read-only access' ) # check num to delete if num_to_delete > self . _num_datapoints : raise ValueError ( 'Cannot remove more than the number of datapoints in the dataset' ) # compute indices last_datapoint_ind = self . _num_datapoints - 1 last_tensor_ind = last_datapoint_ind // self . _datapoints_per_file new_last_datapoint_ind = self . _num_datapoints - 1 - num_to_delete new_num_datapoints = new_last_datapoint_ind + 1 new_last_datapoint_ind = max ( new_last_datapoint_ind , 0 ) new_last_tensor_ind = new_last_datapoint_ind // self . _datapoints_per_file # delete all but the last tensor delete_tensor_ind = range ( new_last_tensor_ind + 1 , last_tensor_ind + 1 ) for tensor_ind in delete_tensor_ind : for field_name in self . field_names : filename = self . generate_tensor_filename ( field_name , tensor_ind ) os . remove ( filename ) # update last tensor dataset_empty = False target_tensor_size = new_num_datapoints % self . _datapoints_per_file if target_tensor_size == 0 : if new_num_datapoints > 0 : target_tensor_size = self . _datapoints_per_file else : dataset_empty = True for field_name in self . field_names : new_last_tensor = self . tensor ( field_name , new_last_tensor_ind ) while new_last_tensor . size > target_tensor_size : new_last_tensor . delete_last ( ) filename = self . generate_tensor_filename ( field_name , new_last_tensor_ind ) new_last_tensor . save ( filename , compressed = True ) if not new_last_tensor . has_data : os . remove ( filename ) new_last_tensor . reset ( ) # update num datapoints if self . _num_datapoints - 1 - num_to_delete >= 0 : self . _num_datapoints = new_num_datapoints else : self . _num_datapoints = 0 # handle deleted tensor self . _num_tensors = new_last_tensor_ind + 1 if dataset_empty : self . _num_tensors = 0 | Deletes the last N datapoints from the dataset . | 650 | 13 |
241,533 | def write ( self ) : # write the next file for all fields for field_name in self . field_names : filename = self . generate_tensor_filename ( field_name , self . _num_tensors - 1 ) self . _tensors [ field_name ] . save ( filename , compressed = True ) # write the current metadata to file json . dump ( self . _metadata , open ( self . metadata_filename , 'w' ) , indent = JSON_INDENT , sort_keys = True ) # update self . _has_unsaved_data = False | Writes all tensors to the next file number . | 126 | 11 |
241,534 | def open ( dataset_dir , access_mode = READ_ONLY_ACCESS ) : # check access mode if access_mode == WRITE_ACCESS : raise ValueError ( 'Cannot open a dataset with write-only access' ) # read config try : # json load config_filename = os . path . join ( dataset_dir , 'config.json' ) config = json . load ( open ( config_filename , 'r' ) ) except : # YAML load config_filename = os . path . join ( dataset_dir , 'config.yaml' ) config = YamlConfig ( config_filename ) # open dataset dataset = TensorDataset ( dataset_dir , config , access_mode = access_mode ) return dataset | Opens a tensor dataset . | 161 | 7 |
241,535 | def split ( self , split_name ) : if not self . has_split ( split_name ) : raise ValueError ( 'Split %s does not exist!' % ( split_name ) ) metadata_filename = self . split_metadata_filename ( split_name ) train_filename = self . train_indices_filename ( split_name ) val_filename = self . val_indices_filename ( split_name ) metadata = json . load ( open ( metadata_filename , 'r' ) ) train_indices = np . load ( train_filename ) [ 'arr_0' ] val_indices = np . load ( val_filename ) [ 'arr_0' ] return train_indices , val_indices , metadata | Return the training and validation indices for the requested split . | 161 | 11 |
241,536 | def delete_split ( self , split_name ) : if self . has_split ( split_name ) : shutil . rmtree ( os . path . join ( self . split_dir , split_name ) ) | Delete a split of the dataset . | 48 | 7 |
241,537 | def _load_config ( self , filename ) : # Read entire file for metadata fh = open ( filename , 'r' ) self . file_contents = fh . read ( ) # Replace !include directives with content config_dir = os . path . split ( filename ) [ 0 ] include_re = re . compile ( '^(.*)!include\s+(.*)$' , re . MULTILINE ) def recursive_load ( matchobj , path ) : first_spacing = matchobj . group ( 1 ) other_spacing = first_spacing . replace ( '-' , ' ' ) fname = os . path . join ( path , matchobj . group ( 2 ) ) new_path , _ = os . path . split ( fname ) new_path = os . path . realpath ( new_path ) text = '' with open ( fname ) as f : text = f . read ( ) text = first_spacing + text text = text . replace ( '\n' , '\n{}' . format ( other_spacing ) , text . count ( '\n' ) - 1 ) return re . sub ( include_re , lambda m : recursive_load ( m , new_path ) , text ) # def include_repl(matchobj): # first_spacing = matchobj.group(1) # other_spacing = first_spacing.replace('-', ' ') # fname = os.path.join(config_dir, matchobj.group(2)) # text = '' # with open(fname) as f: # text = f.read() # text = first_spacing + text # text = text.replace('\n', '\n{}'.format(other_spacing), text.count('\n') - 1) # return text self . file_contents = re . sub ( include_re , lambda m : recursive_load ( m , config_dir ) , self . file_contents ) # Read in dictionary self . config = self . __ordered_load ( self . file_contents ) # Convert functions of other params to true expressions for k in self . config . keys ( ) : self . config [ k ] = YamlConfig . __convert_key ( self . config [ k ] ) fh . close ( ) # Load core configuration return self . config | Loads a yaml configuration file from the given filename . | 515 | 12 |
241,538 | def __convert_key ( expression ) : if type ( expression ) is str and len ( expression ) > 2 and expression [ 1 ] == '!' : expression = eval ( expression [ 2 : - 1 ] ) return expression | Converts keys in YAML that reference other keys . | 47 | 12 |
241,539 | def make_summary_table ( train_result , val_result , plot = True , save_dir = None , prepend = "" , save = False ) : table_key_list = [ 'error_rate' , 'recall_at_99_precision' , 'average_precision' , 'precision' , 'recall' ] num_fields = len ( table_key_list ) import matplotlib . pyplot as plt ax = plt . subplot ( 111 , frame_on = False ) ax . xaxis . set_visible ( False ) ax . yaxis . set_visible ( False ) data = np . zeros ( [ num_fields , 2 ] ) data_dict = dict ( ) names = [ 'train' , 'validation' ] for name , result in zip ( names , [ train_result , val_result ] ) : data_dict [ name ] = { } data_dict [ name ] [ 'error_rate' ] = result . error_rate data_dict [ name ] [ 'average_precision' ] = result . ap_score * 100 data_dict [ name ] [ 'precision' ] = result . precision * 100 data_dict [ name ] [ 'recall' ] = result . recall * 100 precision_array , recall_array , _ = result . precision_recall_curve ( ) recall_at_99_precision = recall_array [ np . argmax ( precision_array > 0.99 ) ] * 100 # to put it in percentage terms data_dict [ name ] [ 'recall_at_99_precision' ] = recall_at_99_precision for i , key in enumerate ( table_key_list ) : data_dict [ name ] [ key ] = float ( "{0:.2f}" . format ( data_dict [ name ] [ key ] ) ) j = names . index ( name ) data [ i , j ] = data_dict [ name ] [ key ] table = plt . table ( cellText = data , rowLabels = table_key_list , colLabels = names ) fig = plt . gcf ( ) fig . subplots_adjust ( bottom = 0.15 ) if plot : plt . show ( ) # save the results if save_dir is not None and save : fig_filename = os . path . join ( save_dir , prepend + 'summary.png' ) yaml_filename = os . path . join ( save_dir , prepend + 'summary.yaml' ) yaml . dump ( data_dict , open ( yaml_filename , 'w' ) , default_flow_style = False ) fig . savefig ( fig_filename , bbox_inches = "tight" ) return data_dict , fig | Makes a matplotlib table object with relevant data . Thanks to Lucas Manuelli for the contribution . | 609 | 22 |
241,540 | def app_score ( self ) : # compute curve precisions , pct_pred_pos , taus = self . precision_pct_pred_pos_curve ( interval = False ) # compute area app = 0 total = 0 for k in range ( len ( precisions ) - 1 ) : # read cur data cur_prec = precisions [ k ] cur_pp = pct_pred_pos [ k ] cur_tau = taus [ k ] # read next data next_prec = precisions [ k + 1 ] next_pp = pct_pred_pos [ k + 1 ] next_tau = taus [ k + 1 ] # approximate with rectangles mid_prec = ( cur_prec + next_prec ) / 2.0 width_pp = np . abs ( next_pp - cur_pp ) app += mid_prec * width_pp total += width_pp return app | Computes the area under the app curve . | 203 | 9 |
241,541 | def accuracy_curve ( self , delta_tau = 0.01 ) : # compute thresholds based on the sorted probabilities orig_thresh = self . threshold sorted_labels , sorted_probs = self . sorted_values scores = [ ] taus = [ ] tau = 0 for k in range ( len ( sorted_labels ) ) : # compute new accuracy self . threshold = tau scores . append ( self . accuracy ) taus . append ( tau ) # update threshold tau = sorted_probs [ k ] # add last datapoint tau = 1.0 self . threshold = tau scores . append ( self . accuracy ) taus . append ( tau ) self . threshold = orig_thresh return scores , taus | Computes the relationship between probability threshold and classification accuracy . | 162 | 11 |
241,542 | def f1_curve ( self , delta_tau = 0.01 ) : # compute thresholds based on the sorted probabilities orig_thresh = self . threshold sorted_labels , sorted_probs = self . sorted_values scores = [ ] taus = [ ] tau = 0 for k in range ( len ( sorted_labels ) ) : # compute new accuracy self . threshold = tau scores . append ( self . f1_score ) taus . append ( tau ) # update threshold tau = sorted_probs [ k ] # add last datapoint tau = 1.0 self . threshold = tau scores . append ( self . f1_score ) taus . append ( tau ) self . threshold = orig_thresh return scores , taus | Computes the relationship between probability threshold and classification F1 score . | 169 | 13 |
241,543 | def phi_coef_curve ( self , delta_tau = 0.01 ) : # compute thresholds based on the sorted probabilities orig_thresh = self . threshold sorted_labels , sorted_probs = self . sorted_values scores = [ ] taus = [ ] tau = 0 for k in range ( len ( sorted_labels ) ) : # compute new accuracy self . threshold = tau scores . append ( self . phi_coef ) taus . append ( tau ) # update threshold tau = sorted_probs [ k ] # add last datapoint tau = 1.0 self . threshold = tau scores . append ( self . phi_coef ) taus . append ( tau ) self . threshold = orig_thresh return scores , taus | Computes the relationship between probability threshold and classification phi coefficient . | 174 | 13 |
241,544 | def precision_pct_pred_pos_curve ( self , interval = False , delta_tau = 0.001 ) : # compute thresholds based on the sorted probabilities orig_thresh = self . threshold sorted_labels , sorted_probs = self . sorted_values precisions = [ ] pct_pred_pos = [ ] taus = [ ] tau = 0 if not interval : for k in range ( len ( sorted_labels ) ) : # compute new accuracy self . threshold = tau precisions . append ( self . precision ) pct_pred_pos . append ( self . pct_pred_pos ) taus . append ( tau ) # update threshold tau = sorted_probs [ k ] else : while tau < 1.0 : # compute new accuracy self . threshold = tau precisions . append ( self . precision ) pct_pred_pos . append ( self . pct_pred_pos ) taus . append ( tau ) # update threshold tau += delta_tau # add last datapoint tau = 1.0 self . threshold = tau precisions . append ( self . precision ) pct_pred_pos . append ( self . pct_pred_pos ) taus . append ( tau ) precisions . append ( 1.0 ) pct_pred_pos . append ( 0.0 ) taus . append ( 1.0 + 1e-12 ) self . threshold = orig_thresh return precisions , pct_pred_pos , taus | Computes the relationship between precision and the percent of positively classified datapoints . | 334 | 17 |
241,545 | def gen_experiment_id ( n = 10 ) : chrs = 'abcdefghijklmnopqrstuvwxyz' inds = np . random . randint ( 0 , len ( chrs ) , size = n ) return '' . join ( [ chrs [ i ] for i in inds ] ) | Generate a random string with n characters . | 72 | 9 |
241,546 | def histogram ( values , num_bins , bounds , normalized = True , plot = False , color = 'b' ) : hist , bins = np . histogram ( values , bins = num_bins , range = bounds ) width = ( bins [ 1 ] - bins [ 0 ] ) if normalized : if np . sum ( hist ) > 0 : hist = hist . astype ( np . float32 ) / np . sum ( hist ) if plot : import matplotlib . pyplot as plt plt . bar ( bins [ : - 1 ] , hist , width = width , color = color ) return hist , bins | Generate a histogram plot . | 134 | 7 |
241,547 | def skew ( xi ) : S = np . array ( [ [ 0 , - xi [ 2 ] , xi [ 1 ] ] , [ xi [ 2 ] , 0 , - xi [ 0 ] ] , [ - xi [ 1 ] , xi [ 0 ] , 0 ] ] ) return S | Return the skew - symmetric matrix that can be used to calculate cross - products with vector xi . | 68 | 21 |
241,548 | def deskew ( S ) : x = np . zeros ( 3 ) x [ 0 ] = S [ 2 , 1 ] x [ 1 ] = S [ 0 , 2 ] x [ 2 ] = S [ 1 , 0 ] return x | Converts a skew - symmetric cross - product matrix to its corresponding vector . Only works for 3x3 matrices . | 52 | 25 |
241,549 | def reverse_dictionary ( d ) : rev_d = { } [ rev_d . update ( { v : k } ) for k , v in d . items ( ) ] return rev_d | Reverses the key value pairs for a given dictionary . | 43 | 12 |
241,550 | def filenames ( directory , tag = '' , sorted = False , recursive = False ) : if recursive : f = [ os . path . join ( directory , f ) for directory , _ , filename in os . walk ( directory ) for f in filename if f . find ( tag ) > - 1 ] else : f = [ os . path . join ( directory , f ) for f in os . listdir ( directory ) if f . find ( tag ) > - 1 ] if sorted : f . sort ( ) return f | Reads in all filenames from a directory that contain a specified substring . | 110 | 17 |
241,551 | def sph2cart ( r , az , elev ) : x = r * np . cos ( az ) * np . sin ( elev ) y = r * np . sin ( az ) * np . sin ( elev ) z = r * np . cos ( elev ) return x , y , z | Convert spherical to cartesian coordinates . | 63 | 8 |
241,552 | def cart2sph ( x , y , z ) : r = np . sqrt ( x ** 2 + y ** 2 + z ** 2 ) if x > 0 and y > 0 : az = np . arctan ( y / x ) elif x > 0 and y < 0 : az = 2 * np . pi - np . arctan ( - y / x ) elif x < 0 and y > 0 : az = np . pi - np . arctan ( - y / x ) elif x < 0 and y < 0 : az = np . pi + np . arctan ( y / x ) elif x == 0 and y > 0 : az = np . pi / 2 elif x == 0 and y < 0 : az = 3 * np . pi / 2 elif y == 0 and x > 0 : az = 0 elif y == 0 and x < 0 : az = np . pi elev = np . arccos ( z / r ) return r , az , elev | Convert cartesian to spherical coordinates . | 217 | 8 |
241,553 | def keyboard_input ( message , yesno = False ) : # add space for readability message += ' ' # add yes or no to message if yesno : message += '[y/n] ' # ask human human_input = input ( message ) if yesno : while human_input . lower ( ) != 'n' and human_input . lower ( ) != 'y' : logging . info ( 'Did not understand input. Please answer \'y\' or \'n\'' ) human_input = input ( message ) return human_input | Get keyboard input from a human optionally reasking for valid yes or no input . | 115 | 16 |
241,554 | def interpolate ( dq0 , dq1 , t ) : if not 0 <= t <= 1 : raise ValueError ( "Interpolation step must be between 0 and 1! Got {0}" . format ( t ) ) dqt = dq0 * ( 1 - t ) + dq1 * t return dqt . normalized | Return the interpolation of two DualQuaternions . | 72 | 11 |
241,555 | def _save ( self ) : # if not first time saving, copy .csv to a backup if os . path . isfile ( self . _full_filename ) : shutil . copyfile ( self . _full_filename , self . _full_backup_filename ) # write to csv with open ( self . _full_filename , 'w' ) as file : writer = csv . DictWriter ( file , fieldnames = self . _headers ) writer . writeheader ( ) for row in self . _table : writer . writerow ( row ) | Save the model to a . csv file | 121 | 9 |
241,556 | def insert ( self , data ) : row = { key : self . _default_entry for key in self . _headers } row [ '_uid' ] = self . _get_new_uid ( ) for key , val in data . items ( ) : if key in ( '_uid' , '_default' ) : logging . warn ( "Cannot manually set columns _uid or _default of a row! Given data: {0}" . format ( data ) ) continue if not isinstance ( val , CSVModel . _KNOWN_TYPES_MAP [ self . _headers_types [ key ] ] ) : raise Exception ( 'Data type mismatch for column {0}. Expected: {1}, got: {2}' . format ( key , CSVModel . _KNOWN_TYPES_MAP [ self . _headers_types [ key ] ] , type ( val ) ) ) row [ key ] = val self . _table . append ( row ) self . _save ( ) return row [ '_uid' ] | Insert a row into the . csv file . | 221 | 10 |
241,557 | def update_by_uid ( self , uid , data ) : row = self . _table [ uid + 1 ] for key , val in data . items ( ) : if key == '_uid' or key == '_default' : continue if key not in self . _headers : logging . warn ( "Unknown column name: {0}" . format ( key ) ) continue if not isinstance ( val , CSVModel . _KNOWN_TYPES_MAP [ self . _headers_types [ key ] ] ) : raise Exception ( 'Data type mismatch for column {0}. Expected: {1}, got: {2}' . format ( key , CSVModel . _KNOWN_TYPES_MAP [ self . _headers_types [ key ] ] , type ( val ) ) ) row [ key ] = val self . _save ( ) | Update a row with the given data . | 183 | 8 |
241,558 | def get_col ( self , col_name , filter = lambda _ : True ) : if col_name not in self . _headers : raise ValueError ( "{} not found! Model has headers: {}" . format ( col_name , self . _headers ) ) col = [ ] for i in range ( self . num_rows ) : row = self . _table [ i + 1 ] val = row [ col_name ] if filter ( val ) : col . append ( val ) return col | Return all values in the column corresponding to col_name that satisfies filter which is a function that takes in a value of the column s type and returns True or False | 107 | 33 |
241,559 | def get_by_cols ( self , cols , direction = 1 ) : if direction == 1 : iterator = range ( self . num_rows ) elif direction == - 1 : iterator = range ( self . num_rows - 1 , - 1 , - 1 ) else : raise ValueError ( "Direction can only be 1 (first) or -1 (last). Got: {0}" . format ( direction ) ) for i in iterator : row = self . _table [ i + 1 ] all_sat = True for key , val in cols . items ( ) : if row [ key ] != val : all_sat = False break if all_sat : return row . copy ( ) return None | Return the first or last row that satisfies the given col value constraints or None if no row contains the given value . | 151 | 23 |
241,560 | def get_rows_by_cols ( self , matching_dict ) : result = [ ] for i in range ( self . num_rows ) : row = self . _table [ i + 1 ] matching = True for key , val in matching_dict . items ( ) : if row [ key ] != val : matching = False break if matching : result . append ( row ) return result | Return all rows where the cols match the elements given in the matching_dict | 83 | 16 |
241,561 | def next ( self ) : if self . _cur_row >= len ( self . _table ) : raise StopIteration data = self . _table [ self . _cur_row ] . copy ( ) self . _cur_row += 1 return data | Returns the next row in the CSV for iteration | 54 | 9 |
241,562 | def load ( full_filename ) : with open ( full_filename , 'r' ) as file : reader = csv . DictReader ( file ) headers = reader . fieldnames if '_uid' not in headers or '_default' not in headers : raise Exception ( "Malformed CSVModel file!" ) all_rows = [ row for row in reader ] types = all_rows [ 0 ] table = [ types ] default_entry = table [ 0 ] [ '_default' ] for i in range ( 1 , len ( all_rows ) ) : raw_row = all_rows [ i ] row = { } for column_name in headers : if raw_row [ column_name ] != default_entry and column_name != '' : if types [ column_name ] == 'bool' : row [ column_name ] = CSVModel . _str_to_bool ( raw_row [ column_name ] ) else : try : row [ column_name ] = CSVModel . _KNOWN_TYPES_MAP [ types [ column_name ] ] ( raw_row [ column_name ] ) except : logging . error ( '{}, {}, {}' . format ( column_name , types [ column_name ] , raw_row [ column_name ] ) ) row [ column_name ] = CSVModel . _KNOWN_TYPES_MAP [ types [ column_name ] ] ( bool ( raw_row [ column_name ] ) ) else : row [ column_name ] = default_entry table . append ( row ) if len ( table ) == 1 : next_valid_uid = 0 else : next_valid_uid = int ( table [ - 1 ] [ '_uid' ] ) + 1 headers_init = headers [ 1 : - 1 ] types_init = [ types [ column_name ] for column_name in headers_init ] headers_types_list = zip ( headers_init , types_init ) csv_model = CSVModel ( full_filename , headers_types_list , default_entry = default_entry ) csv_model . _uid = next_valid_uid csv_model . _table = table csv_model . _save ( ) return csv_model | Load a . csv file into a CSVModel . | 481 | 11 |
241,563 | def get_or_create ( full_filename , headers_types = None , default_entry = '' ) : # convert dictionaries to list if isinstance ( headers_types , dict ) : headers_types_list = [ ( k , v ) for k , v in headers_types . items ( ) ] headers_types = headers_types_list if os . path . isfile ( full_filename ) : return CSVModel . load ( full_filename ) else : return CSVModel ( full_filename , headers_types , default_entry = default_entry ) | Load a . csv file into a CSVModel if the file exists or create a new CSVModel with the given filename if the file does not exist . | 120 | 31 |
241,564 | def projection_matrix ( point , normal , direction = None , perspective = None , pseudo = False ) : M = numpy . identity ( 4 ) point = numpy . array ( point [ : 3 ] , dtype = numpy . float64 , copy = False ) normal = unit_vector ( normal [ : 3 ] ) if perspective is not None : # perspective projection perspective = numpy . array ( perspective [ : 3 ] , dtype = numpy . float64 , copy = False ) M [ 0 , 0 ] = M [ 1 , 1 ] = M [ 2 , 2 ] = numpy . dot ( perspective - point , normal ) M [ : 3 , : 3 ] -= numpy . outer ( perspective , normal ) if pseudo : # preserve relative depth M [ : 3 , : 3 ] -= numpy . outer ( normal , normal ) M [ : 3 , 3 ] = numpy . dot ( point , normal ) * ( perspective + normal ) else : M [ : 3 , 3 ] = numpy . dot ( point , normal ) * perspective M [ 3 , : 3 ] = - normal M [ 3 , 3 ] = numpy . dot ( perspective , normal ) elif direction is not None : # parallel projection direction = numpy . array ( direction [ : 3 ] , dtype = numpy . float64 , copy = False ) scale = numpy . dot ( direction , normal ) M [ : 3 , : 3 ] -= numpy . outer ( direction , normal ) / scale M [ : 3 , 3 ] = direction * ( numpy . dot ( point , normal ) / scale ) else : # orthogonal projection M [ : 3 , : 3 ] -= numpy . outer ( normal , normal ) M [ : 3 , 3 ] = numpy . dot ( point , normal ) * normal return M | Return matrix to project onto plane defined by point and normal . | 386 | 12 |
241,565 | def projection_from_matrix ( matrix , pseudo = False ) : M = numpy . array ( matrix , dtype = numpy . float64 , copy = False ) M33 = M [ : 3 , : 3 ] l , V = numpy . linalg . eig ( M ) i = numpy . where ( abs ( numpy . real ( l ) - 1.0 ) < 1e-8 ) [ 0 ] if not pseudo and len ( i ) : # point: any eigenvector corresponding to eigenvalue 1 point = numpy . real ( V [ : , i [ - 1 ] ] ) . squeeze ( ) point /= point [ 3 ] # direction: unit eigenvector corresponding to eigenvalue 0 l , V = numpy . linalg . eig ( M33 ) i = numpy . where ( abs ( numpy . real ( l ) ) < 1e-8 ) [ 0 ] if not len ( i ) : raise ValueError ( "no eigenvector corresponding to eigenvalue 0" ) direction = numpy . real ( V [ : , i [ 0 ] ] ) . squeeze ( ) direction /= vector_norm ( direction ) # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 l , V = numpy . linalg . eig ( M33 . T ) i = numpy . where ( abs ( numpy . real ( l ) ) < 1e-8 ) [ 0 ] if len ( i ) : # parallel projection normal = numpy . real ( V [ : , i [ 0 ] ] ) . squeeze ( ) normal /= vector_norm ( normal ) return point , normal , direction , None , False else : # orthogonal projection, where normal equals direction vector return point , direction , None , None , False else : # perspective projection i = numpy . where ( abs ( numpy . real ( l ) ) > 1e-8 ) [ 0 ] if not len ( i ) : raise ValueError ( "no eigenvector not corresponding to eigenvalue 0" ) point = numpy . real ( V [ : , i [ - 1 ] ] ) . squeeze ( ) point /= point [ 3 ] normal = - M [ 3 , : 3 ] perspective = M [ : 3 , 3 ] / numpy . dot ( point [ : 3 ] , normal ) if pseudo : perspective -= normal return point , normal , None , perspective , pseudo | Return projection plane and perspective point from projection matrix . | 524 | 10 |
241,566 | def unit_vector ( data , axis = None , out = None ) : if out is None : data = numpy . array ( data , dtype = numpy . float64 , copy = True ) if data . ndim == 1 : data /= math . sqrt ( numpy . dot ( data , data ) ) return data else : if out is not data : out [ : ] = numpy . array ( data , copy = False ) data = out length = numpy . atleast_1d ( numpy . sum ( data * data , axis ) ) numpy . sqrt ( length , length ) if axis is not None : length = numpy . expand_dims ( length , axis ) data /= length if out is None : return data | Return ndarray normalized by length i . e . eucledian norm along axis . | 163 | 19 |
241,567 | def json_numpy_obj_hook ( dct ) : if isinstance ( dct , dict ) and '__ndarray__' in dct : data = np . asarray ( dct [ '__ndarray__' ] , dtype = dct [ 'dtype' ] ) return data . reshape ( dct [ 'shape' ] ) return dct | Decodes a previously encoded numpy ndarray with proper shape and dtype . | 81 | 17 |
241,568 | def dump ( * args , * * kwargs ) : kwargs . update ( dict ( cls = NumpyEncoder , sort_keys = True , indent = 4 , separators = ( ',' , ': ' ) ) ) return _json . dump ( * args , * * kwargs ) | Dump a numpy . ndarray to file stream . | 67 | 13 |
241,569 | def load ( * args , * * kwargs ) : kwargs . update ( dict ( object_hook = json_numpy_obj_hook ) ) return _json . load ( * args , * * kwargs ) | Load an numpy . ndarray from a file stream . | 50 | 13 |
241,570 | def default ( self , obj ) : if isinstance ( obj , np . ndarray ) : return dict ( __ndarray__ = obj . tolist ( ) , dtype = str ( obj . dtype ) , shape = obj . shape ) # Let the base class default method raise the TypeError return _json . JSONEncoder ( self , obj ) | Converts an ndarray into a dictionary for efficient serialization . | 76 | 14 |
241,571 | def _preallocate_samples ( self ) : self . prealloc_samples_ = [ ] for i in range ( self . num_prealloc_samples_ ) : self . prealloc_samples_ . append ( self . sample ( ) ) | Preallocate samples for faster adaptive sampling . | 57 | 9 |
241,572 | def rvs ( self , size = 1 , iteration = 1 ) : if self . num_prealloc_samples_ > 0 : samples = [ ] for i in range ( size ) : samples . append ( self . prealloc_samples_ [ ( iteration + i ) % self . num_prealloc_samples_ ] ) if size == 1 : return samples [ 0 ] return samples # generate a new sample return self . sample ( size = size ) | Sample the random variable using the preallocated samples if possible . | 98 | 13 |
241,573 | def sample ( self , size = 1 ) : samples = [ ] for i in range ( size ) : # sample random pose xi = self . _r_xi_rv . rvs ( size = 1 ) S_xi = skew ( xi ) R_sample = scipy . linalg . expm ( S_xi ) t_sample = self . _t_rv . rvs ( size = 1 ) samples . append ( RigidTransform ( rotation = R_sample , translation = t_sample , from_frame = self . _from_frame , to_frame = self . _to_frame ) ) # not a list if only 1 sample if size == 1 and len ( samples ) > 0 : return samples [ 0 ] return samples | Sample rigid transform random variables . | 163 | 6 |
241,574 | def _flush ( self ) : if self . _recording : raise Exception ( "Cannot flush data queue while recording!" ) if self . _saving_cache : logging . warn ( "Flush when using cache means unsaved data will be lost and not returned!" ) self . _cmds_q . put ( ( "reset_data_segment" , ) ) else : data = self . _extract_q ( 0 ) return data | Returns a list of all current data | 95 | 7 |
241,575 | def _stop ( self ) : self . _pause ( ) self . _cmds_q . put ( ( "stop" , ) ) try : self . _recorder . terminate ( ) except Exception : pass self . _recording = False | Stops recording . Returns all recorded data and their timestamps . Destroys recorder process . | 52 | 20 |
241,576 | def _listdir ( self , root ) : res = [ ] for name in os . listdir ( root ) : path = os . path . join ( root , name ) if os . path . isdir ( path ) : name += os . sep res . append ( name ) return res | List directory root appending the path separator to subdirs . | 61 | 14 |
241,577 | def complete_extra ( self , args ) : # treat the last arg as a path and complete it if len ( args ) == 0 : return self . _listdir ( './' ) return self . _complete_path ( args [ - 1 ] ) | Completions for the extra command . | 54 | 8 |
241,578 | def complete ( self , text , state ) : # dexnet entity tab completion results = [ w for w in self . words if w . startswith ( text ) ] + [ None ] if results != [ None ] : return results [ state ] buffer = readline . get_line_buffer ( ) line = readline . get_line_buffer ( ) . split ( ) # dexnet entity tab completion results = [ w for w in self . words if w . startswith ( text ) ] + [ None ] if results != [ None ] : return results [ state ] # account for last argument ending in a space if RE_SPACE . match ( buffer ) : line . append ( '' ) return ( self . complete_extra ( line ) + [ None ] ) [ state ] | Generic readline completion entry point . | 166 | 7 |
241,579 | def stop ( self ) : self . _cmds_q . put ( ( "stop" , ) ) for recorder in self . _data_stream_recorders : recorder . _stop ( ) try : self . _syncer . terminate ( ) except Exception : pass | Stops syncer operations . Destroys syncer process . | 57 | 13 |
241,580 | def configure_root ( ) : root_logger = logging . getLogger ( ) # clear any existing handles to streams because we don't want duplicate logs # NOTE: we assume that any stream handles we find are to ROOT_LOG_STREAM, which is usually the case(because it is stdout). This is fine because we will be re-creating that handle. Otherwise we might be deleting a handle that won't be re-created, which could result in dropped logs. for hdlr in root_logger . handlers : if isinstance ( hdlr , logging . StreamHandler ) : root_logger . removeHandler ( hdlr ) # configure the root logger root_logger . setLevel ( ROOT_LOG_LEVEL ) hdlr = logging . StreamHandler ( ROOT_LOG_STREAM ) formatter = colorlog . ColoredFormatter ( '%(purple)s%(name)-10s %(log_color)s%(levelname)-8s%(reset)s %(white)s%(message)s' , reset = True , log_colors = { 'DEBUG' : 'cyan' , 'INFO' : 'green' , 'WARNING' : 'yellow' , 'ERROR' : 'red' , 'CRITICAL' : 'red,bg_white' , } ) hdlr . setFormatter ( formatter ) root_logger . addHandler ( hdlr ) | Configure the root logger . | 315 | 6 |
241,581 | def add_root_log_file ( log_file ) : root_logger = logging . getLogger ( ) # add a file handle to the root logger hdlr = logging . FileHandler ( log_file ) formatter = logging . Formatter ( '%(asctime)s %(name)-10s %(levelname)-8s %(message)s' , datefmt = '%m-%d %H:%M:%S' ) hdlr . setFormatter ( formatter ) root_logger . addHandler ( hdlr ) root_logger . info ( 'Root logger now logging to {}' . format ( log_file ) ) | Add a log file to the root logger . | 150 | 9 |
241,582 | def add_log_file ( logger , log_file , global_log_file = False ) : if global_log_file : add_root_log_file ( log_file ) else : hdlr = logging . FileHandler ( log_file ) formatter = logging . Formatter ( '%(asctime)s %(name)-10s %(levelname)-8s %(message)s' , datefmt = '%m-%d %H:%M:%S' ) hdlr . setFormatter ( formatter ) logger . addHandler ( hdlr ) | Add a log file to this logger . If global_log_file is true log_file will be handed the root logger otherwise it will only be used by this particular logger . | 132 | 36 |
241,583 | def get_module_profile ( module , name = None ) : try : # if profile is defined we just use it return module . profile except AttributeError : # > 'module' object has no attribute 'profile' # try to create one on the fly. # e.g. module.__name__ == "fontbakery.profiles.cmap" if 'profile_factory' not in module . __dict__ : return None default_section = Section ( name or module . __name__ ) profile = module . profile_factory ( default_section = default_section ) profile . auto_register ( module . __dict__ ) return profile | Get or create a profile from a module and return it . | 139 | 12 |
241,584 | def iterargs ( self ) : iterargs = OrderedDict ( ) for name in self . _iterargs : plural = self . _profile . iterargs [ name ] iterargs [ name ] = tuple ( self . _values [ plural ] ) return iterargs | uses the singular name as key | 56 | 6 |
241,585 | def _exec_check ( self , check : FontbakeryCallable , args : Dict [ str , Any ] ) : try : # A check can be either a normal function that returns one Status or a # generator that yields one or more. The latter will return a generator # object that we can detect with types.GeneratorType. result = check ( * * args ) # Might raise. if isinstance ( result , types . GeneratorType ) : # Iterate over sub-results one-by-one, list(result) would abort on # encountering the first exception. for sub_result in result : # Might raise. yield self . _check_result ( sub_result ) return # Do not fall through to rest of method. except Exception as e : error = FailedCheckError ( e ) result = ( ERROR , error ) yield self . _check_result ( result ) | Yields check sub results . | 185 | 7 |
241,586 | def check_order ( self , order ) : own_order = self . order for item in order : if item not in own_order : raise ValueError ( f'Order item {item} not found.' ) return order | order must be a subset of self . order | 47 | 9 |
241,587 | def add_check ( self , check ) : if self . _add_check_callback is not None : if not self . _add_check_callback ( self , check ) : # rejected, skip! return False self . _checkid2index [ check . id ] = len ( self . _checks ) self . _checks . append ( check ) return True | Please use rather register_check as a decorator . | 77 | 11 |
241,588 | def merge_section ( self , section , filter_func = None ) : for check in section . checks : if filter_func and not filter_func ( check ) : continue self . add_check ( check ) | Add section . checks to self if not skipped by self . _add_check_callback . order description etc . are not updated . | 45 | 27 |
241,589 | def validate_values ( self , values ) : format_message = '{}: {} (value: {})' . format messages = [ ] for name , value in values . items ( ) : if name not in self . expected_values : continue valid , message = self . expected_values [ name ] . validate ( value ) if valid : continue messages . append ( format_message ( name , message , value ) ) if len ( messages ) : return False , '\n' . join ( messages ) return True , None | Validate values if they are registered as expected_values and present . | 109 | 14 |
241,590 | def _get_aggregate_args ( self , item , key ) : if not key in ( 'args' , 'mandatoryArgs' ) : raise TypeError ( 'key must be "args" or "mandatoryArgs", got {}' ) . format ( key ) dependencies = list ( getattr ( item , key ) ) if hasattr ( item , 'conditions' ) : dependencies += [ name for negated , name in map ( is_negated , item . conditions ) ] args = set ( ) while dependencies : name = dependencies . pop ( ) if name in args : continue args . add ( name ) # if this is a condition, expand its dependencies c = self . conditions . get ( name , None ) if c is None : continue dependencies += [ dependency for dependency in getattr ( c , key ) if dependency not in args ] return args | Get all arguments or mandatory arguments of the item . | 181 | 10 |
241,591 | def get_iterargs ( self , item ) : # iterargs should always be mandatory, unless there's a good reason # not to, which I can't think of right now. args = self . _get_aggregate_args ( item , 'mandatoryArgs' ) return tuple ( sorted ( [ arg for arg in args if arg in self . iterargs ] ) ) | Returns a tuple of all iterags for item sorted by name . | 79 | 13 |
241,592 | def auto_register ( self , symbol_table , filter_func = None , profile_imports = None ) : if profile_imports : symbol_table = symbol_table . copy ( ) # Avoid messing with original table symbol_table [ 'profile_imports' ] = profile_imports all_items = list ( symbol_table . values ( ) ) + self . _load_profile_imports ( symbol_table ) namespace_types = ( FontBakeryCondition , FontBakeryExpectedValue ) namespace_items = [ ] for item in all_items : if isinstance ( item , namespace_types ) : # register these after all modules have been registered. That way, # "local" items can optionally force override items registered # previously by modules. namespace_items . append ( item ) elif isinstance ( item , FontBakeryCheck ) : if filter_func and not filter_func ( 'check' , item . id , item ) : continue self . register_check ( item ) elif isinstance ( item , types . ModuleType ) : if filter_func and not filter_func ( 'module' , item . __name__ , item ) : continue profile = get_module_profile ( item ) if profile : self . merge_profile ( profile , filter_func = filter_func ) for item in namespace_items : if isinstance ( item , FontBakeryCondition ) : if filter_func and not filter_func ( 'condition' , item . name , item ) : continue self . register_condition ( item ) elif isinstance ( item , FontBakeryExpectedValue ) : if filter_func and not filter_func ( 'expected_value' , item . name , item ) : continue self . register_expected_value ( item ) | Register items from symbol_table in the profile . | 378 | 10 |
241,593 | def merge_profile ( self , profile , filter_func = None ) : # 'iterargs', 'derived_iterables', 'aliases', 'conditions', 'expected_values' for ns_type in self . _valid_namespace_types : # this will raise a NamespaceError if an item of profile.{ns_type} # is already registered. ns_dict = getattr ( profile , ns_type ) if filter_func : ns_type_singular = self . _valid_namespace_types [ ns_type ] ns_dict = { name : item for name , item in ns_dict . items ( ) if filter_func ( ns_type_singular , name , item ) } self . _add_dict_to_namespace ( ns_type , ns_dict ) check_filter_func = None if not filter_func else lambda check : filter_func ( 'check' , check . id , check ) for section in profile . sections : my_section = self . _sections . get ( str ( section ) , None ) if not len ( section . checks ) : continue if my_section is None : # create a new section: don't change other module/profile contents my_section = section . clone ( check_filter_func ) self . add_section ( my_section ) else : # order, description are not updated my_section . merge_section ( section , check_filter_func ) | Copy all namespace items from profile to self . | 310 | 9 |
241,594 | def serialize_identity ( self , identity ) : section , check , iterargs = identity values = map ( # separators are without space, which is the default in JavaScript; # just in case we need to make these keys in JS. partial ( json . dumps , separators = ( ',' , ':' ) ) # iterargs are sorted, because it doesn't matter for the result # but it gives more predictable keys. # Though, arguably, the order generated by the profile is also good # and conveys insights on how the order came to be (clustering of # iterargs). `sorted(iterargs)` however is more robust over time, # the keys will be the same, even if the sorting order changes. , [ str ( section ) , check . id , sorted ( iterargs ) ] ) return '{{"section":{},"check":{},"iterargs":{}}}' . format ( * values ) | Return a json string that can also be used as a key . | 197 | 13 |
241,595 | def get_profile ( ) : argument_parser = ThrowingArgumentParser ( add_help = False ) argument_parser . add_argument ( 'profile' ) try : args , _ = argument_parser . parse_known_args ( ) except ArgumentParserError : # silently fails, the main parser will show usage string. return Profile ( ) imported = get_module ( args . profile ) profile = get_module_profile ( imported ) if not profile : raise Exception ( f"Can't get a profile from {imported}." ) return profile | Prefetch the profile module to fill some holes in the help text . | 116 | 14 |
241,596 | def collate_fonts_data ( fonts_data ) : glyphs = { } for family in fonts_data : for glyph in family : if glyph [ 'unicode' ] not in glyphs : glyphs [ glyph [ 'unicode' ] ] = glyph else : c = glyphs [ glyph [ 'unicode' ] ] [ 'contours' ] glyphs [ glyph [ 'unicode' ] ] [ 'contours' ] = c \| glyph [ 'contours' ] return glyphs . values ( ) | Collate individual fonts data into a single glyph data list . | 112 | 12 |
241,597 | def com_adobe_fonts_check_family_consistent_upm ( ttFonts ) : upm_set = set ( ) for ttFont in ttFonts : upm_set . add ( ttFont [ 'head' ] . unitsPerEm ) if len ( upm_set ) > 1 : yield FAIL , ( "Fonts have different units per em: {}." ) . format ( sorted ( upm_set ) ) else : yield PASS , "Fonts have consistent units per em." | Fonts have consistent Units Per Em? | 115 | 8 |
241,598 | def com_adobe_fonts_check_find_empty_letters ( ttFont ) : cmap = ttFont . getBestCmap ( ) passed = True # http://unicode.org/reports/tr44/#General_Category_Values letter_categories = { 'Ll' , 'Lm' , 'Lo' , 'Lt' , 'Lu' , } invisible_letters = { 0x115F , 0x1160 , 0x3164 , 0xFFA0 , # Hangul filler chars (category='Lo') } for unicode_val , glyph_name in cmap . items ( ) : category = unicodedata . category ( chr ( unicode_val ) ) if ( _quick_and_dirty_glyph_is_empty ( ttFont , glyph_name ) ) and ( category in letter_categories ) and ( unicode_val not in invisible_letters ) : yield FAIL , ( "U+%04X should be visible, but its glyph ('%s') is empty." % ( unicode_val , glyph_name ) ) passed = False if passed : yield PASS , "No empty glyphs for letters found." | Letters in font have glyphs that are not empty? | 257 | 12 |
241,599 | def com_adobe_fonts_check_name_empty_records ( ttFont ) : failed = False for name_record in ttFont [ 'name' ] . names : name_string = name_record . toUnicode ( ) . strip ( ) if len ( name_string ) == 0 : failed = True name_key = tuple ( [ name_record . platformID , name_record . platEncID , name_record . langID , name_record . nameID ] ) yield FAIL , ( "'name' table record with key={} is " "empty and should be removed." ) . format ( name_key ) if not failed : yield PASS , ( "No empty name table records found." ) | Check name table for empty records . | 157 | 7 |
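
Each `question` cell above holds whitespace-tokenized Python source (every token separated by a single space, with newlines and indentation dropped), and each `target` cell holds the similarly tokenized docstring. Below is a minimal sketch for collapsing such a token stream back into more readable one-line code; the regex heuristics are an illustration assumed for this format, not the dataset's official detokenizer.

```python
import re

def detokenize(tokens: str) -> str:
    # Heuristically rejoin a space-tokenized Python snippet.
    # Best-effort only: the tokenization discarded newlines and
    # indentation, which cannot be recovered here.
    code = tokens
    # Drop spaces before closing brackets and punctuation.
    code = re.sub(r"\s+([)\]}:,])", r"\1", code)
    # Drop spaces after opening brackets.
    code = re.sub(r"([(\[{])\s+", r"\1", code)
    # Reattach attribute access: "np . mean" -> "np.mean".
    code = re.sub(r"\s+\.\s+", ".", code)
    # Glue call parentheses to the callee: "mean (" -> "mean(".
    code = re.sub(r"(\w)\s+\(", r"\1(", code)
    return code

sample = "def mean ( self ) : return np . mean ( self . _data , axis = 1 )"
print(detokenize(sample))
# -> def mean(self): return np.mean(self._data, axis = 1)
```

One trade-off worth noting: gluing every `name (` pair also affects keywords followed by parentheses (e.g. `return (` becomes `return(`), which is usually harmless for reading but means the output is not guaranteed to round-trip back to the original token stream.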