idx | question | target | len_question | len_target |
|---|---|---|---|---|
235,900 | def _get_tree_paths ( tree , node_id , depth = 0 ) : if node_id == _tree . TREE_LEAF : raise ValueError ( "Invalid node_id %s" % _tree . TREE_LEAF ) left_child = tree . children_left [ node_id ] right_child = tree . children_right [ node_id ] if left_child != _tree . TREE_LEAF : left_paths = _get_tree_paths ( tree , left_child , depth = depth + 1 ) right_paths = _get_tree_paths ( tree , right_child , depth = depth + 1 ) for path in left_paths : path . append ( node_id ) for path in right_paths : path . append ( node_id ) paths = left_paths + right_paths else : paths = [ [ node_id ] ] return paths | Returns all paths through the tree as list of node_ids | 204 | 12 |
235,901 | def extract_keywords_from_text ( self , text ) : sentences = nltk . tokenize . sent_tokenize ( text ) self . extract_keywords_from_sentences ( sentences ) | Method to extract keywords from the text provided . | 45 | 9 |
235,902 | def extract_keywords_from_sentences ( self , sentences ) : phrase_list = self . _generate_phrases ( sentences ) self . _build_frequency_dist ( phrase_list ) self . _build_word_co_occurance_graph ( phrase_list ) self . _build_ranklist ( phrase_list ) | Method to extract keywords from the list of sentences provided . | 75 | 11 |
235,903 | def _build_word_co_occurance_graph ( self , phrase_list ) : co_occurance_graph = defaultdict ( lambda : defaultdict ( lambda : 0 ) ) for phrase in phrase_list : # For each phrase in the phrase list, count co-occurances of the # word with other words in the phrase. # # Note: Keep the co-occurances graph as is, to help facilitate its # use in other creative ways if required later. for ( word , coword ) in product ( phrase , phrase ) : co_occurance_graph [ word ] [ coword ] += 1 self . degree = defaultdict ( lambda : 0 ) for key in co_occurance_graph : self . degree [ key ] = sum ( co_occurance_graph [ key ] . values ( ) ) | Builds the co - occurance graph of words in the given body of text to compute degree of each word . | 174 | 23 |
235,904 | def _build_ranklist ( self , phrase_list ) : self . rank_list = [ ] for phrase in phrase_list : rank = 0.0 for word in phrase : if self . metric == Metric . DEGREE_TO_FREQUENCY_RATIO : rank += 1.0 * self . degree [ word ] / self . frequency_dist [ word ] elif self . metric == Metric . WORD_DEGREE : rank += 1.0 * self . degree [ word ] else : rank += 1.0 * self . frequency_dist [ word ] self . rank_list . append ( ( rank , " " . join ( phrase ) ) ) self . rank_list . sort ( reverse = True ) self . ranked_phrases = [ ph [ 1 ] for ph in self . rank_list ] | Method to rank each contender phrase using the formula | 181 | 9 |
235,905 | def _generate_phrases ( self , sentences ) : phrase_list = set ( ) # Create contender phrases from sentences. for sentence in sentences : word_list = [ word . lower ( ) for word in wordpunct_tokenize ( sentence ) ] phrase_list . update ( self . _get_phrase_list_from_words ( word_list ) ) return phrase_list | Method to generate contender phrases given the sentences of the text document . | 84 | 13 |
235,906 | def _retain_centroids ( numbers , thres ) : numbers . sort ( ) prev = - 1 ret = [ ] for n in numbers : if prev < 0 or n - prev > thres : ret . append ( n ) prev = n return ret | Only keep one number for each cluster within thres of each other | 56 | 13 |
235,907 | def _split_vlines_hlines ( lines ) : vlines , hlines = [ ] , [ ] for line in lines : ( vlines if line . x1 - line . x0 < 0.1 else hlines ) . append ( line ) return vlines , hlines | Separates lines into horizontal and vertical ones | 61 | 9 |
235,908 | def _npiter ( arr ) : for a in np . nditer ( arr , flags = [ "refs_ok" ] ) : c = a . item ( ) if c is not None : yield c | Wrapper for iterating numpy array | 46 | 8 |
235,909 | def get_normalized_grid ( self ) : log = logging . getLogger ( __name__ ) # Resolve multirow mentions, TODO: validate against all PDFs # subcol_count = 0 mega_rows = [ ] for row_id , row in enumerate ( self . _grid ) : # maps yc_grid -> [mentions] subrow_across_cell = defaultdict ( list ) for col_id , cell in enumerate ( row ) : # Keep cell text in reading order cell . texts . sort ( key = cmp_to_key ( reading_order ) ) log . debug ( "=" * 50 ) for m in cell . texts : subrow_across_cell [ m . yc_grid ] . append ( m ) # prev = m log . debug ( pformat ( dict ( subrow_across_cell ) ) ) mega_rows . append ( subrow_across_cell ) # Multiline paragraph check # Subrow/Subcolumn return mega_rows | Analyzes subcell structure | 220 | 5 |
235,910 | def _mark_grid_bounds ( self , plane , region_bbox ) : # Grid boundaries vbars = np . zeros ( [ self . num_rows , self . num_cols + 1 ] , dtype = np . bool ) hbars = np . zeros ( [ self . num_rows + 1 , self . num_cols ] , dtype = np . bool ) def closest_idx ( arr , elem ) : left = bisect . bisect_left ( arr , elem ) - 1 right = bisect . bisect_right ( arr , elem ) - 1 return left if abs ( arr [ left ] - elem ) < abs ( arr [ right ] - elem ) else right # Figure out which separating segments are missing, i.e. merge cells for row , ( y0 , y1 ) in enumerate ( self . yranges ) : yc = ( y0 + y1 ) // 2 for l in plane . find ( ( region_bbox . x0 , yc , region_bbox . x1 , yc ) ) : vbars [ row , closest_idx ( self . xs , l . xc ) ] = True for col , ( x0 , x1 ) in enumerate ( self . xranges ) : xc = ( x0 + x1 ) // 2 for l in plane . find ( ( xc , region_bbox . y0 , xc , region_bbox . y1 ) ) : hbars [ closest_idx ( self . ys , l . yc ) , col ] = True return vbars , hbars | Assume all lines define a complete grid over the region_bbox . Detect which lines are missing so that we can recover merged cells . | 354 | 28 |
235,911 | def vectorize ( e , tolerance = 0.1 ) : tolerance = max ( tolerance , e . linewidth ) is_high = e . height > tolerance is_wide = e . width > tolerance # if skewed towards a line if is_wide and not is_high : return ( e . width , 0.0 ) if is_high and not is_wide : return ( 0.0 , e . height ) | vectorizes the pdf object s bounding box min_width is the width under which we consider it a line instead of a big rectangle | 89 | 27 |
235,912 | def aligned ( e1 , e2 ) : return ( any ( close ( c1 , c2 ) for c1 , c2 in zip ( e1 . bbox , e2 . bbox ) ) or x_center_aligned ( e1 , e2 ) or y_center_aligned ( e1 , e2 ) ) | alignment is determined by two boxes having one exactly the same attribute which could mean parallel perpendicularly forming a corner etc . | 71 | 24 |
235,913 | def bound_bboxes ( bboxes ) : group_x0 = min ( map ( lambda l : l [ x0 ] , bboxes ) ) group_y0 = min ( map ( lambda l : l [ y0 ] , bboxes ) ) group_x1 = max ( map ( lambda l : l [ x1 ] , bboxes ) ) group_y1 = max ( map ( lambda l : l [ y1 ] , bboxes ) ) return ( group_x0 , group_y0 , group_x1 , group_y1 ) | Finds the minimal bbox that contains all given bboxes | 120 | 12 |
235,914 | def bound_elems ( elems ) : group_x0 = min ( map ( lambda l : l . x0 , elems ) ) group_y0 = min ( map ( lambda l : l . y0 , elems ) ) group_x1 = max ( map ( lambda l : l . x1 , elems ) ) group_y1 = max ( map ( lambda l : l . y1 , elems ) ) return ( group_x0 , group_y0 , group_x1 , group_y1 ) | Finds the minimal bbox that contains all given elems | 116 | 12 |
235,915 | def intersect ( a , b ) : if a [ x0 ] == a [ x1 ] or a [ y0 ] == a [ y1 ] : return False if b [ x0 ] == b [ x1 ] or b [ y0 ] == b [ y1 ] : return False return a [ x0 ] <= b [ x1 ] and b [ x0 ] <= a [ x1 ] and a [ y0 ] <= b [ y1 ] and b [ y0 ] <= a [ y1 ] | Check if two rectangles intersect | 110 | 6 |
235,916 | def reading_order ( e1 , e2 ) : b1 = e1 . bbox b2 = e2 . bbox if round ( b1 [ y0 ] ) == round ( b2 [ y0 ] ) or round ( b1 [ y1 ] ) == round ( b2 [ y1 ] ) : return float_cmp ( b1 [ x0 ] , b2 [ x0 ] ) return float_cmp ( b1 [ y0 ] , b2 [ y0 ] ) | A comparator to sort bboxes from top to bottom left to right | 107 | 14 |
235,917 | def xy_reading_order ( e1 , e2 ) : b1 = e1 . bbox b2 = e2 . bbox if round ( b1 [ x0 ] ) == round ( b2 [ x0 ] ) : return float_cmp ( b1 [ y0 ] , b2 [ y0 ] ) return float_cmp ( b1 [ x0 ] , b2 [ x0 ] ) | A comparator to sort bboxes from left to right top to bottom | 90 | 14 |
235,918 | def column_order ( b1 , b2 ) : ( top , left , bottom ) = ( 1 , 2 , 3 ) # TODO(senwu): Reimplement the functionality of this comparator to # detect the number of columns, and sort those in reading order. # TODO: This is just a simple top to bottom, left to right comparator # for now. if round ( b1 [ top ] ) == round ( b2 [ top ] ) or round ( b1 [ bottom ] ) == round ( b2 [ bottom ] ) : return float_cmp ( b1 [ left ] , b2 [ left ] ) return float_cmp ( b1 [ top ] , b2 [ top ] ) | A comparator that sorts bboxes first by columns where a column is made up of all bboxes that overlap then by vertical position in each column . | 151 | 30 |
235,919 | def merge_intervals ( elems , overlap_thres = 2.0 ) : overlap_thres = max ( 0.0 , overlap_thres ) ordered = sorted ( elems , key = lambda e : e . x0 ) intervals = [ ] cur = [ - overlap_thres , - overlap_thres ] for e in ordered : if e . x0 - cur [ 1 ] > overlap_thres : # Check interval validity if cur [ 1 ] > 0.0 : intervals . append ( cur ) cur = [ e . x0 , e . x1 ] continue cur [ 1 ] = max ( cur [ 1 ] , e . x1 ) intervals . append ( cur ) # Freeze the interval to tuples return map ( tuple , intervals ) | Project in x axis Sort by start Go through segments and keep max x1 | 163 | 15 |
235,920 | def predict_heatmap ( pdf_path , page_num , model , img_dim = 448 , img_dir = "tmp/img" ) : if not os . path . isdir ( img_dir ) : print ( "\nCreating image folder at {}" . format ( img_dir ) ) os . makedirs ( img_dir ) pdf_name = os . path . splitext ( os . path . basename ( pdf_path ) ) [ 0 ] # TODO: add hashing function to make sure name is unique # TODO: add parallelization img_path = os . path . join ( img_dir , pdf_name + "-{}.png" . format ( page_num ) ) if not os . path . isfile ( img_path ) : # create image for a page in the pdf document and save it in img_dir save_image ( pdf_path , img_path , page_num ) image = load_img ( img_path , grayscale = True , target_size = ( img_dim , img_dim ) ) image = img_to_array ( image , data_format = K . image_data_format ( ) ) image = ( image . reshape ( ( img_dim , img_dim , 1 ) ) . repeat ( 3 , axis = 2 ) . reshape ( ( 1 , img_dim , img_dim , 3 ) ) ) return ( image . astype ( np . uint8 ) . reshape ( ( img_dim , img_dim , 3 ) ) , model . predict ( image ) . reshape ( ( img_dim , img_dim ) ) , ) | Return an image corresponding to the page of the pdf documents saved at pdf_path . If the image is not found in img_dir this function creates it and saves it in img_dir . | 353 | 39 |
235,921 | def do_intersect ( bb1 , bb2 ) : if bb1 [ 0 ] + bb1 [ 2 ] < bb2 [ 0 ] or bb2 [ 0 ] + bb2 [ 2 ] < bb1 [ 0 ] : return False if bb1 [ 1 ] + bb1 [ 3 ] < bb2 [ 1 ] or bb2 [ 1 ] + bb2 [ 3 ] < bb1 [ 1 ] : return False return True | Helper function that returns True if two bounding boxes overlap . | 107 | 12 |
235,922 | def get_bboxes ( img , mask , nb_boxes = 100 , score_thresh = 0.5 , iou_thresh = 0.2 , prop_size = 0.09 , prop_scale = 1.2 , ) : min_size = int ( img . shape [ 0 ] * prop_size * img . shape [ 1 ] * prop_size ) scale = int ( img . shape [ 0 ] * prop_scale ) # TODO: cross validate for multiple values of prop_size, prop_scale, and nb_bboxes img_lbl , regions = selectivesearch . selective_search ( img , scale = scale , sigma = 0.8 , min_size = min_size ) rect = [ None ] * nb_boxes max_iou = - 1 * np . ones ( nb_boxes ) mask = 1.0 * ( mask > score_thresh ) # compute iou for each candidate bounding box and save top nb_bboxes for region in regions : left , top , width , height = region [ "rect" ] intersection = mask [ top : top + height , left : left + width ] . sum ( ) union = height * width + mask . sum ( ) - intersection iou = intersection / union idx = np . argmin ( max_iou ) if iou > max_iou [ idx ] : max_iou [ idx ] = iou rect [ idx ] = region [ "rect" ] # Exclusive maximum remove_indexes = max_iou == - 1 bboxes = [ ] filtered_ious = [ ] for idx in np . argsort ( [ - x for x in max_iou ] ) : if remove_indexes [ idx ] : # no more tables bounding boxes break if len ( bboxes ) == 0 : # first candidate table bounding box if max_iou [ idx ] > iou_thresh : bboxes += [ rect [ idx ] ] filtered_ious += [ max_iou [ idx ] ] else : # No tables in this document break else : # If it doesn't intersect with any other bounding box if not any ( [ do_intersect ( rect [ idx ] , bboxes [ k ] ) for k in range ( len ( bboxes ) ) ] ) : if max_iou [ idx ] > iou_thresh : bboxes += [ rect [ idx ] ] filtered_ious += [ max_iou [ idx ] ] return bboxes , filtered_ious | Uses selective search to generate candidate bounding boxes and keeps the ones that have the largest iou with the predicted mask . | 553 | 25 |
235,923 | def _print_dict ( elem_dict ) : for key , value in sorted ( elem_dict . iteritems ( ) ) : if isinstance ( value , collections . Iterable ) : print ( key , len ( value ) ) else : print ( key , value ) | Print a dict in a readable way | 59 | 7 |
235,924 | def _font_of_mention ( m ) : for ch in m : if isinstance ( ch , LTChar ) and ch . get_text ( ) . isalnum ( ) : return ( ch . fontname , _font_size_of ( ch ) ) return ( None , 0 ) | Returns the font type and size of the first alphanumeric char in the text or None if there isn t any . | 64 | 24 |
235,925 | def _allowed_char ( c ) : c = ord ( c ) if c < 0 : return False if c < 128 : return _ascii_allowed [ c ] # Generally allow unicodes, TODO: check for unicode control characters return True | Returns whether the given unicode char is allowed in output | 58 | 11 |
235,926 | def keep_allowed_chars ( text ) : # print ','.join(str(ord(c)) for c in text) return "" . join ( " " if c == "\n" else c for c in text . strip ( ) if _allowed_char ( c ) ) | Cleans the text for output | 61 | 6 |
235,927 | def paint_path ( self , gstate , stroke , fill , evenodd , path ) : shape = "" . join ( x [ 0 ] for x in path ) prev_split = 0 for i in range ( len ( shape ) ) : if shape [ i ] == "m" and prev_split != i : self . paint_single_path ( gstate , stroke , fill , evenodd , path [ prev_split : i ] ) prev_split = i if shape [ i ] == "h" : self . paint_single_path ( gstate , stroke , fill , evenodd , path [ prev_split : i + 1 ] ) prev_split = i + 1 # clean up remaining segments if prev_split < len ( shape ) : self . paint_single_path ( gstate , stroke , fill , evenodd , path [ prev_split : ] ) | Converting long paths to small segments each time we m = Move or h = ClosePath for polygon | 186 | 21 |
235,928 | def paint_single_path ( self , gstate , stroke , fill , evenodd , path ) : if len ( path ) < 2 : return shape = "" . join ( x [ 0 ] for x in path ) pts = [ ] for p in path : for i in range ( 1 , len ( p ) , 2 ) : pts . append ( apply_matrix_pt ( self . ctm , ( p [ i ] , p [ i + 1 ] ) ) ) # Line mode if self . line_only_shape . match ( shape ) : # check for sloped lines first has_slope = False for i in range ( len ( pts ) - 1 ) : if pts [ i ] [ 0 ] != pts [ i + 1 ] [ 0 ] and pts [ i ] [ 1 ] != pts [ i + 1 ] [ 1 ] : has_slope = True break if not has_slope : for i in range ( len ( pts ) - 1 ) : self . cur_item . add ( LTLine ( gstate . linewidth , pts [ i ] , pts [ i + 1 ] ) ) # Adding the closing line for a polygon, especially rectangles if shape . endswith ( "h" ) : self . cur_item . add ( LTLine ( gstate . linewidth , pts [ 0 ] , pts [ - 1 ] ) ) return # Add the curve as an arbitrary polyline (bezier curve info is lost here) self . cur_item . add ( LTCurve ( gstate . linewidth , pts ) ) | Converting a single path draw command into lines and curves objects | 337 | 12 |
235,929 | def traverse_layout ( root , callback ) : callback ( root ) if isinstance ( root , collections . Iterable ) : for child in root : traverse_layout ( child , callback ) | Tree walker and invokes the callback as it traverse pdf object tree | 39 | 14 |
235,930 | def get_near_items ( tree , tree_key ) : try : yield tree . floor_item ( tree_key ) except KeyError : pass try : yield tree . ceiling_item ( tree_key ) except KeyError : pass | Check both possible neighbors for key in a binary tree | 50 | 10 |
235,931 | def align_add ( tree , key , item , align_thres = 2.0 ) : for near_key , near_list in get_near_items ( tree , key ) : if abs ( key - near_key ) < align_thres : near_list . append ( item ) return # Create a new group if no items are close tree [ key ] = [ item ] | Adding the item object to a binary tree with the given key while allow for small key differences close_enough_func that checks if two keys are within threshold | 83 | 31 |
235,932 | def collect_table_content ( table_bboxes , elems ) : # list of table content chars table_contents = [ [ ] for _ in range ( len ( table_bboxes ) ) ] prev_content = None prev_bbox = None for cid , c in enumerate ( elems ) : # Annotations should not fall outside alone if isinstance ( c , LTAnno ) : if prev_content is not None : prev_content . append ( c ) continue # Generally speaking table contents should be included sequentially # and we can avoid checking all tables for elems inside # Elements only need to intersect the bbox for table as some # formatting of fonts may result in slightly out of bbox text if prev_bbox is not None and intersect ( prev_bbox , c . bbox ) : prev_content . append ( c ) continue # Search the rest of the tables for membership when done with # the current one for table_id , table_bbox in enumerate ( table_bboxes ) : if intersect ( table_bbox , c . bbox ) : prev_bbox = table_bbox prev_content = table_contents [ table_id ] prev_content . append ( c ) break return table_contents | Returns a list of elements that are contained inside the corresponding supplied bbox . | 267 | 15 |
235,933 | def project_onto ( objs , axis , min_gap_size = 4.0 ) : if axis == "x" : axis = 0 if axis == "y" : axis = 1 axis_end = axis + 2 if axis == 0 : # if projecting onto X axis objs . sort ( key = lambda o : o . x0 ) else : objs . sort ( key = lambda o : o . y0 ) intervals = [ ] groups = [ ] start_i = 0 start = objs [ 0 ] . bbox [ axis ] end = objs [ 0 ] . bbox [ axis_end ] # Use _inf_bbox to trigger the last interval divide for o_i , o in enumerate ( chain ( objs , [ _inf_bbox ] ) ) : # Get current interval o_start = o . bbox [ axis ] o_end = o . bbox [ axis_end ] # start new interval when gap with previous end is big if o_start > end + min_gap_size : # Append new interval coordinates for children intervals . append ( ( start , end ) ) # Append child object group on page groups . append ( objs [ start_i : o_i ] ) # Mark next obj list range start_i = o_i start = o_start # Always check to extend current interval to new end if o_end > end : end = o_end # else do nothing return intervals , groups | Projects object bboxes onto the axis and return the unioned intervals and groups of objects in intervals . | 312 | 21 |
235,934 | def draw_rect ( self , bbox , cell_val ) : new_x0 = int ( bbox [ x0 ] ) new_y0 = int ( bbox [ y0 ] ) new_x1 = max ( new_x0 + 1 , int ( bbox [ x1 ] ) ) new_y1 = max ( new_y0 + 1 , int ( bbox [ y1 ] ) ) self . grid [ new_x0 : new_x1 , new_y0 : new_y1 ] = cell_val | Fills the bbox with the content values Float bbox values are normalized to have non - zero area | 119 | 21 |
235,935 | def parse_layout ( elems , font_stat , combine = False ) : boxes_segments = elems . segments boxes_curves = elems . curves boxes_figures = elems . figures page_width = elems . layout . width # page_height = elems.layout.height boxes = elems . mentions avg_font_pts = get_most_common_font_pts ( elems . mentions , font_stat ) width = get_page_width ( boxes + boxes_segments + boxes_figures + boxes_curves ) char_width = get_char_width ( boxes ) grid_size = avg_font_pts / 2.0 for i , m in enumerate ( boxes + elems . figures ) : m . id = i m . feats = defaultdict ( bool ) prefix = "" if isinstance ( m , LTTextLine ) and m . font_name : prefix = m . font_name + "-" + str ( m . font_size ) + "-" m . xc = ( m . x0 + m . x1 ) / 2.0 m . yc = ( m . y0 + m . y1 ) / 2.0 m . feats [ prefix + "x0" ] = m . x0_grid = m . x0 // grid_size m . feats [ prefix + "x1" ] = m . x1_grid = m . x1 // grid_size m . feats [ prefix + "xc" ] = m . xc_grid = m . xc // grid_size m . feats [ prefix + "yc" ] = m . yc_grid = m . yc // grid_size tbls , tbl_features = cluster_vertically_aligned_boxes ( boxes , elems . layout . bbox , avg_font_pts , width , char_width , boxes_segments , boxes_curves , boxes_figures , page_width , combine , ) return tbls , tbl_features | Parses pdf texts into a hypergraph grouped into rows and columns and then output | 440 | 17 |
235,936 | def merge_nodes ( nodes , plane , page_stat , merge_indices ) : # Merge inner boxes to the best outer box # nodes.sort(key=Node.area) to_be_removed = set ( ) for inner_idx in range ( len ( nodes ) ) : inner = nodes [ inner_idx ] outers = [ ] outers_indices = [ ] for outer_idx in range ( len ( nodes ) ) : outer = nodes [ outer_idx ] if outer is inner or outer in to_be_removed : continue if intersect ( outer . bbox , inner . bbox ) : outers . append ( outer ) outers_indices . append ( outer_idx ) if not outers : continue # Best is defined as min L1 distance to outer center best_outer = min ( outers , key = lambda outer : l1 ( center ( outer . bbox ) , center ( inner . bbox ) ) ) best_outer_idx = outers_indices [ outers . index ( best_outer ) ] to_be_removed . add ( inner ) best_outer . merge ( inner ) for cid_iter in range ( len ( merge_indices ) ) : if merge_indices [ cid_iter ] == merge_indices [ inner_idx ] : merge_indices [ cid_iter ] = merge_indices [ best_outer_idx ] return nodes , merge_indices | Merges overlapping nodes | 322 | 4 |
235,937 | def _get_cols ( row_content ) : cols = [ ] subcell_col = [ ] prev_bar = None for _coord , item in row_content : if isinstance ( item , LTTextLine ) : subcell_col . append ( item ) else : # bar, add column content # When there is no content, we count a None column if prev_bar : bar_ranges = ( prev_bar , item ) col_items = subcell_col if subcell_col else [ None ] cols . extend ( [ bar_ranges , col_items ] ) prev_bar = item subcell_col = [ ] # Remove extra column before first bar return cols | Counting the number of columns based on the content of this row | 151 | 12 |
235,938 | def _one_contains_other ( s1 , s2 ) : return min ( len ( s1 ) , len ( s2 ) ) == len ( s1 & s2 ) | Whether one set contains the other | 40 | 6 |
235,939 | def is_table ( self ) : if self . type_counts [ "text" ] < 6 or "figure" in self . type_counts : return False for e in self . elems : # Characters written as curve are usually small, discard diagrams here if elem_type ( e ) == "curve" and e . height * e . width > 100 : return False # import re # space_re = '\\s+' # ws_arr = [] # whitespace_aligned = False # for elem in self.elems: # elem_ws = [] # for m in re.finditer(space_re, elem.get_text()): # elem_ws.append(m.start()) # # print elem, elem_ws # if(len(elem_ws)>0): # ws_arr.append(elem_ws) # # print ws_arr # if(len(ws_arr)>0): # count_arr = max([ws_arr.count(i) for i in ws_arr]) # if(float(count_arr)/len(ws_arr) > 0.75): # return True if ( self . sum_elem_bbox / ( self . height * self . width ) ) > self . table_area_threshold : return False has_many_x_align = False has_many_y_align = False for k , v in six . iteritems ( self . feat_counts ) : font_key = k [ 0 ] if ( v >= 2 and "-" in font_key ) : # Text row or column with more than 2 elements if font_key [ - 2 ] == "x" : has_many_x_align = True if font_key [ - 2 ] == "y" : has_many_y_align = True return has_many_x_align and has_many_y_align | Count the node s number of mention alignment in both axes to determine if the node is a table . | 422 | 22 |
235,940 | def get_grid ( self ) : mentions , lines = _split_text_n_lines ( self . elems ) # Sort mentions in reading order where y values are snapped to half # height-sized grid mentions . sort ( key = lambda m : ( m . yc_grid , m . xc ) ) grid = Grid ( mentions , lines , self ) return grid | Standardize the layout of the table into grids | 79 | 9 |
235,941 | def lazy_load_font ( font_size = default_font_size ) : if font_size not in _font_cache : if _platform . startswith ( "darwin" ) : font_path = "/Library/Fonts/Arial.ttf" elif _platform . startswith ( "linux" ) : font_path = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf" elif _platform . startswith ( "win32" ) : font_path = "C:\\Windows\\Fonts\\arial.ttf" _font_cache [ font_size ] = ImageFont . truetype ( font_path , font_size ) return _font_cache [ font_size ] | Lazy loading font according to system platform | 174 | 8 |
235,942 | def render_debug_img ( file_name , page_num , elems , nodes = [ ] , scaler = 1 , print_segments = False , print_curves = True , print_table_bbox = True , print_text_as_rect = True , ) : # For debugging show the boolean pixels in black white grayscale height = scaler * int ( elems . layout . height ) width = scaler * int ( elems . layout . width ) debug_img , draw = create_img ( ( 0 , 0 , width , height ) ) font = lazy_load_font ( ) large_font = lazy_load_font ( 24 ) if print_curves : for i , c in enumerate ( elems . curves ) : if len ( c . pts ) > 1 : draw . polygon ( c . pts , outline = blue ) draw . rectangle ( c . bbox , fill = blue ) # for fig in elems.figures: # draw.rectangle(fig.bbox, fill = blue) for i , m in enumerate ( elems . mentions ) : if isinstance ( m , LTAnno ) : continue if print_text_as_rect : fill = "pink" if hasattr ( m , "feats" ) and m . feats [ "is_cell" ] else green # fill = green draw . rectangle ( m . bbox , fill = fill ) # draw.text(center(m.bbox), str(i), black, font = font) # Draw id draw . text ( m . bbox [ : 2 ] , m . get_text ( ) , black , font = font ) # Draw mention content else : draw . text ( m . bbox [ : 2 ] , m . get_text ( ) , "black" , font = font ) if print_segments : # draw skeleton for all segments for i , s in enumerate ( elems . segments ) : draw . line ( s . bbox , fill = "black" ) if print_table_bbox : for node in nodes : is_table = node . is_table ( ) color = "red" if is_table else "green" draw . rectangle ( node . bbox , outline = color ) if is_table : # text = 'Borderless' if node.is_borderless() else 'Bordered' text = "Table" draw . rectangle ( node . bbox , outline = color ) draw . text ( node . bbox [ : 2 ] , text , red , font = large_font ) # Water mark with file name so we can identify among multiple images if file_name and page_num is not None : water_mark = ( file_name + ":page " + str ( page_num + 1 ) + "@%dx%d" % ( width , height ) ) draw . text ( ( 10 , 10 ) , water_mark , black , font = font ) debug_img . show ( ) return debug_img | Shows an image rendering of the pdf page along with debugging info printed | 644 | 14 |
235,943 | def _partition_estimators ( n_estimators , n_jobs ) : # Compute the number of jobs if n_jobs == - 1 : n_jobs = min ( cpu_count ( ) , n_estimators ) else : n_jobs = min ( n_jobs , n_estimators ) # Partition estimators between jobs n_estimators_per_job = ( n_estimators // n_jobs ) * np . ones ( n_jobs , dtype = np . int ) n_estimators_per_job [ : n_estimators % n_jobs ] += 1 starts = np . cumsum ( n_estimators_per_job ) return n_jobs , n_estimators_per_job . tolist ( ) , [ 0 ] + starts . tolist ( ) | Private function used to partition estimators between jobs . | 186 | 10 |
235,944 | def _parallel_build_estimators ( n_estimators , ensemble , X , y , cost_mat , seeds , verbose ) : # Retrieve settings n_samples , n_features = X . shape max_samples = ensemble . max_samples max_features = ensemble . max_features if ( not isinstance ( max_samples , ( numbers . Integral , np . integer ) ) and ( 0.0 < max_samples <= 1.0 ) ) : max_samples = int ( max_samples * n_samples ) if ( not isinstance ( max_features , ( numbers . Integral , np . integer ) ) and ( 0.0 < max_features <= 1.0 ) ) : max_features = int ( max_features * n_features ) bootstrap = ensemble . bootstrap bootstrap_features = ensemble . bootstrap_features # Build estimators estimators = [ ] estimators_samples = [ ] estimators_features = [ ] for i in range ( n_estimators ) : if verbose > 1 : print ( ( "building estimator %d of %d" % ( i + 1 , n_estimators ) ) ) random_state = check_random_state ( seeds [ i ] ) seed = check_random_state ( random_state . randint ( MAX_INT ) ) estimator = ensemble . _make_estimator ( append = False ) try : # Not all estimator accept a random_state estimator . set_params ( random_state = seed ) except ValueError : pass # Draw features if bootstrap_features : features = random_state . randint ( 0 , n_features , max_features ) else : features = sample_without_replacement ( n_features , max_features , random_state = random_state ) # Draw samples, using a mask, and then fit if bootstrap : indices = random_state . randint ( 0 , n_samples , max_samples ) else : indices = sample_without_replacement ( n_samples , max_samples , random_state = random_state ) sample_counts = np . bincount ( indices , minlength = n_samples ) estimator . fit ( ( X [ indices ] ) [ : , features ] , y [ indices ] , cost_mat [ indices , : ] ) samples = sample_counts > 0. estimators . append ( estimator ) estimators_samples . append ( samples ) estimators_features . append ( features ) return estimators , estimators_samples , estimators_features | Private function used to build a batch of estimators within a job . | 566 | 14 |
235,945 | def _parallel_predict ( estimators , estimators_features , X , n_classes , combination , estimators_weight ) : n_samples = X . shape [ 0 ] pred = np . zeros ( ( n_samples , n_classes ) ) n_estimators = len ( estimators ) for estimator , features , weight in zip ( estimators , estimators_features , estimators_weight ) : # Resort to voting predictions = estimator . predict ( X [ : , features ] ) for i in range ( n_samples ) : if combination == 'weighted_voting' : pred [ i , int ( predictions [ i ] ) ] += 1 * weight else : pred [ i , int ( predictions [ i ] ) ] += 1 return pred | Private function used to compute predictions within a job . | 168 | 10 |
235,946 | def _create_stacking_set ( estimators , estimators_features , estimators_weight , X , combination ) : n_samples = X . shape [ 0 ] valid_estimators = np . nonzero ( estimators_weight ) [ 0 ] n_valid_estimators = valid_estimators . shape [ 0 ] X_stacking = np . zeros ( ( n_samples , n_valid_estimators ) ) for e in range ( n_valid_estimators ) : if combination in [ 'stacking' , 'stacking_bmr' ] : X_stacking [ : , e ] = estimators [ valid_estimators [ e ] ] . predict ( X [ : , estimators_features [ valid_estimators [ e ] ] ] ) elif combination in [ 'stacking_proba' , 'stacking_proba_bmr' ] : X_stacking [ : , e ] = estimators [ valid_estimators [ e ] ] . predict_proba ( X [ : , estimators_features [ valid_estimators [ e ] ] ] ) [ : , 1 ] return X_stacking | Private function used to create the stacking training set . | 260 | 10 |
235,947 | def _fit_bmr_model ( self , X , y ) : self . f_bmr = BayesMinimumRiskClassifier ( ) X_bmr = self . predict_proba ( X ) self . f_bmr . fit ( y , X_bmr ) return self | Private function used to fit the BayesMinimumRisk model . | 64 | 13 |
235,948 | def _fit_stacking_model ( self , X , y , cost_mat , max_iter = 100 ) : self . f_staking = CostSensitiveLogisticRegression ( verbose = self . verbose , max_iter = max_iter ) X_stacking = _create_stacking_set ( self . estimators_ , self . estimators_features_ , self . estimators_weight_ , X , self . combination ) self . f_staking . fit ( X_stacking , y , cost_mat ) return self | Private function used to fit the stacking model . | 118 | 9 |
235,949 | def _evaluate_oob_savings ( self , X , y , cost_mat ) : estimators_weight = [ ] for estimator , samples , features in zip ( self . estimators_ , self . estimators_samples_ , self . estimators_features_ ) : # Test if all examples where used for training if not np . any ( ~ samples ) : # Then use training oob_pred = estimator . predict ( X [ : , features ] ) oob_savings = max ( 0 , savings_score ( y , oob_pred , cost_mat ) ) else : # Then use OOB oob_pred = estimator . predict ( ( X [ ~ samples ] ) [ : , features ] ) oob_savings = max ( 0 , savings_score ( y [ ~ samples ] , oob_pred , cost_mat [ ~ samples ] ) ) estimators_weight . append ( oob_savings ) # Control in case were all weights are 0 if sum ( estimators_weight ) == 0 : self . estimators_weight_ = np . ones ( len ( estimators_weight ) ) / len ( estimators_weight ) else : self . estimators_weight_ = ( np . array ( estimators_weight ) / sum ( estimators_weight ) ) . tolist ( ) return self | Private function used to calculate the OOB Savings of each estimator . | 289 | 14 |
235,950 | def predict ( self , X , cost_mat = None ) : # Check data # X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) # Dont in version 0.15 if self . n_features_ != X . shape [ 1 ] : raise ValueError ( "Number of features of the model must " "match the input. Model n_features is {0} and " "input n_features is {1}." "" . format ( self . n_features_ , X . shape [ 1 ] ) ) #TODO: check if combination in possible combinations if self . combination in [ 'stacking' , 'stacking_proba' ] : X_stacking = _create_stacking_set ( self . estimators_ , self . estimators_features_ , self . estimators_weight_ , X , self . combination ) return self . f_staking . predict ( X_stacking ) elif self . combination in [ 'majority_voting' , 'weighted_voting' ] : # Parallel loop n_jobs , n_estimators , starts = _partition_estimators ( self . n_estimators , self . n_jobs ) all_pred = Parallel ( n_jobs = n_jobs , verbose = self . verbose ) ( delayed ( _parallel_predict ) ( self . estimators_ [ starts [ i ] : starts [ i + 1 ] ] , self . estimators_features_ [ starts [ i ] : starts [ i + 1 ] ] , X , self . n_classes_ , self . combination , self . estimators_weight_ [ starts [ i ] : starts [ i + 1 ] ] ) for i in range ( n_jobs ) ) # Reduce pred = sum ( all_pred ) / self . n_estimators return self . classes_ . take ( np . argmax ( pred , axis = 1 ) , axis = 0 ) elif self . combination in [ 'majority_bmr' , 'weighted_bmr' , 'stacking_bmr' , 'stacking_proba_bmr' ] : #TODO: Add check if cost_mat == None X_bmr = self . predict_proba ( X ) return self . f_bmr . predict ( X_bmr , cost_mat ) | Predict class for X . | 517 | 6 |
235,951 | def predict_proba ( self , X ) : # Check data # X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) # Dont in version 0.15 if self . n_features_ != X . shape [ 1 ] : raise ValueError ( "Number of features of the model must " "match the input. Model n_features is {0} and " "input n_features is {1}." "" . format ( self . n_features_ , X . shape [ 1 ] ) ) # Parallel loop n_jobs , n_estimators , starts = _partition_estimators ( self . n_estimators , self . n_jobs ) all_proba = Parallel ( n_jobs = n_jobs , verbose = self . verbose ) ( delayed ( _parallel_predict_proba ) ( self . estimators_ [ starts [ i ] : starts [ i + 1 ] ] , self . estimators_features_ [ starts [ i ] : starts [ i + 1 ] ] , X , self . n_classes_ , self . combination , self . estimators_weight_ [ starts [ i ] : starts [ i + 1 ] ] ) for i in range ( n_jobs ) ) # Reduce if self . combination in [ 'majority_voting' , 'majority_bmr' ] : proba = sum ( all_proba ) / self . n_estimators elif self . combination in [ 'weighted_voting' , 'weighted_bmr' ] : proba = sum ( all_proba ) elif self . combination in [ 'stacking' , 'stacking_proba' , 'stacking_bmr' , 'stacking_proba_bmr' ] : X_stacking = _create_stacking_set ( self . estimators_ , self . estimators_features_ , self . estimators_weight_ , X , self . combination ) proba = self . f_staking . predict_proba ( X_stacking ) return proba | Predict class probabilities for X . | 457 | 7 |
235,952 | def cost_sampling ( X , y , cost_mat , method = 'RejectionSampling' , oversampling_norm = 0.1 , max_wc = 97.5 ) : #TODO: Check consistency of input # The methods are construct only for the misclassification costs, not the full cost matrix. cost_mis = cost_mat [ : , 0 ] cost_mis [ y == 1 ] = cost_mat [ y == 1 , 1 ] # wc = cost_mis / cost_mis.max() wc = np . minimum ( cost_mis / np . percentile ( cost_mis , max_wc ) , 1 ) n_samples = X . shape [ 0 ] filter_ = list ( range ( n_samples ) ) if method == 'RejectionSampling' : # under-sampling by rejection [1] #TODO: Add random state rej_rand = np . random . rand ( n_samples ) filter_ = rej_rand <= wc elif method == 'OverSampling' : # over-sampling with normalized wn [2] wc_n = np . ceil ( wc / oversampling_norm ) . astype ( np . int ) new_n = wc_n . sum ( ) filter_ = np . ones ( new_n , dtype = np . int ) e = 0 #TODO replace for for i in range ( n_samples ) : filter_ [ e : e + wc_n [ i ] ] = i e += wc_n [ i ] x_cps = X [ filter_ ] y_cps = y [ filter_ ] cost_mat_cps = cost_mat [ filter_ ] return x_cps , y_cps , cost_mat_cps | Cost - proportionate sampling . | 397 | 6 |
235,953 | def _creditscoring_costmat ( income , debt , pi_1 , cost_mat_parameters ) : def calculate_a ( cl_i , int_ , n_term ) : """ Private function """ return cl_i * ( ( int_ * ( 1 + int_ ) ** n_term ) / ( ( 1 + int_ ) ** n_term - 1 ) ) def calculate_pv ( a , int_ , n_term ) : """ Private function """ return a / int_ * ( 1 - 1 / ( 1 + int_ ) ** n_term ) #Calculate credit line Cl def calculate_cl ( k , inc_i , cl_max , debt_i , int_r , n_term ) : """ Private function """ cl_k = k * inc_i A = calculate_a ( cl_k , int_r , n_term ) Cl_debt = calculate_pv ( inc_i * min ( A / inc_i , 1 - debt_i ) , int_r , n_term ) return min ( cl_k , cl_max , Cl_debt ) #calculate costs def calculate_cost_fn ( cl_i , lgd ) : return cl_i * lgd def calculate_cost_fp ( cl_i , int_r , n_term , int_cf , pi_1 , lgd , cl_avg ) : a = calculate_a ( cl_i , int_r , n_term ) pv = calculate_pv ( a , int_cf , n_term ) r = pv - cl_i r_avg = calculate_pv ( calculate_a ( cl_avg , int_r , n_term ) , int_cf , n_term ) - cl_avg cost_fp = r - ( 1 - pi_1 ) * r_avg + pi_1 * calculate_cost_fn ( cl_avg , lgd ) return max ( 0 , cost_fp ) v_calculate_cost_fp = np . vectorize ( calculate_cost_fp ) v_calculate_cost_fn = np . vectorize ( calculate_cost_fn ) v_calculate_cl = np . vectorize ( calculate_cl ) # Parameters k = cost_mat_parameters [ 'k' ] int_r = cost_mat_parameters [ 'int_r' ] n_term = cost_mat_parameters [ 'n_term' ] int_cf = cost_mat_parameters [ 'int_cf' ] lgd = cost_mat_parameters [ 'lgd' ] cl_max = cost_mat_parameters [ 'cl_max' ] cl = v_calculate_cl ( k , income , cl_max , debt , int_r , n_term ) cl_avg = cl . mean ( ) n_samples = income . shape [ 0 ] cost_mat = np . zeros ( ( n_samples , 4 ) ) #cost_mat[FP,FN,TP,TN] cost_mat [ : , 0 ] = v_calculate_cost_fp ( cl , int_r , n_term , int_cf , pi_1 , lgd , cl_avg ) cost_mat [ : , 1 ] = v_calculate_cost_fn ( cl , lgd ) cost_mat [ : , 2 ] = 0.0 cost_mat [ : , 3 ] = 0.0 return cost_mat | Private function to calculate the cost matrix of credit scoring models . | 773 | 12 |
235,954 | def predict_proba ( self , p ) : # TODO: Check input if p . size != p . shape [ 0 ] : p = p [ : , 1 ] calibrated_proba = np . zeros ( p . shape [ 0 ] ) for i in range ( self . calibration_map . shape [ 0 ] ) : calibrated_proba [ np . logical_and ( self . calibration_map [ i , 1 ] <= p , self . calibration_map [ i , 0 ] > p ) ] = self . calibration_map [ i , 2 ] # TODO: return 2D and refactor return calibrated_proba | Calculate the calibrated probabilities | 135 | 6 |
235,955 | def cross_val_score ( estimator , X , y = None , scoring = None , cv = None , n_jobs = 1 , verbose = 0 , fit_params = None , pre_dispatch = '2*n_jobs' ) : X , y = indexable ( X , y ) cv = _check_cv ( cv , X , y , classifier = is_classifier ( estimator ) ) scorer = check_scoring ( estimator , scoring = scoring ) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel ( n_jobs = n_jobs , verbose = verbose , pre_dispatch = pre_dispatch ) scores = parallel ( delayed ( _fit_and_score ) ( clone ( estimator ) , X , y , scorer , train , test , verbose , None , fit_params ) for train , test in cv ) return np . array ( scores ) [ : , 0 ] | Evaluate a score by cross - validation | 220 | 9 |
235,956 | def _safe_split ( estimator , X , y , indices , train_indices = None ) : if hasattr ( estimator , 'kernel' ) and isinstance ( estimator . kernel , collections . Callable ) : # cannot compute the kernel values with custom function raise ValueError ( "Cannot use a custom kernel function. " "Precompute the kernel matrix instead." ) if not hasattr ( X , "shape" ) : if getattr ( estimator , "_pairwise" , False ) : raise ValueError ( "Precomputed kernels or affinity matrices have " "to be passed as arrays or sparse matrices." ) X_subset = [ X [ idx ] for idx in indices ] else : if getattr ( estimator , "_pairwise" , False ) : # X is a precomputed square kernel matrix if X . shape [ 0 ] != X . shape [ 1 ] : raise ValueError ( "X should be a square kernel matrix" ) if train_indices is None : X_subset = X [ np . ix_ ( indices , indices ) ] else : X_subset = X [ np . ix_ ( indices , train_indices ) ] else : X_subset = safe_indexing ( X , indices ) if y is not None : y_subset = safe_indexing ( y , indices ) else : y_subset = None return X_subset , y_subset | Create subset of dataset and properly handle kernels . | 312 | 9 |
235,957 | def _score ( estimator , X_test , y_test , scorer ) : if y_test is None : score = scorer ( estimator , X_test ) else : score = scorer ( estimator , X_test , y_test ) if not isinstance ( score , numbers . Number ) : raise ValueError ( "scoring must return a number, got %s (%s) instead." % ( str ( score ) , type ( score ) ) ) return score | Compute the score of an estimator on a given test set . | 99 | 14 |
235,958 | def _shuffle ( y , labels , random_state ) : if labels is None : ind = random_state . permutation ( len ( y ) ) else : ind = np . arange ( len ( labels ) ) for label in np . unique ( labels ) : this_mask = ( labels == label ) ind [ this_mask ] = random_state . permutation ( ind [ this_mask ] ) return y [ ind ] | Return a shuffled copy of y eventually shuffle among same labels . | 92 | 13 |
235,959 | def check_cv ( cv , X = None , y = None , classifier = False ) : return _check_cv ( cv , X = X , y = y , classifier = classifier , warn_mask = True ) | Input checker utility for building a CV in a user friendly way . | 51 | 14 |
235,960 | def _borderlineSMOTE ( X , y , minority_target , N , k ) : n_samples , _ = X . shape #Learn nearest neighbours on complete training set neigh = NearestNeighbors ( n_neighbors = k ) neigh . fit ( X ) safe_minority_indices = list ( ) danger_minority_indices = list ( ) for i in range ( n_samples ) : if y [ i ] != minority_target : continue nn = neigh . kneighbors ( X [ i ] , return_distance = False ) majority_neighbours = 0 for n in nn [ 0 ] : if y [ n ] != minority_target : majority_neighbours += 1 if majority_neighbours == len ( nn ) : continue elif majority_neighbours < ( len ( nn ) / 2 ) : logger . debug ( "Add sample to safe minorities." ) safe_minority_indices . append ( i ) else : #DANGER zone danger_minority_indices . append ( i ) #SMOTE danger minority samples synthetic_samples = _SMOTE ( X [ danger_minority_indices ] , N , k , h = 0.5 ) return ( X [ safe_minority_indices ] , synthetic_samples , X [ danger_minority_indices ] ) | Returns synthetic minority samples . | 300 | 5 |
235,961 | def fit ( self , y_true_cal = None , y_prob_cal = None ) : if self . calibration : self . cal = ROCConvexHull ( ) self . cal . fit ( y_true_cal , y_prob_cal [ : , 1 ] ) | If calibration then train the calibration of probabilities | 65 | 8 |
235,962 | def fit ( self , y_prob , cost_mat , y_true ) : #TODO: Check input if self . calibration : cal = ROCConvexHull ( ) cal . fit ( y_true , y_prob [ : , 1 ] ) y_prob [ : , 1 ] = cal . predict_proba ( y_prob [ : , 1 ] ) y_prob [ : , 0 ] = 1 - y_prob [ : , 1 ] thresholds = np . unique ( y_prob ) cost = np . zeros ( thresholds . shape ) for i in range ( thresholds . shape [ 0 ] ) : pred = np . floor ( y_prob [ : , 1 ] + ( 1 - thresholds [ i ] ) ) cost [ i ] = cost_loss ( y_true , pred , cost_mat ) self . threshold_ = thresholds [ np . argmin ( cost ) ] return self | Calculate the optimal threshold using the ThresholdingOptimization . | 204 | 15 |
235,963 | def predict ( self , y_prob ) : y_pred = np . floor ( y_prob [ : , 1 ] + ( 1 - self . threshold_ ) ) return y_pred | Calculate the prediction using the ThresholdingOptimization . | 42 | 14 |
235,964 | def undersampling ( X , y , cost_mat = None , per = 0.5 ) : n_samples = X . shape [ 0 ] #TODO: allow y different from (0, 1) num_y1 = y . sum ( ) num_y0 = n_samples - num_y1 filter_rand = np . random . rand ( int ( num_y1 + num_y0 ) ) #TODO: rewrite in a more readable way if num_y1 < num_y0 : num_y0_new = num_y1 * 1.0 / per - num_y1 num_y0_new_per = num_y0_new * 1.0 / num_y0 filter_0 = np . logical_and ( y == 0 , filter_rand <= num_y0_new_per ) filter_ = np . nonzero ( np . logical_or ( y == 1 , filter_0 ) ) [ 0 ] else : num_y1_new = num_y0 * 1.0 / per - num_y0 num_y1_new_per = num_y1_new * 1.0 / num_y1 filter_1 = np . logical_and ( y == 1 , filter_rand <= num_y1_new_per ) filter_ = np . nonzero ( np . logical_or ( y == 0 , filter_1 ) ) [ 0 ] X_u = X [ filter_ , : ] y_u = y [ filter_ ] if not cost_mat is None : cost_mat_u = cost_mat [ filter_ , : ] return X_u , y_u , cost_mat_u else : return X_u , y_u | Under - sampling . | 383 | 4 |
235,965 | def _node_cost ( self , y_true , cost_mat ) : n_samples = len ( y_true ) # Evaluates the cost by predicting the node as positive and negative costs = np . zeros ( 2 ) costs [ 0 ] = cost_loss ( y_true , np . zeros ( y_true . shape ) , cost_mat ) costs [ 1 ] = cost_loss ( y_true , np . ones ( y_true . shape ) , cost_mat ) pi = np . array ( [ 1 - y_true . mean ( ) , y_true . mean ( ) ] ) if self . criterion == 'direct_cost' : costs = costs elif self . criterion == 'pi_cost' : costs *= pi elif self . criterion == 'gini_cost' : costs *= pi ** 2 elif self . criterion in 'entropy_cost' : if pi [ 0 ] == 0 or pi [ 1 ] == 0 : costs *= 0 else : costs *= - np . log ( pi ) y_pred = np . argmin ( costs ) # Calculate the predicted probability of a node using laplace correction. n_positives = y_true . sum ( ) y_prob = ( n_positives + 1.0 ) / ( n_samples + 2.0 ) return costs [ y_pred ] , y_pred , y_prob | Private function to calculate the cost of a node . | 305 | 10 |
235,966 | def _calculate_gain ( self , cost_base , y_true , X , cost_mat , split ) : # Check if cost_base == 0, then no gain is possible #TODO: This must be check in _best_split if cost_base == 0.0 : return 0.0 , int ( np . sign ( y_true . mean ( ) - 0.5 ) == 1 ) # In case cost_b==0 and pi_1!=(0,1) j , l = split filter_Xl = ( X [ : , j ] <= l ) filter_Xr = ~ filter_Xl n_samples , n_features = X . shape # Check if one of the leafs is empty #TODO: This must be check in _best_split if np . nonzero ( filter_Xl ) [ 0 ] . shape [ 0 ] in [ 0 , n_samples ] : # One leaft is empty return 0.0 , 0.0 # Split X in Xl and Xr according to rule split Xl_cost , Xl_pred , _ = self . _node_cost ( y_true [ filter_Xl ] , cost_mat [ filter_Xl , : ] ) Xr_cost , _ , _ = self . _node_cost ( y_true [ filter_Xr ] , cost_mat [ filter_Xr , : ] ) if self . criterion_weight : n_samples_Xl = np . nonzero ( filter_Xl ) [ 0 ] . shape [ 0 ] Xl_w = n_samples_Xl * 1.0 / n_samples Xr_w = 1 - Xl_w gain = round ( ( cost_base - ( Xl_w * Xl_cost + Xr_w * Xr_cost ) ) / cost_base , 6 ) else : gain = round ( ( cost_base - ( Xl_cost + Xr_cost ) ) / cost_base , 6 ) return gain , Xl_pred | Private function to calculate the gain in cost of using split in the current node . | 449 | 16 |
235,967 | def _best_split ( self , y_true , X , cost_mat ) : n_samples , n_features = X . shape num_pct = self . num_pct cost_base , y_pred , y_prob = self . _node_cost ( y_true , cost_mat ) # Calculate the gain of all features each split in num_pct gains = np . zeros ( ( n_features , num_pct ) ) pred = np . zeros ( ( n_features , num_pct ) ) splits = np . zeros ( ( n_features , num_pct ) ) # Selected features selected_features = np . arange ( 0 , self . n_features_ ) # Add random state np . random . shuffle ( selected_features ) selected_features = selected_features [ : self . max_features_ ] selected_features . sort ( ) #TODO: # Skip the CPU intensive evaluation of the impurity criterion for # features that were already detected as constant (hence not suitable # for good splitting) by ancestor nodes and save the information on # newly discovered constant features to spare computation on descendant # nodes. # For each feature test all possible splits for j in selected_features : splits [ j , : ] = np . percentile ( X [ : , j ] , np . arange ( 0 , 100 , 100.0 / num_pct ) . tolist ( ) ) for l in range ( num_pct ) : # Avoid repeated values, since np.percentile may return repeated values if l == 0 or ( l > 0 and splits [ j , l ] != splits [ j , l - 1 ] ) : split = ( j , splits [ j , l ] ) gains [ j , l ] , pred [ j , l ] = self . _calculate_gain ( cost_base , y_true , X , cost_mat , split ) best_split = np . unravel_index ( gains . argmax ( ) , gains . shape ) return ( best_split [ 0 ] , splits [ best_split ] ) , gains . max ( ) , pred [ best_split ] , y_pred , y_prob | Private function to calculate the split that gives the best gain . | 472 | 12 |
235,968 | def _tree_grow ( self , y_true , X , cost_mat , level = 0 ) : #TODO: Find error, add min_samples_split if len ( X . shape ) == 1 : tree = dict ( y_pred = y_true , y_prob = 0.5 , level = level , split = - 1 , n_samples = 1 , gain = 0 ) return tree # Calculate the best split of the current node split , gain , Xl_pred , y_pred , y_prob = self . _best_split ( y_true , X , cost_mat ) n_samples , n_features = X . shape # Construct the tree object as a dictionary #TODO: Convert tree to be equal to sklearn.tree.tree object tree = dict ( y_pred = y_pred , y_prob = y_prob , level = level , split = - 1 , n_samples = n_samples , gain = gain ) # Check the stopping criteria if gain < self . min_gain : return tree if self . max_depth is not None : if level >= self . max_depth : return tree if n_samples <= self . min_samples_split : return tree j , l = split filter_Xl = ( X [ : , j ] <= l ) filter_Xr = ~ filter_Xl n_samples_Xl = np . nonzero ( filter_Xl ) [ 0 ] . shape [ 0 ] n_samples_Xr = np . nonzero ( filter_Xr ) [ 0 ] . shape [ 0 ] if min ( n_samples_Xl , n_samples_Xr ) <= self . min_samples_leaf : return tree # No stopping criteria is met tree [ 'split' ] = split tree [ 'node' ] = self . tree_ . n_nodes self . tree_ . n_nodes += 1 tree [ 'sl' ] = self . _tree_grow ( y_true [ filter_Xl ] , X [ filter_Xl ] , cost_mat [ filter_Xl ] , level + 1 ) tree [ 'sr' ] = self . _tree_grow ( y_true [ filter_Xr ] , X [ filter_Xr ] , cost_mat [ filter_Xr ] , level + 1 ) return tree | Private recursive function to grow the decision tree . | 522 | 9 |
235,969 | def _nodes ( self , tree ) : def recourse ( temp_tree_ , nodes ) : if isinstance ( temp_tree_ , dict ) : if temp_tree_ [ 'split' ] != - 1 : nodes . append ( temp_tree_ [ 'node' ] ) if temp_tree_ [ 'split' ] != - 1 : for k in [ 'sl' , 'sr' ] : recourse ( temp_tree_ [ k ] , nodes ) return None nodes_ = [ ] recourse ( tree , nodes_ ) return nodes_ | Private function that find the number of nodes in a tree . | 117 | 12 |
235,970 | def _classify ( self , X , tree , proba = False ) : n_samples , n_features = X . shape predicted = np . ones ( n_samples ) # Check if final node if tree [ 'split' ] == - 1 : if not proba : predicted = predicted * tree [ 'y_pred' ] else : predicted = predicted * tree [ 'y_prob' ] else : j , l = tree [ 'split' ] filter_Xl = ( X [ : , j ] <= l ) filter_Xr = ~ filter_Xl n_samples_Xl = np . nonzero ( filter_Xl ) [ 0 ] . shape [ 0 ] n_samples_Xr = np . nonzero ( filter_Xr ) [ 0 ] . shape [ 0 ] if n_samples_Xl == 0 : # If left node is empty only continue with right predicted [ filter_Xr ] = self . _classify ( X [ filter_Xr , : ] , tree [ 'sr' ] , proba ) elif n_samples_Xr == 0 : # If right node is empty only continue with left predicted [ filter_Xl ] = self . _classify ( X [ filter_Xl , : ] , tree [ 'sl' ] , proba ) else : predicted [ filter_Xl ] = self . _classify ( X [ filter_Xl , : ] , tree [ 'sl' ] , proba ) predicted [ filter_Xr ] = self . _classify ( X [ filter_Xr , : ] , tree [ 'sr' ] , proba ) return predicted | Private function that classify a dataset using tree . | 360 | 9 |
235,971 | def predict ( self , X ) : #TODO: Check consistency of X if self . pruned : tree_ = self . tree_ . tree_pruned else : tree_ = self . tree_ . tree return self . _classify ( X , tree_ , proba = False ) | Predict class of X . | 63 | 6 |
235,972 | def predict_proba ( self , X ) : #TODO: Check consistency of X n_samples , n_features = X . shape prob = np . zeros ( ( n_samples , 2 ) ) if self . pruned : tree_ = self . tree_ . tree_pruned else : tree_ = self . tree_ . tree prob [ : , 1 ] = self . _classify ( X , tree_ , proba = True ) prob [ : , 0 ] = 1 - prob [ : , 1 ] return prob | Predict class probabilities of the input samples X . | 117 | 10 |
235,973 | def _delete_node ( self , tree , node ) : # Calculate gains temp_tree = copy . deepcopy ( tree ) def recourse ( temp_tree_ , del_node ) : if isinstance ( temp_tree_ , dict ) : if temp_tree_ [ 'split' ] != - 1 : if temp_tree_ [ 'node' ] == del_node : del temp_tree_ [ 'sr' ] del temp_tree_ [ 'sl' ] del temp_tree_ [ 'node' ] temp_tree_ [ 'split' ] = - 1 else : for k in [ 'sl' , 'sr' ] : recourse ( temp_tree_ [ k ] , del_node ) return None recourse ( temp_tree , node ) return temp_tree | Private function that eliminate node from tree . | 167 | 8 |
235,974 | def _pruning ( self , X , y_true , cost_mat ) : # Calculate gains nodes = self . _nodes ( self . tree_ . tree_pruned ) n_nodes = len ( nodes ) gains = np . zeros ( n_nodes ) y_pred = self . _classify ( X , self . tree_ . tree_pruned ) cost_base = cost_loss ( y_true , y_pred , cost_mat ) for m , node in enumerate ( nodes ) : # Create temporal tree by eliminating node from tree_pruned temp_tree = self . _delete_node ( self . tree_ . tree_pruned , node ) y_pred = self . _classify ( X , temp_tree ) nodes_pruned = self . _nodes ( temp_tree ) # Calculate %gain gain = ( cost_base - cost_loss ( y_true , y_pred , cost_mat ) ) / cost_base # Calculate %gain_size gain_size = ( len ( nodes ) - len ( nodes_pruned ) ) * 1.0 / len ( nodes ) # Calculate weighted gain gains [ m ] = gain * gain_size best_gain = np . max ( gains ) best_node = nodes [ int ( np . argmax ( gains ) ) ] if best_gain > self . min_gain : self . tree_ . tree_pruned = self . _delete_node ( self . tree_ . tree_pruned , best_node ) # If best tree is not root node, then recursively pruning the tree if best_node != 0 : self . _pruning ( X , y_true , cost_mat ) | Private function that prune the decision tree . | 371 | 9 |
235,975 | def pruning ( self , X , y , cost_mat ) : self . tree_ . tree_pruned = copy . deepcopy ( self . tree_ . tree ) if self . tree_ . n_nodes > 0 : self . _pruning ( X , y , cost_mat ) nodes_pruned = self . _nodes ( self . tree_ . tree_pruned ) self . tree_ . n_nodes_pruned = len ( nodes_pruned ) | Function that prune the decision tree . | 105 | 8 |
235,976 | def cost_loss ( y_true , y_pred , cost_mat ) : #TODO: update description #TODO: Check consistency of cost_mat y_true = column_or_1d ( y_true ) y_true = ( y_true == 1 ) . astype ( np . float ) y_pred = column_or_1d ( y_pred ) y_pred = ( y_pred == 1 ) . astype ( np . float ) cost = y_true * ( ( 1 - y_pred ) * cost_mat [ : , 1 ] + y_pred * cost_mat [ : , 2 ] ) cost += ( 1 - y_true ) * ( y_pred * cost_mat [ : , 0 ] + ( 1 - y_pred ) * cost_mat [ : , 3 ] ) return np . sum ( cost ) | Cost classification loss . | 189 | 4 |
235,977 | def savings_score ( y_true , y_pred , cost_mat ) : #TODO: update description #TODO: Check consistency of cost_mat y_true = column_or_1d ( y_true ) y_pred = column_or_1d ( y_pred ) n_samples = len ( y_true ) # Calculate the cost of naive prediction cost_base = min ( cost_loss ( y_true , np . zeros ( n_samples ) , cost_mat ) , cost_loss ( y_true , np . ones ( n_samples ) , cost_mat ) ) cost = cost_loss ( y_true , y_pred , cost_mat ) return 1.0 - cost / cost_base | Savings score . | 166 | 4 |
235,978 | def brier_score_loss ( y_true , y_prob ) : y_true = column_or_1d ( y_true ) y_prob = column_or_1d ( y_prob ) return np . mean ( ( y_true - y_prob ) ** 2 ) | Compute the Brier score | 68 | 6 |
235,979 | def _logistic_cost_loss ( w , X , y , cost_mat , alpha ) : if w . shape [ 0 ] == w . size : # Only evaluating one w return _logistic_cost_loss_i ( w , X , y , cost_mat , alpha ) else : # Evaluating a set of w n_w = w . shape [ 0 ] out = np . zeros ( n_w ) for i in range ( n_w ) : out [ i ] = _logistic_cost_loss_i ( w [ i ] , X , y , cost_mat , alpha ) return out | Computes the logistic loss . | 134 | 7 |
235,980 | def predict ( self , X , cut_point = 0.5 ) : return np . floor ( self . predict_proba ( X ) [ : , 1 ] + ( 1 - cut_point ) ) | Predict the class by thresholding the positive class probability at cut_point . | 44 | 4 |
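The floor trick in the predict above is worth spelling out: floor(p + (1 - cut_point)) equals 1 exactly when p >= cut_point. A standalone check with made-up probabilities:

    import numpy as np
    p = np.array([0.2, 0.5, 0.8])
    cut_point = 0.5
    print(np.floor(p + (1 - cut_point)))  # [0. 1. 1.] -- thresholded at 0.5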
235,981 | def list_tags ( userdata ) : macros = re . findall ( '@(.*?)@' , userdata ) logging . info ( 'List of available macros:' ) for macro in macros : logging . info ( '\t%r' , macro ) | List all used macros within a UserData script . | 56 | 10 |
235,982 | def handle_tags ( userdata , macros ) : macro_vars = re . findall ( '@(.*?)@' , userdata ) for macro_var in macro_vars : if macro_var == '!all_macros_export' : macro_var_export_list = [ ] for defined_macro in macros : macro_var_export_list . append ( 'export %s="%s"' % ( defined_macro , macros [ defined_macro ] ) ) macro_var_exports = "\n" . join ( macro_var_export_list ) userdata = userdata . replace ( '@%s@' % macro_var , macro_var_exports ) elif macro_var == "!all_macros_docker" : macro_var_export_list = [ ] for defined_macro in macros : macro_var_export_list . append ( "-e '%s=%s'" % ( defined_macro , macros [ defined_macro ] ) ) macro_var_exports = " " . join ( macro_var_export_list ) userdata = userdata . replace ( '@%s@' % macro_var , macro_var_exports ) else : if "|" in macro_var : macro_var , default_value = macro_var . split ( '|' ) if macro_var not in macros : logging . warning ( 'Using default variable value %s for @%s@ ' , default_value , macro_var ) value = default_value else : value = macros [ macro_var ] userdata = userdata . replace ( '@%s|%s@' % ( macro_var , default_value ) , value ) else : if macro_var not in macros : logging . error ( 'Undefined variable @%s@ in UserData script' , macro_var ) return None userdata = userdata . replace ( '@%s@' % macro_var , macros [ macro_var ] ) return userdata | Insert macro values or auto export variables in UserData scripts . | 442 | 12 |
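A sketch of the substitution performed by handle_tags above, using the '@name@' and '@name|default@' forms the function recognises (script and values are made up):

    userdata = 'export HOST="@HOST@"; export PORT="@PORT|8080@"'
    print(handle_tags(userdata, {'HOST': 'example.com'}))
    # -> export HOST="example.com"; export PORT="8080"
    # (a warning is logged because @PORT|8080@ fell back to its default)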
235,983 | def retry_on_ec2_error ( self , func , * args , ** kwargs ) : exception_retry_count = 6 while True : try : return func ( * args , ** kwargs ) except ( boto . exception . EC2ResponseError , ssl . SSLError ) as msg : exception_retry_count -= 1 if exception_retry_count <= 0 : raise msg time . sleep ( 5 ) | Call the given method with the given arguments retrying if the call fails due to an EC2ResponseError or SSL error . Up to 6 attempts are made with a 5 second sleep between them ( at most 25 seconds of waiting ) . If the final attempt still fails the error is propagated . | 99 | 49 |
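Usage of the retry wrapper above: pass the callable and its arguments unapplied, so the wrapper controls each attempt. The manager and instance id below are illustrative only:

    # mgr = EC2Manager(...)  # hypothetical setup
    instance = mgr.retry_on_ec2_error(mgr.ec2.get_only_instances, 'i-0123abcd')[0]
    mgr.retry_on_ec2_error(mgr.ec2.create_tags, [instance.id], {'Name': 'worker'})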
235,984 | def connect ( self , region , ** kw_params ) : self . ec2 = boto . ec2 . connect_to_region ( region , ** kw_params ) if not self . ec2 : raise EC2ManagerException ( 'Unable to connect to region "%s"' % region ) self . remote_images . clear ( ) if self . images and any ( ( 'image_name' in img and 'image_id' not in img ) for img in self . images . values ( ) ) : for img in self . images . values ( ) : if 'image_name' in img and 'image_id' not in img : img [ 'image_id' ] = self . resolve_image_name ( img . pop ( 'image_name' ) ) | Connect to an EC2 region . | 170 | 6 |
235,985 | def resolve_image_name ( self , image_name ) : # check each scope in turn, most specific owner first scopes = [ 'self' , 'amazon' , 'aws-marketplace' ] if image_name in self . remote_images : return self . remote_images [ image_name ] for scope in scopes : logger . info ( 'Retrieving available AMIs owned by %s...' , scope ) remote_images = self . ec2 . get_all_images ( owners = [ scope ] , filters = { 'name' : image_name } ) self . remote_images . update ( { ri . name : ri . id for ri in remote_images } ) if image_name in self . remote_images : return self . remote_images [ image_name ] raise EC2ManagerException ( 'Failed to resolve AMI name "%s" to an AMI' % image_name ) | Look up an AMI for the connected region based on an image name . | 198 | 15 |
235,986 | def create_on_demand ( self , instance_type = 'default' , tags = None , root_device_type = 'ebs' , size = 'default' , vol_type = 'gp2' , delete_on_termination = False ) : name , size = self . _get_default_name_size ( instance_type , size ) if root_device_type == 'ebs' : self . images [ instance_type ] [ 'block_device_map' ] = self . _configure_ebs_volume ( vol_type , name , size , delete_on_termination ) reservation = self . ec2 . run_instances ( ** self . images [ instance_type ] ) logger . info ( 'Creating requested tags...' ) for i in reservation . instances : self . retry_on_ec2_error ( self . ec2 . create_tags , [ i . id ] , tags or { } ) instances = [ ] logger . info ( 'Waiting for instances to become ready...' ) while len ( reservation . instances ) : # pylint: disable=len-as-condition for i in reservation . instances : if i . state == 'running' : instances . append ( i ) reservation . instances . pop ( reservation . instances . index ( i ) ) logger . info ( '%s is %s at %s (%s)' , i . id , i . state , i . public_dns_name , i . ip_address ) else : self . retry_on_ec2_error ( i . update ) return instances | Create one or more EC2 on - demand instances . | 341 | 11 |
235,987 | def create_spot_requests ( self , price , instance_type = 'default' , root_device_type = 'ebs' , size = 'default' , vol_type = 'gp2' , delete_on_termination = False , timeout = None ) : name , size = self . _get_default_name_size ( instance_type , size ) if root_device_type == 'ebs' : self . images [ instance_type ] [ 'block_device_map' ] = self . _configure_ebs_volume ( vol_type , name , size , delete_on_termination ) valid_until = None if timeout is not None : valid_until = ( datetime . datetime . now ( ) + datetime . timedelta ( seconds = timeout ) ) . isoformat ( ) requests = self . ec2 . request_spot_instances ( price , valid_until = valid_until , ** self . images [ instance_type ] ) return [ r . id for r in requests ] | Request creation of one or more EC2 spot instances . | 222 | 11 |
235,988 | def check_spot_requests ( self , requests , tags = None ) : instances = [ None ] * len ( requests ) ec2_requests = self . retry_on_ec2_error ( self . ec2 . get_all_spot_instance_requests , request_ids = requests ) for req in ec2_requests : if req . instance_id : result = self . retry_on_ec2_error ( self . ec2 . get_only_instances , req . instance_id ) if not result : raise EC2ManagerException ( 'Failed to get instance with id %s for %s request %s' % ( req . instance_id , req . status . code , req . id ) ) instance = result [ 0 ] instances [ requests . index ( req . id ) ] = instance self . retry_on_ec2_error ( self . ec2 . create_tags , [ instance . id ] , tags or { } ) logger . info ( 'Request %s is %s and %s.' , req . id , req . status . code , req . state ) logger . info ( '%s is %s at %s (%s)' , instance . id , instance . state , instance . public_dns_name , instance . ip_address ) elif req . state != "open" : # return the request so we don't try again instances [ requests . index ( req . id ) ] = req return instances | Check status of one or more EC2 spot instance requests . | 314 | 12 |
235,989 | def cancel_spot_requests ( self , requests ) : ec2_requests = self . retry_on_ec2_error ( self . ec2 . get_all_spot_instance_requests , request_ids = requests ) for req in ec2_requests : req . cancel ( ) | Cancel one or more EC2 spot instance requests . | 67 | 11 |
235,990 | def create_spot ( self , price , instance_type = 'default' , tags = None , root_device_type = 'ebs' , size = 'default' , vol_type = 'gp2' , delete_on_termination = False , timeout = None ) : request_ids = self . create_spot_requests ( price , instance_type = instance_type , root_device_type = root_device_type , size = size , vol_type = vol_type , delete_on_termination = delete_on_termination ) instances = [ ] logger . info ( 'Waiting on fulfillment of requested spot instances.' ) poll_resolution = 5.0 time_exceeded = False while request_ids : time . sleep ( poll_resolution ) new_instances = self . check_spot_requests ( request_ids , tags = tags ) if timeout is not None : timeout -= poll_resolution time_exceeded = timeout <= 0 fulfilled = [ ] for idx , instance in enumerate ( new_instances ) : if instance is None : continue fulfilled . append ( idx ) if isinstance ( instance , boto . ec2 . instance . Instance ) : instances . append ( instance ) elif instance . status . code == "bad-parameters" : logger . error ( 'Spot request for "%s" failed due to bad parameters.' , instance . id ) self . cancel_spot_requests ( [ instance . id ] ) for idx in reversed ( fulfilled ) : request_ids . pop ( idx ) if request_ids and time_exceeded : self . cancel_spot_requests ( request_ids ) break return instances | Create one or more EC2 spot instances . | 359 | 9 |
235,991 | def _scale_down ( self , instances , count ) : i = sorted ( instances , key = lambda i : i . launch_time , reverse = True ) if not i : return [ ] running = len ( i ) logger . info ( '%d instance/s are running.' , running ) logger . info ( 'Scaling down %d instances of those.' , count ) if count > running : logger . info ( 'Scale-down value is > than running instance/s - using maximum of %d!' , running ) count = running return i [ : count ] | Return the |count| most recently launched instances . | 120 | 14 |
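The ordering used by _scale_down above, in isolation: sorting by launch_time in reverse keeps the newest instances at the front, and slicing takes the requested count. Fake is a stand-in type for illustration:

    import collections

    Fake = collections.namedtuple('Fake', 'id launch_time')
    pool = [Fake('a', '2023-01-01T00:00:00Z'),
            Fake('b', '2023-01-03T00:00:00Z'),
            Fake('c', '2023-01-02T00:00:00Z')]
    newest = sorted(pool, key=lambda i: i.launch_time, reverse=True)[:2]
    print([i.id for i in newest])  # ['b', 'c'] -- the two most recent launches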
235,992 | def _configure_ebs_volume ( self , vol_type , name , size , delete_on_termination ) : # From GitHub boto docs: http://git.io/veyDv root_dev = boto . ec2 . blockdevicemapping . BlockDeviceType ( ) root_dev . delete_on_termination = delete_on_termination root_dev . volume_type = vol_type if size != 'default' : root_dev . size = size # change root volume to desired size bdm = boto . ec2 . blockdevicemapping . BlockDeviceMapping ( ) bdm [ name ] = root_dev return bdm | Sets the desired root EBS size otherwise the default EC2 value is used . | 144 | 17 |
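How a mapping like the one built above is typically consumed with boto2's run_instances; the manager object, AMI id, and device name here are illustrative:

    bdm = mgr._configure_ebs_volume('gp2', '/dev/sda1', 100, True)
    mgr.ec2.run_instances('ami-0123abcd', block_device_map=bdm)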
235,993 | def stop ( self , instances , count = 0 ) : if not instances : return if count > 0 : instances = self . _scale_down ( instances , count ) self . ec2 . stop_instances ( [ i . id for i in instances ] ) | Stop each provided running instance . | 55 | 6 |
235,994 | def terminate ( self , instances , count = 0 ) : if not instances : return if count > 0 : instances = self . _scale_down ( instances , count ) self . ec2 . terminate_instances ( [ i . id for i in instances ] ) | Terminate each provided running or stopped instance . | 55 | 9 |
235,995 | def find ( self , instance_ids = None , filters = None ) : instances = [ ] reservations = self . retry_on_ec2_error ( self . ec2 . get_all_instances , instance_ids = instance_ids , filters = filters ) for reservation in reservations : instances . extend ( reservation . instances ) return instances | Flatten list of reservations to a list of instances . | 73 | 11 |
235,996 | def load ( self , root , module_path , pkg_name ) : root = os . path . join ( root , module_path ) import_name = os . path . join ( pkg_name , module_path ) . replace ( os . sep , '.' ) for ( _ , name , _ ) in pkgutil . iter_modules ( [ root ] ) : self . modules [ name ] = import_module ( '.' + name , package = import_name ) return self . modules | Load modules dynamically . | 108 | 4 |
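Tracing the import-name construction in the load above on a hypothetical layout mypkg/providers/{ec2.py, packet.py}:

    import os
    import pkgutil

    root = os.path.join('/path/to/mypkg', 'providers')
    import_name = os.path.join('mypkg', 'providers').replace(os.sep, '.')  # 'mypkg.providers'
    for _, name, _ in pkgutil.iter_modules([root]):
        # each module would be imported as import_module('.' + name, package='mypkg.providers')
        print(name, '->', import_name + '.' + name)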
235,997 | def command_line_interfaces ( self ) : interfaces = [ ] for _ , module in self . modules . items ( ) : for entry in dir ( module ) : if entry . endswith ( 'CommandLine' ) : interfaces . append ( ( module , entry ) ) return interfaces | Return the CommandLine classes from each provider . | 61 | 9 |
235,998 | def pluralize ( item ) : assert isinstance ( item , ( int , list ) ) if isinstance ( item , int ) : return 's' if item > 1 else '' if isinstance ( item , list ) : return 's' if len ( item ) > 1 else '' return '' | Return 's' if the given int or list is plural otherwise an empty string . | 62 | 5 |
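The helper above in action:

    print('instance' + pluralize(1))           # instance
    print('instance' + pluralize(3))           # instances
    print('request' + pluralize(['a', 'b']))   # requests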
235,999 | def validate ( self ) : if not self . conf . get ( 'auth_token' ) : raise PacketManagerException ( 'The auth token for Packet is not defined but required.' ) if not self . conf . get ( 'projects' ) : raise PacketManagerException ( 'Required "projects" section is missing.' ) projects = self . conf . get ( 'projects' ) if not projects . keys ( ) : raise PacketManagerException ( 'At least one project at Packet is required.' ) failure = False for project , identifier in projects . items ( ) : if not identifier : failure = True logging . error ( 'Project "%s" has no valid identifier.' , project ) if failure : raise PacketManagerException ( 'One or more projects are not setup appropriately.' ) | Perform some basic configuration validation . | 166 | 7 |