idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
48,000
def bound_bboxes(bboxes):
    """Return the minimal bounding box (x0, y0, x1, y1) enclosing every bbox in *bboxes*."""
    # x0/y0/x1/y1 are module-level index constants into a bbox tuple.
    lefts = [b[x0] for b in bboxes]
    bottoms = [b[y0] for b in bboxes]
    rights = [b[x1] for b in bboxes]
    tops = [b[y1] for b in bboxes]
    return (min(lefts), min(bottoms), max(rights), max(tops))
Finds the minimal bbox that contains all given bboxes
48,001
def bound_elems(elems):
    """Return the minimal bounding box (x0, y0, x1, y1) enclosing every element in *elems*."""
    left = min(e.x0 for e in elems)
    bottom = min(e.y0 for e in elems)
    right = max(e.x1 for e in elems)
    top = max(e.y1 for e in elems)
    return (left, bottom, right, top)
Finds the minimal bbox that contains all given elems
48,002
def intersect(a, b):
    """Return True when rectangles *a* and *b* overlap.

    Degenerate (zero-width or zero-height) rectangles never intersect.
    Indexing uses the module-level x0/y0/x1/y1 constants.
    """
    if a[x0] == a[x1] or a[y0] == a[y1]:
        return False
    if b[x0] == b[x1] or b[y0] == b[y1]:
        return False
    horizontal = a[x0] <= b[x1] and b[x0] <= a[x1]
    vertical = a[y0] <= b[y1] and b[y0] <= a[y1]
    return horizontal and vertical
Check if two rectangles intersect
48,003
def reading_order(e1, e2):
    """Comparator ordering elements top-to-bottom, then left-to-right.

    Elements whose rounded top or bottom edges coincide are treated as one
    row and ordered by their left edge instead.
    """
    b1, b2 = e1.bbox, e2.bbox
    same_row = (round(b1[y0]) == round(b2[y0])) or (round(b1[y1]) == round(b2[y1]))
    if same_row:
        return float_cmp(b1[x0], b2[x0])
    return float_cmp(b1[y0], b2[y0])
A comparator to sort bboxes from top to bottom left to right
48,004
def xy_reading_order(e1, e2):
    """Comparator ordering elements left-to-right, then top-to-bottom."""
    b1, b2 = e1.bbox, e2.bbox
    if round(b1[x0]) == round(b2[x0]):
        # Same column: fall back to vertical position.
        return float_cmp(b1[y0], b2[y0])
    return float_cmp(b1[x0], b2[x0])
A comparator to sort bboxes from left to right top to bottom
48,005
def column_order(b1, b2):
    """Comparator sorting bboxes into columns (boxes whose rounded top or
    bottom edges coincide share a column position), then vertically."""
    top, left, bottom = 1, 2, 3  # bbox tuple indices used by this comparator
    if round(b1[top]) == round(b2[top]) or round(b1[bottom]) == round(b2[bottom]):
        return float_cmp(b1[left], b2[left])
    return float_cmp(b1[top], b2[top])
A comparator that sorts bboxes first by columns where a column is made up of all bboxes that overlap then by vertical position in each column .
48,006
def merge_intervals(elems, overlap_thres=2.0):
    """Project *elems* onto the x axis and union their [x0, x1] intervals.

    Intervals whose gap is at most *overlap_thres* are merged.  Returns an
    iterator of (start, end) tuples ordered by start.
    """
    overlap_thres = max(0.0, overlap_thres)
    intervals = []
    # Sentinel interval; only flushed once it has reached positive coordinates.
    cur = [-overlap_thres, -overlap_thres]
    for elem in sorted(elems, key=lambda e: e.x0):
        if elem.x0 - cur[1] > overlap_thres:
            # Gap too large: flush the current interval and start a new one.
            if cur[1] > 0.0:
                intervals.append(cur)
            cur = [elem.x0, elem.x1]
        else:
            cur[1] = max(cur[1], elem.x1)
    intervals.append(cur)
    return map(tuple, intervals)
Project in x axis Sort by start Go through segments and keep max x1
48,007
def predict_heatmap(pdf_path, page_num, model, img_dim=448, img_dir="tmp/img"):
    """Return (page image, predicted heatmap) for one page of the pdf at *pdf_path*.

    The rendered page image is cached as a png in *img_dir* (created on first
    use).  *model* is expected to map a (1, img_dim, img_dim, 3) batch to a
    per-pixel score map.
    """
    if not os.path.isdir(img_dir):
        print("\nCreating image folder at {}".format(img_dir))
        os.makedirs(img_dir)
    pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
    img_path = os.path.join(img_dir, pdf_name + "-{}.png".format(page_num))
    if not os.path.isfile(img_path):
        # Render the pdf page to a png only when it is not cached yet.
        save_image(pdf_path, img_path, page_num)
    image = load_img(img_path, grayscale=True, target_size=(img_dim, img_dim))
    image = img_to_array(image, data_format=K.image_data_format())
    # Replicate the grayscale channel into 3 channels and add a batch axis.
    image = (
        image.reshape((img_dim, img_dim, 1))
        .repeat(3, axis=2)
        .reshape((1, img_dim, img_dim, 3))
    )
    return (
        image.astype(np.uint8).reshape((img_dim, img_dim, 3)),
        model.predict(image).reshape((img_dim, img_dim)),
    )
Return an image corresponding to the page of the pdf documents saved at pdf_path . If the image is not found in img_dir this function creates it and saves it in img_dir .
48,008
def do_intersect(bb1, bb2):
    """Return True when two (left, top, width, height) bounding boxes overlap.

    Boxes that merely touch at an edge are considered overlapping.
    """
    separated_x = bb1[0] + bb1[2] < bb2[0] or bb2[0] + bb2[2] < bb1[0]
    if separated_x:
        return False
    separated_y = bb1[1] + bb1[3] < bb2[1] or bb2[1] + bb2[3] < bb1[1]
    return not separated_y
Helper function that returns True if two bounding boxes overlap .
48,009
def get_bboxes(
    img,
    mask,
    nb_boxes=100,
    score_thresh=0.5,
    iou_thresh=0.2,
    prop_size=0.09,
    prop_scale=1.2,
):
    """Generate candidate boxes with selective search and keep those that best
    overlap the thresholded *mask*.

    Returns (bboxes, filtered_ious): selective-search (left, top, width,
    height) rects and their IoU scores against the binarized mask.
    """
    # Minimum region size / scale are proportional to the image dimensions.
    min_size = int(img.shape[0] * prop_size * img.shape[1] * prop_size)
    scale = int(img.shape[0] * prop_scale)
    img_lbl, regions = selectivesearch.selective_search(
        img, scale=scale, sigma=0.8, min_size=min_size
    )
    rect = [None] * nb_boxes
    max_iou = -1 * np.ones(nb_boxes)
    mask = 1.0 * (mask > score_thresh)  # binarize the predicted heatmap
    # Keep the nb_boxes regions with the highest IoU against the mask.
    for region in regions:
        left, top, width, height = region["rect"]
        intersection = mask[top : top + height, left : left + width].sum()
        union = height * width + mask.sum() - intersection
        iou = intersection / union
        idx = np.argmin(max_iou)
        if iou > max_iou[idx]:
            # Replace the currently weakest kept candidate.
            max_iou[idx] = iou
            rect[idx] = region["rect"]
    remove_indexes = max_iou == -1
    bboxes = []
    filtered_ious = []
    # Greedily accept boxes by decreasing IoU, skipping ones that overlap an
    # already-accepted box.
    for idx in np.argsort([-x for x in max_iou]):
        if remove_indexes[idx]:
            # Only unfilled (-1) slots remain.
            break
        if len(bboxes) == 0:
            if max_iou[idx] > iou_thresh:
                bboxes += [rect[idx]]
                filtered_ious += [max_iou[idx]]
            else:
                break
        else:
            if not any(
                [do_intersect(rect[idx], bboxes[k]) for k in range(len(bboxes))]
            ):
                if max_iou[idx] > iou_thresh:
                    bboxes += [rect[idx]]
                    filtered_ious += [max_iou[idx]]
    return bboxes, filtered_ious
Uses selective search to generate candidate bounding boxes and keeps the ones that have the largest iou with the predicted mask .
48,010
def _print_dict(elem_dict):
    """Print a dict sorted by key; iterable values are summarized by length.

    NOTE(review): dict.iteritems exists only on Python 2, and
    collections.Iterable was removed in Python 3.10 (collections.abc) —
    confirm the supported interpreter before porting.
    """
    for key, value in sorted(elem_dict.iteritems()):
        if isinstance(value, collections.Iterable):
            print(key, len(value))
        else:
            print(key, value)
Print a dict in a readable way
48,011
def _font_of_mention(m):
    """Return (font_name, font_size) of the first alphanumeric character in *m*,
    or (None, 0) when the mention contains no alphanumeric LTChar."""
    for char in m:
        if isinstance(char, LTChar) and char.get_text().isalnum():
            return (char.fontname, _font_size_of(char))
    return (None, 0)
Returns the font type and size of the first alphanumeric char in the text, or None if there isn't any.
48,012
def _allowed_char(c):
    """Return whether the character *c* is allowed in cleaned output.

    ASCII characters are looked up in the module-level _ascii_allowed table;
    every non-ASCII character is allowed.
    """
    code = ord(c)
    # ord() always returns a non-negative value, so the original "< 0" guard
    # was unreachable and has been removed.
    if code < 128:
        return _ascii_allowed[code]
    return True
Returns whether the given unicode char is allowed in output
48,013
def keep_allowed_chars(text):
    """Strip *text*, drop disallowed characters, and replace newlines with spaces."""
    cleaned = []
    for char in text.strip():
        if _allowed_char(char):
            cleaned.append(" " if char == "\n" else char)
    return "".join(cleaned)
Cleans the text for output
48,014
def paint_path(self, gstate, stroke, fill, evenodd, path):
    """Split a long path into sub-paths at every "m" (move) operator and after
    every "h" (close-path) operator, painting each piece separately."""
    ops = "".join(seg[0] for seg in path)
    start = 0
    for i, op in enumerate(ops):
        if op == "m" and start != i:
            # A new move begins a new sub-path; paint everything before it.
            self.paint_single_path(gstate, stroke, fill, evenodd, path[start:i])
            start = i
        if op == "h":
            # Close-path ends the current sub-path (inclusive of the "h").
            self.paint_single_path(gstate, stroke, fill, evenodd, path[start : i + 1])
            start = i + 1
    if start < len(ops):
        self.paint_single_path(gstate, stroke, fill, evenodd, path[start:])
Converts long paths into small segments, splitting each time we see m (Move) or h (ClosePath for a polygon).
48,015
def paint_single_path(self, gstate, stroke, fill, evenodd, path):
    """Convert one pdf path drawing command into LTLine / LTCurve items.

    Axis-aligned polylines are emitted as individual LTLine segments
    (closing the polygon when the path ends with "h"); anything with a
    sloped segment is kept as a single LTCurve.
    """
    if len(path) < 2:
        return
    shape = "".join(x[0] for x in path)
    pts = []
    # Transform every coordinate pair through the current transformation matrix.
    for p in path:
        for i in range(1, len(p), 2):
            pts.append(apply_matrix_pt(self.ctm, (p[i], p[i + 1])))
    if self.line_only_shape.match(shape):
        # Check whether all consecutive segments are axis-aligned.
        has_slope = False
        for i in range(len(pts) - 1):
            if pts[i][0] != pts[i + 1][0] and pts[i][1] != pts[i + 1][1]:
                has_slope = True
                break
        if not has_slope:
            for i in range(len(pts) - 1):
                self.cur_item.add(LTLine(gstate.linewidth, pts[i], pts[i + 1]))
            if shape.endswith("h"):
                # Close the polygon with a segment from first to last point.
                self.cur_item.add(LTLine(gstate.linewidth, pts[0], pts[-1]))
            return
    self.cur_item.add(LTCurve(gstate.linewidth, pts))
Converting a single path draw command into lines and curves objects
48,016
def traverse_layout(root, callback):
    """Walk the pdf layout tree depth-first, invoking *callback* on every node.

    NOTE(review): collections.Iterable was removed in Python 3.10
    (collections.abc.Iterable is the modern spelling).
    """
    callback(root)
    if isinstance(root, collections.Iterable):
        for child in root:
            traverse_layout(child, callback)
Tree walker and invokes the callback as it traverse pdf object tree
48,017
def get_near_items(tree, tree_key):
    """Yield the floor item and then the ceiling item of *tree_key* in a binary
    *tree*, silently skipping whichever neighbor does not exist."""
    for lookup in (tree.floor_item, tree.ceiling_item):
        try:
            yield lookup(tree_key)
        except KeyError:
            pass
Check both possible neighbors for key in a binary tree
48,018
def align_add(tree, key, item, align_thres=2.0):
    """Append *item* to an existing bucket in *tree* whose key is within
    *align_thres* of *key*; otherwise start a new bucket under *key*."""
    for near_key, near_list in get_near_items(tree, key):
        if abs(key - near_key) < align_thres:
            near_list.append(item)
            return
    # No sufficiently close key: create a fresh bucket.
    tree[key] = [item]
Adds the item object to a binary tree under the given key, allowing for small key differences: keys within the alignment threshold are merged into the same bucket.
48,019
def collect_table_content(table_bboxes, elems):
    """Bucket *elems* into the table bbox each one intersects.

    Returns a list parallel to *table_bboxes*; each entry collects the
    elements intersecting that bbox.  LTAnno elements (virtual characters
    with no bbox) follow whatever table the previous element went to.
    """
    table_contents = [[] for _ in range(len(table_bboxes))]
    # Cache the last matched table so runs of adjacent elements stay together.
    prev_content = None
    prev_bbox = None
    for cid, c in enumerate(elems):
        if isinstance(c, LTAnno):
            # Annotations carry no coordinates; attach to the previous bucket.
            if prev_content is not None:
                prev_content.append(c)
            continue
        if prev_bbox is not None and intersect(prev_bbox, c.bbox):
            prev_content.append(c)
            continue
        for table_id, table_bbox in enumerate(table_bboxes):
            if intersect(table_bbox, c.bbox):
                prev_bbox = table_bbox
                prev_content = table_contents[table_id]
                prev_content.append(c)
                break
    return table_contents
Returns a list of elements that are contained inside the corresponding supplied bbox .
48,020
def project_onto(objs, axis, min_gap_size=4.0):
    """Project the bboxes of *objs* onto *axis* ("x" or "y") and union them.

    Returns (intervals, groups): merged (start, end) intervals along the axis
    and, for each interval, the objects it covers.  Intervals separated by
    more than *min_gap_size* stay apart.  Sorts *objs* in place.
    """
    if axis == "x":
        axis = 0
    if axis == "y":
        axis = 1
    axis_end = axis + 2  # bbox is (x0, y0, x1, y1); the end coord is +2 away
    if axis == 0:
        objs.sort(key=lambda o: o.x0)
    else:
        objs.sort(key=lambda o: o.y0)
    intervals = []
    groups = []
    start_i = 0
    start = objs[0].bbox[axis]
    end = objs[0].bbox[axis_end]
    # The _inf_bbox sentinel guarantees the final open interval is flushed.
    for o_i, o in enumerate(chain(objs, [_inf_bbox])):
        o_start = o.bbox[axis]
        o_end = o.bbox[axis_end]
        if o_start > end + min_gap_size:
            # Gap found: close the current interval and open a new one.
            intervals.append((start, end))
            groups.append(objs[start_i:o_i])
            start_i = o_i
            start = o_start
        if o_end > end:
            end = o_end
    return intervals, groups
Projects object bboxes onto the axis and return the unioned intervals and groups of objects in intervals .
48,021
def draw_rect(self, bbox, cell_val):
    """Fill the grid cells covered by *bbox* with *cell_val*, expanding
    degenerate (zero-area) boxes to at least one cell."""
    col0 = int(bbox[x0])
    row0 = int(bbox[y0])
    col1 = max(col0 + 1, int(bbox[x1]))
    row1 = max(row0 + 1, int(bbox[y1]))
    self.grid[col0:col1, row0:row1] = cell_val
Fills the bbox with the content values Float bbox values are normalized to have non - zero area
48,022
def parse_layout(elems, font_stat, combine=False):
    """Parse pdf page elements into vertically aligned clusters (candidate tables).

    Annotates every mention/figure with grid-quantized coordinate features,
    then delegates clustering to cluster_vertically_aligned_boxes.
    Returns (tbls, tbl_features).
    """
    boxes_segments = elems.segments
    boxes_curves = elems.curves
    boxes_figures = elems.figures
    page_width = elems.layout.width
    boxes = elems.mentions
    avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat)
    width = get_page_width(boxes + boxes_segments + boxes_figures + boxes_curves)
    char_width = get_char_width(boxes)
    # Quantize coordinates to half the dominant font size.
    grid_size = avg_font_pts / 2.0
    for i, m in enumerate(boxes + elems.figures):
        m.id = i
        m.feats = defaultdict(bool)
        prefix = ""
        if isinstance(m, LTTextLine) and m.font_name:
            # Features are keyed per font so alignment is counted per style.
            prefix = m.font_name + "-" + str(m.font_size) + "-"
        m.xc = (m.x0 + m.x1) / 2.0
        m.yc = (m.y0 + m.y1) / 2.0
        m.feats[prefix + "x0"] = m.x0_grid = m.x0 // grid_size
        m.feats[prefix + "x1"] = m.x1_grid = m.x1 // grid_size
        m.feats[prefix + "xc"] = m.xc_grid = m.xc // grid_size
        m.feats[prefix + "yc"] = m.yc_grid = m.yc // grid_size
    tbls, tbl_features = cluster_vertically_aligned_boxes(
        boxes,
        elems.layout.bbox,
        avg_font_pts,
        width,
        char_width,
        boxes_segments,
        boxes_curves,
        boxes_figures,
        page_width,
        combine,
    )
    return tbls, tbl_features
Parses pdf texts into a hypergraph grouped into rows and columns and then output
48,023
def merge_nodes(nodes, plane, page_stat, merge_indices):
    """Merge every node that intersects another into its closest overlapping
    neighbor (by bbox-center L1 distance).

    *merge_indices* is rewritten so entries sharing the absorbed node's group
    id point at the surviving node's group id.  Returns (nodes, merge_indices).
    NOTE(review): *plane* and *page_stat* are unused in this body.
    """
    to_be_removed = set()
    for inner_idx in range(len(nodes)):
        inner = nodes[inner_idx]
        outers = []
        outers_indices = []
        # Collect every still-alive node intersecting *inner*.
        for outer_idx in range(len(nodes)):
            outer = nodes[outer_idx]
            if outer is inner or outer in to_be_removed:
                continue
            if intersect(outer.bbox, inner.bbox):
                outers.append(outer)
                outers_indices.append(outer_idx)
        if not outers:
            continue
        # Absorb *inner* into the nearest intersecting node.
        best_outer = min(
            outers, key=lambda outer: l1(center(outer.bbox), center(inner.bbox))
        )
        best_outer_idx = outers_indices[outers.index(best_outer)]
        to_be_removed.add(inner)
        best_outer.merge(inner)
        # Redirect every member of inner's group to the survivor's group.
        for cid_iter in range(len(merge_indices)):
            if merge_indices[cid_iter] == merge_indices[inner_idx]:
                merge_indices[cid_iter] = merge_indices[best_outer_idx]
    return nodes, merge_indices
Merges overlapping nodes
48,024
def _get_cols(row_content):
    """Infer the column structure of one row: alternate (prev_bar, bar) range
    pairs with the text items collected between those bars."""
    cols = []
    pending_text = []
    prev_bar = None
    for _coord, item in row_content:
        if isinstance(item, LTTextLine):
            pending_text.append(item)
            continue
        # *item* is a separator bar.
        if prev_bar:
            bar_ranges = (prev_bar, item)
            col_items = pending_text if pending_text else [None]
            cols.extend([bar_ranges, col_items])
        prev_bar = item
        pending_text = []
    return cols
Counting the number columns based on the content of this row
48,025
def _one_contains_other ( s1 , s2 ) : return min ( len ( s1 ) , len ( s2 ) ) == len ( s1 & s2 )
Whether one set contains the other
48,026
def is_table(self):
    """Heuristically decide whether this node is a table.

    A node qualifies when it has enough text, no figures or large curves,
    is not too sparse, and its mentions align on both axes (at least two
    mentions sharing a font-specific grid feature per axis).
    """
    if self.type_counts["text"] < 6 or "figure" in self.type_counts:
        return False
    for e in self.elems:
        # A large curve is likely a plot/diagram, not a table rule.
        if elem_type(e) == "curve" and e.height * e.width > 100:
            return False
    if (self.sum_elem_bbox / (self.height * self.width)) > self.table_area_threshold:
        return False
    has_many_x_align = False
    has_many_y_align = False
    for k, v in six.iteritems(self.feat_counts):
        font_key = k[0]
        # Only font-prefixed features (containing "-") count as alignment;
        # the char before the trailing digit says which axis aligned.
        if (v >= 2 and "-" in font_key):
            if font_key[-2] == "x":
                has_many_x_align = True
            if font_key[-2] == "y":
                has_many_y_align = True
    return has_many_x_align and has_many_y_align
Count the node's number of mention alignments on both axes to determine if the node is a table.
48,027
def get_grid(self):
    """Standardize this node's layout into a Grid of mentions and lines."""
    mentions, lines = _split_text_n_lines(self.elems)
    # Reading order: quantized row band first, then horizontal center.
    mentions.sort(key=lambda m: (m.yc_grid, m.xc))
    grid = Grid(mentions, lines, self)
    return grid
Standardize the layout of the table into grids
48,028
def lazy_load_font(font_size=default_font_size):
    """Load (and cache in _font_cache) a platform-appropriate truetype font.

    NOTE(review): on an unrecognized platform font_path is never assigned and
    ImageFont.truetype raises NameError — confirm the supported platforms.
    """
    if font_size not in _font_cache:
        if _platform.startswith("darwin"):
            font_path = "/Library/Fonts/Arial.ttf"
        elif _platform.startswith("linux"):
            font_path = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf"
        elif _platform.startswith("win32"):
            font_path = "C:\\Windows\\Fonts\\arial.ttf"
        _font_cache[font_size] = ImageFont.truetype(font_path, font_size)
    return _font_cache[font_size]
Lazy loading font according to system platform
48,029
def render_debug_img(
    file_name,
    page_num,
    elems,
    nodes=[],
    scaler=1,
    print_segments=False,
    print_curves=True,
    print_table_bbox=True,
    print_text_as_rect=True,
):
    """Render a debug image of a pdf page: curves, mentions, segments and
    candidate table nodes, plus a file/page watermark.  Shows and returns
    the PIL image.

    NOTE(review): *nodes* has a mutable default list — callers must not
    mutate it.
    """
    height = scaler * int(elems.layout.height)
    width = scaler * int(elems.layout.width)
    debug_img, draw = create_img((0, 0, width, height))
    font = lazy_load_font()
    large_font = lazy_load_font(24)
    if print_curves:
        for i, c in enumerate(elems.curves):
            if len(c.pts) > 1:
                draw.polygon(c.pts, outline=blue)
            draw.rectangle(c.bbox, fill=blue)
    for i, m in enumerate(elems.mentions):
        if isinstance(m, LTAnno):
            # Annotations have no bbox to draw.
            continue
        if print_text_as_rect:
            # Cells are highlighted pink, other mentions green.
            fill = "pink" if hasattr(m, "feats") and m.feats["is_cell"] else green
            draw.rectangle(m.bbox, fill=fill)
            draw.text(m.bbox[:2], m.get_text(), black, font=font)
        else:
            draw.text(m.bbox[:2], m.get_text(), "black", font=font)
    if print_segments:
        for i, s in enumerate(elems.segments):
            draw.line(s.bbox, fill="black")
    if print_table_bbox:
        for node in nodes:
            is_table = node.is_table()
            color = "red" if is_table else "green"
            draw.rectangle(node.bbox, outline=color)
            if is_table:
                text = "Table"
                draw.rectangle(node.bbox, outline=color)
                draw.text(node.bbox[:2], text, red, font=large_font)
    if file_name and page_num is not None:
        water_mark = (
            file_name + ":page " + str(page_num + 1) + "@%dx%d" % (width, height)
        )
        draw.text((10, 10), water_mark, black, font=font)
    debug_img.show()
    return debug_img
Shows an image rendering of the pdf page along with debugging info printed
48,030
def _partition_estimators ( n_estimators , n_jobs ) : if n_jobs == - 1 : n_jobs = min ( cpu_count ( ) , n_estimators ) else : n_jobs = min ( n_jobs , n_estimators ) n_estimators_per_job = ( n_estimators // n_jobs ) * np . ones ( n_jobs , dtype = np . int ) n_estimators_per_job [ : n_estimators % n_jobs ] += 1 starts = np . cumsum ( n_estimators_per_job ) return n_jobs , n_estimators_per_job . tolist ( ) , [ 0 ] + starts . tolist ( )
Private function used to partition estimators between jobs .
48,031
def _parallel_build_estimators(n_estimators, ensemble, X, y, cost_mat, seeds, verbose):
    """Build one job's batch of *n_estimators* bagged cost-sensitive estimators.

    Resolves fractional max_samples/max_features into counts, draws per-
    estimator sample and feature subsets (with or without replacement), and
    fits each estimator on its slice of X, y, cost_mat.  Returns
    (estimators, estimators_samples, estimators_features).
    """
    n_samples, n_features = X.shape
    max_samples = ensemble.max_samples
    max_features = ensemble.max_features
    # Fractions in (0, 1] are converted to absolute counts.
    if (not isinstance(max_samples, (numbers.Integral, np.integer)) and (0.0 < max_samples <= 1.0)):
        max_samples = int(max_samples * n_samples)
    if (not isinstance(max_features, (numbers.Integral, np.integer)) and (0.0 < max_features <= 1.0)):
        max_features = int(max_features * n_features)
    bootstrap = ensemble.bootstrap
    bootstrap_features = ensemble.bootstrap_features
    estimators = []
    estimators_samples = []
    estimators_features = []
    for i in range(n_estimators):
        if verbose > 1:
            print(("building estimator %d of %d" % (i + 1, n_estimators)))
        random_state = check_random_state(seeds[i])
        seed = check_random_state(random_state.randint(MAX_INT))
        estimator = ensemble._make_estimator(append=False)
        try:
            # Not every base estimator accepts a random_state parameter.
            estimator.set_params(random_state=seed)
        except ValueError:
            pass
        if bootstrap_features:
            features = random_state.randint(0, n_features, max_features)
        else:
            features = sample_without_replacement(
                n_features, max_features, random_state=random_state
            )
        if bootstrap:
            indices = random_state.randint(0, n_samples, max_samples)
        else:
            indices = sample_without_replacement(
                n_samples, max_samples, random_state=random_state
            )
        sample_counts = np.bincount(indices, minlength=n_samples)
        estimator.fit((X[indices])[:, features], y[indices], cost_mat[indices, :])
        # Boolean in-bag mask, kept for later OOB evaluation.
        samples = sample_counts > 0.
        estimators.append(estimator)
        estimators_samples.append(samples)
        estimators_features.append(features)
    return estimators, estimators_samples, estimators_features
Private function used to build a batch of estimators within a job .
48,032
def _parallel_predict ( estimators , estimators_features , X , n_classes , combination , estimators_weight ) : n_samples = X . shape [ 0 ] pred = np . zeros ( ( n_samples , n_classes ) ) n_estimators = len ( estimators ) for estimator , features , weight in zip ( estimators , estimators_features , estimators_weight ) : predictions = estimator . predict ( X [ : , features ] ) for i in range ( n_samples ) : if combination == 'weighted_voting' : pred [ i , int ( predictions [ i ] ) ] += 1 * weight else : pred [ i , int ( predictions [ i ] ) ] += 1 return pred
Private function used to compute predictions within a job .
48,033
def _create_stacking_set ( estimators , estimators_features , estimators_weight , X , combination ) : n_samples = X . shape [ 0 ] valid_estimators = np . nonzero ( estimators_weight ) [ 0 ] n_valid_estimators = valid_estimators . shape [ 0 ] X_stacking = np . zeros ( ( n_samples , n_valid_estimators ) ) for e in range ( n_valid_estimators ) : if combination in [ 'stacking' , 'stacking_bmr' ] : X_stacking [ : , e ] = estimators [ valid_estimators [ e ] ] . predict ( X [ : , estimators_features [ valid_estimators [ e ] ] ] ) elif combination in [ 'stacking_proba' , 'stacking_proba_bmr' ] : X_stacking [ : , e ] = estimators [ valid_estimators [ e ] ] . predict_proba ( X [ : , estimators_features [ valid_estimators [ e ] ] ] ) [ : , 1 ] return X_stacking
Private function used to create the stacking training set .
48,034
def _fit_bmr_model(self, X, y):
    """Fit the Bayes minimum risk model on this ensemble's predicted
    probabilities for X.  Returns self."""
    self.f_bmr = BayesMinimumRiskClassifier()
    X_bmr = self.predict_proba(X)
    self.f_bmr.fit(y, X_bmr)
    return self
Private function used to fit the BayesMinimumRisk model .
48,035
def _fit_stacking_model(self, X, y, cost_mat, max_iter=100):
    """Fit the cost-sensitive logistic regression stacking model on the base
    estimators' stacked predictions.  Returns self."""
    self.f_staking = CostSensitiveLogisticRegression(verbose=self.verbose, max_iter=max_iter)
    X_stacking = _create_stacking_set(self.estimators_, self.estimators_features_, self.estimators_weight_, X, self.combination)
    self.f_staking.fit(X_stacking, y, cost_mat)
    return self
Private function used to fit the stacking model .
48,036
def _evaluate_oob_savings(self, X, y, cost_mat):
    """Weight each estimator by its out-of-bag savings score.

    Estimators trained on every sample (empty OOB set) are scored on the full
    data instead.  Weights are normalized to sum to 1; if all scores are zero,
    uniform weights are used.  Returns self.
    """
    estimators_weight = []
    for estimator, samples, features in zip(self.estimators_, self.estimators_samples_, self.estimators_features_):
        # "samples" marks in-bag rows; ~samples is the OOB set.
        if not np.any(~samples):
            oob_pred = estimator.predict(X[:, features])
            oob_savings = max(0, savings_score(y, oob_pred, cost_mat))
        else:
            oob_pred = estimator.predict((X[~samples])[:, features])
            oob_savings = max(0, savings_score(y[~samples], oob_pred, cost_mat[~samples]))
        estimators_weight.append(oob_savings)
    # NOTE(review): the zero-sum branch stores an ndarray while the normal
    # branch stores a list — confirm downstream code tolerates both.
    if sum(estimators_weight) == 0:
        self.estimators_weight_ = np.ones(len(estimators_weight)) / len(estimators_weight)
    else:
        self.estimators_weight_ = (np.array(estimators_weight) / sum(estimators_weight)).tolist()
    return self
Private function used to calculate the OOB Savings of each estimator .
48,037
def predict(self, X, cost_mat=None):
    """Predict class labels for X according to self.combination.

    'stacking*' delegates to the fitted stacking model; '*_voting' averages
    parallel per-estimator votes; '*_bmr' routes probabilities through the
    Bayes minimum risk classifier using *cost_mat*.
    """
    if self.n_features_ != X.shape[1]:
        raise ValueError(
            "Number of features of the model must "
            "match the input. Model n_features is {0} and "
            "input n_features is {1}."
            "".format(self.n_features_, X.shape[1])
        )
    if self.combination in ['stacking', 'stacking_proba']:
        X_stacking = _create_stacking_set(
            self.estimators_,
            self.estimators_features_,
            self.estimators_weight_,
            X,
            self.combination,
        )
        return self.f_staking.predict(X_stacking)
    elif self.combination in ['majority_voting', 'weighted_voting']:
        # Fan the estimators out over n_jobs parallel prediction batches.
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs)
        all_pred = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_,
                self.combination,
                self.estimators_weight_[starts[i]:starts[i + 1]],
            )
            for i in range(n_jobs)
        )
        pred = sum(all_pred) / self.n_estimators
        return self.classes_.take(np.argmax(pred, axis=1), axis=0)
    elif self.combination in ['majority_bmr', 'weighted_bmr', 'stacking_bmr', 'stacking_proba_bmr']:
        X_bmr = self.predict_proba(X)
        return self.f_bmr.predict(X_bmr, cost_mat)
Predict class for X .
48,038
def predict_proba(self, X):
    """Predict class probabilities for X according to self.combination.

    Voting combinations aggregate parallel per-estimator probabilities
    (averaged for majority; pre-weighted sums for weighted); stacking
    combinations delegate to the fitted stacking model.
    """
    if self.n_features_ != X.shape[1]:
        raise ValueError(
            "Number of features of the model must "
            "match the input. Model n_features is {0} and "
            "input n_features is {1}."
            "".format(self.n_features_, X.shape[1])
        )
    n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs)
    all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
        delayed(_parallel_predict_proba)(
            self.estimators_[starts[i]:starts[i + 1]],
            self.estimators_features_[starts[i]:starts[i + 1]],
            X,
            self.n_classes_,
            self.combination,
            self.estimators_weight_[starts[i]:starts[i + 1]],
        )
        for i in range(n_jobs)
    )
    if self.combination in ['majority_voting', 'majority_bmr']:
        proba = sum(all_proba) / self.n_estimators
    elif self.combination in ['weighted_voting', 'weighted_bmr']:
        # Weighted probabilities are already scaled; a plain sum suffices.
        proba = sum(all_proba)
    elif self.combination in ['stacking', 'stacking_proba', 'stacking_bmr', 'stacking_proba_bmr']:
        X_stacking = _create_stacking_set(
            self.estimators_,
            self.estimators_features_,
            self.estimators_weight_,
            X,
            self.combination,
        )
        proba = self.f_staking.predict_proba(X_stacking)
    return proba
Predict class probabilities for X .
48,039
def cost_sampling(X, y, cost_mat, method='RejectionSampling', oversampling_norm=0.1, max_wc=97.5):
    """Cost-proportionate sampling of (X, y, cost_mat).

    Each example's misclassification cost (false-negative cost for y == 1,
    false-positive cost for y == 0) is normalized by its max_wc-th percentile
    into a weight wc in (0, 1].

    method == 'RejectionSampling' keeps each example with probability wc;
    'OverSampling' repeats each example ceil(wc / oversampling_norm) times.
    Uses the global numpy RNG.  Returns (x_cps, y_cps, cost_mat_cps).
    """
    # Copy: slicing cost_mat yields a view, and writing through it would
    # silently corrupt the caller's cost matrix (fixed defect).
    cost_mis = cost_mat[:, 0].copy()
    cost_mis[y == 1] = cost_mat[y == 1, 1]
    wc = np.minimum(cost_mis / np.percentile(cost_mis, max_wc), 1)
    n_samples = X.shape[0]
    filter_ = list(range(n_samples))
    if method == 'RejectionSampling':
        rej_rand = np.random.rand(n_samples)
        filter_ = rej_rand <= wc
    elif method == 'OverSampling':
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        wc_n = np.ceil(wc / oversampling_norm).astype(int)
        new_n = wc_n.sum()
        filter_ = np.ones(new_n, dtype=int)
        e = 0
        for i in range(n_samples):
            filter_[e:e + wc_n[i]] = i
            e += wc_n[i]
    x_cps = X[filter_]
    y_cps = y[filter_]
    cost_mat_cps = cost_mat[filter_]
    return x_cps, y_cps, cost_mat_cps
Cost - proportionate sampling .
48,040
def _creditscoring_costmat(income, debt, pi_1, cost_mat_parameters):
    """Build the example-dependent cost matrix for credit-scoring models.

    Columns are [C_FP, C_FN, C_TP, C_TN] per applicant: FN cost is the
    loss-given-default on the credit line; FP cost is the profit lost by
    rejecting a good applicant relative to the average client.
    """
    def calculate_a(cl_i, int_, n_term):
        # Annuity payment for credit line cl_i at rate int_ over n_term periods.
        return cl_i * ((int_ * (1 + int_) ** n_term) / ((1 + int_) ** n_term - 1))
    def calculate_pv(a, int_, n_term):
        # Present value of an annuity a discounted at rate int_.
        return a / int_ * (1 - 1 / (1 + int_) ** n_term)
    def calculate_cl(k, inc_i, cl_max, debt_i, int_r, n_term):
        # Credit line: capped by k*income, the global maximum, and debt capacity.
        cl_k = k * inc_i
        A = calculate_a(cl_k, int_r, n_term)
        Cl_debt = calculate_pv(inc_i * min(A / inc_i, 1 - debt_i), int_r, n_term)
        return min(cl_k, cl_max, Cl_debt)
    def calculate_cost_fn(cl_i, lgd):
        # Expected loss when a defaulter is approved.
        return cl_i * lgd
    def calculate_cost_fp(cl_i, int_r, n_term, int_cf, pi_1, lgd, cl_avg):
        # Profit lost by rejecting a good applicant, net of the expected
        # outcome of serving an average client instead.
        a = calculate_a(cl_i, int_r, n_term)
        pv = calculate_pv(a, int_cf, n_term)
        r = pv - cl_i
        r_avg = calculate_pv(calculate_a(cl_avg, int_r, n_term), int_cf, n_term) - cl_avg
        cost_fp = r - (1 - pi_1) * r_avg + pi_1 * calculate_cost_fn(cl_avg, lgd)
        return max(0, cost_fp)
    v_calculate_cost_fp = np.vectorize(calculate_cost_fp)
    v_calculate_cost_fn = np.vectorize(calculate_cost_fn)
    v_calculate_cl = np.vectorize(calculate_cl)
    k = cost_mat_parameters['k']
    int_r = cost_mat_parameters['int_r']
    n_term = cost_mat_parameters['n_term']
    int_cf = cost_mat_parameters['int_cf']
    lgd = cost_mat_parameters['lgd']
    cl_max = cost_mat_parameters['cl_max']
    cl = v_calculate_cl(k, income, cl_max, debt, int_r, n_term)
    cl_avg = cl.mean()
    n_samples = income.shape[0]
    cost_mat = np.zeros((n_samples, 4))
    cost_mat[:, 0] = v_calculate_cost_fp(cl, int_r, n_term, int_cf, pi_1, lgd, cl_avg)
    cost_mat[:, 1] = v_calculate_cost_fn(cl, lgd)
    cost_mat[:, 2] = 0.0
    cost_mat[:, 3] = 0.0
    return cost_mat
Private function to calculate the cost matrix of credit scoring models .
48,041
def predict_proba(self, p):
    """Map raw scores *p* onto calibrated probabilities via the fitted
    calibration_map, whose rows are [upper, lower, calibrated_value] bins."""
    if p.size != p.shape[0]:
        # A 2-column probability matrix: use the positive-class column.
        p = p[:, 1]
    calibrated_proba = np.zeros(p.shape[0])
    for row in range(self.calibration_map.shape[0]):
        lower = self.calibration_map[row, 1]
        upper = self.calibration_map[row, 0]
        in_bin = np.logical_and(lower <= p, upper > p)
        calibrated_proba[in_bin] = self.calibration_map[row, 2]
    return calibrated_proba
Calculate the calibrated probabilities
48,042
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate *estimator* by cross-validation; returns one score per fold.

    Each fold clones the estimator, fits it on the train split and scores the
    test split, with folds evaluated in parallel.
    """
    X, y = indexable(X, y)
    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
    scores = parallel(
        delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params)
        for train, test in cv
    )
    # _fit_and_score returns a list per fold; keep only the score column.
    return np.array(scores)[:, 0]
Evaluate a score by cross - validation
48,043
def _safe_split(estimator, X, y, indices, train_indices=None):
    """Slice (X, y) to *indices*, with special handling for precomputed kernels.

    For pairwise estimators X must be a square kernel/affinity matrix and is
    sliced on both axes (test rows vs train columns when *train_indices* is
    given).  Returns (X_subset, y_subset).
    """
    # NOTE(review): collections.Callable was removed in Python 3.10
    # (collections.abc.Callable is the modern spelling).
    if hasattr(estimator, 'kernel') and isinstance(estimator.kernel, collections.Callable):
        raise ValueError(
            "Cannot use a custom kernel function. "
            "Precompute the kernel matrix instead."
        )
    if not hasattr(X, "shape"):
        if getattr(estimator, "_pairwise", False):
            raise ValueError(
                "Precomputed kernels or affinity matrices have "
                "to be passed as arrays or sparse matrices."
            )
        # X is list-like; index it element by element.
        X_subset = [X[idx] for idx in indices]
    else:
        if getattr(estimator, "_pairwise", False):
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square kernel matrix")
            if train_indices is None:
                X_subset = X[np.ix_(indices, indices)]
            else:
                X_subset = X[np.ix_(indices, train_indices)]
        else:
            X_subset = safe_indexing(X, indices)
    if y is not None:
        y_subset = safe_indexing(y, indices)
    else:
        y_subset = None
    return X_subset, y_subset
Create subset of dataset and properly handle kernels .
48,044
def _score ( estimator , X_test , y_test , scorer ) : if y_test is None : score = scorer ( estimator , X_test ) else : score = scorer ( estimator , X_test , y_test ) if not isinstance ( score , numbers . Number ) : raise ValueError ( "scoring must return a number, got %s (%s) instead." % ( str ( score ) , type ( score ) ) ) return score
Compute the score of an estimator on a given test set .
48,045
def _shuffle ( y , labels , random_state ) : if labels is None : ind = random_state . permutation ( len ( y ) ) else : ind = np . arange ( len ( labels ) ) for label in np . unique ( labels ) : this_mask = ( labels == label ) ind [ this_mask ] = random_state . permutation ( ind [ this_mask ] ) return y [ ind ]
Return a shuffled copy of y eventually shuffle among same labels .
48,046
def check_cv(cv, X=None, y=None, classifier=False):
    """User-friendly input checker for building a CV object; delegates to
    _check_cv with warn_mask=True."""
    return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
Input checker utility for building a CV in a user friendly way .
48,047
def _borderlineSMOTE(X, y, minority_target, N, k):
    """Borderline-SMOTE: split minority samples into safe and in-danger sets
    and synthesize new samples around the in-danger ones.

    Returns (safe minority samples, synthetic samples, danger minority samples).
    """
    n_samples, _ = X.shape
    neigh = NearestNeighbors(n_neighbors=k)
    neigh.fit(X)
    safe_minority_indices = list()
    danger_minority_indices = list()
    for i in range(n_samples):
        if y[i] != minority_target:
            continue
        # kneighbors expects a 2-D query; the result has shape (1, k).
        nn = neigh.kneighbors(X[[i]], return_distance=False)
        majority_neighbours = 0
        for n in nn[0]:
            if y[n] != minority_target:
                majority_neighbours += 1
        # Fixed defect: the original compared against len(nn), which is
        # always 1 for the (1, k)-shaped result; len(nn[0]) is the true k.
        if majority_neighbours == len(nn[0]):
            # Surrounded entirely by the majority class: treat as noise.
            continue
        elif majority_neighbours < (len(nn[0]) / 2):
            logger.debug("Add sample to safe minorities.")
            safe_minority_indices.append(i)
        else:
            # At least half the neighbours are majority: borderline sample.
            danger_minority_indices.append(i)
    synthetic_samples = _SMOTE(X[danger_minority_indices], N, k, h=0.5)
    return (
        X[safe_minority_indices],
        synthetic_samples,
        X[danger_minority_indices],
    )
Returns synthetic minority samples .
48,048
def fit(self, y_true_cal=None, y_prob_cal=None):
    """Fit the probability calibration model (ROC convex hull) when
    self.calibration is enabled; otherwise do nothing."""
    if self.calibration:
        self.cal = ROCConvexHull()
        # Calibrate on the positive-class probability column.
        self.cal.fit(y_true_cal, y_prob_cal[:, 1])
If calibration then train the calibration of probabilities
48,049
def fit(self, y_prob, cost_mat, y_true):
    """Find the classification threshold that minimises the total cost.

    Optionally calibrates y_prob first, then evaluates cost_loss at every
    distinct probability value and stores the best cut-off in
    self.threshold_.  Returns self.
    """
    if self.calibration:
        cal = ROCConvexHull()
        cal.fit(y_true, y_prob[:, 1])
        # Replace probabilities with their calibrated values (mutates y_prob
        # in place -- callers' arrays are modified).
        y_prob[:, 1] = cal.predict_proba(y_prob[:, 1])
        y_prob[:, 0] = 1 - y_prob[:, 1]
    # Candidate thresholds: every distinct predicted probability.
    thresholds = np.unique(y_prob)
    cost = np.zeros(thresholds.shape)
    for i in range(thresholds.shape[0]):
        # floor(p + 1 - t) == 1 iff p >= t, i.e. binarise at threshold t.
        pred = np.floor(y_prob[:, 1] + (1 - thresholds[i]))
        cost[i] = cost_loss(y_true, pred, cost_mat)
    self.threshold_ = thresholds[np.argmin(cost)]
    return self
Calculate the optimal threshold using the ThresholdingOptimization .
48,050
def predict(self, y_prob):
    """Binarise probabilities with the fitted threshold.

    A sample is predicted positive when its positive-class probability is
    at least ``self.threshold_``.
    """
    shifted = y_prob[:, 1] + (1 - self.threshold_)
    return np.floor(shifted)
Calculate the prediction using the ThresholdingOptimization .
48,051
def undersampling(X, y, cost_mat=None, per=0.5):
    """Random majority-class under-sampling.

    Keeps every minority sample and a random subset of the majority class
    so that the minority class makes up roughly ``per`` of the result.

    Parameters
    ----------
    X : array (n_samples, n_features)
    y : array (n_samples,), binary 0/1 labels
    cost_mat : array (n_samples, 4) or None
    per : float, target proportion of the minority class

    Returns
    -------
    (X_u, y_u) or (X_u, y_u, cost_mat_u) when cost_mat is given.
    """
    n_samples = X.shape[0]
    num_y1 = y.sum()
    num_y0 = n_samples - num_y1
    # One uniform draw per sample decides whether a majority sample is kept.
    filter_rand = np.random.rand(int(num_y1 + num_y0))
    if num_y1 < num_y0:
        # Class 1 is the minority: keep all of it, subsample class 0.
        num_y0_new = num_y1 * 1.0 / per - num_y1
        num_y0_new_per = num_y0_new * 1.0 / num_y0
        filter_0 = np.logical_and(y == 0, filter_rand <= num_y0_new_per)
        filter_ = np.nonzero(np.logical_or(y == 1, filter_0))[0]
    else:
        # Class 0 is the minority: keep all of it, subsample class 1.
        num_y1_new = num_y0 * 1.0 / per - num_y0
        num_y1_new_per = num_y1_new * 1.0 / num_y1
        filter_1 = np.logical_and(y == 1, filter_rand <= num_y1_new_per)
        filter_ = np.nonzero(np.logical_or(y == 0, filter_1))[0]
    X_u = X[filter_, :]
    y_u = y[filter_]
    # Idiom fix: was `not cost_mat is None`.
    if cost_mat is not None:
        cost_mat_u = cost_mat[filter_, :]
        return X_u, y_u, cost_mat_u
    return X_u, y_u
Under - sampling .
48,052
def _node_cost(self, y_true, cost_mat):
    """Compute the cost of labelling a node, per the tree's criterion.

    Returns (cost, y_pred, y_prob): the criterion-weighted cost of the best
    constant prediction, that prediction, and a Laplace-smoothed positive
    probability.
    """
    n_samples = len(y_true)
    # Cost of predicting all-negative vs. all-positive for this node.
    costs = np.zeros(2)
    costs[0] = cost_loss(y_true, np.zeros(y_true.shape), cost_mat)
    costs[1] = cost_loss(y_true, np.ones(y_true.shape), cost_mat)
    pi = np.array([1 - y_true.mean(), y_true.mean()])
    if self.criterion == 'direct_cost':
        pass  # raw costs, no class-proportion weighting
    elif self.criterion == 'pi_cost':
        costs *= pi
    elif self.criterion == 'gini_cost':
        costs *= pi ** 2
    # BUG FIX: was `self.criterion in 'entropy_cost'`, a substring test that
    # matched any criterion string contained in 'entropy_cost'.
    elif self.criterion == 'entropy_cost':
        if pi[0] == 0 or pi[1] == 0:
            costs *= 0
        else:
            costs *= -np.log(pi)
    y_pred = np.argmin(costs)
    n_positives = y_true.sum()
    # Laplace correction avoids exact 0/1 probabilities on small nodes.
    y_prob = (n_positives + 1.0) / (n_samples + 2.0)
    return costs[y_pred], y_pred, y_prob
Private function to calculate the cost of a node .
48,053
def _calculate_gain(self, cost_base, y_true, X, cost_mat, split):
    """Relative cost reduction obtained by applying *split* to this node.

    split -- (feature_index, threshold) pair.
    Returns (gain, Xl_pred) where Xl_pred is the predicted class of the
    left child.
    """
    # A zero-cost node cannot be improved; report the majority class.
    if cost_base == 0.0:
        return 0.0, int(np.sign(y_true.mean() - 0.5) == 1)
    j, l = split
    filter_Xl = (X[:, j] <= l)
    filter_Xr = ~filter_Xl
    n_samples, n_features = X.shape
    # Degenerate split: one side would be empty.
    if np.nonzero(filter_Xl)[0].shape[0] in [0, n_samples]:
        return 0.0, 0.0
    Xl_cost, Xl_pred, _ = self._node_cost(y_true[filter_Xl], cost_mat[filter_Xl, :])
    Xr_cost, _, _ = self._node_cost(y_true[filter_Xr], cost_mat[filter_Xr, :])
    if self.criterion_weight:
        # Weight each child's cost by its share of the samples.
        n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0]
        Xl_w = n_samples_Xl * 1.0 / n_samples
        Xr_w = 1 - Xl_w
        gain = round((cost_base - (Xl_w * Xl_cost + Xr_w * Xr_cost)) / cost_base, 6)
    else:
        gain = round((cost_base - (Xl_cost + Xr_cost)) / cost_base, 6)
    return gain, Xl_pred
Private function to calculate the gain in cost of using split in the current node .
48,054
def _best_split(self, y_true, X, cost_mat):
    """Search (feature, percentile-threshold) pairs for the split with the
    highest cost gain.

    Returns ((feature, threshold), gain, left_pred, y_pred, y_prob), where
    y_pred/y_prob describe the unsplit node.
    """
    n_samples, n_features = X.shape
    num_pct = self.num_pct
    cost_base, y_pred, y_prob = self._node_cost(y_true, cost_mat)
    gains = np.zeros((n_features, num_pct))
    pred = np.zeros((n_features, num_pct))
    splits = np.zeros((n_features, num_pct))
    # Random-subspace step: evaluate only max_features_ random features.
    selected_features = np.arange(0, self.n_features_)
    np.random.shuffle(selected_features)
    selected_features = selected_features[:self.max_features_]
    selected_features.sort()
    for j in selected_features:
        # Candidate thresholds are the feature's percentiles.
        splits[j, :] = np.percentile(X[:, j], np.arange(0, 100, 100.0 / num_pct).tolist())
        for l in range(num_pct):
            # Skip duplicated consecutive thresholds.
            if l == 0 or (l > 0 and splits[j, l] != splits[j, l - 1]):
                split = (j, splits[j, l])
                gains[j, l], pred[j, l] = self._calculate_gain(cost_base, y_true, X, cost_mat, split)
    best_split = np.unravel_index(gains.argmax(), gains.shape)
    return (best_split[0], splits[best_split]), gains.max(), pred[best_split], y_pred, y_prob
Private function to calculate the split that gives the best gain .
48,055
def _tree_grow(self, y_true, X, cost_mat, level=0):
    """Recursively grow the decision tree, returned as a nested dict.

    Leaves have split == -1; internal nodes additionally carry the split,
    a node id and 'sl'/'sr' subtrees.  Growth stops on min_gain,
    max_depth, min_samples_split and min_samples_leaf.
    """
    # Degenerate case: a single sample (1-D X).
    if len(X.shape) == 1:
        tree = dict(y_pred=y_true, y_prob=0.5, level=level, split=-1, n_samples=1, gain=0)
        return tree
    split, gain, Xl_pred, y_pred, y_prob = self._best_split(y_true, X, cost_mat)
    n_samples, n_features = X.shape
    # Tentatively a leaf; only promoted to an internal node below.
    tree = dict(y_pred=y_pred, y_prob=y_prob, level=level, split=-1, n_samples=n_samples, gain=gain)
    # Stopping criteria.
    if gain < self.min_gain:
        return tree
    if self.max_depth is not None:
        if level >= self.max_depth:
            return tree
    if n_samples <= self.min_samples_split:
        return tree
    j, l = split
    filter_Xl = (X[:, j] <= l)
    filter_Xr = ~filter_Xl
    n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0]
    n_samples_Xr = np.nonzero(filter_Xr)[0].shape[0]
    if min(n_samples_Xl, n_samples_Xr) <= self.min_samples_leaf:
        return tree
    # Accept the split: assign a node id and recurse into both children.
    tree['split'] = split
    tree['node'] = self.tree_.n_nodes
    self.tree_.n_nodes += 1
    tree['sl'] = self._tree_grow(y_true[filter_Xl], X[filter_Xl], cost_mat[filter_Xl], level + 1)
    tree['sr'] = self._tree_grow(y_true[filter_Xr], X[filter_Xr], cost_mat[filter_Xr], level + 1)
    return tree
Private recursive function to grow the decision tree .
48,056
def _nodes(self, tree):
    """Collect the ids of all internal (split) nodes of *tree*, preorder."""
    collected = []

    def _walk(subtree):
        # Leaves are marked with split == -1 and carry no 'node' id.
        if isinstance(subtree, dict) and subtree['split'] != -1:
            collected.append(subtree['node'])
            _walk(subtree['sl'])
            _walk(subtree['sr'])

    _walk(tree)
    return collected
Private function that collects the ids of the internal nodes of a tree.
48,057
def _classify(self, X, tree, proba=False):
    """Route the rows of X down *tree* and return per-row predictions.

    Returns class labels, or positive-class probabilities when proba=True.
    """
    n_samples, n_features = X.shape
    out = np.ones(n_samples)
    if tree['split'] == -1:
        # Leaf: every routed sample gets the leaf's label/probability.
        out = out * (tree['y_prob'] if proba else tree['y_pred'])
        return out
    j, l = tree['split']
    goes_left = (X[:, j] <= l)
    goes_right = ~goes_left
    n_left = np.nonzero(goes_left)[0].shape[0]
    n_right = np.nonzero(goes_right)[0].shape[0]
    # Recurse only into non-empty partitions.
    if n_left == 0:
        out[goes_right] = self._classify(X[goes_right, :], tree['sr'], proba)
    elif n_right == 0:
        out[goes_left] = self._classify(X[goes_left, :], tree['sl'], proba)
    else:
        out[goes_left] = self._classify(X[goes_left, :], tree['sl'], proba)
        out[goes_right] = self._classify(X[goes_right, :], tree['sr'], proba)
    return out
Private function that classifies a dataset using the tree.
48,058
def predict(self, X):
    """Predict class labels for X using the (possibly pruned) tree."""
    active_tree = self.tree_.tree_pruned if self.pruned else self.tree_.tree
    return self._classify(X, active_tree, proba=False)
Predict class of X .
48,059
def predict_proba(self, X):
    """Return an (n_samples, 2) array of class probabilities for X."""
    n_samples, n_features = X.shape
    active_tree = self.tree_.tree_pruned if self.pruned else self.tree_.tree
    prob = np.zeros((n_samples, 2))
    prob[:, 1] = self._classify(X, active_tree, proba=True)
    prob[:, 0] = 1 - prob[:, 1]
    return prob
Predict class probabilities of the input samples X .
48,060
def _delete_node(self, tree, node):
    """Return a deep copy of *tree* with internal node *node* collapsed
    into a leaf; the original tree is left untouched."""
    pruned = copy.deepcopy(tree)

    def _walk(subtree):
        if not isinstance(subtree, dict) or subtree['split'] == -1:
            return
        if subtree['node'] == node:
            # Collapse: drop children and the node id, mark as leaf.
            del subtree['sr']
            del subtree['sl']
            del subtree['node']
            subtree['split'] = -1
        else:
            _walk(subtree['sl'])
            _walk(subtree['sr'])

    _walk(pruned)
    return pruned
Private function that eliminates a node from the tree.
48,061
def _pruning(self, X, y_true, cost_mat):
    """One pass of cost-based pruning on self.tree_.tree_pruned.

    Scores the collapse of each internal node by the relative cost saving
    weighted by the relative tree-size reduction; collapses the best node
    when it beats min_gain and recurses until no profitable collapse
    remains (or the root itself was collapsed).
    """
    nodes = self._nodes(self.tree_.tree_pruned)
    n_nodes = len(nodes)
    gains = np.zeros(n_nodes)
    # Baseline cost of the current (partially pruned) tree.
    y_pred = self._classify(X, self.tree_.tree_pruned)
    cost_base = cost_loss(y_true, y_pred, cost_mat)
    for m, node in enumerate(nodes):
        # Evaluate the tree with this node collapsed to a leaf.
        temp_tree = self._delete_node(self.tree_.tree_pruned, node)
        y_pred = self._classify(X, temp_tree)
        nodes_pruned = self._nodes(temp_tree)
        # Relative cost saving, weighted by the fraction of nodes removed.
        gain = (cost_base - cost_loss(y_true, y_pred, cost_mat)) / cost_base
        gain_size = (len(nodes) - len(nodes_pruned)) * 1.0 / len(nodes)
        gains[m] = gain * gain_size
    best_gain = np.max(gains)
    best_node = nodes[int(np.argmax(gains))]
    if best_gain > self.min_gain:
        self.tree_.tree_pruned = self._delete_node(self.tree_.tree_pruned, best_node)
        # Keep pruning unless the root was just collapsed.
        if best_node != 0:
            self._pruning(X, y_true, cost_mat)
Private function that prunes the decision tree.
48,062
def pruning(self, X, y, cost_mat):
    """Prune the fitted tree on (X, y, cost_mat) and record its new size."""
    # Work on a deep copy so the unpruned tree is preserved.
    self.tree_.tree_pruned = copy.deepcopy(self.tree_.tree)
    if self.tree_.n_nodes > 0:
        self._pruning(X, y, cost_mat)
    self.tree_.n_nodes_pruned = len(self._nodes(self.tree_.tree_pruned))
Function that prunes the decision tree.
48,063
def cost_loss(y_true, y_pred, cost_mat):
    """Example-dependent cost of a set of predictions.

    cost_mat columns are per-sample [FP, FN, TP, TN] costs: a truly positive
    sample contributes its FN or TP cost, a truly negative one its TN or FP
    cost, depending on the prediction.  Returns the summed cost.
    """
    y_true = column_or_1d(y_true)
    # BUG FIX: np.float was removed in NumPy 1.24; the builtin float is the
    # exact equivalent (np.float was merely an alias for it).
    y_true = (y_true == 1).astype(float)
    y_pred = column_or_1d(y_pred)
    y_pred = (y_pred == 1).astype(float)
    cost = y_true * ((1 - y_pred) * cost_mat[:, 1] + y_pred * cost_mat[:, 2])
    cost += (1 - y_true) * (y_pred * cost_mat[:, 0] + (1 - y_pred) * cost_mat[:, 3])
    return np.sum(cost)
Cost classification loss .
48,064
def savings_score(y_true, y_pred, cost_mat):
    """Savings of y_pred versus the best trivial (all-0 / all-1) policy.

    1.0 means zero cost, 0.0 means no better than the cheaper trivial
    classifier, negative values mean worse.
    """
    y_true = column_or_1d(y_true)
    y_pred = column_or_1d(y_pred)
    n_samples = len(y_true)
    # Baseline: the cheaper of predicting everything 0 or everything 1.
    all_zero = cost_loss(y_true, np.zeros(n_samples), cost_mat)
    all_one = cost_loss(y_true, np.ones(n_samples), cost_mat)
    cost_base = min(all_zero, all_one)
    return 1.0 - cost_loss(y_true, y_pred, cost_mat) / cost_base
Savings score .
48,065
def brier_score_loss(y_true, y_prob):
    """Brier score: mean squared difference between binary outcomes and
    the predicted probabilities (lower is better)."""
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)
    squared_error = (y_true - y_prob) ** 2
    return np.mean(squared_error)
Compute the Brier score
48,066
def _logistic_cost_loss(w, X, y, cost_mat, alpha):
    """Logistic cost loss for one weight vector or a 2-D stack of them.

    Dispatches to _logistic_cost_loss_i per weight vector and returns a
    scalar (single vector) or an array of losses (stack).
    """
    if w.shape[0] == w.size:
        # Single 1-D weight vector.
        return _logistic_cost_loss_i(w, X, y, cost_mat, alpha)
    n_w = w.shape[0]
    out = np.zeros(n_w)
    for idx in range(n_w):
        out[idx] = _logistic_cost_loss_i(w[idx], X, y, cost_mat, alpha)
    return out
Computes the logistic loss .
48,067
def predict(self, X, cut_point=0.5):
    """Classify X as positive when P(y=1|X) >= cut_point."""
    positive_proba = self.predict_proba(X)[:, 1]
    return np.floor(positive_proba + (1 - cut_point))
Predicted class .
48,068
def list_tags(userdata):
    """Log every @macro@ placeholder found in a UserData script."""
    found = re.findall('@(.*?)@', userdata)
    logging.info('List of available macros:')
    for name in found:
        logging.info('\t%r', name)
List all used macros within a UserData script .
48,069
def handle_tags(userdata, macros):
    """Substitute @macro@ placeholders in a UserData script.

    Special placeholders:
      @!all_macros_export@ -> shell `export K="V"` lines for every macro
      @!all_macros_docker@ -> docker `-e 'K=V'` flags for every macro
    A placeholder may carry a default: @NAME|default@.  Returns the
    expanded script, or None when an undefined macro without a default is
    referenced.
    """
    for found in re.findall('@(.*?)@', userdata):
        if found == '!all_macros_export':
            lines = ['export %s="%s"' % (k, macros[k]) for k in macros]
            userdata = userdata.replace('@%s@' % found, "\n".join(lines))
        elif found == "!all_macros_docker":
            flags = ["-e '%s=%s'" % (k, macros[k]) for k in macros]
            userdata = userdata.replace('@%s@' % found, " ".join(flags))
        elif "|" in found:
            name, default_value = found.split('|')
            if name not in macros:
                logging.warning('Using default variable value %s for @%s@ ', default_value, name)
                value = default_value
            else:
                value = macros[name]
            userdata = userdata.replace('@%s|%s@' % (name, default_value), value)
        else:
            if found not in macros:
                logging.error('Undefined variable @%s@ in UserData script', found)
                return None
            userdata = userdata.replace('@%s@' % found, macros[found])
    return userdata
Insert macro values or auto export variables in UserData scripts .
48,070
def retry_on_ec2_error(self, func, *args, **kwargs):
    """Call func(*args, **kwargs), retrying transient EC2/SSL failures.

    Sleeps 5 seconds between attempts; re-raises the last error after 6
    failed tries.
    """
    attempts_left = 6
    while True:
        try:
            return func(*args, **kwargs)
        except (boto.exception.EC2ResponseError, ssl.SSLError) as msg:
            attempts_left -= 1
            if attempts_left <= 0:
                raise msg
            time.sleep(5)
Call the given method with the given arguments retrying if the call failed due to an EC2ResponseError . This method will wait at most 30 seconds and perform up to 6 retries . If the method still fails it will propagate the error .
48,071
def connect(self, region, **kw_params):
    """Open an EC2 connection to *region* and resolve configured AMI names.

    Raises EC2ManagerException when the region connection fails.
    """
    self.ec2 = boto.ec2.connect_to_region(region, **kw_params)
    if not self.ec2:
        raise EC2ManagerException('Unable to connect to region "%s"' % region)
    # AMI ids are region specific; drop ids cached for another region.
    self.remote_images.clear()
    if self.images and any(('image_name' in img and 'image_id' not in img) for img in self.images.values()):
        for img in self.images.values():
            if 'image_name' in img and 'image_id' not in img:
                # Replace the symbolic name with the region's concrete AMI id.
                img['image_id'] = self.resolve_image_name(img.pop('image_name'))
Connect to a EC2 .
48,072
def resolve_image_name(self, image_name):
    """Resolve an AMI name to its AMI id, caching lookups per region.

    Raises EC2ManagerException when no scope yields a matching AMI.
    """
    scopes = ['self', 'amazon', 'aws-marketplace']
    if image_name in self.remote_images:
        return self.remote_images[image_name]
    # Search progressively wider ownership scopes until the name resolves.
    for scope in scopes:
        logger.info('Retrieving available AMIs owned by %s...', scope)
        found = self.ec2.get_all_images(owners=[scope], filters={'name': image_name})
        self.remote_images.update({image.name: image.id for image in found})
        if image_name in self.remote_images:
            return self.remote_images[image_name]
    raise EC2ManagerException('Failed to resolve AMI name "%s" to an AMI' % image_name)
Look up an AMI for the connected region based on an image name .
48,073
def create_on_demand(self, instance_type='default', tags=None, root_device_type='ebs', size='default', vol_type='gp2', delete_on_termination=False):
    """Create one or more EC2 on-demand instances and wait until running.

    Returns the list of running boto instance objects.
    """
    name, size = self._get_default_name_size(instance_type, size)
    if root_device_type == 'ebs':
        self.images[instance_type]['block_device_map'] = self._configure_ebs_volume(vol_type, name, size, delete_on_termination)
    reservation = self.ec2.run_instances(**self.images[instance_type])
    logger.info('Creating requested tags...')
    for i in reservation.instances:
        self.retry_on_ec2_error(self.ec2.create_tags, [i.id], tags or {})
    instances = []
    logger.info('Waiting for instances to become ready...')
    # Busy-poll: move instances out of the reservation once they report
    # 'running'; the loop ends when the reservation list is drained.
    while len(reservation.instances):
        for i in reservation.instances:
            if i.state == 'running':
                instances.append(i)
                reservation.instances.pop(reservation.instances.index(i))
                logger.info('%s is %s at %s (%s)', i.id, i.state, i.public_dns_name, i.ip_address)
            else:
                # Refresh the instance state from EC2.
                self.retry_on_ec2_error(i.update)
    return instances
Create one or more EC2 on - demand instances .
48,074
def create_spot_requests(self, price, instance_type='default', root_device_type='ebs', size='default', vol_type='gp2', delete_on_termination=False, timeout=None):
    """Request one or more EC2 spot instances at maximum bid *price*.

    Returns the spot request ids.  When *timeout* (seconds) is given the
    requests expire after that interval.
    """
    name, size = self._get_default_name_size(instance_type, size)
    if root_device_type == 'ebs':
        self.images[instance_type]['block_device_map'] = self._configure_ebs_volume(vol_type, name, size, delete_on_termination)
    valid_until = None
    if timeout is not None:
        # EC2 expects an ISO-8601 expiry timestamp.
        valid_until = (datetime.datetime.now() + datetime.timedelta(seconds=timeout)).isoformat()
    requests = self.ec2.request_spot_instances(price, valid_until=valid_until, **self.images[instance_type])
    return [r.id for r in requests]
Request creation of one or more EC2 spot instances .
48,075
def check_spot_requests(self, requests, tags=None):
    """Poll the given spot request ids once.

    Returns a list aligned with *requests*: a boto Instance for fulfilled
    requests (which get tagged here), the request object itself for
    requests in a terminal non-open state, or None for requests still open.
    """
    instances = [None] * len(requests)
    ec2_requests = self.retry_on_ec2_error(self.ec2.get_all_spot_instance_requests, request_ids=requests)
    for req in ec2_requests:
        if req.instance_id:
            instance = self.retry_on_ec2_error(self.ec2.get_only_instances, req.instance_id)[0]
            if not instance:
                raise EC2ManagerException('Failed to get instance with id %s for %s request %s' % (req.instance_id, req.status.code, req.id))
            # Keep the result order aligned with the input request ids.
            instances[requests.index(req.id)] = instance
            self.retry_on_ec2_error(self.ec2.create_tags, [instance.id], tags or {})
            logger.info('Request %s is %s and %s.', req.id, req.status.code, req.state)
            logger.info('%s is %s at %s (%s)', instance.id, instance.state, instance.public_dns_name, instance.ip_address)
        elif req.state != "open":
            # Terminal without an instance (cancelled/failed): report the request.
            instances[requests.index(req.id)] = req
    return instances
Check status of one or more EC2 spot instance requests .
48,076
def cancel_spot_requests(self, requests):
    """Cancel the given EC2 spot instance requests by id."""
    for req in self.retry_on_ec2_error(self.ec2.get_all_spot_instance_requests,
                                       request_ids=requests):
        req.cancel()
Cancel one or more EC2 spot instance requests .
48,077
def create_spot(self, price, instance_type='default', tags=None, root_device_type='ebs', size='default', vol_type='gp2', delete_on_termination=False, timeout=None):
    """Create one or more EC2 spot instances and wait for fulfillment.

    Polls every 5 seconds; open requests still pending when *timeout*
    (seconds) is exceeded are cancelled.  Returns the fulfilled boto
    instance objects.
    """
    request_ids = self.create_spot_requests(price, instance_type=instance_type, root_device_type=root_device_type, size=size, vol_type=vol_type, delete_on_termination=delete_on_termination)
    instances = []
    logger.info('Waiting on fulfillment of requested spot instances.')
    poll_resolution = 5.0
    time_exceeded = False
    while request_ids:
        time.sleep(poll_resolution)
        new_instances = self.check_spot_requests(request_ids, tags=tags)
        if timeout is not None:
            timeout -= poll_resolution
            time_exceeded = timeout <= 0
        fulfilled = []
        for idx, instance in enumerate(new_instances):
            # BUG FIX: instance.status.code was previously read BEFORE the
            # None check, raising AttributeError for still-open requests
            # (check_spot_requests returns None for those).
            if instance is None:
                continue  # request still open; poll again
            fulfilled.append(idx)
            if isinstance(instance, boto.ec2.instance.Instance):
                instances.append(instance)
            elif instance.status.code == "bad-parameters":
                # Non-Instance entries are failed/terminal request objects.
                logging.error('Spot request for "%s" failed due to bad parameters.', instance.id)
                self.cancel_spot_requests([instance.id])
        # Remove resolved requests (reverse order keeps indices valid).
        for idx in reversed(fulfilled):
            request_ids.pop(idx)
        if request_ids and time_exceeded:
            self.cancel_spot_requests(request_ids)
            break
    return instances
Create one or more EC2 spot instances .
48,078
def _scale_down(self, instances, count):
    """Pick the *count* most recently launched instances for scale-down.

    Returns at most len(instances) instances, newest first.
    """
    newest_first = sorted(instances, key=lambda inst: inst.launch_time, reverse=True)
    if not newest_first:
        return []
    running = len(newest_first)
    logger.info('%d instance/s are running.', running)
    logger.info('Scaling down %d instances of those.', count)
    if count > running:
        logger.info('Scale-down value is > than running instance/s - using maximum of %d!', running)
        count = running
    return newest_first[:count]
Return a list of |count| last created instances by launch time .
48,079
def _configure_ebs_volume(self, vol_type, name, size, delete_on_termination):
    """Build a BlockDeviceMapping for the root EBS volume.

    EC2's default volume size is kept unless an explicit size is given.
    """
    root_volume = boto.ec2.blockdevicemapping.BlockDeviceType()
    root_volume.delete_on_termination = delete_on_termination
    root_volume.volume_type = vol_type
    if size != 'default':
        root_volume.size = size
    mapping = boto.ec2.blockdevicemapping.BlockDeviceMapping()
    mapping[name] = root_volume
    return mapping
Sets the desired root EBS size otherwise the default EC2 value is used .
48,080
def stop(self, instances, count=0):
    """Stop the given running instances; if count > 0, only the newest
    *count* of them."""
    if not instances:
        return
    if count > 0:
        instances = self._scale_down(instances, count)
    self.ec2.stop_instances([inst.id for inst in instances])
Stop each provided running instance .
48,081
def terminate(self, instances, count=0):
    """Terminate the given instances; if count > 0, only the newest
    *count* of them."""
    if not instances:
        return
    if count > 0:
        instances = self._scale_down(instances, count)
    self.ec2.terminate_instances([inst.id for inst in instances])
Terminate each provided running or stopped instance .
48,082
def find(self, instance_ids=None, filters=None):
    """Return all instances across matching reservations as a flat list."""
    reservations = self.retry_on_ec2_error(self.ec2.get_all_instances,
                                           instance_ids=instance_ids,
                                           filters=filters)
    found = []
    for reservation in reservations:
        found.extend(reservation.instances)
    return found
Flatten list of reservations to a list of instances .
48,083
def load(self, root, module_path, pkg_name):
    """Dynamically import every module found under root/module_path.

    Imported modules are registered in self.modules keyed by module name,
    and the full mapping is returned.
    """
    search_dir = os.path.join(root, module_path)
    package = os.path.join(pkg_name, module_path).replace(os.sep, '.')
    for _, mod_name, _ in pkgutil.iter_modules([search_dir]):
        self.modules[mod_name] = import_module('.' + mod_name, package=package)
    return self.modules
Load modules dynamically .
48,084
def command_line_interfaces(self):
    """Collect (module, class-name) pairs for every *CommandLine class
    exposed by the loaded provider modules."""
    found = []
    for _, module in self.modules.items():
        matching = [entry for entry in dir(module) if entry.endswith('CommandLine')]
        for entry in matching:
            found.append((module, entry))
    return found
Return the CommandLine classes from each provider .
48,085
def pluralize(item):
    """Return 's' when *item* (an int count or a list) denotes more than one."""
    assert isinstance(item, (int, list))
    count = item if isinstance(item, int) else len(item)
    return 's' if count > 1 else ''
Nothing to see here .
48,086
def validate(self):
    """Sanity-check the Packet configuration.

    Requires an auth token and a non-empty "projects" mapping where every
    project has a non-empty identifier; raises PacketManagerException
    otherwise (logging each invalid project first).
    """
    if not self.conf.get('auth_token'):
        raise PacketManagerException('The auth token for Packet is not defined but required.')
    if not self.conf.get('projects'):
        raise PacketManagerException('Required "projects" section is missing.')
    projects = self.conf.get('projects')
    if not projects.keys():
        raise PacketManagerException('At least one project at Packet is required.')
    invalid = []
    for project, identifier in projects.items():
        if not identifier:
            invalid.append(project)
            logging.error('Project "%s" has no valid identifier.', project)
    if invalid:
        raise PacketManagerException('One or more projects are not setup appropriately.')
Perform some basic configuration validation .
48,087
def print_projects(self, projects):
    """Print each project as "name: id"."""
    for entry in projects:
        line = '{}: {}'.format(entry.name, entry.id)
        print(line)
Print method for projects .
48,088
def print_operating_systems(self, operating_systems):
    """Print each operating system as "name: slug"."""
    for entry in operating_systems:
        line = '{}: {}'.format(entry.name, entry.slug)
        print(line)
Print method for operating systems .
48,089
def print_plans(self, plans):
    """Print each plan's name, slug and hourly price, then its specs."""
    for plan in plans:
        header = 'Name: {} "{}" Price: {} USD'.format(plan.name, plan.slug, plan.pricing['hour'])
        print(header)
        self.pprint(plan.specs)
        print('\n')
Print method for plans .
48,090
def print_facilities(self, facilities):
    """Print each facility as "code - (name): feature,feature,..."."""
    for facility in facilities:
        feature_list = ",".join(facility.features)
        print('{} - ({}): {}'.format(facility.code, facility.name, feature_list))
Print method for facilities .
48,091
def list_devices(self, project_id, conditions=None, params=None):
    """List devices in a project, optionally filtered by *conditions*.

    Returns packet.Device wrappers for every device that passes the filter.
    """
    query = {'per_page': 1000}
    if params:
        query.update(params)
    data = self.api('projects/%s/devices' % project_id, params=query)
    return [packet.Device(raw_device, self.manager)
            for raw_device in self.filter(conditions, data['devices'])]
Retrieve list of devices in a project by one of more conditions .
48,092
def print_devices(self, devices):
    """Print a one-line summary (id, OS, IP, state, billing, tags) per device."""
    for device in devices:
        billing = 'spot' if device.spot_instance else 'on-demand'
        print('ID: {} OS: {} IP: {} State: {} ({}) Tags: {}'.format(
            device.id, device.operating_system.slug,
            self.get_public_ip(device.ip_addresses),
            device.state, billing, device.tags))
Print method for devices .
48,093
def filter(criterias, devices):
    """Select devices whose top-level fields match any given criteria value.

    criterias maps a device key to a list of accepted values; list-valued
    device fields match on containment, str/int fields on equality.  A
    device is appended once per matching criteria (duplicates are possible,
    preserving the original semantics).  With no criterias, all devices
    are returned unchanged.
    """
    if not criterias:
        return devices
    matched = []
    for device in devices:
        for key, accepted in criterias.items():
            if key not in device.keys():
                continue
            field = device[key]
            if isinstance(field, list):
                for value in accepted:
                    if value in field:
                        matched.append(device)
                        break
            elif isinstance(field, (str, int)):
                for value in accepted:
                    if value == field:
                        matched.append(device)
    return matched
Filter a device by criterias on the root level of the dictionary .
48,094
def get_public_ip(addresses, version=4):
    """Return the first public address of the given IP version, or None."""
    return next((addr.get('address') for addr in addresses
                 if addr['public'] and addr['address_family'] == version),
                None)
Return either the devices public IPv4 or IPv6 address .
48,095
def validate_capacity(self, servers):
    """Ask the Packet API whether the requested servers can be deployed,
    wrapping API failures in PacketManagerException."""
    try:
        return self.manager.validate_capacity(servers)
    except packet.baseapi.Error as api_error:
        raise PacketManagerException(api_error)
Validates if a deploy can be fulfilled .
48,096
def create_volume(self, project_id, plan, size, facility, label=""):
    """Create a new storage volume in the given project and facility,
    wrapping API failures in PacketManagerException."""
    try:
        return self.manager.create_volume(project_id, label, plan, size, facility)
    except packet.baseapi.Error as api_error:
        raise PacketManagerException(api_error)
Creates a new volume .
48,097
def attach_volume_to_device(self, volume_id, device_id):
    """Attach an existing volume to a device and return the volume object,
    wrapping API failures in PacketManagerException."""
    try:
        volume = self.manager.get_volume(volume_id)
        volume.attach(device_id)
    except packet.baseapi.Error as api_error:
        raise PacketManagerException(api_error)
    return volume
Attaches the created Volume to a Device .
48,098
def create_demand(self, project_id, facility, plan, operating_system,
                  tags=None, userdata='', hostname=None, count=1):
    """Create *count* on-demand devices in the given project.

    With count > 1 each device gets a "-<i>" hostname suffix; without an
    explicit hostname a random one is generated.  Returns the created
    device objects; API failures raise PacketManagerException.
    """
    tags = {} if tags is None else tags
    hostname = self.get_random_hostname() if hostname is None else hostname
    created = []
    for i in range(1, count + 1):
        device_hostname = hostname if count == 1 else hostname + '-' + str(i)
        self.logger.info('Adding to project %s: %s, %s, %s, %s, %r',
                         project_id, device_hostname, facility, plan,
                         operating_system, tags)
        try:
            created.append(self.manager.create_device(
                project_id=project_id, hostname=device_hostname,
                facility=facility, plan=plan, tags=tags, userdata=userdata,
                operating_system=operating_system))
        except packet.baseapi.Error as api_error:
            raise PacketManagerException(api_error)
    return created
Create a new on demand device under the given project .
48,099
def stop(self, devices):
    """Power off each of the given devices, wrapping API failures in
    PacketManagerException."""
    for device in devices:
        self.logger.info('Stopping: %s', device.id)
        try:
            device.power_off()
        except packet.baseapi.Error:
            raise PacketManagerException('Unable to stop instance "{}"'.format(device.id))
Power - Off one or more running devices .