idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
226,700
def _interpolate_coefficients ( self , alpha ) : exact = False coef_idx = None for i , val in enumerate ( self . alphas_ ) : if val > alpha : coef_idx = i elif alpha - val < numpy . finfo ( numpy . float ) . eps : coef_idx = i exact = True break if coef_idx is None : coef = self . coef_ [ : , 0 ] elif exact or coef_idx == len ( self . alphas_ ) - 1 : coef = self . coef_ [ : , coef_idx ] else : # interpolate between coefficients a1 = self . alphas_ [ coef_idx + 1 ] a2 = self . alphas_ [ coef_idx ] frac = ( alpha - a1 ) / ( a2 - a1 ) coef = frac * self . coef_ [ : , coef_idx ] + ( 1.0 - frac ) * self . coef_ [ : , coef_idx + 1 ] return coef
Interpolate coefficients by calculating the weighted average of coefficient vectors corresponding to neighbors of alpha in the list of alphas constructed during training .
248
27
226,701
def predict ( self , X , alpha = None ) : X = check_array ( X ) coef = self . _get_coef ( alpha ) return numpy . dot ( X , coef )
The linear predictor of the model .
44
7
226,702
def _create_base_ensemble ( self , out , n_estimators , n_folds ) : ensemble_scores = numpy . empty ( ( n_estimators , n_folds ) ) base_ensemble = numpy . empty_like ( ensemble_scores , dtype = numpy . object ) for model , fold , score , est in out : ensemble_scores [ model , fold ] = score base_ensemble [ model , fold ] = est return ensemble_scores , base_ensemble
For each base estimator collect models trained on each fold
116
11
226,703
def _create_cv_ensemble ( self , base_ensemble , idx_models_included , model_names = None ) : fitted_models = numpy . empty ( len ( idx_models_included ) , dtype = numpy . object ) for i , idx in enumerate ( idx_models_included ) : model_name = self . base_estimators [ idx ] [ 0 ] if model_names is None else model_names [ idx ] avg_model = EnsembleAverage ( base_ensemble [ idx , : ] , name = model_name ) fitted_models [ i ] = avg_model return fitted_models
For each selected base estimator average models trained on each fold
148
12
226,704
def _get_base_estimators ( self , X ) : base_estimators = [ ] kernel_cache = { } kernel_fns = { } for i , ( name , estimator ) in enumerate ( self . base_estimators ) : if hasattr ( estimator , 'kernel' ) and callable ( estimator . kernel ) : if not hasattr ( estimator , '_get_kernel' ) : raise ValueError ( 'estimator %s uses a custom kernel function, but does not have a _get_kernel method' % name ) kernel_mat = kernel_fns . get ( estimator . kernel , None ) if kernel_mat is None : kernel_mat = estimator . _get_kernel ( X ) kernel_cache [ i ] = kernel_mat kernel_fns [ estimator . kernel ] = kernel_mat kernel_cache [ i ] = kernel_mat # We precompute kernel, but only for training, for testing use original custom kernel function kernel_estimator = clone ( estimator ) kernel_estimator . set_params ( kernel = 'precomputed' ) base_estimators . append ( ( name , kernel_estimator ) ) else : base_estimators . append ( ( name , estimator ) ) return base_estimators , kernel_cache
Takes special care of estimators using custom kernel function
291
11
226,705
def _restore_base_estimators ( self , kernel_cache , out , X , cv ) : train_folds = { fold : train_index for fold , ( train_index , _ ) in enumerate ( cv ) } for idx , fold , _ , est in out : if idx in kernel_cache : if not hasattr ( est , 'fit_X_' ) : raise ValueError ( 'estimator %s uses a custom kernel function, ' 'but does not have the attribute `fit_X_` after training' % self . base_estimators [ idx ] [ 0 ] ) est . set_params ( kernel = self . base_estimators [ idx ] [ 1 ] . kernel ) est . fit_X_ = X [ train_folds [ fold ] ] return out
Restore custom kernel functions of estimators for predictions
182
10
226,706
def _fit_and_score_ensemble ( self , X , y , cv , * * fit_params ) : fit_params_steps = self . _split_fit_params ( fit_params ) folds = list ( cv . split ( X , y ) ) # Take care of custom kernel functions base_estimators , kernel_cache = self . _get_base_estimators ( X ) out = Parallel ( n_jobs = self . n_jobs , verbose = self . verbose ) ( delayed ( _fit_and_score_fold ) ( clone ( estimator ) , X if i not in kernel_cache else kernel_cache [ i ] , y , self . scorer , train_index , test_index , fit_params_steps [ name ] , i , fold ) for i , ( name , estimator ) in enumerate ( base_estimators ) for fold , ( train_index , test_index ) in enumerate ( folds ) ) if len ( kernel_cache ) > 0 : out = self . _restore_base_estimators ( kernel_cache , out , X , folds ) return self . _create_base_ensemble ( out , len ( base_estimators ) , len ( folds ) )
Create a cross - validated model by training a model for each fold with the same model parameters
273
18
226,707
def fit ( self , X , y = None , * * fit_params ) : self . _check_params ( ) cv = check_cv ( self . cv , X ) self . _fit ( X , y , cv , * * fit_params ) return self
Fit ensemble of models
60
4
226,708
def writearff ( data , filename , relation_name = None , index = True ) : if isinstance ( filename , str ) : fp = open ( filename , 'w' ) if relation_name is None : relation_name = os . path . basename ( filename ) else : fp = filename if relation_name is None : relation_name = "pandas" try : data = _write_header ( data , fp , relation_name , index ) fp . write ( "\n" ) _write_data ( data , fp ) finally : fp . close ( )
Write ARFF file
129
4
226,709
def _write_header ( data , fp , relation_name , index ) : fp . write ( "@relation {0}\n\n" . format ( relation_name ) ) if index : data = data . reset_index ( ) attribute_names = _sanitize_column_names ( data ) for column , series in data . iteritems ( ) : name = attribute_names [ column ] fp . write ( "@attribute {0}\t" . format ( name ) ) if is_categorical_dtype ( series ) or is_object_dtype ( series ) : _write_attribute_categorical ( series , fp ) elif numpy . issubdtype ( series . dtype , numpy . floating ) : fp . write ( "real" ) elif numpy . issubdtype ( series . dtype , numpy . integer ) : fp . write ( "integer" ) elif numpy . issubdtype ( series . dtype , numpy . datetime64 ) : fp . write ( "date 'yyyy-MM-dd HH:mm:ss'" ) else : raise TypeError ( 'unsupported type %s' % series . dtype ) fp . write ( "\n" ) return data
Write header containing attribute names and types
276
7
226,710
def _sanitize_column_names ( data ) : new_names = { } for name in data . columns : new_names [ name ] = _ILLEGAL_CHARACTER_PAT . sub ( "_" , name ) return new_names
Replace illegal characters with underscore
56
6
226,711
def _write_data ( data , fp ) : fp . write ( "@data\n" ) def to_str ( x ) : if pandas . isnull ( x ) : return '?' else : return str ( x ) data = data . applymap ( to_str ) n_rows = data . shape [ 0 ] for i in range ( n_rows ) : str_values = list ( data . iloc [ i , : ] . apply ( _check_str_array ) ) line = "," . join ( str_values ) fp . write ( line ) fp . write ( "\n" )
Write the data section
135
4
226,712
def fit ( self , X , y = None , * * fit_params ) : X = numpy . asarray ( X ) self . _fit_estimators ( X , y , * * fit_params ) Xt = self . _predict_estimators ( X ) self . meta_estimator . fit ( Xt , y ) return self
Fit base estimators .
79
5
226,713
def standardize ( table , with_std = True ) : if isinstance ( table , pandas . DataFrame ) : cat_columns = table . select_dtypes ( include = [ 'category' ] ) . columns else : cat_columns = [ ] new_frame = _apply_along_column ( table , standardize_column , with_std = with_std ) # work around for apply converting category dtype to object # https://github.com/pydata/pandas/issues/9573 for col in cat_columns : new_frame [ col ] = table [ col ] . copy ( ) return new_frame
Perform Z - Normalization on each numeric column of the given table .
140
15
226,714
def encode_categorical ( table , columns = None , * * kwargs ) : if isinstance ( table , pandas . Series ) : if not is_categorical_dtype ( table . dtype ) and not table . dtype . char == "O" : raise TypeError ( "series must be of categorical dtype, but was {}" . format ( table . dtype ) ) return _encode_categorical_series ( table , * * kwargs ) def _is_categorical_or_object ( series ) : return is_categorical_dtype ( series . dtype ) or series . dtype . char == "O" if columns is None : # for columns containing categories columns_to_encode = { nam for nam , s in table . iteritems ( ) if _is_categorical_or_object ( s ) } else : columns_to_encode = set ( columns ) items = [ ] for name , series in table . iteritems ( ) : if name in columns_to_encode : series = _encode_categorical_series ( series , * * kwargs ) if series is None : continue items . append ( series ) # concat columns of tables new_table = pandas . concat ( items , axis = 1 , copy = False ) return new_table
Encode categorical columns with M categories into M - 1 columns according to the one - hot scheme .
296
21
226,715
def categorical_to_numeric ( table ) : def transform ( column ) : if is_categorical_dtype ( column . dtype ) : return column . cat . codes if column . dtype . char == "O" : try : nc = column . astype ( numpy . int64 ) except ValueError : classes = column . dropna ( ) . unique ( ) classes . sort ( kind = "mergesort" ) nc = column . replace ( classes , numpy . arange ( classes . shape [ 0 ] ) ) return nc elif column . dtype == bool : return column . astype ( numpy . int64 ) return column if isinstance ( table , pandas . Series ) : return pandas . Series ( transform ( table ) , name = table . name , index = table . index ) else : if _pandas_version_under0p23 : return table . apply ( transform , axis = 0 , reduce = False ) else : return table . apply ( transform , axis = 0 , result_type = 'reduce' )
Encode categorical columns to numeric by converting each category to an integer value .
232
16
226,716
def check_y_survival ( y_or_event , * args , allow_all_censored = False ) : if len ( args ) == 0 : y = y_or_event if not isinstance ( y , numpy . ndarray ) or y . dtype . fields is None or len ( y . dtype . fields ) != 2 : raise ValueError ( 'y must be a structured array with the first field' ' being a binary class event indicator and the second field' ' the time of the event/censoring' ) event_field , time_field = y . dtype . names y_event = y [ event_field ] time_args = ( y [ time_field ] , ) else : y_event = numpy . asanyarray ( y_or_event ) time_args = args event = check_array ( y_event , ensure_2d = False ) if not numpy . issubdtype ( event . dtype , numpy . bool_ ) : raise ValueError ( 'elements of event indicator must be boolean, but found {0}' . format ( event . dtype ) ) if not ( allow_all_censored or numpy . any ( event ) ) : raise ValueError ( 'all samples are censored' ) return_val = [ event ] for i , yt in enumerate ( time_args ) : if yt is None : return_val . append ( yt ) continue yt = check_array ( yt , ensure_2d = False ) if not numpy . issubdtype ( yt . dtype , numpy . number ) : raise ValueError ( 'time must be numeric, but found {} for argument {}' . format ( yt . dtype , i + 2 ) ) return_val . append ( yt ) return tuple ( return_val )
Check that array correctly represents an outcome for survival analysis .
399
11
226,717
def check_arrays_survival ( X , y , * * kwargs ) : event , time = check_y_survival ( y ) kwargs . setdefault ( "dtype" , numpy . float64 ) X = check_array ( X , ensure_min_samples = 2 , * * kwargs ) check_consistent_length ( X , event , time ) return X , event , time
Check that all arrays have consistent first dimensions .
93
9
226,718
def from_arrays ( event , time , name_event = None , name_time = None ) : name_event = name_event or 'event' name_time = name_time or 'time' if name_time == name_event : raise ValueError ( 'name_time must be different from name_event' ) time = numpy . asanyarray ( time , dtype = numpy . float_ ) y = numpy . empty ( time . shape [ 0 ] , dtype = [ ( name_event , numpy . bool_ ) , ( name_time , numpy . float_ ) ] ) y [ name_time ] = time event = numpy . asanyarray ( event ) check_consistent_length ( time , event ) if numpy . issubdtype ( event . dtype , numpy . bool_ ) : y [ name_event ] = event else : events = numpy . unique ( event ) events . sort ( ) if len ( events ) != 2 : raise ValueError ( 'event indicator must be binary' ) if numpy . all ( events == numpy . array ( [ 0 , 1 ] , dtype = events . dtype ) ) : y [ name_event ] = event . astype ( numpy . bool_ ) else : raise ValueError ( 'non-boolean event indicator must contain 0 and 1 only' ) return y
Create structured array .
299
4
226,719
def from_dataframe ( event , time , data ) : if not isinstance ( data , pandas . DataFrame ) : raise TypeError ( "exepected pandas.DataFrame, but got {!r}" . format ( type ( data ) ) ) return Surv . from_arrays ( data . loc [ : , event ] . values , data . loc [ : , time ] . values , name_event = str ( event ) , name_time = str ( time ) )
Create structured array from data frame .
103
7
226,720
def update_terminal_regions ( self , tree , X , y , residual , y_pred , sample_weight , sample_mask , learning_rate = 1.0 , k = 0 ) : # update predictions y_pred [ : , k ] += learning_rate * tree . predict ( X ) . ravel ( )
Least squares does not need to update terminal regions .
71
11
226,721
def build_from_c_and_cpp_files ( extensions ) : for extension in extensions : sources = [ ] for sfile in extension . sources : path , ext = os . path . splitext ( sfile ) if ext in ( '.pyx' , '.py' ) : if extension . language == 'c++' : ext = '.cpp' else : ext = '.c' sfile = path + ext sources . append ( sfile ) extension . sources = sources
Modify the extensions to build from the . c and . cpp files . This is useful for releases this way cython is not required to run python setup . py install .
102
37
226,722
def _count_values ( self ) : indices = { yi : [ i ] for i , yi in enumerate ( self . y ) if self . status [ i ] } return indices
Return dict mapping relevance level to sample index
41
8
226,723
def _create_optimizer ( self , X , y , status ) : if self . optimizer is None : self . optimizer = 'avltree' times , ranks = y if self . optimizer == 'simple' : optimizer = SimpleOptimizer ( X , status , self . alpha , self . rank_ratio , timeit = self . timeit ) elif self . optimizer == 'PRSVM' : optimizer = PRSVMOptimizer ( X , status , self . alpha , self . rank_ratio , timeit = self . timeit ) elif self . optimizer == 'direct-count' : optimizer = LargeScaleOptimizer ( self . alpha , self . rank_ratio , self . fit_intercept , SurvivalCounter ( X , ranks , status , len ( ranks ) , times ) , timeit = self . timeit ) elif self . optimizer == 'rbtree' : optimizer = LargeScaleOptimizer ( self . alpha , self . rank_ratio , self . fit_intercept , OrderStatisticTreeSurvivalCounter ( X , ranks , status , RBTree , times ) , timeit = self . timeit ) elif self . optimizer == 'avltree' : optimizer = LargeScaleOptimizer ( self . alpha , self . rank_ratio , self . fit_intercept , OrderStatisticTreeSurvivalCounter ( X , ranks , status , AVLTree , times ) , timeit = self . timeit ) else : raise ValueError ( 'unknown optimizer: {0}' . format ( self . optimizer ) ) return optimizer
Samples are ordered by relevance
356
6
226,724
def _argsort_and_resolve_ties ( time , random_state ) : n_samples = len ( time ) order = numpy . argsort ( time , kind = "mergesort" ) i = 0 while i < n_samples - 1 : inext = i + 1 while inext < n_samples and time [ order [ i ] ] == time [ order [ inext ] ] : inext += 1 if i + 1 != inext : # resolve ties randomly random_state . shuffle ( order [ i : inext ] ) i = inext return order
Like numpy . argsort but resolves ties uniformly at random
127
12
226,725
def fit ( self , X , y ) : X , event , time = check_arrays_survival ( X , y ) weights = ipc_weights ( event , time ) super ( ) . fit ( X , numpy . log ( time ) , sample_weight = weights ) return self
Build an accelerated failure time model .
63
7
226,726
def fit ( self , linear_predictor , event , time ) : risk_score = numpy . exp ( linear_predictor ) order = numpy . argsort ( time , kind = "mergesort" ) risk_score = risk_score [ order ] uniq_times , n_events , n_at_risk = _compute_counts ( event , time , order ) divisor = numpy . empty ( n_at_risk . shape , dtype = numpy . float_ ) value = numpy . sum ( risk_score ) divisor [ 0 ] = value k = 0 for i in range ( 1 , len ( n_at_risk ) ) : d = n_at_risk [ i - 1 ] - n_at_risk [ i ] value -= risk_score [ k : ( k + d ) ] . sum ( ) k += d divisor [ i ] = value assert k == n_at_risk [ 0 ] - n_at_risk [ - 1 ] y = numpy . cumsum ( n_events / divisor ) self . cum_baseline_hazard_ = StepFunction ( uniq_times , y ) self . baseline_survival_ = StepFunction ( self . cum_baseline_hazard_ . x , numpy . exp ( - self . cum_baseline_hazard_ . y ) ) return self
Compute baseline cumulative hazard function .
304
7
226,727
def nlog_likelihood ( self , w ) : time = self . time n_samples = self . x . shape [ 0 ] xw = numpy . dot ( self . x , w ) loss = 0 risk_set = 0 k = 0 for i in range ( n_samples ) : ti = time [ i ] while k < n_samples and ti == time [ k ] : risk_set += numpy . exp ( xw [ k ] ) k += 1 if self . event [ i ] : loss -= ( xw [ i ] - numpy . log ( risk_set ) ) / n_samples # add regularization term to log-likelihood return loss + self . alpha * squared_norm ( w ) / ( 2. * n_samples )
Compute negative partial log - likelihood
170
7
226,728
def update ( self , w , offset = 0 ) : time = self . time x = self . x exp_xw = numpy . exp ( offset + numpy . dot ( x , w ) ) n_samples , n_features = x . shape gradient = numpy . zeros ( ( 1 , n_features ) , dtype = float ) hessian = numpy . zeros ( ( n_features , n_features ) , dtype = float ) inv_n_samples = 1. / n_samples risk_set = 0 risk_set_x = 0 risk_set_xx = 0 k = 0 # iterate time in descending order for i in range ( n_samples ) : ti = time [ i ] while k < n_samples and ti == time [ k ] : risk_set += exp_xw [ k ] # preserve 2D shape of row vector xk = x [ k : k + 1 ] risk_set_x += exp_xw [ k ] * xk # outer product xx = numpy . dot ( xk . T , xk ) risk_set_xx += exp_xw [ k ] * xx k += 1 if self . event [ i ] : gradient -= ( x [ i : i + 1 ] - risk_set_x / risk_set ) * inv_n_samples a = risk_set_xx / risk_set z = risk_set_x / risk_set # outer product b = numpy . dot ( z . T , z ) hessian += ( a - b ) * inv_n_samples if self . alpha > 0 : gradient += self . alpha * inv_n_samples * w diag_idx = numpy . diag_indices ( n_features ) hessian [ diag_idx ] += self . alpha * inv_n_samples self . gradient = gradient . ravel ( ) self . hessian = hessian
Compute gradient and Hessian matrix with respect to w .
430
12
226,729
def fit ( self , X , y ) : X , event , time = check_arrays_survival ( X , y ) if self . alpha < 0 : raise ValueError ( "alpha must be positive, but was %r" % self . alpha ) optimizer = CoxPHOptimizer ( X , event , time , self . alpha ) verbose_reporter = VerboseReporter ( self . verbose ) w = numpy . zeros ( X . shape [ 1 ] ) w_prev = w i = 0 loss = float ( 'inf' ) while True : if i >= self . n_iter : verbose_reporter . end_max_iter ( i ) warnings . warn ( ( 'Optimization did not converge: Maximum number of iterations has been exceeded.' ) , stacklevel = 2 , category = ConvergenceWarning ) break optimizer . update ( w ) delta = solve ( optimizer . hessian , optimizer . gradient , overwrite_a = False , overwrite_b = False , check_finite = False ) if not numpy . all ( numpy . isfinite ( delta ) ) : raise ValueError ( "search direction contains NaN or infinite values" ) w_new = w - delta loss_new = optimizer . nlog_likelihood ( w_new ) verbose_reporter . update ( i , delta , loss_new ) if loss_new > loss : # perform step-halving if negative log-likelihood does not decrease w = ( w_prev + w ) / 2 loss = optimizer . nlog_likelihood ( w ) verbose_reporter . step_halving ( i , loss ) i += 1 continue w_prev = w w = w_new res = numpy . abs ( 1 - ( loss_new / loss ) ) if res < self . tol : verbose_reporter . end_converged ( i ) break loss = loss_new i += 1 self . coef_ = w self . _baseline_model . fit ( numpy . dot ( X , self . coef_ ) , event , time ) return self
Minimize negative partial log - likelihood for provided data .
457
11
226,730
def _compute_counts ( event , time , order = None ) : n_samples = event . shape [ 0 ] if order is None : order = numpy . argsort ( time , kind = "mergesort" ) uniq_times = numpy . empty ( n_samples , dtype = time . dtype ) uniq_events = numpy . empty ( n_samples , dtype = numpy . int_ ) uniq_counts = numpy . empty ( n_samples , dtype = numpy . int_ ) i = 0 prev_val = time [ order [ 0 ] ] j = 0 while True : count_event = 0 count = 0 while i < n_samples and prev_val == time [ order [ i ] ] : if event [ order [ i ] ] : count_event += 1 count += 1 i += 1 uniq_times [ j ] = prev_val uniq_events [ j ] = count_event uniq_counts [ j ] = count j += 1 if i == n_samples : break prev_val = time [ order [ i ] ] times = numpy . resize ( uniq_times , j ) n_events = numpy . resize ( uniq_events , j ) total_count = numpy . resize ( uniq_counts , j ) # offset cumulative sum by one total_count = numpy . concatenate ( ( [ 0 ] , total_count ) ) n_at_risk = n_samples - numpy . cumsum ( total_count ) return times , n_events , n_at_risk [ : - 1 ]
Count right censored and uncensored samples at each unique time point .
359
14
226,731
def _compute_counts_truncated ( event , time_enter , time_exit ) : if ( time_enter > time_exit ) . any ( ) : raise ValueError ( "exit time must be larger start time for all samples" ) n_samples = event . shape [ 0 ] uniq_times = numpy . sort ( numpy . unique ( numpy . concatenate ( ( time_enter , time_exit ) ) ) , kind = "mergesort" ) total_counts = numpy . empty ( len ( uniq_times ) , dtype = numpy . int_ ) event_counts = numpy . empty ( len ( uniq_times ) , dtype = numpy . int_ ) order_enter = numpy . argsort ( time_enter , kind = "mergesort" ) order_exit = numpy . argsort ( time_exit , kind = "mergesort" ) s_time_enter = time_enter [ order_enter ] s_time_exit = time_exit [ order_exit ] t0 = uniq_times [ 0 ] # everything larger is included idx_enter = numpy . searchsorted ( s_time_enter , t0 , side = "right" ) # everything smaller is excluded idx_exit = numpy . searchsorted ( s_time_exit , t0 , side = "left" ) total_counts [ 0 ] = idx_enter # except people die on the day they enter event_counts [ 0 ] = 0 for i in range ( 1 , len ( uniq_times ) ) : ti = uniq_times [ i ] while idx_enter < n_samples and s_time_enter [ idx_enter ] <= ti : idx_enter += 1 while idx_exit < n_samples and s_time_exit [ idx_exit ] < ti : idx_exit += 1 risk_set = numpy . setdiff1d ( order_enter [ : idx_enter ] , order_exit [ : idx_exit ] , assume_unique = True ) total_counts [ i ] = len ( risk_set ) count_event = 0 k = idx_exit while k < n_samples and s_time_exit [ k ] == ti : if event [ order_exit [ k ] ] : count_event += 1 k += 1 event_counts [ i ] = count_event return uniq_times , event_counts , total_counts
Compute counts for left truncated and right censored survival data .
554
13
226,732
def kaplan_meier_estimator ( event , time_exit , time_enter = None , time_min = None ) : event , time_enter , time_exit = check_y_survival ( event , time_enter , time_exit , allow_all_censored = True ) check_consistent_length ( event , time_enter , time_exit ) if time_enter is None : uniq_times , n_events , n_at_risk = _compute_counts ( event , time_exit ) else : uniq_times , n_events , n_at_risk = _compute_counts_truncated ( event , time_enter , time_exit ) values = 1 - n_events / n_at_risk if time_min is not None : mask = uniq_times >= time_min uniq_times = numpy . compress ( mask , uniq_times ) values = numpy . compress ( mask , values ) y = numpy . cumprod ( values ) return uniq_times , y
Kaplan - Meier estimator of survival function .
232
10
226,733
def nelson_aalen_estimator ( event , time ) : event , time = check_y_survival ( event , time ) check_consistent_length ( event , time ) uniq_times , n_events , n_at_risk = _compute_counts ( event , time ) y = numpy . cumsum ( n_events / n_at_risk ) return uniq_times , y
Nelson - Aalen estimator of cumulative hazard function .
95
13
226,734
def ipc_weights ( event , time ) : if event . all ( ) : return numpy . ones ( time . shape [ 0 ] ) unique_time , p = kaplan_meier_estimator ( ~ event , time ) idx = numpy . searchsorted ( unique_time , time [ event ] ) Ghat = p [ idx ] assert ( Ghat > 0 ) . all ( ) weights = numpy . zeros ( time . shape [ 0 ] ) weights [ event ] = 1.0 / Ghat return weights
Compute inverse probability of censoring weights
117
8
226,735
def fit ( self , y ) : event , time = check_y_survival ( y , allow_all_censored = True ) unique_time , prob = kaplan_meier_estimator ( event , time ) self . unique_time_ = numpy . concatenate ( ( [ - numpy . infty ] , unique_time ) ) self . prob_ = numpy . concatenate ( ( [ 1. ] , prob ) ) return self
Estimate survival distribution from training data .
101
8
226,736
def predict_proba ( self , time ) : check_is_fitted ( self , "unique_time_" ) time = check_array ( time , ensure_2d = False ) # K-M is undefined if estimate at last time point is non-zero extends = time > self . unique_time_ [ - 1 ] if self . prob_ [ - 1 ] > 0 and extends . any ( ) : raise ValueError ( "time must be smaller than largest " "observed time point: {}" . format ( self . unique_time_ [ - 1 ] ) ) # beyond last time point is zero probability Shat = numpy . empty ( time . shape , dtype = float ) Shat [ extends ] = 0.0 valid = ~ extends time = time [ valid ] idx = numpy . searchsorted ( self . unique_time_ , time ) # for non-exact matches, we need to shift the index to left eps = numpy . finfo ( self . unique_time_ . dtype ) . eps exact = numpy . absolute ( self . unique_time_ [ idx ] - time ) < eps idx [ ~ exact ] -= 1 Shat [ valid ] = self . prob_ [ idx ] return Shat
Return probability of an event after given time point .
273
10
226,737
def fit ( self , y ) : event , time = check_y_survival ( y ) if event . all ( ) : self . unique_time_ = numpy . unique ( time ) self . prob_ = numpy . ones ( self . unique_time_ . shape [ 0 ] ) else : unique_time , prob = kaplan_meier_estimator ( ~ event , time ) self . unique_time_ = numpy . concatenate ( ( [ - numpy . infty ] , unique_time ) ) self . prob_ = numpy . concatenate ( ( [ 1. ] , prob ) ) return self
Estimate censoring distribution from training data .
138
9
226,738
def predict_ipcw ( self , y ) : event , time = check_y_survival ( y ) Ghat = self . predict_proba ( time [ event ] ) if ( Ghat == 0.0 ) . any ( ) : raise ValueError ( "censoring survival function is zero at one or more time points" ) weights = numpy . zeros ( time . shape [ 0 ] ) weights [ event ] = 1.0 / Ghat return weights
Return inverse probability of censoring weights at given time points .
101
12
226,739
def concordance_index_censored ( event_indicator , event_time , estimate , tied_tol = 1e-8 ) : event_indicator , event_time , estimate = _check_inputs ( event_indicator , event_time , estimate ) w = numpy . ones_like ( estimate ) return _estimate_concordance_index ( event_indicator , event_time , estimate , w , tied_tol )
Concordance index for right - censored data
101
10
226,740
def concordance_index_ipcw ( survival_train , survival_test , estimate , tau = None , tied_tol = 1e-8 ) : test_event , test_time = check_y_survival ( survival_test ) if tau is not None : survival_test = survival_test [ test_time < tau ] estimate = check_array ( estimate , ensure_2d = False ) check_consistent_length ( test_event , test_time , estimate ) cens = CensoringDistributionEstimator ( ) cens . fit ( survival_train ) ipcw = cens . predict_ipcw ( survival_test ) w = numpy . square ( ipcw ) return _estimate_concordance_index ( test_event , test_time , estimate , w , tied_tol )
Concordance index for right - censored data based on inverse probability of censoring weights .
186
19
226,741
def _nominal_kernel ( x , y , out ) : for i in range ( x . shape [ 0 ] ) : for j in range ( y . shape [ 0 ] ) : out [ i , j ] += ( x [ i , : ] == y [ j , : ] ) . sum ( ) return out
Number of features that match exactly
68
6
226,742
def _get_continuous_and_ordinal_array ( x ) : nominal_columns = x . select_dtypes ( include = [ 'object' , 'category' ] ) . columns ordinal_columns = pandas . Index ( [ v for v in nominal_columns if x [ v ] . cat . ordered ] ) continuous_columns = x . select_dtypes ( include = [ numpy . number ] ) . columns x_num = x . loc [ : , continuous_columns ] . astype ( numpy . float64 ) . values if len ( ordinal_columns ) > 0 : x = _ordinal_as_numeric ( x , ordinal_columns ) nominal_columns = nominal_columns . difference ( ordinal_columns ) x_out = numpy . column_stack ( ( x_num , x ) ) else : x_out = x_num return x_out , nominal_columns
Convert array from continuous and ordered categorical columns
210
10
226,743
def clinical_kernel ( x , y = None ) : if y is not None : if x . shape [ 1 ] != y . shape [ 1 ] : raise ValueError ( 'x and y have different number of features' ) if not x . columns . equals ( y . columns ) : raise ValueError ( 'columns do not match' ) else : y = x mat = numpy . zeros ( ( x . shape [ 0 ] , y . shape [ 0 ] ) , dtype = float ) x_numeric , nominal_columns = _get_continuous_and_ordinal_array ( x ) if id ( x ) != id ( y ) : y_numeric , _ = _get_continuous_and_ordinal_array ( y ) else : y_numeric = x_numeric continuous_ordinal_kernel ( x_numeric , y_numeric , mat ) _nominal_kernel ( x . loc [ : , nominal_columns ] . values , y . loc [ : , nominal_columns ] . values , mat ) mat /= x . shape [ 1 ] return mat
Computes clinical kernel
242
4
226,744
def _prepare_by_column_dtype ( self , X ) : if not isinstance ( X , pandas . DataFrame ) : raise TypeError ( 'X must be a pandas DataFrame' ) numeric_columns = [ ] nominal_columns = [ ] numeric_ranges = [ ] fit_data = numpy . empty_like ( X ) for i , dt in enumerate ( X . dtypes ) : col = X . iloc [ : , i ] if is_categorical_dtype ( dt ) : if col . cat . ordered : numeric_ranges . append ( col . cat . codes . max ( ) - col . cat . codes . min ( ) ) numeric_columns . append ( i ) else : nominal_columns . append ( i ) col = col . cat . codes elif is_numeric_dtype ( dt ) : numeric_ranges . append ( col . max ( ) - col . min ( ) ) numeric_columns . append ( i ) else : raise TypeError ( 'unsupported dtype: %r' % dt ) fit_data [ : , i ] = col . values self . _numeric_columns = numpy . asarray ( numeric_columns ) self . _nominal_columns = numpy . asarray ( nominal_columns ) self . _numeric_ranges = numpy . asarray ( numeric_ranges , dtype = float ) self . X_fit_ = fit_data
Get distance functions for each column s dtype
329
9
226,745
def fit ( self , X , y = None , * * kwargs ) : if X . ndim != 2 : raise ValueError ( "expected 2d array, but got %d" % X . ndim ) if self . fit_once : self . X_fit_ = X else : self . _prepare_by_column_dtype ( X ) return self
Determine transformation parameters from data in X .
81
10
226,746
def transform ( self , Y ) : check_is_fitted ( self , 'X_fit_' ) n_samples_x , n_features = self . X_fit_ . shape Y = numpy . asarray ( Y ) if Y . shape [ 1 ] != n_features : raise ValueError ( 'expected array with %d features, but got %d' % ( n_features , Y . shape [ 1 ] ) ) n_samples_y = Y . shape [ 0 ] mat = numpy . zeros ( ( n_samples_y , n_samples_x ) , dtype = float ) continuous_ordinal_kernel_with_ranges ( Y [ : , self . _numeric_columns ] . astype ( numpy . float64 ) , self . X_fit_ [ : , self . _numeric_columns ] . astype ( numpy . float64 ) , self . _numeric_ranges , mat ) if len ( self . _nominal_columns ) > 0 : _nominal_kernel ( Y [ : , self . _nominal_columns ] , self . X_fit_ [ : , self . _nominal_columns ] , mat ) mat /= n_features return mat
r Compute all pairwise distances between self . X_fit_ and Y .
277
17
226,747
def _fit_stage_componentwise ( X , residuals , sample_weight , * * fit_params ) : n_features = X . shape [ 1 ] base_learners = [ ] error = numpy . empty ( n_features ) for component in range ( n_features ) : learner = ComponentwiseLeastSquares ( component ) . fit ( X , residuals , sample_weight ) l_pred = learner . predict ( X ) error [ component ] = squared_norm ( residuals - l_pred ) base_learners . append ( learner ) # TODO: could use bottleneck.nanargmin for speed best_component = numpy . nanargmin ( error ) best_learner = base_learners [ best_component ] return best_learner
Fit component - wise weighted least squares model
169
8
226,748
def coef_ ( self ) : coef = numpy . zeros ( self . n_features_ + 1 , dtype = float ) for estimator in self . estimators_ : coef [ estimator . component ] += self . learning_rate * estimator . coef_ return coef
Return the aggregated coefficients .
65
6
226,749
def _fit_stage ( self , i , X , y , y_pred , sample_weight , sample_mask , random_state , scale , X_idx_sorted , X_csc = None , X_csr = None ) : assert sample_mask . dtype == numpy . bool loss = self . loss_ # whether to use dropout in next iteration do_dropout = self . dropout_rate > 0. and 0 < i < len ( scale ) - 1 for k in range ( loss . K ) : residual = loss . negative_gradient ( y , y_pred , k = k , sample_weight = sample_weight ) # induce regression tree on residuals tree = DecisionTreeRegressor ( criterion = self . criterion , splitter = 'best' , max_depth = self . max_depth , min_samples_split = self . min_samples_split , min_samples_leaf = self . min_samples_leaf , min_weight_fraction_leaf = self . min_weight_fraction_leaf , min_impurity_split = self . min_impurity_split , min_impurity_decrease = self . min_impurity_decrease , max_features = self . max_features , max_leaf_nodes = self . max_leaf_nodes , random_state = random_state , presort = self . presort ) if self . subsample < 1.0 : # no inplace multiplication! sample_weight = sample_weight * sample_mask . astype ( numpy . float64 ) X = X_csr if X_csr is not None else X tree . fit ( X , residual , sample_weight = sample_weight , check_input = False , X_idx_sorted = X_idx_sorted ) # add tree to ensemble self . estimators_ [ i , k ] = tree # update tree leaves if do_dropout : # select base learners to be dropped for next iteration drop_model , n_dropped = _sample_binomial_plus_one ( self . dropout_rate , i + 1 , random_state ) # adjust scaling factor of tree that is going to be trained in next iteration scale [ i + 1 ] = 1. / ( n_dropped + 1. ) y_pred [ : , k ] = 0 for m in range ( i + 1 ) : if drop_model [ m ] == 1 : # adjust scaling factor of dropped trees scale [ m ] *= n_dropped / ( n_dropped + 1. ) else : # pseudoresponse of next iteration (without contribution of dropped trees) y_pred [ : , k ] += self . learning_rate * scale [ m ] * self . estimators_ [ m , k ] . predict ( X ) . ravel ( ) else : # update tree leaves loss . update_terminal_regions ( tree . tree_ , X , y , residual , y_pred , sample_weight , sample_mask , self . learning_rate , k = k ) return y_pred
Fit another stage of n_classes_ trees to the boosting model .
673
14
226,750
def _fit_stages ( self , X , y , y_pred , sample_weight , random_state , begin_at_stage = 0 , monitor = None , X_idx_sorted = None ) : n_samples = X . shape [ 0 ] do_oob = self . subsample < 1.0 sample_mask = numpy . ones ( ( n_samples , ) , dtype = numpy . bool ) n_inbag = max ( 1 , int ( self . subsample * n_samples ) ) loss_ = self . loss_ if self . verbose : verbose_reporter = VerboseReporter ( self . verbose ) verbose_reporter . init ( self , begin_at_stage ) X_csc = csc_matrix ( X ) if issparse ( X ) else None X_csr = csr_matrix ( X ) if issparse ( X ) else None if self . dropout_rate > 0. : scale = numpy . ones ( self . n_estimators , dtype = float ) else : scale = None # perform boosting iterations i = begin_at_stage for i in range ( begin_at_stage , self . n_estimators ) : # subsampling if do_oob : sample_mask = _random_sample_mask ( n_samples , n_inbag , random_state ) # OOB score before adding this stage y_oob_sample = y [ ~ sample_mask ] old_oob_score = loss_ ( y_oob_sample , y_pred [ ~ sample_mask ] , sample_weight [ ~ sample_mask ] ) # fit next stage of trees y_pred = self . _fit_stage ( i , X , y , y_pred , sample_weight , sample_mask , random_state , scale , X_idx_sorted , X_csc , X_csr ) # track deviance (= loss) if do_oob : self . train_score_ [ i ] = loss_ ( y [ sample_mask ] , y_pred [ sample_mask ] , sample_weight [ sample_mask ] ) self . oob_improvement_ [ i ] = ( old_oob_score - loss_ ( y_oob_sample , y_pred [ ~ sample_mask ] , sample_weight [ ~ sample_mask ] ) ) else : # no need to fancy index w/ no subsampling self . train_score_ [ i ] = loss_ ( y , y_pred , sample_weight ) if self . verbose > 0 : verbose_reporter . update ( i , self ) if monitor is not None : early_stopping = monitor ( i , self , locals ( ) ) if early_stopping : break if self . dropout_rate > 0. : self . scale_ = scale return i + 1
Iteratively fits the stages .
634
6
226,751
def fit ( self , X , y , sample_weight = None , monitor = None ) : random_state = check_random_state ( self . random_state ) X , event , time = check_arrays_survival ( X , y , accept_sparse = [ 'csr' , 'csc' , 'coo' ] , dtype = DTYPE ) n_samples , self . n_features_ = X . shape X = X . astype ( DTYPE ) if sample_weight is None : sample_weight = numpy . ones ( n_samples , dtype = numpy . float32 ) else : sample_weight = column_or_1d ( sample_weight , warn = True ) check_consistent_length ( X , sample_weight ) self . _check_params ( ) self . loss_ = LOSS_FUNCTIONS [ self . loss ] ( 1 ) if isinstance ( self . loss_ , ( CensoredSquaredLoss , IPCWLeastSquaresError ) ) : time = numpy . log ( time ) self . _init_state ( ) self . init_ . fit ( X , ( event , time ) , sample_weight ) y_pred = self . init_ . predict ( X ) begin_at_stage = 0 if self . presort is True and issparse ( X ) : raise ValueError ( "Presorting is not supported for sparse matrices." ) presort = self . presort # Allow presort to be 'auto', which means True if the dataset is dense, # otherwise it will be False. if presort == 'auto' : presort = not issparse ( X ) X_idx_sorted = None if presort : X_idx_sorted = numpy . asfortranarray ( numpy . argsort ( X , axis = 0 ) , dtype = numpy . int32 ) # fit the boosting stages y = numpy . fromiter ( zip ( event , time ) , dtype = [ ( 'event' , numpy . bool ) , ( 'time' , numpy . float64 ) ] ) n_stages = self . _fit_stages ( X , y , y_pred , sample_weight , random_state , begin_at_stage , monitor , X_idx_sorted ) # change shape of arrays after fit (early-stopping or additional tests) if n_stages != self . estimators_ . shape [ 0 ] : self . estimators_ = self . estimators_ [ : n_stages ] self . train_score_ = self . train_score_ [ : n_stages ] if hasattr ( self , 'oob_improvement_' ) : self . oob_improvement_ = self . oob_improvement_ [ : n_stages ] self . n_estimators_ = n_stages return self
Fit the gradient boosting model .
632
6
226,752
def staged_predict ( self , X ) : check_is_fitted ( self , 'estimators_' ) # if dropout wasn't used during training, proceed as usual, # otherwise consider scaling factor of individual trees if not hasattr ( self , "scale_" ) : for y in self . _staged_decision_function ( X ) : yield self . _scale_prediction ( y . ravel ( ) ) else : for y in self . _dropout_staged_decision_function ( X ) : yield self . _scale_prediction ( y . ravel ( ) )
Predict hazard at each stage for X .
132
9
226,753
def fit ( self , X , y ) : X , event , time = check_arrays_survival ( X , y ) self . _fit ( X , event , time ) return self
Build a MINLIP survival model from training data .
41
11
226,754
def predict ( self , X ) : K = self . _get_kernel ( X , self . X_fit_ ) pred = - numpy . dot ( self . coef_ , K . T ) return pred . ravel ( )
Predict risk score of experiencing an event .
51
9
226,755
def get_x_y ( data_frame , attr_labels , pos_label = None , survival = True ) : if survival : if len ( attr_labels ) != 2 : raise ValueError ( "expected sequence of length two for attr_labels, but got %d" % len ( attr_labels ) ) if pos_label is None : raise ValueError ( "pos_label needs to be specified if survival=True" ) return _get_x_y_survival ( data_frame , attr_labels [ 0 ] , attr_labels [ 1 ] , pos_label ) return _get_x_y_other ( data_frame , attr_labels )
Split data frame into features and labels .
157
8
226,756
def load_arff_files_standardized ( path_training , attr_labels , pos_label = None , path_testing = None , survival = True , standardize_numeric = True , to_numeric = True ) : dataset = loadarff ( path_training ) if "index" in dataset . columns : dataset . index = dataset [ "index" ] . astype ( object ) dataset . drop ( "index" , axis = 1 , inplace = True ) x_train , y_train = get_x_y ( dataset , attr_labels , pos_label , survival ) if path_testing is not None : x_test , y_test = _load_arff_testing ( path_testing , attr_labels , pos_label , survival ) if len ( x_train . columns . symmetric_difference ( x_test . columns ) ) > 0 : warnings . warn ( "Restricting columns to intersection between training and testing data" , stacklevel = 2 ) cols = x_train . columns . intersection ( x_test . columns ) if len ( cols ) == 0 : raise ValueError ( "columns of training and test data do not intersect" ) x_train = x_train . loc [ : , cols ] x_test = x_test . loc [ : , cols ] x = safe_concat ( ( x_train , x_test ) , axis = 0 ) if standardize_numeric : x = standardize ( x ) if to_numeric : x = categorical_to_numeric ( x ) n_train = x_train . shape [ 0 ] x_train = x . iloc [ : n_train , : ] x_test = x . iloc [ n_train : , : ] else : if standardize_numeric : x_train = standardize ( x_train ) if to_numeric : x_train = categorical_to_numeric ( x_train ) x_test = None y_test = None return x_train , y_train , x_test , y_test
Load dataset in ARFF format .
458
7
226,757
def load_aids ( endpoint = "aids" ) : labels_aids = [ 'censor' , 'time' ] labels_death = [ 'censor_d' , 'time_d' ] if endpoint == "aids" : attr_labels = labels_aids drop_columns = labels_death elif endpoint == "death" : attr_labels = labels_death drop_columns = labels_aids else : raise ValueError ( "endpoint must be 'aids' or 'death'" ) fn = resource_filename ( __name__ , 'data/actg320.arff' ) x , y = get_x_y ( loadarff ( fn ) , attr_labels = attr_labels , pos_label = '1' ) x . drop ( drop_columns , axis = 1 , inplace = True ) return x , y
Load and return the AIDS Clinical Trial dataset
199
8
226,758
def _api_scrape ( json_inp , ndx ) : try : headers = json_inp [ 'resultSets' ] [ ndx ] [ 'headers' ] values = json_inp [ 'resultSets' ] [ ndx ] [ 'rowSet' ] except KeyError : # This is so ugly but this is what you get when your data comes out # in not a standard format try : headers = json_inp [ 'resultSet' ] [ ndx ] [ 'headers' ] values = json_inp [ 'resultSet' ] [ ndx ] [ 'rowSet' ] except KeyError : # Added for results that only include one set (ex. LeagueLeaders) headers = json_inp [ 'resultSet' ] [ 'headers' ] values = json_inp [ 'resultSet' ] [ 'rowSet' ] if HAS_PANDAS : return DataFrame ( values , columns = headers ) else : # Taken from www.github.com/bradleyfay/py-goldsberry return [ dict ( zip ( headers , value ) ) for value in values ]
Internal method to streamline the getting of data from the json
244
12
226,759
def get_player ( first_name , last_name = None , season = constants . CURRENT_SEASON , only_current = 0 , just_id = True ) : if last_name is None : name = first_name . lower ( ) else : name = '{}, {}' . format ( last_name , first_name ) . lower ( ) pl = PlayerList ( season = season , only_current = only_current ) . info ( ) hdr = 'DISPLAY_LAST_COMMA_FIRST' if HAS_PANDAS : item = pl [ pl . DISPLAY_LAST_COMMA_FIRST . str . lower ( ) == name ] else : item = next ( plyr for plyr in pl if str ( plyr [ hdr ] ) . lower ( ) == name ) if len ( item ) == 0 : raise PlayerNotFoundException elif just_id : return item [ 'PERSON_ID' ] else : return item
Calls our PlayerList class to get a full list of players and then returns just an id if specified or the full row of player information
211
28
226,760
def respond_to_ask ( self , message ) : valid_actions , hole_card , round_state = self . __parse_ask_message ( message ) return self . declare_action ( valid_actions , hole_card , round_state )
Called from Dealer when ask message received from RoundManager
54
11
226,761
def receive_notification ( self , message ) : msg_type = message [ "message_type" ] if msg_type == "game_start_message" : info = self . __parse_game_start_message ( message ) self . receive_game_start_message ( info ) elif msg_type == "round_start_message" : round_count , hole , seats = self . __parse_round_start_message ( message ) self . receive_round_start_message ( round_count , hole , seats ) elif msg_type == "street_start_message" : street , state = self . __parse_street_start_message ( message ) self . receive_street_start_message ( street , state ) elif msg_type == "game_update_message" : new_action , round_state = self . __parse_game_update_message ( message ) self . receive_game_update_message ( new_action , round_state ) elif msg_type == "round_result_message" : winners , hand_info , state = self . __parse_round_result_message ( message ) self . receive_round_result_message ( winners , hand_info , state )
Called from Dealer when notification received from RoundManager
267
10
226,762
async def result_continuation ( task ) : await asyncio . sleep ( 0.1 ) num , res = task . result ( ) return num , res * 2
A preliminary result processor we ll chain on to the original task This will get executed wherever the source task was executed in this case one of the threads in the ThreadPoolExecutor
36
35
226,763
async def result_processor ( tasks ) : output = { } for task in tasks : num , res = await task output [ num ] = res return output
An async result aggregator that combines all the results This gets executed in unsync . loop and unsync . thread
33
23
226,764
def read_union ( fo , writer_schema , reader_schema = None ) : # schema resolution index = read_long ( fo ) if reader_schema : # Handle case where the reader schema is just a single type (not union) if not isinstance ( reader_schema , list ) : if match_types ( writer_schema [ index ] , reader_schema ) : return read_data ( fo , writer_schema [ index ] , reader_schema ) else : for schema in reader_schema : if match_types ( writer_schema [ index ] , schema ) : return read_data ( fo , writer_schema [ index ] , schema ) msg = 'schema mismatch: %s not found in %s' % ( writer_schema , reader_schema ) raise SchemaResolutionError ( msg ) else : return read_data ( fo , writer_schema [ index ] )
A union is encoded by first writing a long value indicating the zero - based position within the union of the schema of its value .
201
26
226,765
def read_data ( fo , writer_schema , reader_schema = None ) : record_type = extract_record_type ( writer_schema ) logical_type = extract_logical_type ( writer_schema ) if reader_schema and record_type in AVRO_TYPES : # If the schemas are the same, set the reader schema to None so that no # schema resolution is done for this call or future recursive calls if writer_schema == reader_schema : reader_schema = None else : match_schemas ( writer_schema , reader_schema ) reader_fn = READERS . get ( record_type ) if reader_fn : try : data = reader_fn ( fo , writer_schema , reader_schema ) except StructError : raise EOFError ( 'cannot read %s from %s' % ( record_type , fo ) ) if 'logicalType' in writer_schema : fn = LOGICAL_READERS . get ( logical_type ) if fn : return fn ( data , writer_schema , reader_schema ) if reader_schema is not None : return maybe_promote ( data , record_type , extract_record_type ( reader_schema ) ) else : return data else : return read_data ( fo , SCHEMA_DEFS [ record_type ] , SCHEMA_DEFS . get ( reader_schema ) )
Read data from file object according to schema .
315
9
226,766
def _iter_avro_records ( fo , header , codec , writer_schema , reader_schema ) : sync_marker = header [ 'sync' ] read_block = BLOCK_READERS . get ( codec ) if not read_block : raise ValueError ( 'Unrecognized codec: %r' % codec ) block_count = 0 while True : try : block_count = read_long ( fo ) except StopIteration : return block_fo = read_block ( fo ) for i in xrange ( block_count ) : yield read_data ( block_fo , writer_schema , reader_schema ) skip_sync ( fo , sync_marker )
Return iterator over avro records .
151
7
226,767
def _iter_avro_blocks ( fo , header , codec , writer_schema , reader_schema ) : sync_marker = header [ 'sync' ] read_block = BLOCK_READERS . get ( codec ) if not read_block : raise ValueError ( 'Unrecognized codec: %r' % codec ) while True : offset = fo . tell ( ) try : num_block_records = read_long ( fo ) except StopIteration : return block_bytes = read_block ( fo ) skip_sync ( fo , sync_marker ) size = fo . tell ( ) - offset yield Block ( block_bytes , num_block_records , codec , reader_schema , writer_schema , offset , size )
Return iterator over avro blocks .
164
7
226,768
def prepare_timestamp_millis ( data , schema ) : if isinstance ( data , datetime . datetime ) : if data . tzinfo is not None : delta = ( data - epoch ) return int ( delta . total_seconds ( ) * MLS_PER_SECOND ) t = int ( time . mktime ( data . timetuple ( ) ) ) * MLS_PER_SECOND + int ( data . microsecond / 1000 ) return t else : return data
Converts datetime . datetime object to int timestamp with milliseconds
102
13
226,769
def prepare_timestamp_micros ( data , schema ) : if isinstance ( data , datetime . datetime ) : if data . tzinfo is not None : delta = ( data - epoch ) return int ( delta . total_seconds ( ) * MCS_PER_SECOND ) t = int ( time . mktime ( data . timetuple ( ) ) ) * MCS_PER_SECOND + data . microsecond return t else : return data
Converts datetime . datetime to int timestamp with microseconds
99
13
226,770
def prepare_date ( data , schema ) : if isinstance ( data , datetime . date ) : return data . toordinal ( ) - DAYS_SHIFT else : return data
Converts datetime . date to int timestamp
40
9
226,771
def prepare_uuid ( data , schema ) : if isinstance ( data , uuid . UUID ) : return str ( data ) else : return data
Converts uuid . UUID to string formatted UUID xxxxxxxx - xxxx - xxxx - xxxx - xxxxxxxxxxxx
33
28
226,772
def prepare_time_millis ( data , schema ) : if isinstance ( data , datetime . time ) : return int ( data . hour * MLS_PER_HOUR + data . minute * MLS_PER_MINUTE + data . second * MLS_PER_SECOND + int ( data . microsecond / 1000 ) ) else : return data
Convert datetime . time to int timestamp with milliseconds
75
11
226,773
def prepare_time_micros ( data , schema ) : if isinstance ( data , datetime . time ) : return long ( data . hour * MCS_PER_HOUR + data . minute * MCS_PER_MINUTE + data . second * MCS_PER_SECOND + data . microsecond ) else : return data
Convert datetime . time to int timestamp with microseconds
73
12
226,774
def prepare_bytes_decimal ( data , schema ) : if not isinstance ( data , decimal . Decimal ) : return data scale = schema . get ( 'scale' , 0 ) # based on https://github.com/apache/avro/pull/82/ sign , digits , exp = data . as_tuple ( ) if - exp > scale : raise ValueError ( 'Scale provided in schema does not match the decimal' ) delta = exp + scale if delta > 0 : digits = digits + ( 0 , ) * delta unscaled_datum = 0 for digit in digits : unscaled_datum = ( unscaled_datum * 10 ) + digit bits_req = unscaled_datum . bit_length ( ) + 1 if sign : unscaled_datum = ( 1 << bits_req ) - unscaled_datum bytes_req = bits_req // 8 padding_bits = ~ ( ( 1 << bits_req ) - 1 ) if sign else 0 packed_bits = padding_bits | unscaled_datum bytes_req += 1 if ( bytes_req << 3 ) < bits_req else 0 tmp = MemoryIO ( ) for index in range ( bytes_req - 1 , - 1 , - 1 ) : bits_to_write = packed_bits >> ( 8 * index ) tmp . write ( mk_bits ( bits_to_write & 0xff ) ) return tmp . getvalue ( )
Convert decimal . Decimal to bytes
307
8
226,775
def prepare_fixed_decimal ( data , schema ) : if not isinstance ( data , decimal . Decimal ) : return data scale = schema . get ( 'scale' , 0 ) size = schema [ 'size' ] # based on https://github.com/apache/avro/pull/82/ sign , digits , exp = data . as_tuple ( ) if - exp > scale : raise ValueError ( 'Scale provided in schema does not match the decimal' ) delta = exp + scale if delta > 0 : digits = digits + ( 0 , ) * delta unscaled_datum = 0 for digit in digits : unscaled_datum = ( unscaled_datum * 10 ) + digit bits_req = unscaled_datum . bit_length ( ) + 1 size_in_bits = size * 8 offset_bits = size_in_bits - bits_req mask = 2 ** size_in_bits - 1 bit = 1 for i in range ( bits_req ) : mask ^= bit bit <<= 1 if bits_req < 8 : bytes_req = 1 else : bytes_req = bits_req // 8 if bits_req % 8 != 0 : bytes_req += 1 tmp = MemoryIO ( ) if sign : unscaled_datum = ( 1 << bits_req ) - unscaled_datum unscaled_datum = mask | unscaled_datum for index in range ( size - 1 , - 1 , - 1 ) : bits_to_write = unscaled_datum >> ( 8 * index ) tmp . write ( mk_bits ( bits_to_write & 0xff ) ) else : for i in range ( offset_bits // 8 ) : tmp . write ( mk_bits ( 0 ) ) for index in range ( bytes_req - 1 , - 1 , - 1 ) : bits_to_write = unscaled_datum >> ( 8 * index ) tmp . write ( mk_bits ( bits_to_write & 0xff ) ) return tmp . getvalue ( )
Converts decimal . Decimal to fixed length bytes array
436
11
226,776
def write_crc32 ( fo , bytes ) : data = crc32 ( bytes ) & 0xFFFFFFFF fo . write ( pack ( '>I' , data ) )
A 4 - byte big - endian CRC32 checksum
39
12
226,777
def write_union ( fo , datum , schema ) : if isinstance ( datum , tuple ) : ( name , datum ) = datum for index , candidate in enumerate ( schema ) : if extract_record_type ( candidate ) == 'record' : schema_name = candidate [ 'name' ] else : schema_name = candidate if name == schema_name : break else : msg = 'provided union type name %s not found in schema %s' % ( name , schema ) raise ValueError ( msg ) else : pytype = type ( datum ) best_match_index = - 1 most_fields = - 1 for index , candidate in enumerate ( schema ) : if validate ( datum , candidate , raise_errors = False ) : if extract_record_type ( candidate ) == 'record' : fields = len ( candidate [ 'fields' ] ) if fields > most_fields : best_match_index = index most_fields = fields else : best_match_index = index break if best_match_index < 0 : msg = '%r (type %s) do not match %s' % ( datum , pytype , schema ) raise ValueError ( msg ) index = best_match_index # write data write_long ( fo , index ) write_data ( fo , datum , schema [ index ] )
A union is encoded by first writing a long value indicating the zero - based position within the union of the schema of its value . The value is then encoded per the indicated schema within the union .
289
39
226,778
def write_data ( fo , datum , schema ) : record_type = extract_record_type ( schema ) logical_type = extract_logical_type ( schema ) fn = WRITERS . get ( record_type ) if fn : if logical_type : prepare = LOGICAL_WRITERS . get ( logical_type ) if prepare : datum = prepare ( datum , schema ) return fn ( fo , datum , schema ) else : return write_data ( fo , datum , SCHEMA_DEFS [ record_type ] )
Write a datum of data to output stream .
119
10
226,779
def null_write_block ( fo , block_bytes ) : write_long ( fo , len ( block_bytes ) ) fo . write ( block_bytes )
Write block in null codec .
35
6
226,780
def deflate_write_block ( fo , block_bytes ) : # The first two characters and last character are zlib # wrappers around deflate data. data = compress ( block_bytes ) [ 2 : - 1 ] write_long ( fo , len ( data ) ) fo . write ( data )
Write block in deflate codec .
65
7
226,781
def schemaless_writer ( fo , schema , record ) : schema = parse_schema ( schema ) write_data ( fo , record , schema )
Write a single record without the schema or header information
33
10
226,782
def validate_int ( datum , * * kwargs ) : return ( ( isinstance ( datum , ( int , long , numbers . Integral ) ) and INT_MIN_VALUE <= datum <= INT_MAX_VALUE and not isinstance ( datum , bool ) ) or isinstance ( datum , ( datetime . time , datetime . datetime , datetime . date ) ) )
Check that the data value is a non floating point number with size less that Int32 . Also support for logicalType timestamp validation with datetime .
87
29
226,783
def validate_float ( datum , * * kwargs ) : return ( isinstance ( datum , ( int , long , float , numbers . Real ) ) and not isinstance ( datum , bool ) )
Check that the data value is a floating point number or double precision .
46
14
226,784
def validate_record ( datum , schema , parent_ns = None , raise_errors = True ) : _ , namespace = schema_name ( schema , parent_ns ) return ( isinstance ( datum , Mapping ) and all ( validate ( datum = datum . get ( f [ 'name' ] , f . get ( 'default' , no_value ) ) , schema = f [ 'type' ] , field = '{}.{}' . format ( namespace , f [ 'name' ] ) , raise_errors = raise_errors ) for f in schema [ 'fields' ] ) )
Check that the data is a Mapping type with all schema defined fields validated as True .
131
18
226,785
def validate_union ( datum , schema , parent_ns = None , raise_errors = True ) : if isinstance ( datum , tuple ) : ( name , datum ) = datum for candidate in schema : if extract_record_type ( candidate ) == 'record' : if name == candidate [ "name" ] : return validate ( datum , schema = candidate , field = parent_ns , raise_errors = raise_errors ) else : return False errors = [ ] for s in schema : try : ret = validate ( datum , schema = s , field = parent_ns , raise_errors = raise_errors ) if ret : # We exit on the first passing type in Unions return True except ValidationError as e : errors . extend ( e . errors ) if raise_errors : raise ValidationError ( * errors ) return False
Check that the data is a list type with possible options to validate as True .
181
16
226,786
def validate_many ( records , schema , raise_errors = True ) : errors = [ ] results = [ ] for record in records : try : results . append ( validate ( record , schema , raise_errors = raise_errors ) ) except ValidationError as e : errors . extend ( e . errors ) if raise_errors and errors : raise ValidationError ( * errors ) return all ( results )
Validate a list of data!
85
7
226,787
def parse_schema ( schema , _write_hint = True , _force = False ) : if _force : return _parse_schema ( schema , "" , _write_hint ) elif isinstance ( schema , dict ) and "__fastavro_parsed" in schema : return schema else : return _parse_schema ( schema , "" , _write_hint )
Returns a parsed avro schema
87
6
226,788
def load_schema ( schema_path ) : with open ( schema_path ) as fd : schema = json . load ( fd ) schema_dir , schema_file = path . split ( schema_path ) return _load_schema ( schema , schema_dir )
Returns a schema loaded from the file at schema_path .
60
12
226,789
def showtip ( self , text ) : self . text = text if self . tipwindow or not self . text : return x , y , cx , cy = self . widget . bbox ( "insert" ) x = x + self . widget . winfo_rootx ( ) + 27 y = y + cy + self . widget . winfo_rooty ( ) + 27 self . tipwindow = tw = tk . Toplevel ( self . widget ) tw . wm_overrideredirect ( 1 ) tw . wm_geometry ( "+%d+%d" % ( x , y ) ) try : # For Mac OS tw . tk . call ( "::tk::unsupported::MacWindowStyle" , "style" , tw . _w , "help" , "noActivates" ) except tk . TclError : pass label = tk . Label ( tw , text = self . text , justify = tk . LEFT , background = "#ffffe0" , foreground = "black" , relief = tk . SOLID , borderwidth = 1 , font = ( "tahoma" , "8" , "normal" ) ) label . pack ( ipadx = 1 )
Display text in tooltip window
264
5
226,790
def run ( self ) : self . toplevel . protocol ( "WM_DELETE_WINDOW" , self . __on_window_close ) self . toplevel . mainloop ( )
Ejecute the main loop .
44
8
226,791
def create_regpoly ( self , x0 , y0 , x1 , y1 , sides = 0 , start = 90 , extent = 360 , * * kw ) : coords = self . __regpoly_coords ( x0 , y0 , x1 , y1 , sides , start , extent ) return self . canvas . create_polygon ( * coords , * * kw )
Create a regular polygon
87
5
226,792
def __regpoly_coords ( self , x0 , y0 , x1 , y1 , sides , start , extent ) : coords = [ ] if extent == 0 : return coords xm = ( x0 + x1 ) / 2. ym = ( y0 + y1 ) / 2. rx = xm - x0 ry = ym - y0 n = sides if n == 0 : # 0 sides => circle n = round ( ( rx + ry ) * .5 ) if n < 2 : n = 4 # Extent can be negative dirv = 1 if extent > 0 else - 1 if abs ( extent ) > 360 : extent = dirv * abs ( extent ) % 360 step = dirv * 360 / n numsteps = 1 + extent / float ( step ) numsteps_int = int ( numsteps ) i = 0 while i < numsteps_int : rad = ( start - i * step ) * DEG2RAD x = rx * math . cos ( rad ) y = ry * math . sin ( rad ) coords . append ( ( xm + x , ym - y ) ) i += 1 # Figure out where last segment should end if numsteps != numsteps_int : # Vecter V1 is last drawn vertext (x,y) from above # Vector V2 is the edge of the polygon rad2 = ( start - numsteps_int * step ) * DEG2RAD x2 = rx * math . cos ( rad2 ) - x y2 = ry * math . sin ( rad2 ) - y # Vector V3 is unit vector in direction we end at rad3 = ( start - extent ) * DEG2RAD x3 = math . cos ( rad3 ) y3 = math . sin ( rad3 ) # Find where V3 crosses V1+V2 => find j s.t. V1 + kV2 = jV3 j = ( x * y2 - x2 * y ) / ( x3 * y2 - x2 * y3 ) coords . append ( ( xm + j * x3 , ym - j * y3 ) ) return coords
Create the coordinates of the regular polygon specified
475
9
226,793
def get_image ( self , path ) : image = '' name = os . path . basename ( path ) if not StockImage . is_registered ( name ) : ipath = self . __find_image ( path ) if ipath is not None : StockImage . register ( name , ipath ) else : msg = "Image '{0}' not found in resource paths." . format ( name ) logger . warning ( msg ) try : image = StockImage . get ( name ) except StockImageException : # TODO: notify something here. pass return image
Return tk image corresponding to name which is taken form path .
120
13
226,794
def import_variables ( self , container , varnames = None ) : if varnames is None : for keyword in self . tkvariables : setattr ( container , keyword , self . tkvariables [ keyword ] ) else : for keyword in varnames : if keyword in self . tkvariables : setattr ( container , keyword , self . tkvariables [ keyword ] )
Helper method to avoid call get_variable for every variable .
84
12
226,795
def create_variable ( self , varname , vtype = None ) : var_types = ( 'string' , 'int' , 'boolean' , 'double' ) vname = varname var = None type_from_name = 'string' # default type if ':' in varname : type_from_name , vname = varname . split ( ':' ) # Fix incorrect order bug #33 if type_from_name not in ( var_types ) : # Swap order type_from_name , vname = vname , type_from_name if type_from_name not in ( var_types ) : raise Exception ( 'Undefined variable type in "{0}"' . format ( varname ) ) if vname in self . tkvariables : var = self . tkvariables [ vname ] else : if vtype is None : # get type from name if type_from_name == 'int' : var = tkinter . IntVar ( ) elif type_from_name == 'boolean' : var = tkinter . BooleanVar ( ) elif type_from_name == 'double' : var = tkinter . DoubleVar ( ) else : var = tkinter . StringVar ( ) else : var = vtype ( ) self . tkvariables [ vname ] = var return var
Create a tk variable . If the variable was created previously return that instance .
293
16
226,796
def add_from_file ( self , fpath ) : if self . tree is None : base , name = os . path . split ( fpath ) self . add_resource_path ( base ) self . tree = tree = ET . parse ( fpath ) self . root = tree . getroot ( ) self . objects = { } else : # TODO: append to current tree pass
Load ui definition from file .
83
7
226,797
def add_from_string ( self , strdata ) : if self . tree is None : self . tree = tree = ET . ElementTree ( ET . fromstring ( strdata ) ) self . root = tree . getroot ( ) self . objects = { } else : # TODO: append to current tree pass
Load ui definition from string .
67
7
226,798
def add_from_xmlnode ( self , element ) : if self . tree is None : root = ET . Element ( 'interface' ) root . append ( element ) self . tree = tree = ET . ElementTree ( root ) self . root = tree . getroot ( ) self . objects = { } # ET.dump(tree) else : # TODO: append to current tree pass
Load ui definition from xml . etree . Element node .
83
13
226,799
def get_object ( self , name , master = None ) : widget = None if name in self . objects : widget = self . objects [ name ] . widget else : xpath = ".//object[@id='{0}']" . format ( name ) node = self . tree . find ( xpath ) if node is not None : root = BuilderObject ( self , dict ( ) ) root . widget = master bobject = self . _realize ( root , node ) widget = bobject . widget if widget is None : msg = 'Widget "{0}" not defined.' . format ( name ) raise Exception ( msg ) return widget
Find and create the widget named name . Use master as parent . If widget was already created return that instance .
135
22