Columns: idx (int64, 0 to 63k) · question (string, 61 to 4.03k chars) · target (string, 6 to 1.23k chars).
38,700
def diff_packages ( pkg1 , pkg2 = None ) : if pkg2 is None : it = iter_packages ( pkg1 . name ) pkgs = [ x for x in it if x . version < pkg1 . version ] if not pkgs : raise RezError ( "No package to diff with - %s is the earliest " "package version" % pkg1 . qualified_name ) pkgs = sorted ( pkgs , key = lambda x : x . version ) pkg2 = pkgs [ - 1 ] def _check_pkg ( pkg ) : if not ( pkg . vcs and pkg . revision ) : raise RezError ( "Cannot diff package %s: it is a legacy format " "package that does not contain enough information" % pkg . qualified_name ) _check_pkg ( pkg1 ) _check_pkg ( pkg2 ) path = mkdtemp ( prefix = "rez-pkg-diff" ) paths = [ ] for pkg in ( pkg1 , pkg2 ) : print "Exporting %s..." % pkg . qualified_name path_ = os . path . join ( path , pkg . qualified_name ) vcs_cls_1 = plugin_manager . get_plugin_class ( "release_vcs" , pkg1 . vcs ) vcs_cls_1 . export ( revision = pkg . revision , path = path_ ) paths . append ( path_ ) difftool = config . difftool print "Opening diff viewer %s..." % difftool proc = Popen ( [ difftool ] + paths ) proc . wait ( )
Invoke a diff editor to show the difference between the source of two packages.
38,701
def sigint_handler ( signum , frame ) : global _handled_int if not _handled_int : _handled_int = True if not _env_var_true ( "_REZ_QUIET_ON_SIG" ) : print >> sys . stderr , "Interrupted by user" sigbase_handler ( signum , frame )
Exit gracefully on Ctrl-C.
38,702
def sigterm_handler ( signum , frame ) : global _handled_term if not _handled_term : _handled_term = True if not _env_var_true ( "_REZ_QUIET_ON_SIG" ) : print >> sys . stderr , "Terminated by user" sigbase_handler ( signum , frame )
Exit gracefully on SIGTERM.
38,703
def format_help ( self ) : if self . _subparsers : for action in self . _subparsers . _actions : if isinstance ( action , LazySubParsersAction ) : for parser_name , parser in action . _name_parser_map . iteritems ( ) : action . _setup_subparser ( parser_name , parser ) return super ( LazyArgumentParser , self ) . format_help ( )
Sets up all sub-parsers when help is requested.
38,704
def is_valid_package_name ( name , raise_error = False ) : is_valid = PACKAGE_NAME_REGEX . match ( name ) if raise_error and not is_valid : raise PackageRequestError ( "Not a valid package name: %r" % name ) return is_valid
Test the validity of a package name string.
38,705
def expand_abbreviations ( txt , fields ) : def _expand ( matchobj ) : s = matchobj . group ( "var" ) if s not in fields : matches = [ x for x in fields if x . startswith ( s ) ] if len ( matches ) == 1 : s = matches [ 0 ] return "{%s}" % s return re . sub ( FORMAT_VAR_REGEX , _expand , txt )
Expand abbreviations in a format string.
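For example, a small usage sketch (hypothetical field names; assumes FORMAT_VAR_REGEX matches {name}-style fields, its exact pattern being defined elsewhere in rez):

    fields = ["name", "version", "base"]
    expand_abbreviations("hello {nam}-{ver}", fields)
    # -> "hello {name}-{version}"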
38,706
def dict_to_attributes_code ( dict_ ) : lines = [ ] for key , value in dict_ . iteritems ( ) : if isinstance ( value , dict ) : txt = dict_to_attributes_code ( value ) lines_ = txt . split ( '\n' ) for line in lines_ : if not line . startswith ( ' ' ) : line = "%s.%s" % ( key , line ) lines . append ( line ) else : value_txt = pformat ( value ) if '\n' in value_txt : lines . append ( "%s = \\" % key ) value_txt = indent ( value_txt ) lines . extend ( value_txt . split ( '\n' ) ) else : line = "%s = %s" % ( key , value_txt ) lines . append ( line ) return '\n' . join ( lines )
Given a nested dict, generate equivalent Python code.
38,707
def columnise ( rows , padding = 2 ) : strs = [ ] maxwidths = { } for row in rows : for i , e in enumerate ( row ) : se = str ( e ) nse = len ( se ) w = maxwidths . get ( i , - 1 ) if nse > w : maxwidths [ i ] = nse for row in rows : s = '' for i , e in enumerate ( row ) : se = str ( e ) if i < len ( row ) - 1 : n = maxwidths [ i ] + padding - len ( se ) se += ' ' * n s += se strs . append ( s ) return strs
Format rows of entries into aligned columns, returning one string per row.
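A small usage sketch, assuming the columnise function above is in scope (the row values are illustrative):

    rows = [("name", "version"),
            ("python", "2.7.11"),
            ("rez", "2.23.1")]
    for line in columnise(rows):
        print(line)
    # name    version
    # python  2.7.11
    # rez     2.23.1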
38,708
def print_colored_columns ( printer , rows , padding = 2 ) : rows_ = [ x [ : - 1 ] for x in rows ] colors = [ x [ - 1 ] for x in rows ] for col , line in zip ( colors , columnise ( rows_ , padding = padding ) ) : printer ( line , col )
Like columnise, but with colored rows.
38,709
def expanduser ( path ) : if '~' not in path : return path if os . name == "nt" : if 'HOME' in os . environ : userhome = os . environ [ 'HOME' ] elif 'USERPROFILE' in os . environ : userhome = os . environ [ 'USERPROFILE' ] elif 'HOMEPATH' in os . environ : drive = os . environ . get ( 'HOMEDRIVE' , '' ) userhome = os . path . join ( drive , os . environ [ 'HOMEPATH' ] ) else : return path else : userhome = os . path . expanduser ( '~' ) def _expanduser ( path ) : return EXPANDUSER_RE . sub ( lambda m : m . groups ( ) [ 0 ] + userhome + m . groups ( ) [ 1 ] , path ) return os . path . normpath ( _expanduser ( path ) )
Expand ~ to the home directory in the given string.
38,710
def as_block_string ( txt ) : import json lines = [ ] for line in txt . split ( '\n' ) : line_ = json . dumps ( line ) line_ = line_ [ 1 : - 1 ] . rstrip ( ) lines . append ( line_ ) return '"""\n%s\n"""' % '\n' . join ( lines )
Return a string formatted as a Python block comment string, like the one you're currently reading. Special characters are escaped if necessary.
38,711
def format ( self , s , pretty = None , expand = None ) : if pretty is None : pretty = self . format_pretty if expand is None : expand = self . format_expand formatter = ObjectStringFormatter ( self , pretty = pretty , expand = expand ) return formatter . format ( s )
Format a string.
38,712
def copy ( self ) : other = Version ( None ) other . tokens = self . tokens [ : ] other . seps = self . seps [ : ] return other
Returns a copy of the version.
38,713
def trim ( self , len_ ) : other = Version ( None ) other . tokens = self . tokens [ : len_ ] other . seps = self . seps [ : len_ - 1 ] return other
Return a copy of the version, possibly with fewer tokens.
38,714
def union ( self , other ) : if not hasattr ( other , "__iter__" ) : other = [ other ] bounds = self . bounds [ : ] for range in other : bounds += range . bounds bounds = self . _union ( bounds ) range = VersionRange ( None ) range . bounds = bounds return range
OR together version ranges.
38,715
def intersection ( self , other ) : if not hasattr ( other , "__iter__" ) : other = [ other ] bounds = self . bounds for range in other : bounds = self . _intersection ( bounds , range . bounds ) if not bounds : return None range = VersionRange ( None ) range . bounds = bounds return range
AND together version ranges.
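A hypothetical usage sketch of both set operations, assuming rez's vendored version module and its range syntax ("1.2+<2" meaning at least 1.2 but below 2):

    from rez.vendor.version.version import VersionRange

    a = VersionRange("1.2+<2")
    b = VersionRange("1.5+<3")
    print(a.union(b))         # 1.2+<3
    print(a.intersection(b))  # 1.5+<2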
38,716
def inverse ( self ) : if self . is_any ( ) : return None else : bounds = self . _inverse ( self . bounds ) range = VersionRange ( None ) range . bounds = bounds return range
Calculate the inverse of the range.
38,717
def split ( self ) : ranges = [ ] for bound in self . bounds : range = VersionRange ( None ) range . bounds = [ bound ] ranges . append ( range ) return ranges
Split into separate contiguous ranges.
38,718
def as_span ( cls , lower_version = None , upper_version = None , lower_inclusive = True , upper_inclusive = True ) : lower = ( None if lower_version is None else _LowerBound ( lower_version , lower_inclusive ) ) upper = ( None if upper_version is None else _UpperBound ( upper_version , upper_inclusive ) ) bound = _Bound ( lower , upper ) range = cls ( None ) range . bounds = [ bound ] return range
Create a range from lower_version..upper_version.
38,719
def from_version ( cls , version , op = None ) : lower = None upper = None if op is None : lower = _LowerBound ( version , True ) upper = _UpperBound ( version . next ( ) , False ) elif op in ( "eq" , "==" ) : lower = _LowerBound ( version , True ) upper = _UpperBound ( version , True ) elif op in ( "gt" , ">" ) : lower = _LowerBound ( version , False ) elif op in ( "gte" , ">=" ) : lower = _LowerBound ( version , True ) elif op in ( "lt" , "<" ) : upper = _UpperBound ( version , False ) elif op in ( "lte" , "<=" ) : upper = _UpperBound ( version , True ) else : raise VersionError ( "Unknown bound operation '%s'" % op ) bound = _Bound ( lower , upper ) range = cls ( None ) range . bounds = [ bound ] return range
Create a range from a version.
38,720
def from_versions ( cls , versions ) : range = cls ( None ) range . bounds = [ ] for version in dedup ( sorted ( versions ) ) : lower = _LowerBound ( version , True ) upper = _UpperBound ( version , True ) bound = _Bound ( lower , upper ) range . bounds . append ( bound ) return range
Create a range from a list of versions.
38,721
def to_versions ( self ) : versions = [ ] for bound in self . bounds : if bound . lower . inclusive and bound . upper . inclusive and ( bound . lower . version == bound . upper . version ) : versions . append ( bound . lower . version ) return versions or None
Returns exact version ranges as Version objects, or None if there are no exact version ranges present.
38,722
def contains_version ( self , version ) : if len ( self . bounds ) < 5 : for bound in self . bounds : i = bound . version_containment ( version ) if i == 0 : return True if i == - 1 : return False else : _ , contains = self . _contains_version ( version ) return contains return False
Returns True if version is contained in this range.
38,723
def iter_intersecting ( self , iterable , key = None , descending = False ) : return _ContainsVersionIterator ( self , iterable , key , descending , mode = _ContainsVersionIterator . MODE_INTERSECTING )
Like iter_intersect_test, but returns intersections only.
38,724
def iter_non_intersecting ( self , iterable , key = None , descending = False ) : return _ContainsVersionIterator ( self , iterable , key , descending , mode = _ContainsVersionIterator . MODE_NON_INTERSECTING )
Like iter_intersect_test, but returns non-intersections only.
38,725
def span ( self ) : other = VersionRange ( None ) bound = _Bound ( self . bounds [ 0 ] . lower , self . bounds [ - 1 ] . upper ) other . bounds = [ bound ] return other
Return a contiguous range that is a superset of this range.
38,726
def visit_versions ( self , func ) : for bound in self . bounds : if bound . lower is not _LowerBound . min : result = func ( bound . lower . version ) if isinstance ( result , Version ) : bound . lower . version = result if bound . upper is not _UpperBound . inf : result = func ( bound . upper . version ) if isinstance ( result , Version ) : bound . upper . version = result
Visit each version in the range and apply a function to each.
38,727
def send ( self , url , data , headers ) : req = urllib2 . Request ( url , headers = headers ) try : response = urlopen ( url = req , data = data , timeout = self . timeout , verify_ssl = self . verify_ssl , ca_certs = self . ca_certs , ) except urllib2 . HTTPError as exc : msg = exc . headers . get ( 'x-sentry-error' ) code = exc . getcode ( ) if code == 429 : try : retry_after = int ( exc . headers . get ( 'retry-after' ) ) except ( ValueError , TypeError ) : retry_after = 0 raise RateLimited ( msg , retry_after ) elif msg : raise APIError ( msg , code ) else : raise return response
Sends a request to a remote web server using HTTP POST.
38,728
def extract_auth_vars ( request ) : if request . META . get ( 'HTTP_X_SENTRY_AUTH' , '' ) . startswith ( 'Sentry' ) : return request . META [ 'HTTP_X_SENTRY_AUTH' ] elif request . META . get ( 'HTTP_AUTHORIZATION' , '' ) . startswith ( 'Sentry' ) : return request . META [ 'HTTP_AUTHORIZATION' ] else : args = [ '%s=%s' % i for i in request . GET . items ( ) if i [ 0 ] . startswith ( 'sentry_' ) and i [ 0 ] != 'sentry_data' ] if args : return 'Sentry %s' % ', ' . join ( args ) return None
raven-js will pass both Authorization and X-Sentry-Auth headers, depending on the browser and server configuration.
38,729
def _get_value ( self , exc_type , exc_value , exc_traceback ) : stack_info = get_stack_info ( iter_traceback_frames ( exc_traceback ) , transformer = self . transform , capture_locals = self . client . capture_locals , ) exc_module = getattr ( exc_type , '__module__' , None ) if exc_module : exc_module = str ( exc_module ) exc_type = getattr ( exc_type , '__name__' , '<unknown>' ) return { 'value' : to_unicode ( exc_value ) , 'type' : str ( exc_type ) , 'module' : to_unicode ( exc_module ) , 'stacktrace' : stack_info , }
Convert exception info to a value for the values list.
38,730
def record ( message = None , timestamp = None , level = None , category = None , data = None , type = None , processor = None ) : if timestamp is None : timestamp = time ( ) for ctx in raven . context . get_active_contexts ( ) : ctx . breadcrumbs . record ( timestamp , level , message , category , data , type , processor )
Records a breadcrumb for all active clients. This is what integration code should use, rather than invoking the captureBreadcrumb method on a specific client.
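A minimal sketch of how integration code might call this, assuming raven exposes it as raven.breadcrumbs.record (the DSN is a placeholder):

    import raven
    from raven import breadcrumbs

    client = raven.Client("https://public:secret@sentry.example.com/1")
    breadcrumbs.record(message="user clicked checkout",
                       category="ui.click", level="info")
    # the breadcrumb is attached to the next event any active client captures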
38,731
def ignore_logger ( name_or_logger , allow_level = None ) : def handler ( logger , level , msg , args , kwargs ) : if allow_level is not None and level >= allow_level : return False return True register_special_log_handler ( name_or_logger , handler )
Ignores a logger during breadcrumb recording.
38,732
def transform ( self , value , ** kwargs ) : if value is None : return None objid = id ( value ) if objid in self . context : return '<...>' self . context . add ( objid ) try : for serializer in self . serializers : try : if serializer . can ( value ) : return serializer . serialize ( value , ** kwargs ) except Exception as e : logger . exception ( e ) return text_type ( type ( value ) ) try : return repr ( value ) except Exception as e : logger . exception ( e ) return text_type ( type ( value ) ) finally : self . context . remove ( objid )
Primary function that recursively transforms values via their serializers.
38,733
def send ( self , auth_header = None , callback = None , ** data ) : message = self . encode ( data ) return self . send_encoded ( message , auth_header = auth_header , callback = callback )
Serializes the message and passes the payload on to send_encoded.
38,734
def _send_remote ( self , url , data , headers = None , callback = None ) : if headers is None : headers = { } return AsyncHTTPClient ( ) . fetch ( url , callback , method = "POST" , body = data , headers = headers , validate_cert = self . validate_cert )
Initialise a Tornado AsyncHTTPClient and send the request to the Sentry server. If the callback is callable, it will be called with the response.
38,735
def get_sentry_data_from_request ( self ) : return { 'request' : { 'url' : self . request . full_url ( ) , 'method' : self . request . method , 'data' : self . request . body , 'query_string' : self . request . query , 'cookies' : self . request . headers . get ( 'Cookie' , None ) , 'headers' : dict ( self . request . headers ) , } }
Extracts the data required for sentry.interfaces.Http from the current request being handled by the request handler.
38,736
def get_public_dsn ( self , scheme = None ) : if self . is_enabled ( ) : url = self . remote . get_public_dsn ( ) if scheme : return '%s:%s' % ( scheme , url ) return url
Returns a public DSN which is consumable by raven-js.
38,737
def capture ( self , event_type , data = None , date = None , time_spent = None , extra = None , stack = None , tags = None , sample_rate = None , ** kwargs ) : if not self . is_enabled ( ) : return exc_info = kwargs . get ( 'exc_info' ) if exc_info is not None : if self . skip_error_for_logging ( exc_info ) : return elif not self . should_capture ( exc_info ) : self . logger . info ( 'Not capturing exception due to filters: %s' , exc_info [ 0 ] , exc_info = sys . exc_info ( ) ) return self . record_exception_seen ( exc_info ) data = self . build_msg ( event_type , data , date , time_spent , extra , stack , tags = tags , ** kwargs ) if sample_rate is None : sample_rate = self . sample_rate if self . _random . random ( ) < sample_rate : self . send ( ** data ) self . _local_state . last_event_id = data [ 'event_id' ] return data [ 'event_id' ]
Captures and processes an event and pipes it off to SentryClient.send.
38,738
def _log_failed_submission ( self , data ) : message = data . pop ( 'message' , '<no message value>' ) output = [ message ] if 'exception' in data and 'stacktrace' in data [ 'exception' ] [ 'values' ] [ - 1 ] : for frame in data [ 'exception' ] [ 'values' ] [ - 1 ] [ 'stacktrace' ] . get ( 'frames' , [ ] ) : output . append ( ' File "%(fn)s", line %(lineno)s, in %(func)s' % { 'fn' : frame . get ( 'filename' , 'unknown_filename' ) , 'lineno' : frame . get ( 'lineno' , - 1 ) , 'func' : frame . get ( 'function' , 'unknown_function' ) , } ) self . uncaught_logger . error ( output )
Log a reasonable representation of an event that should have been sent to Sentry.
38,739
def send_encoded ( self , message , auth_header = None , ** kwargs ) : client_string = 'raven-python/%s' % ( raven . VERSION , ) if not auth_header : timestamp = time . time ( ) auth_header = get_auth_header ( protocol = self . protocol_version , timestamp = timestamp , client = client_string , api_key = self . remote . public_key , api_secret = self . remote . secret_key , ) headers = { 'User-Agent' : client_string , 'X-Sentry-Auth' : auth_header , 'Content-Encoding' : self . get_content_encoding ( ) , 'Content-Type' : 'application/octet-stream' , } return self . send_remote ( url = self . remote . store_endpoint , data = message , headers = headers , ** kwargs )
Given an already-serialized message, signs the message and passes the payload off to send_remote.
38,740
def captureQuery ( self , query , params = ( ) , engine = None , ** kwargs ) : return self . capture ( 'raven.events.Query' , query = query , params = params , engine = engine , ** kwargs )
Creates an event for a SQL query.
38,741
def captureBreadcrumb ( self , * args , ** kwargs ) : self . context . breadcrumbs . record ( * args , ** kwargs )
Records a breadcrumb with the current context. It will be sent with the next event.
38,742
def register_scheme ( self , scheme , cls ) : if scheme in self . _schemes : raise DuplicateScheme ( ) urlparse . register_scheme ( scheme ) self . _schemes [ scheme ] = cls
Register a new transport scheme at runtime.
38,743
def get_http_info ( self , request ) : if self . is_json_type ( request . mimetype ) : retriever = self . get_json_data else : retriever = self . get_form_data return self . get_http_info_with_retriever ( request , retriever )
Determine how to retrieve actual data by using request.mimetype.
38,744
def setup_logging ( handler , exclude = EXCLUDE_LOGGER_DEFAULTS ) : logger = logging . getLogger ( ) if handler . __class__ in map ( type , logger . handlers ) : return False logger . addHandler ( handler ) for logger_name in exclude : logger = logging . getLogger ( logger_name ) logger . propagate = False logger . addHandler ( logging . StreamHandler ( ) ) return True
Configures logging to pipe to Sentry.
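A usage sketch wiring the standard logging module to Sentry via raven's logging handler (the DSN is a placeholder):

    import logging
    from raven import Client
    from raven.conf import setup_logging
    from raven.handlers.logging import SentryHandler

    client = Client("https://public:secret@sentry.example.com/1")
    handler = SentryHandler(client, level=logging.ERROR)
    setup_logging(handler)  # returns False if an equivalent handler exists
    logging.getLogger(__name__).error("something broke")  # sent to Sentry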
38,745
def to_dict ( dictish ) : if hasattr ( dictish , 'iterkeys' ) : m = dictish . iterkeys elif hasattr ( dictish , 'keys' ) : m = dictish . keys else : raise ValueError ( dictish ) return dict ( ( k , dictish [ k ] ) for k in m ( ) )
Given something that closely resembles a dictionary, we attempt to coerce it into a proper dictionary.
38,746
def slim_frame_data ( frames , frame_allowance = 25 ) : frames_len = 0 app_frames = [ ] system_frames = [ ] for frame in frames : frames_len += 1 if frame . get ( 'in_app' ) : app_frames . append ( frame ) else : system_frames . append ( frame ) if frames_len <= frame_allowance : return frames remaining = frames_len - frame_allowance app_count = len ( app_frames ) system_allowance = max ( frame_allowance - app_count , 0 ) if system_allowance : half_max = int ( system_allowance / 2 ) for frame in system_frames [ half_max : - half_max ] : frame . pop ( 'vars' , None ) frame . pop ( 'pre_context' , None ) frame . pop ( 'post_context' , None ) remaining -= 1 else : for frame in system_frames : frame . pop ( 'vars' , None ) frame . pop ( 'pre_context' , None ) frame . pop ( 'post_context' , None ) remaining -= 1 if remaining : app_allowance = app_count - remaining half_max = int ( app_allowance / 2 ) for frame in app_frames [ half_max : - half_max ] : frame . pop ( 'vars' , None ) frame . pop ( 'pre_context' , None ) frame . pop ( 'post_context' , None ) return frames
Removes various excess metadata from middle frames which go beyond frame_allowance.
38,747
def get_data_from_request ( ) : return { 'request' : { 'url' : '%s://%s%s' % ( web . ctx [ 'protocol' ] , web . ctx [ 'host' ] , web . ctx [ 'path' ] ) , 'query_string' : web . ctx . query , 'method' : web . ctx . method , 'data' : web . data ( ) , 'headers' : dict ( get_headers ( web . ctx . environ ) ) , 'env' : dict ( get_environ ( web . ctx . environ ) ) , } }
Returns request data extracted from web.ctx.
38,748
def get_regex ( resolver_or_pattern ) : try : regex = resolver_or_pattern . regex except AttributeError : regex = resolver_or_pattern . pattern . regex return regex
Utility method for Django's deprecated resolver.regex.
38,749
def once ( func ) : lock = threading . Lock ( ) def new_func ( * args , ** kwargs ) : if new_func . called : return with lock : if new_func . called : return rv = func ( * args , ** kwargs ) new_func . called = True return rv new_func = update_wrapper ( new_func , func ) new_func . called = False return new_func
Runs a function once and only once.
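For example (a minimal sketch, assuming the once decorator above is in scope):

    @once
    def init_plugins():
        print("loading plugins")

    init_plugins()  # prints "loading plugins"
    init_plugins()  # no-op; the wrapped function has already run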
38,750
def get_host ( request ) : if settings . USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in request . META ) : host = request . META [ 'HTTP_X_FORWARDED_HOST' ] elif 'HTTP_HOST' in request . META : host = request . META [ 'HTTP_HOST' ] else : host = request . META [ 'SERVER_NAME' ] server_port = str ( request . META [ 'SERVER_PORT' ] ) if server_port != ( request . is_secure ( ) and '443' or '80' ) : host = '%s:%s' % ( host , server_port ) return host
A reimplementation of Django's get_host without the SuspiciousOperation check.
38,751
def install_middleware ( middleware_name , lookup_names = None ) : if lookup_names is None : lookup_names = ( middleware_name , ) middleware_attr = 'MIDDLEWARE' if getattr ( settings , 'MIDDLEWARE' , None ) is not None else 'MIDDLEWARE_CLASSES' middleware = getattr ( settings , middleware_attr , ( ) ) or ( ) if set ( lookup_names ) . isdisjoint ( set ( middleware ) ) : setattr ( settings , middleware_attr , type ( middleware ) ( ( middleware_name , ) ) + middleware )
Install the specified middleware.
38,752
def _fit_and_score ( est , x , y , scorer , train_index , test_index , parameters , fit_params , predict_params ) : X_train , y_train = _safe_split ( est , x , y , train_index ) train_params = fit_params . copy ( ) est . set_params ( ** parameters ) est . fit ( X_train , y_train , ** train_params ) test_predict_params = predict_params . copy ( ) X_test , y_test = _safe_split ( est , x , y , test_index , train_index ) score = scorer ( est , X_test , y_test , ** test_predict_params ) if not isinstance ( score , numbers . Number ) : raise ValueError ( "scoring must return a number, got %s (%s) instead." % ( str ( score ) , type ( score ) ) ) return score
Train survival model on given data and return its score on test data.
38,753
def _interpolate_coefficients ( self , alpha ) : exact = False coef_idx = None for i , val in enumerate ( self . alphas_ ) : if val > alpha : coef_idx = i elif alpha - val < numpy . finfo ( numpy . float ) . eps : coef_idx = i exact = True break if coef_idx is None : coef = self . coef_ [ : , 0 ] elif exact or coef_idx == len ( self . alphas_ ) - 1 : coef = self . coef_ [ : , coef_idx ] else : a1 = self . alphas_ [ coef_idx + 1 ] a2 = self . alphas_ [ coef_idx ] frac = ( alpha - a1 ) / ( a2 - a1 ) coef = frac * self . coef_ [ : , coef_idx ] + ( 1.0 - frac ) * self . coef_ [ : , coef_idx + 1 ] return coef
Interpolate coefficients by calculating the weighted average of coefficient vectors corresponding to neighbors of alpha in the list of alphas constructed during training.
38,754
def predict ( self , X , alpha = None ) : X = check_array ( X ) coef = self . _get_coef ( alpha ) return numpy . dot ( X , coef )
The linear predictor of the model.
38,755
def _create_base_ensemble ( self , out , n_estimators , n_folds ) : ensemble_scores = numpy . empty ( ( n_estimators , n_folds ) ) base_ensemble = numpy . empty_like ( ensemble_scores , dtype = numpy . object ) for model , fold , score , est in out : ensemble_scores [ model , fold ] = score base_ensemble [ model , fold ] = est return ensemble_scores , base_ensemble
For each base estimator, collect models trained on each fold.
38,756
def _create_cv_ensemble ( self , base_ensemble , idx_models_included , model_names = None ) : fitted_models = numpy . empty ( len ( idx_models_included ) , dtype = numpy . object ) for i , idx in enumerate ( idx_models_included ) : model_name = self . base_estimators [ idx ] [ 0 ] if model_names is None else model_names [ idx ] avg_model = EnsembleAverage ( base_ensemble [ idx , : ] , name = model_name ) fitted_models [ i ] = avg_model return fitted_models
For each selected base estimator, average models trained on each fold.
38,757
def _get_base_estimators ( self , X ) : base_estimators = [ ] kernel_cache = { } kernel_fns = { } for i , ( name , estimator ) in enumerate ( self . base_estimators ) : if hasattr ( estimator , 'kernel' ) and callable ( estimator . kernel ) : if not hasattr ( estimator , '_get_kernel' ) : raise ValueError ( 'estimator %s uses a custom kernel function, but does not have a _get_kernel method' % name ) kernel_mat = kernel_fns . get ( estimator . kernel , None ) if kernel_mat is None : kernel_mat = estimator . _get_kernel ( X ) kernel_cache [ i ] = kernel_mat kernel_fns [ estimator . kernel ] = kernel_mat kernel_cache [ i ] = kernel_mat kernel_estimator = clone ( estimator ) kernel_estimator . set_params ( kernel = 'precomputed' ) base_estimators . append ( ( name , kernel_estimator ) ) else : base_estimators . append ( ( name , estimator ) ) return base_estimators , kernel_cache
Takes special care of estimators that use a custom kernel function.
38,758
def _restore_base_estimators ( self , kernel_cache , out , X , cv ) : train_folds = { fold : train_index for fold , ( train_index , _ ) in enumerate ( cv ) } for idx , fold , _ , est in out : if idx in kernel_cache : if not hasattr ( est , 'fit_X_' ) : raise ValueError ( 'estimator %s uses a custom kernel function, ' 'but does not have the attribute `fit_X_` after training' % self . base_estimators [ idx ] [ 0 ] ) est . set_params ( kernel = self . base_estimators [ idx ] [ 1 ] . kernel ) est . fit_X_ = X [ train_folds [ fold ] ] return out
Restore custom kernel functions of estimators for predictions.
38,759
def _fit_and_score_ensemble ( self , X , y , cv , ** fit_params ) : fit_params_steps = self . _split_fit_params ( fit_params ) folds = list ( cv . split ( X , y ) ) base_estimators , kernel_cache = self . _get_base_estimators ( X ) out = Parallel ( n_jobs = self . n_jobs , verbose = self . verbose ) ( delayed ( _fit_and_score_fold ) ( clone ( estimator ) , X if i not in kernel_cache else kernel_cache [ i ] , y , self . scorer , train_index , test_index , fit_params_steps [ name ] , i , fold ) for i , ( name , estimator ) in enumerate ( base_estimators ) for fold , ( train_index , test_index ) in enumerate ( folds ) ) if len ( kernel_cache ) > 0 : out = self . _restore_base_estimators ( kernel_cache , out , X , folds ) return self . _create_base_ensemble ( out , len ( base_estimators ) , len ( folds ) )
Create a cross-validated model by training a model for each fold with the same model parameters.
38,760
def fit ( self , X , y = None , ** fit_params ) : self . _check_params ( ) cv = check_cv ( self . cv , X ) self . _fit ( X , y , cv , ** fit_params ) return self
Fit ensemble of models.
38,761
def writearff ( data , filename , relation_name = None , index = True ) : if isinstance ( filename , str ) : fp = open ( filename , 'w' ) if relation_name is None : relation_name = os . path . basename ( filename ) else : fp = filename if relation_name is None : relation_name = "pandas" try : data = _write_header ( data , fp , relation_name , index ) fp . write ( "\n" ) _write_data ( data , fp ) finally : fp . close ( )
Write ARFF file.
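A usage sketch, assuming the function is importable as sksurv.io.writearff; the frame contents are illustrative:

    import pandas as pd
    from sksurv.io import writearff

    frame = pd.DataFrame({"age": [61.0, 52.0, 70.0],
                          "grade": pd.Categorical(["II", "I", "III"])})
    writearff(frame, "patients.arff", relation_name="patients", index=False)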
38,762
def _write_header ( data , fp , relation_name , index ) : fp . write ( "@relation {0}\n\n" . format ( relation_name ) ) if index : data = data . reset_index ( ) attribute_names = _sanitize_column_names ( data ) for column , series in data . iteritems ( ) : name = attribute_names [ column ] fp . write ( "@attribute {0}\t" . format ( name ) ) if is_categorical_dtype ( series ) or is_object_dtype ( series ) : _write_attribute_categorical ( series , fp ) elif numpy . issubdtype ( series . dtype , numpy . floating ) : fp . write ( "real" ) elif numpy . issubdtype ( series . dtype , numpy . integer ) : fp . write ( "integer" ) elif numpy . issubdtype ( series . dtype , numpy . datetime64 ) : fp . write ( "date 'yyyy-MM-dd HH:mm:ss'" ) else : raise TypeError ( 'unsupported type %s' % series . dtype ) fp . write ( "\n" ) return data
Write header containing attribute names and types.
38,763
def _sanitize_column_names ( data ) : new_names = { } for name in data . columns : new_names [ name ] = _ILLEGAL_CHARACTER_PAT . sub ( "_" , name ) return new_names
Replace illegal characters with underscores.
38,764
def _write_data ( data , fp ) : fp . write ( "@data\n" ) def to_str ( x ) : if pandas . isnull ( x ) : return '?' else : return str ( x ) data = data . applymap ( to_str ) n_rows = data . shape [ 0 ] for i in range ( n_rows ) : str_values = list ( data . iloc [ i , : ] . apply ( _check_str_array ) ) line = "," . join ( str_values ) fp . write ( line ) fp . write ( "\n" )
Write the data section.
38,765
def fit ( self , X , y = None , ** fit_params ) : X = numpy . asarray ( X ) self . _fit_estimators ( X , y , ** fit_params ) Xt = self . _predict_estimators ( X ) self . meta_estimator . fit ( Xt , y ) return self
Fit base estimators, then fit the meta estimator on their predictions.
38,766
def standardize ( table , with_std = True ) : if isinstance ( table , pandas . DataFrame ) : cat_columns = table . select_dtypes ( include = [ 'category' ] ) . columns else : cat_columns = [ ] new_frame = _apply_along_column ( table , standardize_column , with_std = with_std ) for col in cat_columns : new_frame [ col ] = table [ col ] . copy ( ) return new_frame
Perform Z-normalization on each numeric column of the given table.
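A usage sketch, assuming the function is importable as sksurv.column.standardize; categorical columns pass through unchanged:

    import pandas as pd
    from sksurv.column import standardize

    frame = pd.DataFrame({"age": [40.0, 50.0, 60.0],
                          "grade": pd.Categorical(["I", "II", "I"])})
    z = standardize(frame)  # "age" is z-scored; "grade" is copied as-is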
38,767
def encode_categorical ( table , columns = None , ** kwargs ) : if isinstance ( table , pandas . Series ) : if not is_categorical_dtype ( table . dtype ) and not table . dtype . char == "O" : raise TypeError ( "series must be of categorical dtype, but was {}" . format ( table . dtype ) ) return _encode_categorical_series ( table , ** kwargs ) def _is_categorical_or_object ( series ) : return is_categorical_dtype ( series . dtype ) or series . dtype . char == "O" if columns is None : columns_to_encode = { nam for nam , s in table . iteritems ( ) if _is_categorical_or_object ( s ) } else : columns_to_encode = set ( columns ) items = [ ] for name , series in table . iteritems ( ) : if name in columns_to_encode : series = _encode_categorical_series ( series , ** kwargs ) if series is None : continue items . append ( series ) new_table = pandas . concat ( items , axis = 1 , copy = False ) return new_table
Encode categorical columns with M categories into M-1 columns according to the one-hot scheme.
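For instance (a sketch assuming sksurv.column.encode_categorical; the derived column names are indicative only):

    import pandas as pd
    from sksurv.column import encode_categorical

    frame = pd.DataFrame({"grade": pd.Categorical(["I", "II", "III", "I"])})
    encoded = encode_categorical(frame)
    # 3 categories -> 2 indicator columns, e.g. "grade=II" and "grade=III"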
38,768
def categorical_to_numeric ( table ) : def transform ( column ) : if is_categorical_dtype ( column . dtype ) : return column . cat . codes if column . dtype . char == "O" : try : nc = column . astype ( numpy . int64 ) except ValueError : classes = column . dropna ( ) . unique ( ) classes . sort ( kind = "mergesort" ) nc = column . replace ( classes , numpy . arange ( classes . shape [ 0 ] ) ) return nc elif column . dtype == bool : return column . astype ( numpy . int64 ) return column if isinstance ( table , pandas . Series ) : return pandas . Series ( transform ( table ) , name = table . name , index = table . index ) else : if _pandas_version_under0p23 : return table . apply ( transform , axis = 0 , reduce = False ) else : return table . apply ( transform , axis = 0 , result_type = 'reduce' )
Encode categorical columns to numeric by converting each category to an integer value.
38,769
def check_y_survival ( y_or_event , * args , allow_all_censored = False ) : if len ( args ) == 0 : y = y_or_event if not isinstance ( y , numpy . ndarray ) or y . dtype . fields is None or len ( y . dtype . fields ) != 2 : raise ValueError ( 'y must be a structured array with the first field' ' being a binary class event indicator and the second field' ' the time of the event/censoring' ) event_field , time_field = y . dtype . names y_event = y [ event_field ] time_args = ( y [ time_field ] , ) else : y_event = numpy . asanyarray ( y_or_event ) time_args = args event = check_array ( y_event , ensure_2d = False ) if not numpy . issubdtype ( event . dtype , numpy . bool_ ) : raise ValueError ( 'elements of event indicator must be boolean, but found {0}' . format ( event . dtype ) ) if not ( allow_all_censored or numpy . any ( event ) ) : raise ValueError ( 'all samples are censored' ) return_val = [ event ] for i , yt in enumerate ( time_args ) : if yt is None : return_val . append ( yt ) continue yt = check_array ( yt , ensure_2d = False ) if not numpy . issubdtype ( yt . dtype , numpy . number ) : raise ValueError ( 'time must be numeric, but found {} for argument {}' . format ( yt . dtype , i + 2 ) ) return_val . append ( yt ) return tuple ( return_val )
Check that array correctly represents an outcome for survival analysis.
38,770
def check_arrays_survival ( X , y , ** kwargs ) : event , time = check_y_survival ( y ) kwargs . setdefault ( "dtype" , numpy . float64 ) X = check_array ( X , ensure_min_samples = 2 , ** kwargs ) check_consistent_length ( X , event , time ) return X , event , time
Check that all arrays have consistent first dimensions.
38,771
def from_arrays ( event , time , name_event = None , name_time = None ) : name_event = name_event or 'event' name_time = name_time or 'time' if name_time == name_event : raise ValueError ( 'name_time must be different from name_event' ) time = numpy . asanyarray ( time , dtype = numpy . float_ ) y = numpy . empty ( time . shape [ 0 ] , dtype = [ ( name_event , numpy . bool_ ) , ( name_time , numpy . float_ ) ] ) y [ name_time ] = time event = numpy . asanyarray ( event ) check_consistent_length ( time , event ) if numpy . issubdtype ( event . dtype , numpy . bool_ ) : y [ name_event ] = event else : events = numpy . unique ( event ) events . sort ( ) if len ( events ) != 2 : raise ValueError ( 'event indicator must be binary' ) if numpy . all ( events == numpy . array ( [ 0 , 1 ] , dtype = events . dtype ) ) : y [ name_event ] = event . astype ( numpy . bool_ ) else : raise ValueError ( 'non-boolean event indicator must contain 0 and 1 only' ) return y
Create structured array.
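A short sketch, assuming the class is exposed as sksurv.util.Surv:

    import numpy as np
    from sksurv.util import Surv

    y = Surv.from_arrays(event=np.array([True, False, True]),
                         time=np.array([10.0, 12.0, 2.5]))
    print(y.dtype.names)  # ('event', 'time')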
38,772
def from_dataframe ( event , time , data ) : if not isinstance ( data , pandas . DataFrame ) : raise TypeError ( "expected pandas.DataFrame, but got {!r}" . format ( type ( data ) ) ) return Surv . from_arrays ( data . loc [ : , event ] . values , data . loc [ : , time ] . values , name_event = str ( event ) , name_time = str ( time ) )
Create structured array from data frame.
38,773
def update_terminal_regions ( self , tree , X , y , residual , y_pred , sample_weight , sample_mask , learning_rate = 1.0 , k = 0 ) : y_pred [ : , k ] += learning_rate * tree . predict ( X ) . ravel ( )
Least squares does not need to update terminal regions.
38,774
def build_from_c_and_cpp_files ( extensions ) : for extension in extensions : sources = [ ] for sfile in extension . sources : path , ext = os . path . splitext ( sfile ) if ext in ( '.pyx' , '.py' ) : if extension . language == 'c++' : ext = '.cpp' else : ext = '.c' sfile = path + ext sources . append ( sfile ) extension . sources = sources
Modify the extensions to build from the .c and .cpp files. This is useful for releases; this way, Cython is not required to run python setup.py install.
38,775
def _count_values ( self ) : indices = { yi : [ i ] for i , yi in enumerate ( self . y ) if self . status [ i ] } return indices
Return dict mapping relevance level to sample index.
38,776
def _create_optimizer ( self , X , y , status ) : if self . optimizer is None : self . optimizer = 'avltree' times , ranks = y if self . optimizer == 'simple' : optimizer = SimpleOptimizer ( X , status , self . alpha , self . rank_ratio , timeit = self . timeit ) elif self . optimizer == 'PRSVM' : optimizer = PRSVMOptimizer ( X , status , self . alpha , self . rank_ratio , timeit = self . timeit ) elif self . optimizer == 'direct-count' : optimizer = LargeScaleOptimizer ( self . alpha , self . rank_ratio , self . fit_intercept , SurvivalCounter ( X , ranks , status , len ( ranks ) , times ) , timeit = self . timeit ) elif self . optimizer == 'rbtree' : optimizer = LargeScaleOptimizer ( self . alpha , self . rank_ratio , self . fit_intercept , OrderStatisticTreeSurvivalCounter ( X , ranks , status , RBTree , times ) , timeit = self . timeit ) elif self . optimizer == 'avltree' : optimizer = LargeScaleOptimizer ( self . alpha , self . rank_ratio , self . fit_intercept , OrderStatisticTreeSurvivalCounter ( X , ranks , status , AVLTree , times ) , timeit = self . timeit ) else : raise ValueError ( 'unknown optimizer: {0}' . format ( self . optimizer ) ) return optimizer
Samples are ordered by relevance.
38,777
def _argsort_and_resolve_ties ( time , random_state ) : n_samples = len ( time ) order = numpy . argsort ( time , kind = "mergesort" ) i = 0 while i < n_samples - 1 : inext = i + 1 while inext < n_samples and time [ order [ i ] ] == time [ order [ inext ] ] : inext += 1 if i + 1 != inext : random_state . shuffle ( order [ i : inext ] ) i = inext return order
Like numpy.argsort, but resolves ties uniformly at random.
38,778
def fit ( self , X , y ) : X , event , time = check_arrays_survival ( X , y ) weights = ipc_weights ( event , time ) super ( ) . fit ( X , numpy . log ( time ) , sample_weight = weights ) return self
Build an accelerated failure time model.
38,779
def fit ( self , linear_predictor , event , time ) : risk_score = numpy . exp ( linear_predictor ) order = numpy . argsort ( time , kind = "mergesort" ) risk_score = risk_score [ order ] uniq_times , n_events , n_at_risk = _compute_counts ( event , time , order ) divisor = numpy . empty ( n_at_risk . shape , dtype = numpy . float_ ) value = numpy . sum ( risk_score ) divisor [ 0 ] = value k = 0 for i in range ( 1 , len ( n_at_risk ) ) : d = n_at_risk [ i - 1 ] - n_at_risk [ i ] value -= risk_score [ k : ( k + d ) ] . sum ( ) k += d divisor [ i ] = value assert k == n_at_risk [ 0 ] - n_at_risk [ - 1 ] y = numpy . cumsum ( n_events / divisor ) self . cum_baseline_hazard_ = StepFunction ( uniq_times , y ) self . baseline_survival_ = StepFunction ( self . cum_baseline_hazard_ . x , numpy . exp ( - self . cum_baseline_hazard_ . y ) ) return self
Compute baseline cumulative hazard function.
38,780
def nlog_likelihood ( self , w ) : time = self . time n_samples = self . x . shape [ 0 ] xw = numpy . dot ( self . x , w ) loss = 0 risk_set = 0 k = 0 for i in range ( n_samples ) : ti = time [ i ] while k < n_samples and ti == time [ k ] : risk_set += numpy . exp ( xw [ k ] ) k += 1 if self . event [ i ] : loss -= ( xw [ i ] - numpy . log ( risk_set ) ) / n_samples return loss + self . alpha * squared_norm ( w ) / ( 2. * n_samples )
Compute negative partial log-likelihood.
38,781
def update ( self , w , offset = 0 ) : time = self . time x = self . x exp_xw = numpy . exp ( offset + numpy . dot ( x , w ) ) n_samples , n_features = x . shape gradient = numpy . zeros ( ( 1 , n_features ) , dtype = float ) hessian = numpy . zeros ( ( n_features , n_features ) , dtype = float ) inv_n_samples = 1. / n_samples risk_set = 0 risk_set_x = 0 risk_set_xx = 0 k = 0 for i in range ( n_samples ) : ti = time [ i ] while k < n_samples and ti == time [ k ] : risk_set += exp_xw [ k ] xk = x [ k : k + 1 ] risk_set_x += exp_xw [ k ] * xk xx = numpy . dot ( xk . T , xk ) risk_set_xx += exp_xw [ k ] * xx k += 1 if self . event [ i ] : gradient -= ( x [ i : i + 1 ] - risk_set_x / risk_set ) * inv_n_samples a = risk_set_xx / risk_set z = risk_set_x / risk_set b = numpy . dot ( z . T , z ) hessian += ( a - b ) * inv_n_samples if self . alpha > 0 : gradient += self . alpha * inv_n_samples * w diag_idx = numpy . diag_indices ( n_features ) hessian [ diag_idx ] += self . alpha * inv_n_samples self . gradient = gradient . ravel ( ) self . hessian = hessian
Compute gradient and Hessian matrix with respect to w.
38,782
def fit ( self , X , y ) : X , event , time = check_arrays_survival ( X , y ) if self . alpha < 0 : raise ValueError ( "alpha must be positive, but was %r" % self . alpha ) optimizer = CoxPHOptimizer ( X , event , time , self . alpha ) verbose_reporter = VerboseReporter ( self . verbose ) w = numpy . zeros ( X . shape [ 1 ] ) w_prev = w i = 0 loss = float ( 'inf' ) while True : if i >= self . n_iter : verbose_reporter . end_max_iter ( i ) warnings . warn ( ( 'Optimization did not converge: Maximum number of iterations has been exceeded.' ) , stacklevel = 2 , category = ConvergenceWarning ) break optimizer . update ( w ) delta = solve ( optimizer . hessian , optimizer . gradient , overwrite_a = False , overwrite_b = False , check_finite = False ) if not numpy . all ( numpy . isfinite ( delta ) ) : raise ValueError ( "search direction contains NaN or infinite values" ) w_new = w - delta loss_new = optimizer . nlog_likelihood ( w_new ) verbose_reporter . update ( i , delta , loss_new ) if loss_new > loss : w = ( w_prev + w ) / 2 loss = optimizer . nlog_likelihood ( w ) verbose_reporter . step_halving ( i , loss ) i += 1 continue w_prev = w w = w_new res = numpy . abs ( 1 - ( loss_new / loss ) ) if res < self . tol : verbose_reporter . end_converged ( i ) break loss = loss_new i += 1 self . coef_ = w self . _baseline_model . fit ( numpy . dot ( X , self . coef_ ) , event , time ) return self
Minimize negative partial log-likelihood for provided data.
38,783
def _compute_counts ( event , time , order = None ) : n_samples = event . shape [ 0 ] if order is None : order = numpy . argsort ( time , kind = "mergesort" ) uniq_times = numpy . empty ( n_samples , dtype = time . dtype ) uniq_events = numpy . empty ( n_samples , dtype = numpy . int_ ) uniq_counts = numpy . empty ( n_samples , dtype = numpy . int_ ) i = 0 prev_val = time [ order [ 0 ] ] j = 0 while True : count_event = 0 count = 0 while i < n_samples and prev_val == time [ order [ i ] ] : if event [ order [ i ] ] : count_event += 1 count += 1 i += 1 uniq_times [ j ] = prev_val uniq_events [ j ] = count_event uniq_counts [ j ] = count j += 1 if i == n_samples : break prev_val = time [ order [ i ] ] times = numpy . resize ( uniq_times , j ) n_events = numpy . resize ( uniq_events , j ) total_count = numpy . resize ( uniq_counts , j ) total_count = numpy . concatenate ( ( [ 0 ] , total_count ) ) n_at_risk = n_samples - numpy . cumsum ( total_count ) return times , n_events , n_at_risk [ : - 1 ]
Count right-censored and uncensored samples at each unique time point.
38,784
def _compute_counts_truncated ( event , time_enter , time_exit ) : if ( time_enter > time_exit ) . any ( ) : raise ValueError ( "exit time must be larger start time for all samples" ) n_samples = event . shape [ 0 ] uniq_times = numpy . sort ( numpy . unique ( numpy . concatenate ( ( time_enter , time_exit ) ) ) , kind = "mergesort" ) total_counts = numpy . empty ( len ( uniq_times ) , dtype = numpy . int_ ) event_counts = numpy . empty ( len ( uniq_times ) , dtype = numpy . int_ ) order_enter = numpy . argsort ( time_enter , kind = "mergesort" ) order_exit = numpy . argsort ( time_exit , kind = "mergesort" ) s_time_enter = time_enter [ order_enter ] s_time_exit = time_exit [ order_exit ] t0 = uniq_times [ 0 ] idx_enter = numpy . searchsorted ( s_time_enter , t0 , side = "right" ) idx_exit = numpy . searchsorted ( s_time_exit , t0 , side = "left" ) total_counts [ 0 ] = idx_enter event_counts [ 0 ] = 0 for i in range ( 1 , len ( uniq_times ) ) : ti = uniq_times [ i ] while idx_enter < n_samples and s_time_enter [ idx_enter ] <= ti : idx_enter += 1 while idx_exit < n_samples and s_time_exit [ idx_exit ] < ti : idx_exit += 1 risk_set = numpy . setdiff1d ( order_enter [ : idx_enter ] , order_exit [ : idx_exit ] , assume_unique = True ) total_counts [ i ] = len ( risk_set ) count_event = 0 k = idx_exit while k < n_samples and s_time_exit [ k ] == ti : if event [ order_exit [ k ] ] : count_event += 1 k += 1 event_counts [ i ] = count_event return uniq_times , event_counts , total_counts
Compute counts for left-truncated and right-censored survival data.
38,785
def kaplan_meier_estimator ( event , time_exit , time_enter = None , time_min = None ) : event , time_enter , time_exit = check_y_survival ( event , time_enter , time_exit , allow_all_censored = True ) check_consistent_length ( event , time_enter , time_exit ) if time_enter is None : uniq_times , n_events , n_at_risk = _compute_counts ( event , time_exit ) else : uniq_times , n_events , n_at_risk = _compute_counts_truncated ( event , time_enter , time_exit ) values = 1 - n_events / n_at_risk if time_min is not None : mask = uniq_times >= time_min uniq_times = numpy . compress ( mask , uniq_times ) values = numpy . compress ( mask , values ) y = numpy . cumprod ( values ) return uniq_times , y
Kaplan-Meier estimator of survival function.
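A usage sketch, assuming the function lives in sksurv.nonparametric; the data are illustrative:

    import numpy as np
    from sksurv.nonparametric import kaplan_meier_estimator

    event = np.array([True, False, True, True])
    time = np.array([3.0, 5.0, 7.0, 11.0])
    x, y = kaplan_meier_estimator(event, time)
    # x: unique time points; y: estimated survival probability S(t) at each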
38,786
def nelson_aalen_estimator ( event , time ) : event , time = check_y_survival ( event , time ) check_consistent_length ( event , time ) uniq_times , n_events , n_at_risk = _compute_counts ( event , time ) y = numpy . cumsum ( n_events / n_at_risk ) return uniq_times , y
Nelson-Aalen estimator of cumulative hazard function.
38,787
def ipc_weights ( event , time ) : if event . all ( ) : return numpy . ones ( time . shape [ 0 ] ) unique_time , p = kaplan_meier_estimator ( ~ event , time ) idx = numpy . searchsorted ( unique_time , time [ event ] ) Ghat = p [ idx ] assert ( Ghat > 0 ) . all ( ) weights = numpy . zeros ( time . shape [ 0 ] ) weights [ event ] = 1.0 / Ghat return weights
Compute inverse probability of censoring weights.
38,788
def fit ( self , y ) : event , time = check_y_survival ( y , allow_all_censored = True ) unique_time , prob = kaplan_meier_estimator ( event , time ) self . unique_time_ = numpy . concatenate ( ( [ - numpy . infty ] , unique_time ) ) self . prob_ = numpy . concatenate ( ( [ 1. ] , prob ) ) return self
Estimate survival distribution from training data.
38,789
def predict_proba ( self , time ) : check_is_fitted ( self , "unique_time_" ) time = check_array ( time , ensure_2d = False ) extends = time > self . unique_time_ [ - 1 ] if self . prob_ [ - 1 ] > 0 and extends . any ( ) : raise ValueError ( "time must be smaller than largest " "observed time point: {}" . format ( self . unique_time_ [ - 1 ] ) ) Shat = numpy . empty ( time . shape , dtype = float ) Shat [ extends ] = 0.0 valid = ~ extends time = time [ valid ] idx = numpy . searchsorted ( self . unique_time_ , time ) eps = numpy . finfo ( self . unique_time_ . dtype ) . eps exact = numpy . absolute ( self . unique_time_ [ idx ] - time ) < eps idx [ ~ exact ] -= 1 Shat [ valid ] = self . prob_ [ idx ] return Shat
Return probability of an event after given time point.
38,790
def fit ( self , y ) : event , time = check_y_survival ( y ) if event . all ( ) : self . unique_time_ = numpy . unique ( time ) self . prob_ = numpy . ones ( self . unique_time_ . shape [ 0 ] ) else : unique_time , prob = kaplan_meier_estimator ( ~ event , time ) self . unique_time_ = numpy . concatenate ( ( [ - numpy . infty ] , unique_time ) ) self . prob_ = numpy . concatenate ( ( [ 1. ] , prob ) ) return self
Estimate censoring distribution from training data.
38,791
def predict_ipcw ( self , y ) : event , time = check_y_survival ( y ) Ghat = self . predict_proba ( time [ event ] ) if ( Ghat == 0.0 ) . any ( ) : raise ValueError ( "censoring survival function is zero at one or more time points" ) weights = numpy . zeros ( time . shape [ 0 ] ) weights [ event ] = 1.0 / Ghat return weights
Return inverse probability of censoring weights at given time points.
38,792
def concordance_index_censored ( event_indicator , event_time , estimate , tied_tol = 1e-8 ) : event_indicator , event_time , estimate = _check_inputs ( event_indicator , event_time , estimate ) w = numpy . ones_like ( estimate ) return _estimate_concordance_index ( event_indicator , event_time , estimate , w , tied_tol )
Concordance index for right-censored data.
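A usage sketch, assuming the function is exposed as sksurv.metrics.concordance_index_censored; it returns the index along with the comparable-pair counts:

    import numpy as np
    from sksurv.metrics import concordance_index_censored

    event = np.array([True, False, True, True])
    time = np.array([3.0, 5.0, 7.0, 11.0])
    risk = np.array([0.9, 0.3, 0.5, 0.1])  # higher = higher predicted risk
    cindex, concordant, discordant, tied_risk, tied_time = \
        concordance_index_censored(event, time, risk)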
38,793
def concordance_index_ipcw ( survival_train , survival_test , estimate , tau = None , tied_tol = 1e-8 ) : test_event , test_time = check_y_survival ( survival_test ) if tau is not None : survival_test = survival_test [ test_time < tau ] estimate = check_array ( estimate , ensure_2d = False ) check_consistent_length ( test_event , test_time , estimate ) cens = CensoringDistributionEstimator ( ) cens . fit ( survival_train ) ipcw = cens . predict_ipcw ( survival_test ) w = numpy . square ( ipcw ) return _estimate_concordance_index ( test_event , test_time , estimate , w , tied_tol )
Concordance index for right-censored data based on inverse probability of censoring weights.
38,794
def _nominal_kernel ( x , y , out ) : for i in range ( x . shape [ 0 ] ) : for j in range ( y . shape [ 0 ] ) : out [ i , j ] += ( x [ i , : ] == y [ j , : ] ) . sum ( ) return out
Number of features that match exactly.
38,795
def _get_continuous_and_ordinal_array ( x ) : nominal_columns = x . select_dtypes ( include = [ 'object' , 'category' ] ) . columns ordinal_columns = pandas . Index ( [ v for v in nominal_columns if x [ v ] . cat . ordered ] ) continuous_columns = x . select_dtypes ( include = [ numpy . number ] ) . columns x_num = x . loc [ : , continuous_columns ] . astype ( numpy . float64 ) . values if len ( ordinal_columns ) > 0 : x = _ordinal_as_numeric ( x , ordinal_columns ) nominal_columns = nominal_columns . difference ( ordinal_columns ) x_out = numpy . column_stack ( ( x_num , x ) ) else : x_out = x_num return x_out , nominal_columns
Convert array from continuous and ordered categorical columns.
38,796
def clinical_kernel ( x , y = None ) : if y is not None : if x . shape [ 1 ] != y . shape [ 1 ] : raise ValueError ( 'x and y have different number of features' ) if not x . columns . equals ( y . columns ) : raise ValueError ( 'columns do not match' ) else : y = x mat = numpy . zeros ( ( x . shape [ 0 ] , y . shape [ 0 ] ) , dtype = float ) x_numeric , nominal_columns = _get_continuous_and_ordinal_array ( x ) if id ( x ) != id ( y ) : y_numeric , _ = _get_continuous_and_ordinal_array ( y ) else : y_numeric = x_numeric continuous_ordinal_kernel ( x_numeric , y_numeric , mat ) _nominal_kernel ( x . loc [ : , nominal_columns ] . values , y . loc [ : , nominal_columns ] . values , mat ) mat /= x . shape [ 1 ] return mat
Computes clinical kernel.
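A sketch assuming sksurv.kernels.clinical_kernel; the input is a pandas DataFrame mixing numeric and categorical columns:

    import pandas as pd
    from sksurv.kernels import clinical_kernel

    frame = pd.DataFrame({"age": [40.0, 50.0, 60.0],
                          "grade": pd.Categorical(["I", "II", "I"])})
    K = clinical_kernel(frame)  # 3x3 kernel matrix with entries in [0, 1]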
38,797
def _prepare_by_column_dtype ( self , X ) : if not isinstance ( X , pandas . DataFrame ) : raise TypeError ( 'X must be a pandas DataFrame' ) numeric_columns = [ ] nominal_columns = [ ] numeric_ranges = [ ] fit_data = numpy . empty_like ( X ) for i , dt in enumerate ( X . dtypes ) : col = X . iloc [ : , i ] if is_categorical_dtype ( dt ) : if col . cat . ordered : numeric_ranges . append ( col . cat . codes . max ( ) - col . cat . codes . min ( ) ) numeric_columns . append ( i ) else : nominal_columns . append ( i ) col = col . cat . codes elif is_numeric_dtype ( dt ) : numeric_ranges . append ( col . max ( ) - col . min ( ) ) numeric_columns . append ( i ) else : raise TypeError ( 'unsupported dtype: %r' % dt ) fit_data [ : , i ] = col . values self . _numeric_columns = numpy . asarray ( numeric_columns ) self . _nominal_columns = numpy . asarray ( nominal_columns ) self . _numeric_ranges = numpy . asarray ( numeric_ranges , dtype = float ) self . X_fit_ = fit_data
Get distance functions for each column's dtype.
38,798
def fit ( self , X , y = None , ** kwargs ) : if X . ndim != 2 : raise ValueError ( "expected 2d array, but got %d" % X . ndim ) if self . fit_once : self . X_fit_ = X else : self . _prepare_by_column_dtype ( X ) return self
Determine transformation parameters from data in X .
38,799
def transform ( self , Y ) : check_is_fitted ( self , 'X_fit_' ) n_samples_x , n_features = self . X_fit_ . shape Y = numpy . asarray ( Y ) if Y . shape [ 1 ] != n_features : raise ValueError ( 'expected array with %d features, but got %d' % ( n_features , Y . shape [ 1 ] ) ) n_samples_y = Y . shape [ 0 ] mat = numpy . zeros ( ( n_samples_y , n_samples_x ) , dtype = float ) continuous_ordinal_kernel_with_ranges ( Y [ : , self . _numeric_columns ] . astype ( numpy . float64 ) , self . X_fit_ [ : , self . _numeric_columns ] . astype ( numpy . float64 ) , self . _numeric_ranges , mat ) if len ( self . _nominal_columns ) > 0 : _nominal_kernel ( Y [ : , self . _nominal_columns ] , self . X_fit_ [ : , self . _nominal_columns ] , mat ) mat /= n_features return mat
Compute all pairwise distances between self.X_fit_ and Y.