INSTRUCTION | RESPONSE
|---|---|
Starts the task thread.
|
def start(self):
"""
Starts the task thread.
"""
self._lock.acquire()
try:
if not self.is_alive():
self._thread = threading.Thread(target=self._target, name="raven.AsyncWorker")
self._thread.setDaemon(True)
self._thread.start()
self._thread_for_pid = os.getpid()
finally:
self._lock.release()
atexit.register(self.main_thread_terminated)
|
Primary function which handles recursively transforming
values via their serializers
|
def transform(self, value, **kwargs):
"""
Primary function which handles recursively transforming
values via their serializers
"""
if value is None:
return None
objid = id(value)
if objid in self.context:
return '<...>'
self.context.add(objid)
try:
for serializer in self.serializers:
try:
if serializer.can(value):
return serializer.serialize(value, **kwargs)
except Exception as e:
logger.exception(e)
return text_type(type(value))
# if all else fails, let's use the repr of the object
try:
return repr(value)
except Exception as e:
logger.exception(e)
# It's a common case that a model's __unicode__ definition
# may try to query the database which, if it was not
# cleaned up correctly, would hit a 'transaction aborted'
# exception
return text_type(type(value))
finally:
self.context.remove(objid)
|
Takes the same arguments as the corresponding method in :py:class:`Client`
and extracts the keyword argument ``callback``, which will be called on
asynchronous sending of the request.
:return: a tuple of the 32-character event ID and the future returned by ``send``
|
def capture(self, *args, **kwargs):
"""
Takes the same arguments as the corresponding method in :py:class:`Client`
and extracts the keyword argument ``callback``, which will be called on
asynchronous sending of the request.
:return: a tuple of the 32-character event ID and the future returned by ``send``
"""
if not self.is_enabled():
return
data = self.build_msg(*args, **kwargs)
future = self.send(callback=kwargs.get('callback', None), **data)
return (data['event_id'], future)
|
Serializes the message and passes the payload onto ``send_encoded``.
|
def send(self, auth_header=None, callback=None, **data):
"""
Serializes the message and passes the payload onto ``send_encoded``.
"""
message = self.encode(data)
return self.send_encoded(message, auth_header=auth_header, callback=callback)
|
Initialise a Tornado AsyncHTTPClient and send the request to the Sentry
server. If the callback is a callable, it will be called with the
response.
|
def _send_remote(self, url, data, headers=None, callback=None):
"""
Initialise a Tornado AsyncHTTPClient and send the request to the Sentry
server. If the callback is a callable, it will be called with the
response.
"""
if headers is None:
headers = {}
return AsyncHTTPClient().fetch(
url, callback, method="POST", body=data, headers=headers,
validate_cert=self.validate_cert
)
|
Extracts the data required for 'sentry.interfaces.Http' from the
current request being handled by the request handler.
:returns: A dictionary.
|
def get_sentry_data_from_request(self):
"""
Extracts the data required for 'sentry.interfaces.Http' from the
current request being handled by the request handler.
:returns: A dictionary.
"""
return {
'request': {
'url': self.request.full_url(),
'method': self.request.method,
'data': self.request.body,
'query_string': self.request.query,
'cookies': self.request.headers.get('Cookie', None),
'headers': dict(self.request.headers),
}
}
|
Override implementation to report all exceptions to sentry.
log_exception() is added in Tornado v3.1.
|
def log_exception(self, typ, value, tb):
"""Override implementation to report all exceptions to sentry.
log_exception() is added in Tornado v3.1.
"""
rv = super(SentryMixin, self).log_exception(typ, value, tb)
# Do not capture tornado.web.HTTPErrors outside the 500 range.
if isinstance(value, WebHTTPError) and (value.status_code < 500 or value.status_code > 599):
return rv
self.captureException(exc_info=(typ, value, tb))
return rv
|
Override implementation to report all exceptions to sentry, even
after self.flush() or self.finish() is called, for pre-v3.1 Tornado.
|
def send_error(self, status_code=500, **kwargs):
"""Override implementation to report all exceptions to sentry, even
after self.flush() or self.finish() is called, for pre-v3.1 Tornado.
"""
if hasattr(super(SentryMixin, self), 'log_exception'):
return super(SentryMixin, self).send_error(status_code, **kwargs)
else:
rv = super(SentryMixin, self).send_error(status_code, **kwargs)
if 500 <= status_code <= 599:
self.captureException(exc_info=kwargs.get('exc_info'))
return rv
|
Given ``value``, recurse (using the parent serializer) to handle
coercing of newly defined values.
|
def recurse(self, value, max_depth=6, _depth=0, **kwargs):
"""
Given ``value``, recurse (using the parent serializer) to handle
coercing of newly defined values.
"""
string_max_length = kwargs.get('string_max_length', None)
_depth += 1
if _depth >= max_depth:
try:
value = text_type(repr(value))[:string_max_length]
except Exception as e:
import traceback
traceback.print_exc()
self.manager.logger.exception(e)
return text_type(type(value))
return self.manager.transform(value, max_depth=max_depth,
_depth=_depth, **kwargs)
|
Returns a public DSN which is consumable by raven-js
>>> # Return scheme-less DSN
>>> print client.get_public_dsn()
>>> # Specify a scheme to use (http or https)
>>> print client.get_public_dsn('https')
|
def get_public_dsn(self, scheme=None):
"""
Returns a public DSN which is consumable by raven-js
>>> # Return scheme-less DSN
>>> print client.get_public_dsn()
>>> # Specify a scheme to use (http or https)
>>> print client.get_public_dsn('https')
"""
if self.is_enabled():
url = self.remote.get_public_dsn()
if scheme:
return '%s:%s' % (scheme, url)
return url
|
Captures, processes and serializes an event into a dict object.
The result of ``build_msg`` should be a standardized dict, with
all default values available.
|
def build_msg(self, event_type, data=None, date=None,
time_spent=None, extra=None, stack=None, public_key=None,
tags=None, fingerprint=None, **kwargs):
"""
Captures, processes and serializes an event into a dict object.
The result of ``build_msg`` should be a standardized dict, with
all default values available.
"""
# create ID client-side so that it can be passed to application
event_id = uuid.uuid4().hex
data = merge_dicts(self.context.data, data)
data.setdefault('tags', {})
data.setdefault('extra', {})
if '.' not in event_type:
# Assume it's a builtin
event_type = 'raven.events.%s' % event_type
handler = self.get_handler(event_type)
result = handler.capture(**kwargs)
# data (explicit) culprit takes over auto event detection
culprit = result.pop('culprit', None)
if data.get('culprit'):
culprit = data['culprit']
for k, v in iteritems(result):
if k not in data:
data[k] = v
# auto_log_stacks only applies to events that are not exceptions
# due to confusion about which stack is which and the automatic
# application of stacktrace to exception objects by Sentry
if stack is None and 'exception' not in data:
stack = self.auto_log_stacks
if stack and 'stacktrace' not in data:
if stack is True:
frames = iter_stack_frames()
else:
frames = stack
stack_info = get_stack_info(
frames,
transformer=self.transform,
capture_locals=self.capture_locals,
)
data.update({
'stacktrace': stack_info,
})
if self.include_paths:
for frame in self._iter_frames(data):
if frame.get('in_app') is not None:
continue
path = frame.get('module')
if not path:
continue
if path.startswith('raven.'):
frame['in_app'] = False
else:
frame['in_app'] = (
any(path.startswith(x) for x in self.include_paths)
and not any(path.startswith(x) for x in self.exclude_paths)
)
transaction = None
if not culprit:
transaction = self.transaction.peek()
if not data.get('level'):
data['level'] = kwargs.get('level') or logging.ERROR
if not data.get('server_name'):
data['server_name'] = self.name
if not data.get('modules'):
data['modules'] = self.get_module_versions()
if self.release is not None:
data['release'] = self.release
if self.environment is not None:
data['environment'] = self.environment
data['tags'] = merge_dicts(self.tags, data['tags'], tags)
data['extra'] = merge_dicts(self.extra, data['extra'], extra)
# Legacy support for site attribute
site = data.pop('site', None) or self.site
if site:
data['tags'].setdefault('site', site)
if transaction:
data['transaction'] = transaction
elif culprit:
data['culprit'] = culprit
if fingerprint:
data['fingerprint'] = fingerprint
# Run the data through processors
for processor in self.get_processors():
data.update(processor.process(data))
if 'message' not in data:
data['message'] = kwargs.get('message', handler.to_string(data))
# tags should only be key=>u'value'
for key, value in iteritems(data['tags']):
data['tags'][key] = to_unicode(value)
# extra data can be any arbitrary value
for k, v in iteritems(data['extra']):
data['extra'][k] = self.transform(v)
# It's important that the date is added **after** we serialize
data.setdefault('project', self.remote.project)
data.setdefault('timestamp', date or datetime.utcnow())
data.setdefault('time_spent', time_spent)
data.setdefault('event_id', event_id)
data.setdefault('platform', PLATFORM_NAME)
data.setdefault('sdk', SDK_VALUE)
data.setdefault('repos', self.repos)
# insert breadcrumbs
if self.enable_breadcrumbs:
crumbs = self.context.breadcrumbs.get_buffer()
if crumbs:
# Make sure we send the crumbs here as "values" as we use the
# raven client internally in sentry and the alternative
# submission option of a list here is not supported by the
# internal sender.
data.setdefault('breadcrumbs', {
'values': crumbs
})
return data
|
Captures and processes an event and pipes it off to SentryClient.send.
To use structured data (interfaces) with capture:
>>> capture('raven.events.Message', message='foo', data={
>>> 'request': {
>>> 'url': '...',
>>> 'data': {},
>>> 'query_string': '...',
>>> 'method': 'POST',
>>> },
>>> 'logger': 'logger.name',
>>> }, extra={
>>> 'key': 'value',
>>> })
The finalized ``data`` structure contains the following (some optional)
builtin values:
>>> {
>>> # the culprit and version information
>>> 'culprit': 'full.module.name', # or /arbitrary/path
>>>
>>> # all detectable installed modules
>>> 'modules': {
>>> 'full.module.name': 'version string',
>>> },
>>>
>>> # arbitrary data provided by user
>>> 'extra': {
>>> 'key': 'value',
>>> }
>>> }
:param event_type: the module path to the Event class. Builtins can use
shorthand class notation and exclude the full module
path.
:param data: the data base, useful for specifying structured data
interfaces. Any key which contains a '.' will be
assumed to be a data interface.
:param date: the datetime of this event
:param time_spent: an integer value representing the duration of the
event (in milliseconds)
:param extra: a dictionary of additional standard metadata
:param stack: a stacktrace for the event
:param tags: dict of extra tags
:param sample_rate: a float in the range [0, 1] to sample this message
:return: a 32-length string identifying this event
|
def capture(self, event_type, data=None, date=None, time_spent=None,
extra=None, stack=None, tags=None, sample_rate=None,
**kwargs):
"""
Captures and processes an event and pipes it off to SentryClient.send.
To use structured data (interfaces) with capture:
>>> capture('raven.events.Message', message='foo', data={
>>> 'request': {
>>> 'url': '...',
>>> 'data': {},
>>> 'query_string': '...',
>>> 'method': 'POST',
>>> },
>>> 'logger': 'logger.name',
>>> }, extra={
>>> 'key': 'value',
>>> })
The finalized ``data`` structure contains the following (some optional)
builtin values:
>>> {
>>> # the culprit and version information
>>> 'culprit': 'full.module.name', # or /arbitrary/path
>>>
>>> # all detectable installed modules
>>> 'modules': {
>>> 'full.module.name': 'version string',
>>> },
>>>
>>> # arbitrary data provided by user
>>> 'extra': {
>>> 'key': 'value',
>>> }
>>> }
:param event_type: the module path to the Event class. Builtins can use
shorthand class notation and exclude the full module
path.
:param data: the data base, useful for specifying structured data
interfaces. Any key which contains a '.' will be
assumed to be a data interface.
:param date: the datetime of this event
:param time_spent: an integer value representing the duration of the
event (in milliseconds)
:param extra: a dictionary of additional standard metadata
:param stack: a stacktrace for the event
:param tags: dict of extra tags
:param sample_rate: a float in the range [0, 1] to sample this message
:return: a 32-length string identifying this event
"""
if not self.is_enabled():
return
exc_info = kwargs.get('exc_info')
if exc_info is not None:
if self.skip_error_for_logging(exc_info):
return
elif not self.should_capture(exc_info):
self.logger.info(
'Not capturing exception due to filters: %s', exc_info[0],
exc_info=sys.exc_info())
return
self.record_exception_seen(exc_info)
data = self.build_msg(
event_type, data, date, time_spent, extra, stack, tags=tags,
**kwargs)
# should this event be sampled?
if sample_rate is None:
sample_rate = self.sample_rate
if self._random.random() < sample_rate:
self.send(**data)
self._local_state.last_event_id = data['event_id']
return data['event_id']
|
Log a reasonable representation of an event that should have been sent
to Sentry
|
def _log_failed_submission(self, data):
"""
Log a reasonable representation of an event that should have been sent
to Sentry
"""
message = data.pop('message', '<no message value>')
output = [message]
if 'exception' in data and 'stacktrace' in data['exception']['values'][-1]:
# try to reconstruct a reasonable version of the exception
for frame in data['exception']['values'][-1]['stacktrace'].get('frames', []):
output.append(' File "%(fn)s", line %(lineno)s, in %(func)s' % {
'fn': frame.get('filename', 'unknown_filename'),
'lineno': frame.get('lineno', -1),
'func': frame.get('function', 'unknown_function'),
})
self.uncaught_logger.error(output)
|
Given an already serialized message, signs the message and passes the
payload off to ``send_remote``.
|
def send_encoded(self, message, auth_header=None, **kwargs):
"""
Given an already serialized message, signs the message and passes the
payload off to ``send_remote``.
"""
client_string = 'raven-python/%s' % (raven.VERSION,)
if not auth_header:
timestamp = time.time()
auth_header = get_auth_header(
protocol=self.protocol_version,
timestamp=timestamp,
client=client_string,
api_key=self.remote.public_key,
api_secret=self.remote.secret_key,
)
headers = {
'User-Agent': client_string,
'X-Sentry-Auth': auth_header,
'Content-Encoding': self.get_content_encoding(),
'Content-Type': 'application/octet-stream',
}
return self.send_remote(
url=self.remote.store_endpoint,
data=message,
headers=headers,
**kwargs
)
|
Creates an event from an exception.
>>> try:
>>> exc_info = sys.exc_info()
>>> client.captureException(exc_info)
>>> finally:
>>> del exc_info
If exc_info is not provided, or is set to True, then this method will
perform the ``exc_info = sys.exc_info()`` and the requisite clean-up
for you.
``kwargs`` are passed through to ``.capture``.
|
def captureException(self, exc_info=None, **kwargs):
"""
Creates an event from an exception.
>>> try:
>>> exc_info = sys.exc_info()
>>> client.captureException(exc_info)
>>> finally:
>>> del exc_info
If exc_info is not provided, or is set to True, then this method will
perform the ``exc_info = sys.exc_info()`` and the requisite clean-up
for you.
``kwargs`` are passed through to ``.capture``.
"""
if exc_info is None or exc_info is True:
exc_info = sys.exc_info()
return self.capture(
'raven.events.Exception', exc_info=exc_info, **kwargs)
|
Wrap a function or code block in try/except and automatically call
``.captureException`` if it raises an exception; the exception is then
reraised.
By default, it will capture ``Exception``
>>> @client.capture_exceptions
>>> def foo():
>>> raise Exception()
>>> with client.capture_exceptions():
>>> raise Exception()
You can also specify exceptions to be caught specifically
>>> @client.capture_exceptions((IOError, LookupError))
>>> def bar():
>>> ...
>>> with client.capture_exceptions((IOError, LookupError)):
>>> ...
``kwargs`` are passed through to ``.captureException``.
|
def capture_exceptions(self, function_or_exceptions=None, **kwargs):
"""
Wrap a function or code block in try/except and automatically call
``.captureException`` if it raises an exception; the exception is then
reraised.
By default, it will capture ``Exception``
>>> @client.capture_exceptions
>>> def foo():
>>> raise Exception()
>>> with client.capture_exceptions():
>>> raise Exception()
You can also specify exceptions to be caught specifically
>>> @client.capture_exceptions((IOError, LookupError))
>>> def bar():
>>> ...
>>> with client.capture_exceptions((IOError, LookupError)):
>>> ...
``kwargs`` are passed through to ``.captureException``.
"""
function = None
exceptions = (Exception,)
if isinstance(function_or_exceptions, FunctionType):
function = function_or_exceptions
elif function_or_exceptions is not None:
exceptions = function_or_exceptions
# In python3.2 contextmanager acts both as contextmanager and decorator
@contextlib.contextmanager
def make_decorator(exceptions):
try:
yield
except exceptions:
self.captureException(**kwargs)
raise
decorator = make_decorator(exceptions)
if function:
return decorator(function)
return decorator
|
Creates an event for a SQL query.
>>> client.captureQuery('SELECT * FROM foo')
|
def captureQuery(self, query, params=(), engine=None, **kwargs):
"""
Creates an event for a SQL query.
>>> client.captureQuery('SELECT * FROM foo')
"""
return self.capture(
'raven.events.Query', query=query, params=params, engine=engine,
**kwargs)
|
Records a breadcrumb with the current context. They will be
sent with the next event.
|
def captureBreadcrumb(self, *args, **kwargs):
"""
Records a breadcrumb with the current context. They will be
sent with the next event.
"""
# Note: framework integration should not call this method but
# instead use the raven.breadcrumbs.record_breadcrumb function
# which will record to the correct client automatically.
self.context.breadcrumbs.record(*args, **kwargs)
|
It is possible to inject new schemes at runtime
|
def register_scheme(self, scheme, cls):
"""
It is possible to inject new schemes at runtime
"""
if scheme in self._schemes:
raise DuplicateScheme()
urlparse.register_scheme(scheme)
# TODO (vng): verify the interface of the new class
self._schemes[scheme] = cls
|
Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/)
to be installed and set up.
|
def get_user_info(self, request):
"""
Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/)
to be installed and set up.
"""
user_info = {}
try:
ip_address = request.access_route[0]
except IndexError:
ip_address = request.remote_addr
if ip_address:
user_info['ip_address'] = ip_address
if not has_flask_login:
return user_info
if not hasattr(current_app, 'login_manager'):
return user_info
try:
is_authenticated = current_user.is_authenticated
except AttributeError:
# HACK: catch the attribute error thrown when flask-login is not attached
# > current_user = LocalProxy(lambda: _request_ctx_stack.top.user)
# E AttributeError: 'RequestContext' object has no attribute 'user'
return user_info
if callable(is_authenticated):
is_authenticated = is_authenticated()
if not is_authenticated:
return user_info
user_info['id'] = current_user.get_id()
if 'SENTRY_USER_ATTRS' in current_app.config:
for attr in current_app.config['SENTRY_USER_ATTRS']:
if hasattr(current_user, attr):
user_info[attr] = getattr(current_user, attr)
return user_info
|
Determine how to retrieve actual data by using request.mimetype.
|
def get_http_info(self, request):
"""
Determine how to retrieve actual data by using request.mimetype.
"""
if self.is_json_type(request.mimetype):
retriever = self.get_json_data
else:
retriever = self.get_form_data
return self.get_http_info_with_retriever(request, retriever)
|
Same as the regular ``http_info`` extraction, but with a work-around for form data.
|
def get_http_info_with_retriever(self, request, retriever=None):
"""
Same as the regular ``http_info`` extraction, but with a work-around for form data.
"""
if retriever is None:
retriever = self.get_form_data
urlparts = urlparse.urlsplit(request.url)
try:
data = retriever(request)
except ClientDisconnected:
data = {}
return {
'url': '%s://%s%s' % (urlparts.scheme, urlparts.netloc, urlparts.path),
'query_string': urlparts.query,
'method': request.method,
'data': data,
'headers': dict(get_headers(request.environ)),
'env': dict(get_environ(request.environ)),
}
|
Configures logging to pipe to Sentry.
- ``exclude`` is a list of loggers that shouldn't go to Sentry.
For a typical Python install:
>>> from raven.handlers.logging import SentryHandler
>>> client = Sentry(...)
>>> setup_logging(SentryHandler(client))
Within Django:
>>> from raven.contrib.django.handlers import SentryHandler
>>> setup_logging(SentryHandler())
Returns a boolean indicating whether logging was configured.
|
def setup_logging(handler, exclude=EXCLUDE_LOGGER_DEFAULTS):
"""
Configures logging to pipe to Sentry.
- ``exclude`` is a list of loggers that shouldn't go to Sentry.
For a typical Python install:
>>> from raven.handlers.logging import SentryHandler
>>> client = Sentry(...)
>>> setup_logging(SentryHandler(client))
Within Django:
>>> from raven.contrib.django.handlers import SentryHandler
>>> setup_logging(SentryHandler())
Returns a boolean indicating whether logging was configured.
"""
logger = logging.getLogger()
if handler.__class__ in map(type, logger.handlers):
return False
logger.addHandler(handler)
# Add StreamHandler to sentry's default so you can catch missed exceptions
for logger_name in exclude:
logger = logging.getLogger(logger_name)
logger.propagate = False
logger.addHandler(logging.StreamHandler())
return True
|
Spawn an async request to a remote webserver.
|
def send(self, url, data, headers):
"""
Spawn an async request to a remote webserver.
"""
eventlet.spawn(self._send_payload, (url, data, headers))
|
Same as the regular ``http_info`` extraction, but with a work-around for form data.
|
def get_http_info_with_retriever(self, request, retriever):
"""
Same as the regular ``http_info`` extraction, but with a work-around for form data.
"""
urlparts = urlparse.urlsplit(request.url)
try:
data = retriever(request)
except Exception:
data = {}
return {
'url': '{0}://{1}{2}'.format(
urlparts.scheme, urlparts.netloc, urlparts.path),
'query_string': urlparts.query,
'method': request.method,
'data': data,
'cookies': request.cookies,
'headers': request.headers,
'env': {
'REMOTE_ADDR': request.remote_addr,
}
}
|
Given something that closely resembles a dictionary, we attempt
to coerce it into a proper dictionary.
|
def to_dict(dictish):
"""
Given something that closely resembles a dictionary, we attempt
to coerce it into a proper dictionary.
"""
if hasattr(dictish, 'iterkeys'):
m = dictish.iterkeys
elif hasattr(dictish, 'keys'):
m = dictish.keys
else:
raise ValueError(dictish)
return dict((k, dictish[k]) for k in m())
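As an illustration, a minimal sketch of what ``to_dict`` accepts; the ``HeaderBag`` class below is hypothetical and stands in for any dict-like object exposing ``keys()`` and item access:
class HeaderBag(object):
    # Hypothetical dict-like container: only keys() and __getitem__ are needed.
    def __init__(self, items):
        self._items = dict(items)

    def keys(self):
        return self._items.keys()

    def __getitem__(self, key):
        return self._items[key]

bag = HeaderBag({'Host': 'example.com', 'Accept': '*/*'})
assert to_dict(bag) == {'Host': 'example.com', 'Accept': '*/*'}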
|
Given a traceback object, it will iterate over all
frames that do not contain the ``__traceback_hide__``
local variable.
|
def iter_traceback_frames(tb):
"""
Given a traceback object, it will iterate over all
frames that do not contain the ``__traceback_hide__``
local variable.
"""
# Some versions of celery have hacked traceback objects that might
# miss tb_frame.
while tb and hasattr(tb, 'tb_frame'):
# support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
f_locals = getattr(tb.tb_frame, 'f_locals', {})
if not _getitem_from_frame(f_locals, '__traceback_hide__'):
yield tb.tb_frame, getattr(tb, 'tb_lineno', None)
tb = tb.tb_next
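A small sketch of how ``__traceback_hide__`` interacts with this helper; the ``_internal`` function is hypothetical:
import sys

def _internal():
    # Hypothetical internal helper whose frame should not appear in reports.
    __traceback_hide__ = True  # NOQA -- read from f_locals by the iterator
    raise ValueError('boom')

try:
    _internal()
except ValueError:
    tb = sys.exc_info()[2]
    # The frame of _internal() is skipped; only the calling frame is yielded.
    visible = [frame.f_code.co_name for frame, lineno in iter_traceback_frames(tb)]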
|
Given an optional list of frames (defaults to current stack),
iterates over all frames that do not contain the ``__traceback_hide__``
local variable.
|
def iter_stack_frames(frames=None):
"""
Given an optional list of frames (defaults to current stack),
iterates over all frames that do not contain the ``__traceback_hide__``
local variable.
"""
if not frames:
frames = inspect.stack()[1:]
for frame, lineno in ((f[0], f[2]) for f in reversed(frames)):
f_locals = getattr(frame, 'f_locals', {})
if not _getitem_from_frame(f_locals, '__traceback_hide__'):
yield frame, lineno
|
Removes various excess metadata from middle frames which go beyond
``frame_allowance``.
Returns ``frames``.
|
def slim_frame_data(frames, frame_allowance=25):
"""
Removes various excess metadata from middle frames which go beyond
``frame_allowance``.
Returns ``frames``.
"""
frames_len = 0
app_frames = []
system_frames = []
for frame in frames:
frames_len += 1
if frame.get('in_app'):
app_frames.append(frame)
else:
system_frames.append(frame)
if frames_len <= frame_allowance:
return frames
remaining = frames_len - frame_allowance
app_count = len(app_frames)
system_allowance = max(frame_allowance - app_count, 0)
if system_allowance:
half_max = int(system_allowance / 2)
# prioritize trimming system frames
for frame in system_frames[half_max:-half_max]:
frame.pop('vars', None)
frame.pop('pre_context', None)
frame.pop('post_context', None)
remaining -= 1
else:
for frame in system_frames:
frame.pop('vars', None)
frame.pop('pre_context', None)
frame.pop('post_context', None)
remaining -= 1
if remaining:
app_allowance = app_count - remaining
half_max = int(app_allowance / 2)
for frame in app_frames[half_max:-half_max]:
frame.pop('vars', None)
frame.pop('pre_context', None)
frame.pop('post_context', None)
return frames
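A rough sketch of the trimming behaviour with synthetic frame dicts and a deliberately small allowance:
# Five synthetic system frames (no 'in_app' flag) with an allowance of 3:
# the middle frames lose 'vars'/'pre_context'/'post_context', while the
# outermost frames keep their full metadata.
frames = [{'filename': 'f%d.py' % i, 'vars': {'i': i},
           'pre_context': ['...'], 'post_context': ['...']} for i in range(5)]
slimmed = slim_frame_data(frames, frame_allowance=3)
assert 'vars' in slimmed[0] and 'vars' in slimmed[-1]
assert 'vars' not in slimmed[2]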
|
Given a list of frames, returns a list of stack information
dictionary objects that are JSON-ready.
We have to be careful here as certain implementations of the
_Frame class do not contain the necessary data to lookup all
of the information we want.
|
def get_stack_info(frames, transformer=transform, capture_locals=True,
frame_allowance=25):
"""
Given a list of frames, returns a list of stack information
dictionary objects that are JSON-ready.
We have to be careful here as certain implementations of the
_Frame class do not contain the necessary data to lookup all
of the information we want.
"""
__traceback_hide__ = True # NOQA
result = []
for frame_info in frames:
# Old, terrible API
if isinstance(frame_info, (list, tuple)):
frame, lineno = frame_info
else:
frame = frame_info
lineno = frame_info.f_lineno
# Support hidden frames
f_locals = getattr(frame, 'f_locals', {})
if _getitem_from_frame(f_locals, '__traceback_hide__'):
continue
f_globals = getattr(frame, 'f_globals', {})
f_code = getattr(frame, 'f_code', None)
if f_code:
abs_path = frame.f_code.co_filename
function = frame.f_code.co_name
else:
abs_path = None
function = None
loader = _getitem_from_frame(f_globals, '__loader__')
module_name = _getitem_from_frame(f_globals, '__name__')
if lineno:
lineno -= 1
if lineno is not None and abs_path:
pre_context, context_line, post_context = \
get_lines_from_file(abs_path, lineno, 5, loader, module_name)
else:
pre_context, context_line, post_context = None, None, None
# Try to pull a relative file path
# This changes /foo/site-packages/baz/bar.py into baz/bar.py
try:
base_filename = sys.modules[module_name.split('.', 1)[0]].__file__
filename = abs_path.split(
base_filename.rsplit(os.sep, 2)[0], 1)[-1].lstrip(os.sep)
except Exception:
filename = abs_path
if not filename:
filename = abs_path
frame_result = {
'abs_path': abs_path,
'filename': filename,
'module': module_name or None,
'function': function or '<unknown>',
'lineno': lineno + 1,
}
if capture_locals:
f_vars = get_frame_locals(frame, transformer=transformer)
if f_vars:
frame_result['vars'] = f_vars
if context_line is not None:
frame_result.update({
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
})
result.append(frame_result)
stackinfo = {
'frames': slim_frame_data(result, frame_allowance=frame_allowance),
}
return stackinfo
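A minimal sketch of combining this with ``iter_stack_frames`` from above to snapshot the current stack; the wrapper name is made up:
def snapshot_current_stack():
    # Capture the caller's stack and convert it into the JSON-ready structure.
    info = get_stack_info(iter_stack_frames())
    # Each entry carries 'filename', 'lineno', 'function', and optionally 'vars'.
    return info['frames']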
|
Returns request data extracted from web.ctx.
|
def get_data_from_request():
"""Returns request data extracted from web.ctx."""
return {
'request': {
'url': '%s://%s%s' % (web.ctx['protocol'], web.ctx['host'], web.ctx['path']),
'query_string': web.ctx.query,
'method': web.ctx.method,
'data': web.data(),
'headers': dict(get_headers(web.ctx.environ)),
'env': dict(get_environ(web.ctx.environ)),
}
}
|
Utility method for django's deprecated resolver.regex
|
def get_regex(resolver_or_pattern):
"""Utility method for django's deprecated resolver.regex"""
try:
regex = resolver_or_pattern.regex
except AttributeError:
regex = resolver_or_pattern.pattern.regex
return regex
|
>>> fetch_package_version('sentry')
|
def fetch_package_version(dist_name):
"""
>>> fetch_package_version('sentry')
"""
try:
# Importing pkg_resources can be slow, so only import it
# if we need it.
import pkg_resources
except ImportError:
# pkg_resources is not available on Google App Engine
raise NotImplementedError('pkg_resources is not available '
'on this Python install')
dist = pkg_resources.get_distribution(dist_name)
return dist.version
|
Convert a settings object (or dictionary) to parameters which may be passed
to a new ``Client()`` instance.
|
def convert_options(settings, defaults=None):
"""
Convert a settings object (or dictionary) to parameters which may be passed
to a new ``Client()`` instance.
"""
if defaults is None:
defaults = {}
if isinstance(settings, dict):
def getopt(key, default=None):
return settings.get(
'SENTRY_%s' % key.upper(),
defaults.get(key, default)
)
options = copy.copy(
settings.get('SENTRY_CONFIG')
or settings.get('RAVEN_CONFIG')
or {}
)
else:
def getopt(key, default=None):
return getattr(settings, 'SENTRY_%s' % key.upper(), defaults.get(key, default))
options = copy.copy(
getattr(settings, 'SENTRY_CONFIG', None)
or getattr(settings, 'RAVEN_CONFIG', None)
or {}
)
options.setdefault('include_paths', getopt('include_paths', []))
options.setdefault('exclude_paths', getopt('exclude_paths', []))
options.setdefault('timeout', getopt('timeout'))
options.setdefault('name', getopt('name'))
options.setdefault('auto_log_stacks', getopt('auto_log_stacks'))
options.setdefault('string_max_length', getopt('string_max_length'))
options.setdefault('list_max_length', getopt('list_max_length'))
options.setdefault('site', getopt('site'))
options.setdefault('processors', getopt('processors'))
options.setdefault('sanitize_keys', getopt('sanitize_keys'))
options.setdefault('dsn', getopt('dsn', os.environ.get('SENTRY_DSN')))
options.setdefault('context', getopt('context'))
options.setdefault('tags', getopt('tags'))
options.setdefault('release', getopt('release'))
options.setdefault('repos', getopt('repos'))
options.setdefault('environment', getopt('environment'))
options.setdefault('ignore_exceptions', getopt('ignore_exceptions'))
options.setdefault('sample_rate', getopt('sample_rate'))
transport = getopt('transport') or options.get('transport')
if isinstance(transport, string_types):
transport = import_string(transport)
options['transport'] = transport
return options
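A brief sketch of calling this with a plain settings dictionary; the DSN and values below are placeholders:
settings = {
    'SENTRY_DSN': 'https://public:secret@example.com/1',  # placeholder DSN
    'SENTRY_INCLUDE_PATHS': ['myapp'],
    'RAVEN_CONFIG': {'release': '1.0.0'},
}
options = convert_options(settings, defaults={'name': 'worker-1'})
# options['dsn']           -> 'https://public:secret@example.com/1'
# options['include_paths'] -> ['myapp']
# options['release']       -> '1.0.0' (taken from RAVEN_CONFIG)
# options['name']          -> 'worker-1' (falls back to the supplied default)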
|
Access request.body; otherwise it might not be accessible later,
after the request has been read/streamed
|
def process_request(self, request):
"""
Access request.body; otherwise it might not be accessible later,
after the request has been read/streamed
"""
content_type = request.META.get('CONTENT_TYPE', '')
for non_cacheable_type in self.non_cacheable_types:
if non_cacheable_type in content_type:
return
request.body
|
Wrap a function or code block in try/except and automatically call
``.captureException`` if it raises an exception; the exception is then
reraised.
By default, it will capture ``Exception``
>>> @client.capture_exceptions
>>> def foo():
>>> raise Exception()
>>> with client.capture_exceptions():
>>> raise Exception()
You can also specify exceptions to be caught specifically
>>> @client.capture_exceptions((IOError, LookupError))
>>> def bar():
>>> ...
``kwargs`` are passed through to ``.captureException``.
|
def capture_exceptions(self, f=None, exceptions=None): # TODO: Ash fix kwargs in base
"""
Wrap a function or code block in try/except and automatically call
``.captureException`` if it raises an exception; the exception is then
reraised.
By default, it will capture ``Exception``
>>> @client.capture_exceptions
>>> def foo():
>>> raise Exception()
>>> with client.capture_exceptions():
>>> raise Exception()
You can also specify exceptions to be caught specifically
>>> @client.capture_exceptions((IOError, LookupError))
>>> def bar():
>>> ...
``kwargs`` are passed through to ``.captureException``.
"""
if not isinstance(f, FunctionType):
# when the decorator receives args that are not a function, we expect
# f to be the exceptions tuple
return functools.partial(self.capture_exceptions, exceptions=f)
exceptions = exceptions or (Exception,)
@functools.wraps(f)
def wrapped(event, context, *args, **kwargs):
try:
return f(event, context, *args, **kwargs)
except exceptions:
self.captureException(event=event, context=context, **kwargs)
self.context.clear()
raise
return wrapped
|
Runs a thing once and once only.
|
def once(func):
"""Runs a thing once and once only."""
lock = threading.Lock()
def new_func(*args, **kwargs):
if new_func.called:
return
with lock:
if new_func.called:
return
rv = func(*args, **kwargs)
new_func.called = True
return rv
new_func = update_wrapper(new_func, func)
new_func.called = False
return new_func
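A short usage sketch; the decorated function is hypothetical:
@once
def install_signal_handlers():
    # Hypothetical one-time setup hook.
    print('installing handlers')
    return True

install_signal_handlers()  # prints and returns True
install_signal_handlers()  # subsequent calls are no-ops and return None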
|
A reimplementation of Django's get_host, without the
SuspiciousOperation check.
|
def get_host(request):
"""
A reimplementation of Django's get_host, without the
SuspiciousOperation check.
"""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in request.META):
host = request.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in request.META:
host = request.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = request.META['SERVER_NAME']
server_port = str(request.META['SERVER_PORT'])
if server_port != (request.is_secure() and '443' or '80'):
host = '%s:%s' % (host, server_port)
return host
|
Called when an exception has been raised in the code run by ZeroRPC
|
def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info):
"""
Called when an exception has been raised in the code run by ZeroRPC
"""
# Hide the zerorpc internal frames for readability, for a REQ/REP or
# REQ/STREAM server the frames to hide are:
# - core.ServerBase._async_task
# - core.Pattern*.process_call
# - core.DecoratorBase.__call__
#
# For a PUSH/PULL or PUB/SUB server the frame to hide is:
# - core.Puller._receiver
if self._hide_zerorpc_frames:
traceback = exc_info[2]
while traceback:
zerorpc_frame = traceback.tb_frame
zerorpc_frame.f_locals['__traceback_hide__'] = True
frame_info = inspect.getframeinfo(zerorpc_frame)
# Is there a better way than this (or looking up the filenames
# or hardcoding the number of frames to skip) to know when we
# are out of zerorpc?
if frame_info.function == '__call__' \
or frame_info.function == '_receiver':
break
traceback = traceback.tb_next
self._sentry_client.captureException(
exc_info,
extra=task_ctx
)
|
Install specified middleware
|
def install_middleware(middleware_name, lookup_names=None):
"""
Install specified middleware
"""
if lookup_names is None:
lookup_names = (middleware_name,)
# default settings.MIDDLEWARE is None
middleware_attr = 'MIDDLEWARE' if getattr(settings,
'MIDDLEWARE',
None) is not None \
else 'MIDDLEWARE_CLASSES'
# make sure to get an empty tuple when attr is None
middleware = getattr(settings, middleware_attr, ()) or ()
if set(lookup_names).isdisjoint(set(middleware)):
setattr(settings,
middleware_attr,
type(middleware)((middleware_name,)) + middleware)
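A sketch of how this could be called from Django app setup; the dotted path is illustrative, not a middleware shipped with the library:
# Prepends the middleware class unless one of the lookup names is already
# present in settings.MIDDLEWARE (or MIDDLEWARE_CLASSES on older Django).
install_middleware(
    'myproject.middleware.SentryCatchMiddleware',  # illustrative dotted path
    lookup_names=('myproject.middleware.SentryCatchMiddleware',),
)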
|
Train survival model on given data and return its score on test data
|
def _fit_and_score(est, x, y, scorer, train_index, test_index, parameters, fit_params, predict_params):
"""Train survival model on given data and return its score on test data"""
X_train, y_train = _safe_split(est, x, y, train_index)
train_params = fit_params.copy()
# Training
est.set_params(**parameters)
est.fit(X_train, y_train, **train_params)
# Testing
test_predict_params = predict_params.copy()
X_test, y_test = _safe_split(est, x, y, test_index, train_index)
score = scorer(est, X_test, y_test, **test_predict_params)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
|
Fit estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
|
def fit(self, X, y):
"""Fit estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
X, event_num, time = self._pre_fit(X, y)
create_path, alphas, penalty = self._check_params(X.shape[1])
coef, alphas, deviance_ratio, n_iter = call_fit_coxnet(
X, time, event_num, penalty, alphas, create_path,
self.alpha_min_ratio, self.l1_ratio, int(self.max_iter),
self.tol, self.verbose)
assert numpy.isfinite(coef).all()
if numpy.all(numpy.absolute(coef) < numpy.finfo(numpy.float).eps):
warnings.warn('all coefficients are zero, consider decreasing alpha.',
stacklevel=2)
if n_iter >= self.max_iter:
warnings.warn('Optimization terminated early, you might want'
' to increase the number of iterations (max_iter=%d).'
% self.max_iter,
category=ConvergenceWarning,
stacklevel=2)
if self.fit_baseline_model:
self._baseline_models = tuple(
BreslowEstimator().fit(numpy.dot(X, coef[:, i]), event_num, time)
for i in range(coef.shape[1])
)
else:
self._baseline_models = None
self.alphas_ = alphas
self.penalty_factor_ = penalty
self.coef_ = coef
self.deviance_ratio_ = deviance_ratio
return self
|
Interpolate coefficients by calculating the weighted average of coefficient vectors corresponding to
neighbors of alpha in the list of alphas constructed during training.
|
def _interpolate_coefficients(self, alpha):
"""Interpolate coefficients by calculating the weighted average of coefficient vectors corresponding to
neighbors of alpha in the list of alphas constructed during training."""
exact = False
coef_idx = None
for i, val in enumerate(self.alphas_):
if val > alpha:
coef_idx = i
elif alpha - val < numpy.finfo(numpy.float).eps:
coef_idx = i
exact = True
break
if coef_idx is None:
coef = self.coef_[:, 0]
elif exact or coef_idx == len(self.alphas_) - 1:
coef = self.coef_[:, coef_idx]
else:
# interpolate between coefficients
a1 = self.alphas_[coef_idx + 1]
a2 = self.alphas_[coef_idx]
frac = (alpha - a1) / (a2 - a1)
coef = frac * self.coef_[:, coef_idx] + (1.0 - frac) * self.coef_[:, coef_idx + 1]
return coef
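For a requested ``alpha`` strictly between two training alphas, the weighting is plain linear interpolation; a small numeric sketch with made-up values:
# Suppose alphas_ = [1.0, 0.5, 0.25] (decreasing along the path) and
# alpha = 0.4 is requested. The loop leaves coef_idx = 1, so:
a1, a2, alpha = 0.25, 0.5, 0.4      # alphas_[coef_idx + 1], alphas_[coef_idx]
frac = (alpha - a1) / (a2 - a1)     # 0.6
# coef = 0.6 * coef_[:, 1] + 0.4 * coef_[:, 2]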
|
The linear predictor of the model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test data to calculate the log-likelihood from
alpha : float, optional
Constant that multiplies the penalty terms. If the same alpha was used during training, exact
coefficients are used, otherwise coefficients are interpolated from the closest alpha values that
were used during training. If set to ``None``, the last alpha in the solution path is used.
Returns
-------
T : array, shape = (n_samples,)
The predicted decision function
|
def predict(self, X, alpha=None):
"""The linear predictor of the model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test data to calculate the log-likelihood from
alpha : float, optional
Constant that multiplies the penalty terms. If the same alpha was used during training, exact
coefficients are used, otherwise coefficients are interpolated from the closest alpha values that
were used during training. If set to ``None``, the last alpha in the solution path is used.
Returns
-------
T : array, shape = (n_samples,)
The predicted decision function
"""
X = check_array(X)
coef = self._get_coef(alpha)
return numpy.dot(X, coef)
|
Predict cumulative hazard function.
Only available if :meth:`fit` has been called with `fit_baseline_model = True`.
The cumulative hazard function for an individual
with feature vector :math:`x_\\alpha` is defined as
.. math::
H(t \\mid x_\\alpha) = \\exp(x_\\alpha^\\top \\beta) H_0(t) ,
where :math:`H_0(t)` is the baseline hazard function,
estimated by Breslow's estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
alpha : float, optional
Constant that multiplies the penalty terms. The same alpha as used during training
must be specified. If set to ``None``, the last alpha in the solution path is used.
Returns
-------
cum_hazard : ndarray, shape = (n_samples,)
Predicted cumulative hazard functions.
|
def predict_cumulative_hazard_function(self, X, alpha=None):
"""Predict cumulative hazard function.
Only available if :meth:`fit` has been called with `fit_baseline_model = True`.
The cumulative hazard function for an individual
with feature vector :math:`x_\\alpha` is defined as
.. math::
H(t \\mid x_\\alpha) = \\exp(x_\\alpha^\\top \\beta) H_0(t) ,
where :math:`H_0(t)` is the baseline hazard function,
estimated by Breslow's estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
alpha : float, optional
Constant that multiplies the penalty terms. The same alpha as used during training
must be specified. If set to ``None``, the last alpha in the solution path is used.
Returns
-------
cum_hazard : ndarray, shape = (n_samples,)
Predicted cumulative hazard functions.
"""
baseline_model = self._get_baseline_model(alpha)
return baseline_model.get_cumulative_hazard_function(self.predict(X, alpha=alpha))
|
Predict survival function.
Only available if :meth:`fit` has been called with `fit_baseline_model = True`.
The survival function for an individual
with feature vector :math:`x_\\alpha` is defined as
.. math::
S(t \\mid x_\\alpha) = S_0(t)^{\\exp(x_\\alpha^\\top \\beta)} ,
where :math:`S_0(t)` is the baseline survival function,
estimated by Breslow's estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
alpha : float, optional
Constant that multiplies the penalty terms. The same alpha as used during training
must be specified. If set to ``None``, the last alpha in the solution path is used.
Returns
-------
survival : ndarray, shape = (n_samples,)
Predicted survival functions.
|
def predict_survival_function(self, X, alpha=None):
"""Predict survival function.
Only available if :meth:`fit` has been called with `fit_baseline_model = True`.
The survival function for an individual
with feature vector :math:`x_\\alpha` is defined as
.. math::
S(t \\mid x_\\alpha) = S_0(t)^{\\exp(x_\\alpha^\\top \\beta)} ,
where :math:`S_0(t)` is the baseline survival function,
estimated by Breslow's estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
alpha : float, optional
Constant that multiplies the penalty terms. The same alpha as used during training
must be specified. If set to ``None``, the last alpha in the solution path is used.
Returns
-------
survival : ndarray, shape = (n_samples,)
Predicted survival functions.
"""
baseline_model = self._get_baseline_model(alpha)
return baseline_model.get_survival_function(self.predict(X, alpha=alpha))
|
For each base estimator collect models trained on each fold
|
def _create_base_ensemble(self, out, n_estimators, n_folds):
"""For each base estimator collect models trained on each fold"""
ensemble_scores = numpy.empty((n_estimators, n_folds))
base_ensemble = numpy.empty_like(ensemble_scores, dtype=numpy.object)
for model, fold, score, est in out:
ensemble_scores[model, fold] = score
base_ensemble[model, fold] = est
return ensemble_scores, base_ensemble
|
For each selected base estimator, average models trained on each fold
|
def _create_cv_ensemble(self, base_ensemble, idx_models_included, model_names=None):
"""For each selected base estimator, average models trained on each fold"""
fitted_models = numpy.empty(len(idx_models_included), dtype=numpy.object)
for i, idx in enumerate(idx_models_included):
model_name = self.base_estimators[idx][0] if model_names is None else model_names[idx]
avg_model = EnsembleAverage(base_ensemble[idx, :], name=model_name)
fitted_models[i] = avg_model
return fitted_models
|
Takes special care of estimators using a custom kernel function
Parameters
----------
X : array, shape = (n_samples, n_features)
Samples to pre-compute kernel matrix from.
Returns
-------
base_estimators : list
Same as `self.base_estimators`, except that estimators with a custom kernel function
use ``kernel='precomputed'``.
kernel_cache : dict
Maps estimator name to kernel matrix. Use this for cross-validation instead of `X`.
|
def _get_base_estimators(self, X):
Takes special care of estimators using a custom kernel function
Parameters
----------
X : array, shape = (n_samples, n_features)
Samples to pre-compute kernel matrix from.
Returns
-------
base_estimators : list
Same as `self.base_estimators`, except that estimators with a custom kernel function
use ``kernel='precomputed'``.
kernel_cache : dict
Maps estimator name to kernel matrix. Use this for cross-validation instead of `X`.
"""
base_estimators = []
kernel_cache = {}
kernel_fns = {}
for i, (name, estimator) in enumerate(self.base_estimators):
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
if not hasattr(estimator, '_get_kernel'):
raise ValueError(
'estimator %s uses a custom kernel function, but does not have a _get_kernel method' % name)
kernel_mat = kernel_fns.get(estimator.kernel, None)
if kernel_mat is None:
kernel_mat = estimator._get_kernel(X)
kernel_cache[i] = kernel_mat
kernel_fns[estimator.kernel] = kernel_mat
kernel_cache[i] = kernel_mat
# We precompute kernel, but only for training, for testing use original custom kernel function
kernel_estimator = clone(estimator)
kernel_estimator.set_params(kernel='precomputed')
base_estimators.append((name, kernel_estimator))
else:
base_estimators.append((name, estimator))
return base_estimators, kernel_cache
|
Restore custom kernel functions of estimators for predictions
|
def _restore_base_estimators(self, kernel_cache, out, X, cv):
"""Restore custom kernel functions of estimators for predictions"""
train_folds = {fold: train_index for fold, (train_index, _) in enumerate(cv)}
for idx, fold, _, est in out:
if idx in kernel_cache:
if not hasattr(est, 'fit_X_'):
raise ValueError(
'estimator %s uses a custom kernel function, '
'but does not have the attribute `fit_X_` after training' % self.base_estimators[idx][0])
est.set_params(kernel=self.base_estimators[idx][1].kernel)
est.fit_X_ = X[train_folds[fold]]
return out
|
Create a cross-validated model by training a model for each fold with the same model parameters
|
def _fit_and_score_ensemble(self, X, y, cv, **fit_params):
"""Create a cross-validated model by training a model for each fold with the same model parameters"""
fit_params_steps = self._split_fit_params(fit_params)
folds = list(cv.split(X, y))
# Take care of custom kernel functions
base_estimators, kernel_cache = self._get_base_estimators(X)
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose
)(
delayed(_fit_and_score_fold)(clone(estimator),
X if i not in kernel_cache else kernel_cache[i],
y,
self.scorer,
train_index, test_index,
fit_params_steps[name],
i, fold)
for i, (name, estimator) in enumerate(base_estimators)
for fold, (train_index, test_index) in enumerate(folds))
if len(kernel_cache) > 0:
out = self._restore_base_estimators(kernel_cache, out, X, folds)
return self._create_base_ensemble(out, len(base_estimators), len(folds))
|
Fit ensemble of models
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data.
y : array-like, optional
Target data if base estimators are supervised.
Returns
-------
self
|
def fit(self, X, y=None, **fit_params):
"""Fit ensemble of models
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data.
y : array-like, optional
Target data if base estimators are supervised.
Returns
-------
self
"""
self._check_params()
cv = check_cv(self.cv, X)
self._fit(X, y, cv, **fit_params)
return self
|
Write ARFF file
Parameters
----------
data : :class:`pandas.DataFrame`
DataFrame containing data
filename : string or file-like object
Path to ARFF file or file-like object. In the latter case,
the handle is closed by calling this function.
relation_name : string, optional, default: "pandas"
Name of relation in ARFF file.
index : boolean, optional, default: True
Write row names (index)
|
def writearff(data, filename, relation_name=None, index=True):
"""Write ARFF file
Parameters
----------
data : :class:`pandas.DataFrame`
DataFrame containing data
filename : string or file-like object
Path to ARFF file or file-like object. In the latter case,
the handle is closed by calling this function.
relation_name : string, optional, default: "pandas"
Name of relation in ARFF file.
index : boolean, optional, default: True
Write row names (index)
"""
if isinstance(filename, str):
fp = open(filename, 'w')
if relation_name is None:
relation_name = os.path.basename(filename)
else:
fp = filename
if relation_name is None:
relation_name = "pandas"
try:
data = _write_header(data, fp, relation_name, index)
fp.write("\n")
_write_data(data, fp)
finally:
fp.close()
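A short usage sketch with a toy DataFrame; the column names and output path are made up:
import pandas

df = pandas.DataFrame({
    'age': [61.0, 52.0],
    'status': pandas.Categorical(['alive', 'dead']),
})
writearff(df, 'toy.arff', relation_name='toy', index=False)
# Produces an ARFF file with a real attribute 'age' and a nominal
# attribute 'status' with categories {alive,dead}.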
|
Write header containing attribute names and types
|
def _write_header(data, fp, relation_name, index):
"""Write header containing attribute names and types"""
fp.write("@relation {0}\n\n".format(relation_name))
if index:
data = data.reset_index()
attribute_names = _sanitize_column_names(data)
for column, series in data.iteritems():
name = attribute_names[column]
fp.write("@attribute {0}\t".format(name))
if is_categorical_dtype(series) or is_object_dtype(series):
_write_attribute_categorical(series, fp)
elif numpy.issubdtype(series.dtype, numpy.floating):
fp.write("real")
elif numpy.issubdtype(series.dtype, numpy.integer):
fp.write("integer")
elif numpy.issubdtype(series.dtype, numpy.datetime64):
fp.write("date 'yyyy-MM-dd HH:mm:ss'")
else:
raise TypeError('unsupported type %s' % series.dtype)
fp.write("\n")
return data
|
Replace illegal characters with underscore
|
def _sanitize_column_names(data):
"""Replace illegal characters with underscore"""
new_names = {}
for name in data.columns:
new_names[name] = _ILLEGAL_CHARACTER_PAT.sub("_", name)
return new_names
|
If string has a space, wrap it in double quotes and remove/escape illegal characters
|
def _check_str_value(x):
"""If string has a space, wrap it in double quotes and remove/escape illegal characters"""
if isinstance(x, str):
# remove commas, and single quotation marks since loadarff cannot deal with it
x = x.replace(",", ".").replace(chr(0x2018), "'").replace(chr(0x2019), "'")
# put string in double quotes
if " " in x:
if x[0] in ('"', "'"):
x = x[1:]
if x[-1] in ('"', "'"):
x = x[:len(x) - 1]
x = '"' + x.replace('"', "\\\"") + '"'
return str(x)
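A few illustrative inputs and the values this produces:
_check_str_value('simple')       # -> 'simple'
_check_str_value('two, words')   # -> '"two. words"' (comma replaced, then quoted)
_check_str_value(3.14)           # -> '3.14' (non-strings are stringified)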
|
Write categories of a categorical/nominal attribute
|
def _write_attribute_categorical(series, fp):
"""Write categories of a categorical/nominal attribute"""
if is_categorical_dtype(series.dtype):
categories = series.cat.categories
string_values = _check_str_array(categories)
else:
categories = series.dropna().unique()
string_values = sorted(_check_str_array(categories), key=lambda x: x.strip('"'))
values = ",".join(string_values)
fp.write("{")
fp.write(values)
fp.write("}")
|
Write the data section
|
def _write_data(data, fp):
"""Write the data section"""
fp.write("@data\n")
def to_str(x):
if pandas.isnull(x):
return '?'
else:
return str(x)
data = data.applymap(to_str)
n_rows = data.shape[0]
for i in range(n_rows):
str_values = list(data.iloc[i, :].apply(_check_str_array))
line = ",".join(str_values)
fp.write(line)
fp.write("\n")
|
Fit base estimators.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data.
y : array-like, optional
Target data if base estimators are supervised.
Returns
-------
self
|
def fit(self, X, y=None, **fit_params):
"""Fit base estimators.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data.
y : array-like, optional
Target data if base estimators are supervised.
Returns
-------
self
"""
X = numpy.asarray(X)
self._fit_estimators(X, y, **fit_params)
Xt = self._predict_estimators(X)
self.meta_estimator.fit(Xt, y)
return self
|
Perform prediction.
Only available if the meta estimator has a predict method.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data with samples to predict.
Returns
-------
prediction : array, shape = (n_samples, n_dim)
Prediction of meta estimator that combines
predictions of base estimators. `n_dim` depends
on the return value of meta estimator's `predict`
method.
|
def predict(self, X):
"""Perform prediction.
Only available if the meta estimator has a predict method.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data with samples to predict.
Returns
-------
prediction : array, shape = (n_samples, n_dim)
Prediction of meta estimator that combines
predictions of base estimators. `n_dim` depends
on the return value of meta estimator's `predict`
method.
"""
X = numpy.asarray(X)
Xt = self._predict_estimators(X)
return self.meta_estimator.predict(Xt)
|
Perform prediction.
Only available if the meta estimator has a predict_proba method.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data with samples to predict.
Returns
-------
prediction : ndarray, shape = (n_samples, n_dim)
Prediction of meta estimator that combines
predictions of base estimators. `n_dim` depends
on the return value of meta estimator's `predict`
method.
|
def predict_proba(self, X):
"""Perform prediction.
Only available if the meta estimator has a predict_proba method.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data with samples to predict.
Returns
-------
prediction : ndarray, shape = (n_samples, n_dim)
Prediction of meta estimator that combines
predictions of base estimators. `n_dim` depends
on the return value of meta estimator's `predict`
method.
"""
X = numpy.asarray(X)
Xt = self._predict_estimators(X)
return self.meta_estimator.predict_proba(Xt)
|
Perform prediction.
Only available if the meta estimator has a predict_log_proba method.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data with samples to predict.
Returns
-------
prediction : ndarray, shape = (n_samples, n_dim)
Prediction of meta estimator that combines
predictions of base estimators. `n_dim` depends
on the return value of meta estimator's `predict`
method.
|
def predict_log_proba(self, X):
"""Perform prediction.
Only available if the meta estimator has a predict_log_proba method.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data with samples to predict.
Returns
-------
prediction : ndarray, shape = (n_samples, n_dim)
Prediction of meta estimator that combines
predictions of base estimators. `n_dim` depends
on the return value of meta estimator's `predict`
method.
"""
X = numpy.asarray(X)
Xt = self._predict_estimators(X)
return self.meta_estimator.predict_log_proba(Xt)
|
Perform Z-Normalization on each numeric column of the given table.
Parameters
----------
table : pandas.DataFrame or numpy.ndarray
Data to standardize.
with_std : bool, optional, default: True
If ``False`` data is only centered and not converted to unit variance.
Returns
-------
normalized : pandas.DataFrame
Table with numeric columns normalized.
Categorical columns in the input table remain unchanged.
|
def standardize(table, with_std=True):
"""
Perform Z-Normalization on each numeric column of the given table.
Parameters
----------
table : pandas.DataFrame or numpy.ndarray
Data to standardize.
with_std : bool, optional, default: True
If ``False`` data is only centered and not converted to unit variance.
Returns
-------
normalized : pandas.DataFrame
Table with numeric columns normalized.
Categorical columns in the input table remain unchanged.
"""
if isinstance(table, pandas.DataFrame):
cat_columns = table.select_dtypes(include=['category']).columns
else:
cat_columns = []
new_frame = _apply_along_column(table, standardize_column, with_std=with_std)
# work around for apply converting category dtype to object
# https://github.com/pydata/pandas/issues/9573
for col in cat_columns:
new_frame[col] = table[col].copy()
return new_frame
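A small sketch on a mixed-type frame; the column names and values are made up:
import pandas

frame = pandas.DataFrame({
    'age': [50.0, 60.0, 70.0],
    'sex': pandas.Categorical(['f', 'm', 'f']),
})
normalized = standardize(frame)
# 'age' now has zero mean and unit variance; the categorical
# column 'sex' is passed through unchanged.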
|
Encode categorical columns with `M` categories into `M-1` columns according
to the one-hot scheme.
Parameters
----------
table : pandas.DataFrame
Table with categorical columns to encode.
columns : list-like, optional, default: None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
allow_drop : boolean, optional, default: True
Whether to allow dropping categorical columns that only consist
of a single category.
Returns
-------
encoded : pandas.DataFrame
Table with categorical columns encoded as numeric.
Numeric columns in the input table remain unchanged.
|
def encode_categorical(table, columns=None, **kwargs):
"""
Encode categorical columns with `M` categories into `M-1` columns according
to the one-hot scheme.
Parameters
----------
table : pandas.DataFrame
Table with categorical columns to encode.
columns : list-like, optional, default: None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
allow_drop : boolean, optional, default: True
Whether to allow dropping categorical columns that only consist
of a single category.
Returns
-------
encoded : pandas.DataFrame
Table with categorical columns encoded as numeric.
Numeric columns in the input table remain unchanged.
"""
if isinstance(table, pandas.Series):
if not is_categorical_dtype(table.dtype) and not table.dtype.char == "O":
raise TypeError("series must be of categorical dtype, but was {}".format(table.dtype))
return _encode_categorical_series(table, **kwargs)
def _is_categorical_or_object(series):
return is_categorical_dtype(series.dtype) or series.dtype.char == "O"
if columns is None:
# for columns containing categories
columns_to_encode = {nam for nam, s in table.iteritems() if _is_categorical_or_object(s)}
else:
columns_to_encode = set(columns)
items = []
for name, series in table.iteritems():
if name in columns_to_encode:
series = _encode_categorical_series(series, **kwargs)
if series is None:
continue
items.append(series)
# concat columns of tables
new_table = pandas.concat(items, axis=1, copy=False)
return new_table
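A usage sketch (import path and column names are assumptions):
import pandas
from sksurv.column import encode_categorical  # assumed import path

df = pandas.DataFrame({
    "trt": pandas.Categorical(["a", "b", "a", "c"]),
    "age": [50.0, 61.0, 47.0, 58.0],
})
encoded = encode_categorical(df)
# the 3-level column "trt" becomes 2 indicator columns; "age" passes through unchanged
print(encoded.columns.tolist())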
|
Encode categorical columns to numeric by converting each category to
an integer value.
Parameters
----------
table : pandas.DataFrame
Table with categorical columns to encode.
Returns
-------
encoded : pandas.DataFrame
Table with categorical columns encoded as numeric.
Numeric columns in the input table remain unchanged.
|
def categorical_to_numeric(table):
"""Encode categorical columns to numeric by converting each category to
an integer value.
Parameters
----------
table : pandas.DataFrame
Table with categorical columns to encode.
Returns
-------
encoded : pandas.DataFrame
Table with categorical columns encoded as numeric.
Numeric columns in the input table remain unchanged.
"""
def transform(column):
if is_categorical_dtype(column.dtype):
return column.cat.codes
if column.dtype.char == "O":
try:
nc = column.astype(numpy.int64)
except ValueError:
classes = column.dropna().unique()
classes.sort(kind="mergesort")
nc = column.replace(classes, numpy.arange(classes.shape[0]))
return nc
elif column.dtype == bool:
return column.astype(numpy.int64)
return column
if isinstance(table, pandas.Series):
return pandas.Series(transform(table), name=table.name, index=table.index)
else:
if _pandas_version_under0p23:
return table.apply(transform, axis=0, reduce=False)
else:
return table.apply(transform, axis=0, result_type='reduce')
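A usage sketch (import path and values are assumptions):
import pandas
from sksurv.column import categorical_to_numeric  # assumed import path

s = pandas.Series(["low", "high", "medium", "low"], name="grade")
print(categorical_to_numeric(s))
# object values are mapped to integer codes in sorted order; boolean columns become 0/1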
|
Build a survival support vector machine model from training data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
sample_weight : array-like, shape = (n_samples,), optional
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
Returns
-------
self
|
def fit(self, X, y, sample_weight=None):
"""Build a survival support vector machine model from training data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
sample_weight : array-like, shape = (n_samples,), optional
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
Returns
-------
self
"""
random_state = check_random_state(self.random_state)
x_pairs, y_pairs = self._get_survival_pairs(X, y, random_state)
self.C = self.alpha
return super().fit(x_pairs, y_pairs, sample_weight=sample_weight)
|
Check that array correctly represents an outcome for survival analysis.
Parameters
----------
y_or_event : structured array with two fields, or boolean array
If a structured array, it must contain the binary event indicator
as first field, and time of event or time of censoring as
second field. Otherwise, it is assumed that a boolean array
representing the event indicator is passed.
*args : list of array-likes
Any number of array-like objects representing time information.
Elements that are `None` are passed along in the return value.
allow_all_censored : bool, optional, default: False
Whether to allow all events to be censored.
Returns
-------
event : array, shape=[n_samples,], dtype=bool
Binary event indicator.
time : array, shape=[n_samples,], dtype=float
Time of event or censoring.
|
def check_y_survival(y_or_event, *args, allow_all_censored=False):
"""Check that array correctly represents an outcome for survival analysis.
Parameters
----------
y_or_event : structured array with two fields, or boolean array
If a structured array, it must contain the binary event indicator
as first field, and time of event or time of censoring as
second field. Otherwise, it is assumed that a boolean array
representing the event indicator is passed.
*args : list of array-likes
Any number of array-like objects representing time information.
Elements that are `None` are passed along in the return value.
allow_all_censored : bool, optional, default: False
Whether to allow all events to be censored.
Returns
-------
event : array, shape=[n_samples,], dtype=bool
Binary event indicator.
time : array, shape=[n_samples,], dtype=float
Time of event or censoring.
"""
if len(args) == 0:
y = y_or_event
if not isinstance(y, numpy.ndarray) or y.dtype.fields is None or len(y.dtype.fields) != 2:
raise ValueError('y must be a structured array with the first field'
' being a binary class event indicator and the second field'
' the time of the event/censoring')
event_field, time_field = y.dtype.names
y_event = y[event_field]
time_args = (y[time_field],)
else:
y_event = numpy.asanyarray(y_or_event)
time_args = args
event = check_array(y_event, ensure_2d=False)
if not numpy.issubdtype(event.dtype, numpy.bool_):
raise ValueError('elements of event indicator must be boolean, but found {0}'.format(event.dtype))
if not (allow_all_censored or numpy.any(event)):
raise ValueError('all samples are censored')
return_val = [event]
for i, yt in enumerate(time_args):
if yt is None:
return_val.append(yt)
continue
yt = check_array(yt, ensure_2d=False)
if not numpy.issubdtype(yt.dtype, numpy.number):
raise ValueError('time must be numeric, but found {} for argument {}'.format(yt.dtype, i + 2))
return_val.append(yt)
return tuple(return_val)
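A minimal sketch of the expected input (import path assumed):
import numpy
from sksurv.util import check_y_survival  # assumed import path

y = numpy.array([(True, 10.0), (False, 25.0), (True, 7.0)],
                dtype=[("event", "?"), ("time", "<f8")])
event, time = check_y_survival(y)
# event is a boolean array, time a float array of the same length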
|
Check that all arrays have consistent first dimensions.
Parameters
----------
X : array-like
Data matrix containing feature vectors.
y : structured array with two fields
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
kwargs : dict
Additional arguments passed to :func:`sklearn.utils.check_array`.
Returns
-------
X : array, shape=[n_samples, n_features]
Feature vectors.
event : array, shape=[n_samples,], dtype=bool
Binary event indicator.
time : array, shape=[n_samples,], dtype=float
Time of event or censoring.
|
def check_arrays_survival(X, y, **kwargs):
"""Check that all arrays have consistent first dimensions.
Parameters
----------
X : array-like
Data matrix containing feature vectors.
y : structured array with two fields
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
kwargs : dict
Additional arguments passed to :func:`sklearn.utils.check_array`.
Returns
-------
X : array, shape=[n_samples, n_features]
Feature vectors.
event : array, shape=[n_samples,], dtype=bool
Binary event indicator.
time : array, shape=[n_samples,], dtype=float
Time of event or censoring.
"""
event, time = check_y_survival(y)
kwargs.setdefault("dtype", numpy.float64)
X = check_array(X, ensure_min_samples=2, **kwargs)
check_consistent_length(X, event, time)
return X, event, time
|
Alternative to :func:`pandas.concat` that preserves categorical variables.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0, 1, ...}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic
verify_integrity : boolean, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys
names : list, default None
Names for the levels in the resulting hierarchical index
ignore_index : boolean, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note that the index values on the other
axes are still respected in the join.
copy : boolean, default True
If False, do not copy data unnecessarily
Notes
-----
The keys, levels, and names arguments are all optional
Returns
-------
concatenated : type of objects
|
def safe_concat(objs, *args, **kwargs):
"""Alternative to :func:`pandas.concat` that preserves categorical variables.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0, 1, ...}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic
verify_integrity : boolean, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys
names : list, default None
Names for the levels in the resulting hierarchical index
ignore_index : boolean, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note that the index values on the other
axes are still respected in the join.
copy : boolean, default True
If False, do not copy data unnecessarily
Notes
-----
The keys, levels, and names arguments are all optional
Returns
-------
concatenated : type of objects
"""
axis = kwargs.pop("axis", 0)
categories = {}
for df in objs:
if isinstance(df, pandas.Series):
if is_categorical_dtype(df.dtype):
categories[df.name] = {"categories": df.cat.categories, "ordered": df.cat.ordered}
else:
dfc = df.select_dtypes(include=["category"])
for name, s in dfc.iteritems():
if name in categories:
if axis == 1:
raise ValueError("duplicate columns %s" % name)
if not categories[name]["categories"].equals(s.cat.categories):
raise ValueError("categories for column %s do not match" % name)
else:
categories[name] = {"categories": s.cat.categories, "ordered": s.cat.ordered}
df[name] = df[name].astype(object)
concatenated = pandas.concat(objs, *args, axis=axis, **kwargs)
for name, params in categories.items():
concatenated[name] = pandas.Categorical(concatenated[name], **params)
return concatenated
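A usage sketch (import path and data are assumptions):
import pandas
from sksurv.util import safe_concat  # assumed import path

a = pandas.DataFrame({"grade": pandas.Categorical(["I", "II"]), "age": [60.0, 71.0]})
b = pandas.DataFrame({"grade": pandas.Categorical(["II", "I"]), "age": [55.0, 66.0]})
combined = safe_concat((a, b), axis=0)
# unlike plain pandas.concat on mixed inputs, "grade" is still categorical afterwards
print(combined.dtypes)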
|
Create structured array.
Parameters
----------
event : array-like
Event indicator. A boolean array or array with values 0/1.
time : array-like
Observed time.
name_event : str|None
Name of event, optional, default: 'event'
name_time : str|None
Name of observed time, optional, default: 'time'
Returns
-------
y : np.array
Structured array with two fields.
|
def from_arrays(event, time, name_event=None, name_time=None):
"""Create structured array.
Parameters
----------
event : array-like
Event indicator. A boolean array or array with values 0/1.
time : array-like
Observed time.
name_event : str|None
Name of event, optional, default: 'event'
name_time : str|None
Name of observed time, optional, default: 'time'
Returns
-------
y : np.array
Structured array with two fields.
"""
name_event = name_event or 'event'
name_time = name_time or 'time'
if name_time == name_event:
raise ValueError('name_time must be different from name_event')
time = numpy.asanyarray(time, dtype=numpy.float_)
y = numpy.empty(time.shape[0],
dtype=[(name_event, numpy.bool_), (name_time, numpy.float_)])
y[name_time] = time
event = numpy.asanyarray(event)
check_consistent_length(time, event)
if numpy.issubdtype(event.dtype, numpy.bool_):
y[name_event] = event
else:
events = numpy.unique(event)
events.sort()
if len(events) != 2:
raise ValueError('event indicator must be binary')
if numpy.all(events == numpy.array([0, 1], dtype=events.dtype)):
y[name_event] = event.astype(numpy.bool_)
else:
raise ValueError('non-boolean event indicator must contain 0 and 1 only')
return y
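A usage sketch (import path assumed, data made up):
import numpy
from sksurv.util import Surv  # assumed import path

event = numpy.array([1, 0, 1, 0])
time = numpy.array([12.0, 30.5, 7.2, 18.0])
y = Surv.from_arrays(event, time)
# y is a structured array with boolean field "event" and float field "time"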
|
Create structured array from data frame.
Parameters
----------
event : object
Identifier of column containing event indicator.
time : object
Identifier of column containing time.
data : pandas.DataFrame
Dataset.
Returns
-------
y : np.array
Structured array with two fields.
|
def from_dataframe(event, time, data):
"""Create structured array from data frame.
Parameters
----------
event : object
Identifier of column containing event indicator.
time : object
Identifier of column containing time.
data : pandas.DataFrame
Dataset.
Returns
-------
y : np.array
Structured array with two fields.
"""
if not isinstance(data, pandas.DataFrame):
raise TypeError(
"exepected pandas.DataFrame, but got {!r}".format(type(data)))
return Surv.from_arrays(
data.loc[:, event].values,
data.loc[:, time].values,
name_event=str(event),
name_time=str(time))
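A usage sketch (import path and column names are assumptions):
import pandas
from sksurv.util import Surv  # assumed import path

data = pandas.DataFrame({"status": [True, False, True],
                         "days": [12.0, 30.5, 7.2]})
y = Surv.from_dataframe("status", "days", data)
# equivalent to Surv.from_arrays(data["status"], data["days"], "status", "days")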
|
Negative gradient of partial likelihood
Parameters
----------
y : tuple, len = 2
First element is boolean event indicator and second element survival/censoring time.
y_pred : np.ndarray, shape = (n,)
The predictions.
|
def negative_gradient(self, y, y_pred, sample_weight=None, **kwargs):
"""Negative gradient of partial likelihood
Parameters
----------
y : tuple, len = 2
First element is boolean event indicator and second element survival/censoring time.
y_pred : np.ndarray, shape = (n,)
The predictions.
"""
ret = coxph_negative_gradient(y['event'].astype(numpy.uint8), y['time'], y_pred.ravel())
if sample_weight is not None:
ret *= sample_weight
return ret
|
Least squares does not need to update terminal regions.
But it has to update the predictions.
|
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
|
Least squares does not need to update terminal regions
|
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Least squares does not need to update terminal regions"""
|
Negative gradient of partial likelihood
Parameters
----------
y : tuple, len = 2
First element is boolean event indicator and second element survival/censoring time.
y_pred : np.ndarray, shape = (n,)
The predictions.
|
def negative_gradient(self, y, y_pred, sample_weight=None, **kwargs):
"""Negative gradient of partial likelihood
Parameters
----------
y : tuple, len = 2
First element is boolean event indicator and second element survival/censoring time.
y_pred : np.ndarray, shape = (n,)
The predictions.
"""
pred_time = y['time'] - y_pred.ravel()
mask = (pred_time > 0) | y['event']
ret = numpy.zeros(y['event'].shape[0])
ret[mask] = pred_time.compress(mask, axis=0)
if sample_weight is not None:
ret *= sample_weight
return ret
|
Modify the extensions to build from the .c and .cpp files.
This is useful for releases, this way cython is not required to
run python setup.py install.
|
def build_from_c_and_cpp_files(extensions):
"""Modify the extensions to build from the .c and .cpp files.
This is useful for releases, this way cython is not required to
run python setup.py install.
"""
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources = sources
|
Tweaks for building extensions between release and development mode.
|
def maybe_cythonize_extensions(top_path, config):
"""Tweaks for building extensions between release and development mode."""
is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO'))
if is_release:
build_from_c_and_cpp_files(config.ext_modules)
else:
message = ('Please install cython with a version >= {0} in order '
'to build a scikit-survival development version.').format(
CYTHON_MIN_VERSION)
try:
import Cython
if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:
message += ' Your version of Cython was {0}.'.format(
Cython.__version__)
raise ValueError(message)
from Cython.Build import cythonize
except ImportError as exc:
exc.args += (message,)
raise
# http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#cythonize-arguments
directives = {'language_level': '3'}
cy_cov = os.environ.get('CYTHON_COVERAGE', False)
if cy_cov:
directives['linetrace'] = True
macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')]
else:
macros = []
config.ext_modules = cythonize(
config.ext_modules,
compiler_directives=directives)
for e in config.ext_modules:
e.define_macros.extend(macros)
|
Return dict mapping relevance level to sample index
|
def _count_values(self):
"""Return dict mapping relevance level to sample index"""
indices = {yi: [i] for i, yi in enumerate(self.y) if self.status[i]}
return indices
|
Split into intercept/bias and feature-specific coefficients
|
def _split_coefficents(self, w):
"""Split into intercept/bias and feature-specific coefficients"""
if self._fit_intercept:
bias = w[0]
wf = w[1:]
else:
bias = 0.0
wf = w
return bias, wf
|
Samples are ordered by relevance
|
def _create_optimizer(self, X, y, status):
"""Samples are ordered by relevance"""
if self.optimizer is None:
self.optimizer = 'avltree'
times, ranks = y
if self.optimizer == 'simple':
optimizer = SimpleOptimizer(X, status, self.alpha, self.rank_ratio, timeit=self.timeit)
elif self.optimizer == 'PRSVM':
optimizer = PRSVMOptimizer(X, status, self.alpha, self.rank_ratio, timeit=self.timeit)
elif self.optimizer == 'direct-count':
optimizer = LargeScaleOptimizer(self.alpha, self.rank_ratio, self.fit_intercept,
SurvivalCounter(X, ranks, status, len(ranks), times), timeit=self.timeit)
elif self.optimizer == 'rbtree':
optimizer = LargeScaleOptimizer(self.alpha, self.rank_ratio, self.fit_intercept,
OrderStatisticTreeSurvivalCounter(X, ranks, status, RBTree, times),
timeit=self.timeit)
elif self.optimizer == 'avltree':
optimizer = LargeScaleOptimizer(self.alpha, self.rank_ratio, self.fit_intercept,
OrderStatisticTreeSurvivalCounter(X, ranks, status, AVLTree, times),
timeit=self.timeit)
else:
raise ValueError('unknown optimizer: {0}'.format(self.optimizer))
return optimizer
|
Build a survival support vector machine model from training data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
|
def fit(self, X, y):
"""Build a survival support vector machine model from training data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
X, event, time = check_arrays_survival(X, y)
if self.alpha <= 0:
raise ValueError("alpha must be positive")
if not 0 <= self.rank_ratio <= 1:
raise ValueError("rank_ratio must be in [0; 1]")
if self.fit_intercept and self.rank_ratio == 1.0:
raise ValueError("fit_intercept=True is only meaningful if rank_ratio < 1.0")
if self.rank_ratio < 1.0:
if self.optimizer in {'simple', 'PRSVM'}:
raise ValueError("optimizer '%s' does not implement regression objective" % self.optimizer)
if (time <= 0).any():
raise ValueError("observed time contains values smaller or equal to zero")
# log-transform time
time = numpy.log(time)
assert numpy.isfinite(time).all()
random_state = check_random_state(self.random_state)
samples_order = BaseSurvivalSVM._argsort_and_resolve_ties(time, random_state)
opt_result = self._fit(X, time, event, samples_order)
coef = opt_result.x
if self.fit_intercept:
self.coef_ = coef[1:]
self.intercept_ = coef[0]
else:
self.coef_ = coef
if not opt_result.success:
warnings.warn(('Optimization did not converge: ' + opt_result.message),
category=ConvergenceWarning,
stacklevel=2)
self.optimizer_result_ = opt_result
return self
|
Like numpy.argsort, but resolves ties uniformly at random
|
def _argsort_and_resolve_ties(time, random_state):
"""Like numpy.argsort, but resolves ties uniformly at random"""
n_samples = len(time)
order = numpy.argsort(time, kind="mergesort")
i = 0
while i < n_samples - 1:
inext = i + 1
while inext < n_samples and time[order[i]] == time[order[inext]]:
inext += 1
if i + 1 != inext:
# resolve ties randomly
random_state.shuffle(order[i:inext])
i = inext
return order
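To illustrate the behaviour on a toy array (a standalone sketch, not the library's code):
import numpy

time = numpy.array([3.0, 1.0, 3.0, 2.0])
rng = numpy.random.RandomState(0)
order = numpy.argsort(time, kind="mergesort")   # [1, 3, 0, 2]
# samples 0 and 2 are tied at time 3.0 and occupy positions 2 and 3 of `order`;
# shuffling that slice in place resolves the tie uniformly at random,
# which is what _argsort_and_resolve_ties does for every run of equal times
rng.shuffle(order[2:4])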
|
Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
|
def predict(self, X):
"""Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
"""
val = numpy.dot(X, self.coef_)
if hasattr(self, "intercept_"):
val += self.intercept_
# Order by increasing survival time if objective is pure ranking
if self.rank_ratio == 1:
val *= -1
else:
# model was fitted on log(time), transform to original scale
val = numpy.exp(val)
return val
|
Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
|
def predict(self, X):
"""Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
"""
kernel_mat = self._get_kernel(X, self.fit_X_)
val = numpy.dot(kernel_mat, self.coef_)
if hasattr(self, "intercept_"):
val += self.intercept_
# Order by increasing survival time if objective is pure ranking
if self.rank_ratio == 1:
val *= -1
else:
# model was fitted on log(time), transform to original scale
val = numpy.exp(val)
return val
|
Build an accelerated failure time model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
|
def fit(self, X, y):
"""Build an accelerated failure time model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
X, event, time = check_arrays_survival(X, y)
weights = ipc_weights(event, time)
super().fit(X, numpy.log(time), sample_weight=weights)
return self
|
Compute baseline cumulative hazard function.
Parameters
----------
linear_predictor : array-like, shape = (n_samples,)
Linear predictor of risk: `X @ coef`.
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time : array-like, shape = (n_samples,)
Contains event/censoring times.
Returns
-------
self
|
def fit(self, linear_predictor, event, time):
"""Compute baseline cumulative hazard function.
Parameters
----------
linear_predictor : array-like, shape = (n_samples,)
Linear predictor of risk: `X @ coef`.
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time : array-like, shape = (n_samples,)
Contains event/censoring times.
Returns
-------
self
"""
risk_score = numpy.exp(linear_predictor)
order = numpy.argsort(time, kind="mergesort")
risk_score = risk_score[order]
uniq_times, n_events, n_at_risk = _compute_counts(event, time, order)
divisor = numpy.empty(n_at_risk.shape, dtype=numpy.float_)
value = numpy.sum(risk_score)
divisor[0] = value
k = 0
for i in range(1, len(n_at_risk)):
d = n_at_risk[i - 1] - n_at_risk[i]
value -= risk_score[k:(k + d)].sum()
k += d
divisor[i] = value
assert k == n_at_risk[0] - n_at_risk[-1]
y = numpy.cumsum(n_events / divisor)
self.cum_baseline_hazard_ = StepFunction(uniq_times, y)
self.baseline_survival_ = StepFunction(self.cum_baseline_hazard_.x,
numpy.exp(- self.cum_baseline_hazard_.y))
return self
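For reference, the quantity computed above appears to correspond to the Breslow estimate of the baseline cumulative hazard (a sketch in LaTeX notation; the symbols are not part of the original code):
\hat{H}_0(t) = \sum_{i : t_i \le t} \frac{d_i}{\sum_{j \in R(t_i)} \exp(x_j^\top \hat{\beta})}, \qquad \hat{S}_0(t) = \exp\bigl(-\hat{H}_0(t)\bigr),
where d_i is the number of events at the unique time t_i and R(t_i) is the set of samples still at risk at t_i; the `divisor` array accumulates the denominator by subtracting the risk scores of samples that have left the risk set.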
|
Predict cumulative hazard function.
Parameters
----------
linear_predictor : array-like, shape = (n_samples,)
Linear predictor of risk: `X @ coef`.
Returns
-------
cum_hazard : ndarray, shape = (n_samples,)
Predicted cumulative hazard functions.
|
def get_cumulative_hazard_function(self, linear_predictor):
"""Predict cumulative hazard function.
Parameters
----------
linear_predictor : array-like, shape = (n_samples,)
Linear predictor of risk: `X @ coef`.
Returns
-------
cum_hazard : ndarray, shape = (n_samples,)
Predicted cumulative hazard functions.
"""
risk_score = numpy.exp(linear_predictor)
n_samples = risk_score.shape[0]
funcs = numpy.empty(n_samples, dtype=numpy.object_)
for i in range(n_samples):
funcs[i] = StepFunction(x=self.cum_baseline_hazard_.x,
y=self.cum_baseline_hazard_.y,
a=risk_score[i])
return funcs
|
Predict survival function.
Parameters
----------
linear_predictor : array-like, shape = (n_samples,)
Linear predictor of risk: `X @ coef`.
Returns
-------
survival : ndarray, shape = (n_samples,)
Predicted survival functions.
|
def get_survival_function(self, linear_predictor):
"""Predict survival function.
Parameters
----------
linear_predictor : array-like, shape = (n_samples,)
Linear predictor of risk: `X @ coef`.
Returns
-------
survival : ndarray, shape = (n_samples,)
Predicted survival functions.
"""
risk_score = numpy.exp(linear_predictor)
n_samples = risk_score.shape[0]
funcs = numpy.empty(n_samples, dtype=numpy.object_)
for i in range(n_samples):
funcs[i] = StepFunction(x=self.baseline_survival_.x,
y=numpy.power(self.baseline_survival_.y, risk_score[i]))
return funcs
|
Compute negative partial log-likelihood
Parameters
----------
w : array, shape = (n_features,)
Estimate of coefficients
Returns
-------
loss : float
Average negative partial log-likelihood
|
def nlog_likelihood(self, w):
"""Compute negative partial log-likelihood
Parameters
----------
w : array, shape = (n_features,)
Estimate of coefficients
Returns
-------
loss : float
Average negative partial log-likelihood
"""
time = self.time
n_samples = self.x.shape[0]
xw = numpy.dot(self.x, w)
loss = 0
risk_set = 0
k = 0
for i in range(n_samples):
ti = time[i]
while k < n_samples and ti == time[k]:
risk_set += numpy.exp(xw[k])
k += 1
if self.event[i]:
loss -= (xw[i] - numpy.log(risk_set)) / n_samples
# add regularization term to log-likelihood
return loss + self.alpha * squared_norm(w) / (2. * n_samples)
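Assuming the rows of `self.x` are ordered so that the running sum `risk_set` forms the risk set R(t_i), the value returned above is the ridge-penalized average negative partial log-likelihood (a sketch in LaTeX notation):
-\frac{1}{n} \sum_{i : \delta_i = 1} \Bigl( x_i^\top w - \log \sum_{j \in R(t_i)} \exp(x_j^\top w) \Bigr) + \frac{\alpha}{2n} \lVert w \rVert_2^2,
where \delta_i is the event indicator, t_i the observed time, and n the number of samples.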
|
Compute gradient and Hessian matrix with respect to `w`.
|
def update(self, w, offset=0):
"""Compute gradient and Hessian matrix with respect to `w`."""
time = self.time
x = self.x
exp_xw = numpy.exp(offset + numpy.dot(x, w))
n_samples, n_features = x.shape
gradient = numpy.zeros((1, n_features), dtype=float)
hessian = numpy.zeros((n_features, n_features), dtype=float)
inv_n_samples = 1. / n_samples
risk_set = 0
risk_set_x = 0
risk_set_xx = 0
k = 0
# iterate time in descending order
for i in range(n_samples):
ti = time[i]
while k < n_samples and ti == time[k]:
risk_set += exp_xw[k]
# preserve 2D shape of row vector
xk = x[k:k + 1]
risk_set_x += exp_xw[k] * xk
# outer product
xx = numpy.dot(xk.T, xk)
risk_set_xx += exp_xw[k] * xx
k += 1
if self.event[i]:
gradient -= (x[i:i + 1] - risk_set_x / risk_set) * inv_n_samples
a = risk_set_xx / risk_set
z = risk_set_x / risk_set
# outer product
b = numpy.dot(z.T, z)
hessian += (a - b) * inv_n_samples
if self.alpha > 0:
gradient += self.alpha * inv_n_samples * w
diag_idx = numpy.diag_indices(n_features)
hessian[diag_idx] += self.alpha * inv_n_samples
self.gradient = gradient.ravel()
self.hessian = hessian
|
Minimize negative partial log-likelihood for provided data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
|
def fit(self, X, y):
"""Minimize negative partial log-likelihood for provided data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
X, event, time = check_arrays_survival(X, y)
if self.alpha < 0:
raise ValueError("alpha must be positive, but was %r" % self.alpha)
optimizer = CoxPHOptimizer(X, event, time, self.alpha)
verbose_reporter = VerboseReporter(self.verbose)
w = numpy.zeros(X.shape[1])
w_prev = w
i = 0
loss = float('inf')
while True:
if i >= self.n_iter:
verbose_reporter.end_max_iter(i)
warnings.warn(('Optimization did not converge: Maximum number of iterations has been exceeded.'),
stacklevel=2, category=ConvergenceWarning)
break
optimizer.update(w)
delta = solve(optimizer.hessian, optimizer.gradient,
overwrite_a=False, overwrite_b=False, check_finite=False)
if not numpy.all(numpy.isfinite(delta)):
raise ValueError("search direction contains NaN or infinite values")
w_new = w - delta
loss_new = optimizer.nlog_likelihood(w_new)
verbose_reporter.update(i, delta, loss_new)
if loss_new > loss:
# perform step-halving if negative log-likelihood does not decrease
w = (w_prev + w) / 2
loss = optimizer.nlog_likelihood(w)
verbose_reporter.step_halving(i, loss)
i += 1
continue
w_prev = w
w = w_new
res = numpy.abs(1 - (loss_new / loss))
if res < self.tol:
verbose_reporter.end_converged(i)
break
loss = loss_new
i += 1
self.coef_ = w
self._baseline_model.fit(numpy.dot(X, self.coef_), event, time)
return self
|
Predict risk scores.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
Returns
-------
risk_score : array, shape = (n_samples,)
Predicted risk scores.
|
def predict(self, X):
"""Predict risk scores.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
Returns
-------
risk_score : array, shape = (n_samples,)
Predicted risk scores.
"""
check_is_fitted(self, "coef_")
X = numpy.atleast_2d(X)
return numpy.dot(X, self.coef_)
|
Count right censored and uncensored samples at each unique time point.
Parameters
----------
event : array
Boolean event indicator.
time : array
Survival time or time of censoring.
order : array or None
Indices to order time in ascending order.
If None, order will be computed.
Returns
-------
times : array
Unique time points.
n_events : array
Number of events at each time point.
n_at_risk : array
Number of samples at risk, i.e. that have an event or are censored at or after each time point.
|
def _compute_counts(event, time, order=None):
"""Count right censored and uncensored samples at each unique time point.
Parameters
----------
event : array
Boolean event indicator.
time : array
Survival time or time of censoring.
order : array or None
Indices to order time in ascending order.
If None, order will be computed.
Returns
-------
times : array
Unique time points.
n_events : array
Number of events at each time point.
n_at_risk : array
Number of samples at risk, i.e. that have an event or are censored at or after each time point.
"""
n_samples = event.shape[0]
if order is None:
order = numpy.argsort(time, kind="mergesort")
uniq_times = numpy.empty(n_samples, dtype=time.dtype)
uniq_events = numpy.empty(n_samples, dtype=numpy.int_)
uniq_counts = numpy.empty(n_samples, dtype=numpy.int_)
i = 0
prev_val = time[order[0]]
j = 0
while True:
count_event = 0
count = 0
while i < n_samples and prev_val == time[order[i]]:
if event[order[i]]:
count_event += 1
count += 1
i += 1
uniq_times[j] = prev_val
uniq_events[j] = count_event
uniq_counts[j] = count
j += 1
if i == n_samples:
break
prev_val = time[order[i]]
times = numpy.resize(uniq_times, j)
n_events = numpy.resize(uniq_events, j)
total_count = numpy.resize(uniq_counts, j)
# offset cumulative sum by one
total_count = numpy.concatenate(([0], total_count))
n_at_risk = n_samples - numpy.cumsum(total_count)
return times, n_events, n_at_risk[:-1]
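A small worked example (assuming the helper above is in scope; values are made up):
import numpy

event = numpy.array([True, False, True])
time = numpy.array([2.0, 2.0, 5.0])
times, n_events, n_at_risk = _compute_counts(event, time)
# times     -> [2.0, 5.0]
# n_events  -> [1, 1]   (one event at t=2, one at t=5)
# n_at_risk -> [3, 1]   (all 3 samples at risk at t=2, only 1 still at risk at t=5)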
|
Compute counts for left truncated and right censored survival data.
Parameters
----------
event : array
Boolean event indicator.
time_enter : array
Time when a subject entered the study.
time_exit : array
Time when a subject left the study due to an
event or censoring.
Returns
-------
times : array
Unique time points.
n_events : array
Number of events at each time point.
n_at_risk : array
Number of samples in the risk set (entered the study and not yet exited) at each time point.
|
def _compute_counts_truncated(event, time_enter, time_exit):
"""Compute counts for left truncated and right censored survival data.
Parameters
----------
event : array
Boolean event indicator.
time_enter : array
Time when a subject entered the study.
time_exit : array
Time when a subject left the study due to an
event or censoring.
Returns
-------
times : array
Unique time points.
n_events : array
Number of events at each time point.
n_at_risk : array
Number of samples in the risk set (entered the study and not yet exited) at each time point.
"""
if (time_enter > time_exit).any():
raise ValueError("exit time must be larger start time for all samples")
n_samples = event.shape[0]
uniq_times = numpy.sort(numpy.unique(numpy.concatenate((time_enter, time_exit))), kind="mergesort")
total_counts = numpy.empty(len(uniq_times), dtype=numpy.int_)
event_counts = numpy.empty(len(uniq_times), dtype=numpy.int_)
order_enter = numpy.argsort(time_enter, kind="mergesort")
order_exit = numpy.argsort(time_exit, kind="mergesort")
s_time_enter = time_enter[order_enter]
s_time_exit = time_exit[order_exit]
t0 = uniq_times[0]
# everything larger is included
idx_enter = numpy.searchsorted(s_time_enter, t0, side="right")
# everything smaller is excluded
idx_exit = numpy.searchsorted(s_time_exit, t0, side="left")
total_counts[0] = idx_enter
# this assumes nobody dies on the day they enter the study
event_counts[0] = 0
for i in range(1, len(uniq_times)):
ti = uniq_times[i]
while idx_enter < n_samples and s_time_enter[idx_enter] <= ti:
idx_enter += 1
while idx_exit < n_samples and s_time_exit[idx_exit] < ti:
idx_exit += 1
risk_set = numpy.setdiff1d(order_enter[:idx_enter], order_exit[:idx_exit], assume_unique=True)
total_counts[i] = len(risk_set)
count_event = 0
k = idx_exit
while k < n_samples and s_time_exit[k] == ti:
if event[order_exit[k]]:
count_event += 1
k += 1
event_counts[i] = count_event
return uniq_times, event_counts, total_counts
|
Kaplan-Meier estimator of survival function.
Parameters
----------
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time_exit : array-like, shape = (n_samples,)
Contains event/censoring times.
time_enter : array-like, shape = (n_samples,), optional
Contains time when each individual entered the study for
left truncated survival data.
time_min : float, optional
Compute estimator conditional on survival at least up to
the specified time.
Returns
-------
time : array, shape = (n_times,)
Unique times.
prob_survival : array, shape = (n_times,)
Survival probability at each unique time point.
If `time_enter` is provided, estimates are conditional probabilities.
Examples
--------
Creating a Kaplan-Meier curve:
>>> x, y = kaplan_meier_estimator(event, time)
>>> plt.step(x, y, where="post")
>>> plt.ylim(0, 1)
>>> plt.show()
References
----------
.. [1] Kaplan, E. L. and Meier, P., "Nonparametric estimation from incomplete observations",
Journal of The American Statistical Association, vol. 53, pp. 457-481, 1958.
|
def kaplan_meier_estimator(event, time_exit, time_enter=None, time_min=None):
"""Kaplan-Meier estimator of survival function.
Parameters
----------
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time_exit : array-like, shape = (n_samples,)
Contains event/censoring times.
time_enter : array-like, shape = (n_samples,), optional
Contains time when each individual entered the study for
left truncated survival data.
time_min : float, optional
Compute estimator conditional on survival at least up to
the specified time.
Returns
-------
time : array, shape = (n_times,)
Unique times.
prob_survival : array, shape = (n_times,)
Survival probability at each unique time point.
If `time_enter` is provided, estimates are conditional probabilities.
Examples
--------
Creating a Kaplan-Meier curve:
>>> x, y = kaplan_meier_estimator(event, time)
>>> plt.step(x, y, where="post")
>>> plt.ylim(0, 1)
>>> plt.show()
References
----------
.. [1] Kaplan, E. L. and Meier, P., "Nonparametric estimation from incomplete observations",
Journal of The American Statistical Association, vol. 53, pp. 457-481, 1958.
"""
event, time_enter, time_exit = check_y_survival(event, time_enter, time_exit, allow_all_censored=True)
check_consistent_length(event, time_enter, time_exit)
if time_enter is None:
uniq_times, n_events, n_at_risk = _compute_counts(event, time_exit)
else:
uniq_times, n_events, n_at_risk = _compute_counts_truncated(event, time_enter, time_exit)
values = 1 - n_events / n_at_risk
if time_min is not None:
mask = uniq_times >= time_min
uniq_times = numpy.compress(mask, uniq_times)
values = numpy.compress(mask, values)
y = numpy.cumprod(values)
return uniq_times, y
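A complete version of the docstring example (import path and data are assumptions):
import numpy
import matplotlib.pyplot as plt
from sksurv.nonparametric import kaplan_meier_estimator  # assumed import path

event = numpy.array([True, False, True, True, False, True])
time = numpy.array([5.0, 6.0, 2.0, 8.0, 9.0, 4.0])
x, y = kaplan_meier_estimator(event, time)
plt.step(x, y, where="post")  # survival curves are right-continuous step functions
plt.ylim(0, 1)
plt.show()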
|
Nelson-Aalen estimator of cumulative hazard function.
Parameters
----------
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time : array-like, shape = (n_samples,)
Contains event/censoring times.
Returns
-------
time : array, shape = (n_times,)
Unique times.
cum_hazard : array, shape = (n_times,)
Cumulative hazard at each unique time point.
References
----------
.. [1] Nelson, W., "Theory and applications of hazard plotting for censored failure data",
Technometrics, vol. 14, pp. 945-965, 1972.
.. [2] Aalen, O. O., "Nonparametric inference for a family of counting processes",
Annals of Statistics, vol. 6, pp. 701–726, 1978.
|
def nelson_aalen_estimator(event, time):
"""Nelson-Aalen estimator of cumulative hazard function.
Parameters
----------
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time : array-like, shape = (n_samples,)
Contains event/censoring times.
Returns
-------
time : array, shape = (n_times,)
Unique times.
cum_hazard : array, shape = (n_times,)
Cumulative hazard at each unique time point.
References
----------
.. [1] Nelson, W., "Theory and applications of hazard plotting for censored failure data",
Technometrics, vol. 14, pp. 945-965, 1972.
.. [2] Aalen, O. O., "Nonparametric inference for a family of counting processes",
Annals of Statistics, vol. 6, pp. 701–726, 1978.
"""
event, time = check_y_survival(event, time)
check_consistent_length(event, time)
uniq_times, n_events, n_at_risk = _compute_counts(event, time)
y = numpy.cumsum(n_events / n_at_risk)
return uniq_times, y
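A usage sketch (import path and data are assumptions):
import numpy
from sksurv.nonparametric import nelson_aalen_estimator  # assumed import path

event = numpy.array([True, False, True, True])
time = numpy.array([3.0, 4.0, 4.5, 7.0])
x, chf = nelson_aalen_estimator(event, time)
# chf[i] is the estimated cumulative hazard up to and including time x[i]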
|
Compute inverse probability of censoring weights
Parameters
----------
event : array, shape = (n_samples,)
Boolean event indicator.
time : array, shape = (n_samples,)
Time when a subject experienced an event or was censored.
Returns
-------
weights : array, shape = (n_samples,)
inverse probability of censoring weights
|
def ipc_weights(event, time):
"""Compute inverse probability of censoring weights
Parameters
----------
event : array, shape = (n_samples,)
Boolean event indicator.
time : array, shape = (n_samples,)
Time when a subject experienced an event or was censored.
Returns
-------
weights : array, shape = (n_samples,)
inverse probability of censoring weights
"""
if event.all():
return numpy.ones(time.shape[0])
unique_time, p = kaplan_meier_estimator(~event, time)
idx = numpy.searchsorted(unique_time, time[event])
Ghat = p[idx]
assert (Ghat > 0).all()
weights = numpy.zeros(time.shape[0])
weights[event] = 1.0 / Ghat
return weights
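A small example (assuming ipc_weights, as defined above, and kaplan_meier_estimator are in scope):
import numpy

event = numpy.array([True, False, True, True])
time = numpy.array([2.0, 3.0, 5.0, 6.0])
w = ipc_weights(event, time)
# censored samples get weight 0; each uncensored sample is weighted by 1 / G(t_i),
# the Kaplan-Meier estimate of the censoring distribution at its event time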
|
Estimate survival distribution from training data.
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
|
def fit(self, y):
"""Estimate survival distribution from training data.
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
event, time = check_y_survival(y, allow_all_censored=True)
unique_time, prob = kaplan_meier_estimator(event, time)
self.unique_time_ = numpy.concatenate(([-numpy.infty], unique_time))
self.prob_ = numpy.concatenate(([1.], prob))
return self
|
Return probability of an event after given time point.
:math:`\\hat{S}(t) = P(T > t)`
Parameters
----------
time : array, shape = (n_samples,)
Time to estimate probability at.
Returns
-------
prob : array, shape = (n_samples,)
Probability of an event.
|
def predict_proba(self, time):
"""Return probability of an event after given time point.
:math:`\\hat{S}(t) = P(T > t)`
Parameters
----------
time : array, shape = (n_samples,)
Time to estimate probability at.
Returns
-------
prob : array, shape = (n_samples,)
Probability of an event.
"""
check_is_fitted(self, "unique_time_")
time = check_array(time, ensure_2d=False)
# K-M is undefined if estimate at last time point is non-zero
extends = time > self.unique_time_[-1]
if self.prob_[-1] > 0 and extends.any():
raise ValueError("time must be smaller than largest "
"observed time point: {}".format(self.unique_time_[-1]))
# beyond last time point is zero probability
Shat = numpy.empty(time.shape, dtype=float)
Shat[extends] = 0.0
valid = ~extends
time = time[valid]
idx = numpy.searchsorted(self.unique_time_, time)
# for non-exact matches, we need to shift the index to left
eps = numpy.finfo(self.unique_time_.dtype).eps
exact = numpy.absolute(self.unique_time_[idx] - time) < eps
idx[~exact] -= 1
Shat[valid] = self.prob_[idx]
return Shat
|
Estimate censoring distribution from training data.
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
|
def fit(self, y):
"""Estimate censoring distribution from training data.
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
event, time = check_y_survival(y)
if event.all():
self.unique_time_ = numpy.unique(time)
self.prob_ = numpy.ones(self.unique_time_.shape[0])
else:
unique_time, prob = kaplan_meier_estimator(~event, time)
self.unique_time_ = numpy.concatenate(([-numpy.infty], unique_time))
self.prob_ = numpy.concatenate(([1.], prob))
return self
|