repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
5monkeys/content-io | cio/utils/formatters.py | ContentFormatter._inject_format_spec | python | def _inject_format_spec(self, value, format_spec):
t = type(value)
return value[:-1] + t(u':') + format_spec + t(u'}') | value: '{x}', format_spec: 'f' -> '{x:f}' | train | https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/utils/formatters.py#L70-L75 | null | class ContentFormatter(Formatter):
"""
ContentFormatter uses string formatting as a template engine,
not raising key/index/value errors, and keeps braces and variable-like parts in place.
"""
def get_value(self, key, args, kwargs):
try:
return super(ContentFormatter, self).get_value(key, args, kwargs)
except (IndexError, KeyError):
if (PY26 or six.PY3) and key == u'\0':
# PY26: Handle of non-indexed variable -> Turn null byte into {}
return type(key)(u'{}')
else:
# PY27: Not a context variable -> Keep braces
return self._brace_key(key)
def convert_field(self, value, conversion):
if conversion and isinstance(value, six.string_types) and value[0] == u'{' and value[-1] == u'}':
# Value is wrapped with braces and therefore not a context variable -> Keep conversion as value
return self._inject_conversion(value, conversion)
else:
return super(ContentFormatter, self).convert_field(value, conversion)
def format_field(self, value, format_spec):
try:
return super(ContentFormatter, self).format_field(value, format_spec)
except ValueError:
# Unable to format value and therefore not a context variable -> Keep format_spec as value
return self._inject_format_spec(value, format_spec)
def parse(self, format_string):
if PY26 or six.PY3:
# PY26 does not support non-indexed variables -> Place null byte for later removal
# PY3 does not like mixing non-indexed and indexed variables to we disable them here too.
format_string = format_string.replace('{}', '{\0}')
parsed_bits = super(ContentFormatter, self).parse(format_string)
# Double braces are treated as escaped -> re-duplicate when parsed
return self._escape(parsed_bits)
def get_field(self, field_name, args, kwargs):
return super(ContentFormatter, self).get_field(field_name, args, kwargs)
def _brace_key(self, key):
"""
key: 'x' -> '{x}'
"""
if isinstance(key, six.integer_types):
t = str
key = t(key)
else:
t = type(key)
return t(u'{') + key + t(u'}')
def _inject_conversion(self, value, conversion):
"""
value: '{x}', conversion: 's' -> '{x!s}'
"""
t = type(value)
return value[:-1] + t(u'!') + conversion + t(u'}')
def _escape(self, bits):
"""
value: 'foobar {' -> 'foobar {{'
value: 'x}' -> 'x}}'
"""
# for value, field_name, format_spec, conversion in bits:
while True:
try:
value, field_name, format_spec, conversion = next(bits)
if value:
end = value[-1]
if end in (u'{', u'}'):
value += end
yield value, field_name, format_spec, conversion
except StopIteration:
break
|
5monkeys/content-io | cio/utils/formatters.py | ContentFormatter._escape | python | def _escape(self, bits):
# for value, field_name, format_spec, conversion in bits:
while True:
try:
value, field_name, format_spec, conversion = next(bits)
if value:
end = value[-1]
if end in (u'{', u'}'):
value += end
yield value, field_name, format_spec, conversion
except StopIteration:
break | value: 'foobar {' -> 'foobar {{'
value: 'x}' -> 'x}}' | train | https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/utils/formatters.py#L77-L92 | null | class ContentFormatter(Formatter):
"""
ContentFormatter uses string formatting as a template engine,
not raising key/index/value errors, and keeps braces and variable-like parts in place.
"""
def get_value(self, key, args, kwargs):
try:
return super(ContentFormatter, self).get_value(key, args, kwargs)
except (IndexError, KeyError):
if (PY26 or six.PY3) and key == u'\0':
# PY26: Handle of non-indexed variable -> Turn null byte into {}
return type(key)(u'{}')
else:
# PY27: Not a context variable -> Keep braces
return self._brace_key(key)
def convert_field(self, value, conversion):
if conversion and isinstance(value, six.string_types) and value[0] == u'{' and value[-1] == u'}':
# Value is wrapped with braces and therefore not a context variable -> Keep conversion as value
return self._inject_conversion(value, conversion)
else:
return super(ContentFormatter, self).convert_field(value, conversion)
def format_field(self, value, format_spec):
try:
return super(ContentFormatter, self).format_field(value, format_spec)
except ValueError:
# Unable to format value and therefore not a context variable -> Keep format_spec as value
return self._inject_format_spec(value, format_spec)
def parse(self, format_string):
if PY26 or six.PY3:
# PY26 does not support non-indexed variables -> Place null byte for later removal
# PY3 does not like mixing non-indexed and indexed variables to we disable them here too.
format_string = format_string.replace('{}', '{\0}')
parsed_bits = super(ContentFormatter, self).parse(format_string)
# Double braces are treated as escaped -> re-duplicate when parsed
return self._escape(parsed_bits)
def get_field(self, field_name, args, kwargs):
return super(ContentFormatter, self).get_field(field_name, args, kwargs)
def _brace_key(self, key):
"""
key: 'x' -> '{x}'
"""
if isinstance(key, six.integer_types):
t = str
key = t(key)
else:
t = type(key)
return t(u'{') + key + t(u'}')
def _inject_conversion(self, value, conversion):
"""
value: '{x}', conversion: 's' -> '{x!s}'
"""
t = type(value)
return value[:-1] + t(u'!') + conversion + t(u'}')
def _inject_format_spec(self, value, format_spec):
"""
value: '{x}', format_spec: 'f' -> '{x:f}'
"""
t = type(value)
return value[:-1] + t(u':') + format_spec + t(u'}')
|
5monkeys/content-io | cio/utils/imports.py | import_class | python | def import_class(import_path, name=None):
if not name:
import_path, name = import_path.rsplit('.', 1)
mod = import_module(import_path)
try:
return getattr(mod, name)
except AttributeError as e:
raise ImportError(e) | Imports and returns class for full class path string.
Ex. 'foo.bar.Bogus' -> <class 'foo.bar.Bogus'> | train | https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/utils/imports.py#L12-L23 | [
"def import_module(package):\n __import__(package)\n return sys.modules[package]\n"
] | # coding=utf-8
from __future__ import unicode_literals
import sys
def import_module(package):
__import__(package)
return sys.modules[package]
|
5monkeys/content-io | cio/utils/uri.py | URI.is_absolute | python | def is_absolute(self):
return self.namespace and self.ext and self.scheme and self.path | Validates that uri contains all parts except version | train | https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/utils/uri.py#L67-L71 | null | class URI(six.text_type):
@staticmethod
def __new__(cls, uri=None, scheme=None, namespace=None, path=None, ext=None, version=None):
if isinstance(uri, URI):
return uri
elif uri is not None:
return URI._parse(uri)
else:
return URI._render(scheme, namespace, path, ext, version)
@classmethod
def _parse(cls, uri):
base, _, version = uri.partition(settings.URI_VERSION_SEPARATOR)
scheme, _, path = base.rpartition(settings.URI_SCHEME_SEPARATOR)
namespace, _, path = path.rpartition(settings.URI_NAMESPACE_SEPARATOR)
_path, _, ext = path.rpartition(settings.URI_EXT_SEPARATOR)
if '/' in ext:
ext = ''
else:
path = _path
if not path and ext:
path, ext = ext, ''
return cls._render(
scheme or settings.URI_DEFAULT_SCHEME,
namespace or None,
path,
ext or None,
version or None
)
@classmethod
def _render(cls, scheme, namespace, path, ext, version):
def parts_gen():
if scheme:
yield scheme
yield settings.URI_SCHEME_SEPARATOR
if namespace:
yield namespace
yield settings.URI_NAMESPACE_SEPARATOR
if path:
yield path
if ext:
yield settings.URI_EXT_SEPARATOR
yield ext
if version:
yield settings.URI_VERSION_SEPARATOR
yield version
uri = six.text_type.__new__(cls, ''.join(parts_gen()))
uri.scheme = scheme
uri.namespace = namespace
uri.path = path
uri.ext = ext
uri.version = version
return uri
def has_parts(self, *parts):
return not any(getattr(self, part, None) is None for part in parts)
def clone(self, **parts):
part = lambda part: parts.get(part, getattr(self, part))
args = (part(p) for p in ('scheme', 'namespace', 'path', 'ext', 'version'))
return URI._render(*args)
class Invalid(Exception):
pass
|
ryanpetrello/cleaver | cleaver/backend/db/__init__.py | SQLAlchemyBackend.all_experiments | python | def all_experiments(self):
try:
return [
self.experiment_factory(e)
for e in model.Experiment.query.all()
]
finally:
self.Session.close() | Retrieve every available experiment.
Returns a list of ``cleaver.experiment.Experiment``s | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/backend/db/__init__.py#L48-L60 | null | class SQLAlchemyBackend(CleaverBackend):
"""
Provides an interface for persisting and retrieving A/B test results
to a SQLAlchemy-supported database.
"""
def __init__(self, dburi='sqlite://', engine_options={}):
self.dburi = dburi
self.engine_options = engine_options
self.Session = session_for(
dburi=self.dburi,
**self.engine_options
)
def experiment_factory(self, experiment):
if experiment is None:
return None
return CleaverExperiment(
backend=self,
name=experiment.name,
started_on=experiment.started_on,
variants=tuple(v.name for v in experiment.variants)
)
def get_experiment(self, name, variants):
"""
Retrieve an experiment by its name and variants (assuming it exists).
:param name a unique string name for the experiment
:param variants a list of strings, each with a unique variant name
Returns a ``cleaver.experiment.Experiment`` or ``None``
"""
try:
return self.experiment_factory(model.Experiment.get_by(name=name))
finally:
self.Session.close()
def save_experiment(self, name, variants):
"""
Persist an experiment and its variants (unless they already exist).
:param name a unique string name for the experiment
:param variants a list of strings, each with a unique variant name
"""
try:
model.Experiment(
name=name,
started_on=datetime.utcnow(),
variants=[
model.Variant(name=v, order=i)
for i, v in enumerate(variants)
]
)
self.Session.commit()
finally:
self.Session.close()
def is_verified_human(self, identity):
try:
return model.VerifiedHuman.get_by(identity=identity) is not None
finally:
self.Session.close()
def mark_human(self, identity):
try:
if model.VerifiedHuman.get_by(identity=identity) is None:
model.VerifiedHuman(identity=identity)
self.Session.commit()
finally:
self.Session.close()
def get_variant(self, identity, experiment_name):
"""
Retrieve the variant for a specific user and experiment (if it exists).
:param identity a unique user identifier
:param experiment_name the string name of the experiment
Returns a ``String`` or `None`
"""
try:
match = model.Participant.query.join(
model.Experiment
).filter(and_(
model.Participant.identity == identity,
model.Experiment.name == experiment_name
)).first()
return match.variant.name if match else None
finally:
self.Session.close()
def set_variant(self, identity, experiment_name, variant_name):
"""
Set the variant for a specific user.
:param identity a unique user identifier
:param experiment_name the string name of the experiment
:param variant_name the string name of the variant
"""
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant and model.Participant.query.filter(and_(
model.Participant.identity == identity,
model.Participant.experiment_id == experiment.id,
model.Participant.variant_id == variant.id
)).count() == 0:
model.Participant(
identity=identity,
experiment=experiment,
variant=variant
)
self.Session.commit()
finally:
self.Session.close()
def _mark_event(self, type, experiment_name, variant_name):
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant and model.TrackedEvent.query.filter(and_(
model.TrackedEvent.type == type,
model.TrackedEvent.experiment_id == experiment.id,
model.TrackedEvent.variant_id == variant.id
)).first() is None:
model.TrackedEvent(
type=type,
experiment=experiment,
variant=variant
)
self.Session.commit()
finally:
self.Session.close()
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant:
self.Session.execute(
'UPDATE %s SET total = total + 1 '
'WHERE experiment_id = :experiment_id '
'AND variant_id = :variant_id '
'AND `type` = :type' % (
model.TrackedEvent.__tablename__
),
{
'experiment_id': experiment.id,
'variant_id': variant.id,
'type': type
}
)
self.Session.commit()
finally:
self.Session.close()
def mark_participant(self, experiment_name, variant):
"""
Mark a participation for a specific experiment variant.
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
self._mark_event('PARTICIPANT', experiment_name, variant)
def mark_conversion(self, experiment_name, variant):
"""
Mark a conversion for a specific experiment variant.
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
self._mark_event('CONVERSION', experiment_name, variant)
def _total_events(self, type, experiment_name, variant):
try:
row = model.TrackedEvent.query.join(
model.Experiment
).join(
model.Variant
).filter(and_(
model.TrackedEvent.type == type,
model.TrackedEvent.experiment_id == model.Experiment.id,
model.TrackedEvent.variant_id == model.Variant.id,
model.Experiment.name == experiment_name,
model.Variant.name == variant
)).first()
return row.total if row else 0
finally:
self.Session.close()
def participants(self, experiment_name, variant):
"""
The number of participants for a certain variant.
Returns an integer.
"""
return self._total_events('PARTICIPANT', experiment_name, variant)
def conversions(self, experiment_name, variant):
"""
The number of conversions for a certain variant.
Returns an integer.
"""
return self._total_events('CONVERSION', experiment_name, variant)
|
ryanpetrello/cleaver | cleaver/backend/db/__init__.py | SQLAlchemyBackend.get_experiment | python | def get_experiment(self, name, variants):
try:
return self.experiment_factory(model.Experiment.get_by(name=name))
finally:
self.Session.close() | Retrieve an experiment by its name and variants (assuming it exists).
:param name a unique string name for the experiment
:param variants a list of strings, each with a unique variant name
Returns a ``cleaver.experiment.Experiment`` or ``None`` | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/backend/db/__init__.py#L62-L74 | [
"def experiment_factory(self, experiment):\n if experiment is None:\n return None\n return CleaverExperiment(\n backend=self,\n name=experiment.name,\n started_on=experiment.started_on,\n variants=tuple(v.name for v in experiment.variants)\n )\n"
] | class SQLAlchemyBackend(CleaverBackend):
"""
Provides an interface for persisting and retrieving A/B test results
to a SQLAlchemy-supported database.
"""
def __init__(self, dburi='sqlite://', engine_options={}):
self.dburi = dburi
self.engine_options = engine_options
self.Session = session_for(
dburi=self.dburi,
**self.engine_options
)
def experiment_factory(self, experiment):
if experiment is None:
return None
return CleaverExperiment(
backend=self,
name=experiment.name,
started_on=experiment.started_on,
variants=tuple(v.name for v in experiment.variants)
)
def all_experiments(self):
"""
Retrieve every available experiment.
Returns a list of ``cleaver.experiment.Experiment``s
"""
try:
return [
self.experiment_factory(e)
for e in model.Experiment.query.all()
]
finally:
self.Session.close()
def save_experiment(self, name, variants):
"""
Persist an experiment and its variants (unless they already exist).
:param name a unique string name for the experiment
:param variants a list of strings, each with a unique variant name
"""
try:
model.Experiment(
name=name,
started_on=datetime.utcnow(),
variants=[
model.Variant(name=v, order=i)
for i, v in enumerate(variants)
]
)
self.Session.commit()
finally:
self.Session.close()
def is_verified_human(self, identity):
try:
return model.VerifiedHuman.get_by(identity=identity) is not None
finally:
self.Session.close()
def mark_human(self, identity):
try:
if model.VerifiedHuman.get_by(identity=identity) is None:
model.VerifiedHuman(identity=identity)
self.Session.commit()
finally:
self.Session.close()
def get_variant(self, identity, experiment_name):
"""
Retrieve the variant for a specific user and experiment (if it exists).
:param identity a unique user identifier
:param experiment_name the string name of the experiment
Returns a ``String`` or `None`
"""
try:
match = model.Participant.query.join(
model.Experiment
).filter(and_(
model.Participant.identity == identity,
model.Experiment.name == experiment_name
)).first()
return match.variant.name if match else None
finally:
self.Session.close()
def set_variant(self, identity, experiment_name, variant_name):
"""
Set the variant for a specific user.
:param identity a unique user identifier
:param experiment_name the string name of the experiment
:param variant_name the string name of the variant
"""
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant and model.Participant.query.filter(and_(
model.Participant.identity == identity,
model.Participant.experiment_id == experiment.id,
model.Participant.variant_id == variant.id
)).count() == 0:
model.Participant(
identity=identity,
experiment=experiment,
variant=variant
)
self.Session.commit()
finally:
self.Session.close()
def _mark_event(self, type, experiment_name, variant_name):
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant and model.TrackedEvent.query.filter(and_(
model.TrackedEvent.type == type,
model.TrackedEvent.experiment_id == experiment.id,
model.TrackedEvent.variant_id == variant.id
)).first() is None:
model.TrackedEvent(
type=type,
experiment=experiment,
variant=variant
)
self.Session.commit()
finally:
self.Session.close()
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant:
self.Session.execute(
'UPDATE %s SET total = total + 1 '
'WHERE experiment_id = :experiment_id '
'AND variant_id = :variant_id '
'AND `type` = :type' % (
model.TrackedEvent.__tablename__
),
{
'experiment_id': experiment.id,
'variant_id': variant.id,
'type': type
}
)
self.Session.commit()
finally:
self.Session.close()
def mark_participant(self, experiment_name, variant):
"""
Mark a participation for a specific experiment variant.
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
self._mark_event('PARTICIPANT', experiment_name, variant)
def mark_conversion(self, experiment_name, variant):
"""
Mark a conversion for a specific experiment variant.
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
self._mark_event('CONVERSION', experiment_name, variant)
def _total_events(self, type, experiment_name, variant):
try:
row = model.TrackedEvent.query.join(
model.Experiment
).join(
model.Variant
).filter(and_(
model.TrackedEvent.type == type,
model.TrackedEvent.experiment_id == model.Experiment.id,
model.TrackedEvent.variant_id == model.Variant.id,
model.Experiment.name == experiment_name,
model.Variant.name == variant
)).first()
return row.total if row else 0
finally:
self.Session.close()
def participants(self, experiment_name, variant):
"""
The number of participants for a certain variant.
Returns an integer.
"""
return self._total_events('PARTICIPANT', experiment_name, variant)
def conversions(self, experiment_name, variant):
"""
The number of conversions for a certain variant.
Returns an integer.
"""
return self._total_events('CONVERSION', experiment_name, variant)
|
ryanpetrello/cleaver | cleaver/backend/db/__init__.py | SQLAlchemyBackend.save_experiment | python | def save_experiment(self, name, variants):
try:
model.Experiment(
name=name,
started_on=datetime.utcnow(),
variants=[
model.Variant(name=v, order=i)
for i, v in enumerate(variants)
]
)
self.Session.commit()
finally:
self.Session.close() | Persist an experiment and its variants (unless they already exist).
:param name a unique string name for the experiment
:param variants a list of strings, each with a unique variant name | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/backend/db/__init__.py#L76-L94 | null | class SQLAlchemyBackend(CleaverBackend):
"""
Provides an interface for persisting and retrieving A/B test results
to a SQLAlchemy-supported database.
"""
def __init__(self, dburi='sqlite://', engine_options={}):
self.dburi = dburi
self.engine_options = engine_options
self.Session = session_for(
dburi=self.dburi,
**self.engine_options
)
def experiment_factory(self, experiment):
if experiment is None:
return None
return CleaverExperiment(
backend=self,
name=experiment.name,
started_on=experiment.started_on,
variants=tuple(v.name for v in experiment.variants)
)
def all_experiments(self):
"""
Retrieve every available experiment.
Returns a list of ``cleaver.experiment.Experiment``s
"""
try:
return [
self.experiment_factory(e)
for e in model.Experiment.query.all()
]
finally:
self.Session.close()
def get_experiment(self, name, variants):
"""
Retrieve an experiment by its name and variants (assuming it exists).
:param name a unique string name for the experiment
:param variants a list of strings, each with a unique variant name
Returns a ``cleaver.experiment.Experiment`` or ``None``
"""
try:
return self.experiment_factory(model.Experiment.get_by(name=name))
finally:
self.Session.close()
def is_verified_human(self, identity):
try:
return model.VerifiedHuman.get_by(identity=identity) is not None
finally:
self.Session.close()
def mark_human(self, identity):
try:
if model.VerifiedHuman.get_by(identity=identity) is None:
model.VerifiedHuman(identity=identity)
self.Session.commit()
finally:
self.Session.close()
def get_variant(self, identity, experiment_name):
"""
Retrieve the variant for a specific user and experiment (if it exists).
:param identity a unique user identifier
:param experiment_name the string name of the experiment
Returns a ``String`` or `None`
"""
try:
match = model.Participant.query.join(
model.Experiment
).filter(and_(
model.Participant.identity == identity,
model.Experiment.name == experiment_name
)).first()
return match.variant.name if match else None
finally:
self.Session.close()
def set_variant(self, identity, experiment_name, variant_name):
"""
Set the variant for a specific user.
:param identity a unique user identifier
:param experiment_name the string name of the experiment
:param variant_name the string name of the variant
"""
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant and model.Participant.query.filter(and_(
model.Participant.identity == identity,
model.Participant.experiment_id == experiment.id,
model.Participant.variant_id == variant.id
)).count() == 0:
model.Participant(
identity=identity,
experiment=experiment,
variant=variant
)
self.Session.commit()
finally:
self.Session.close()
def _mark_event(self, type, experiment_name, variant_name):
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant and model.TrackedEvent.query.filter(and_(
model.TrackedEvent.type == type,
model.TrackedEvent.experiment_id == experiment.id,
model.TrackedEvent.variant_id == variant.id
)).first() is None:
model.TrackedEvent(
type=type,
experiment=experiment,
variant=variant
)
self.Session.commit()
finally:
self.Session.close()
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant:
self.Session.execute(
'UPDATE %s SET total = total + 1 '
'WHERE experiment_id = :experiment_id '
'AND variant_id = :variant_id '
'AND `type` = :type' % (
model.TrackedEvent.__tablename__
),
{
'experiment_id': experiment.id,
'variant_id': variant.id,
'type': type
}
)
self.Session.commit()
finally:
self.Session.close()
def mark_participant(self, experiment_name, variant):
"""
Mark a participation for a specific experiment variant.
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
self._mark_event('PARTICIPANT', experiment_name, variant)
def mark_conversion(self, experiment_name, variant):
"""
Mark a conversion for a specific experiment variant.
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
self._mark_event('CONVERSION', experiment_name, variant)
def _total_events(self, type, experiment_name, variant):
try:
row = model.TrackedEvent.query.join(
model.Experiment
).join(
model.Variant
).filter(and_(
model.TrackedEvent.type == type,
model.TrackedEvent.experiment_id == model.Experiment.id,
model.TrackedEvent.variant_id == model.Variant.id,
model.Experiment.name == experiment_name,
model.Variant.name == variant
)).first()
return row.total if row else 0
finally:
self.Session.close()
def participants(self, experiment_name, variant):
"""
The number of participants for a certain variant.
Returns an integer.
"""
return self._total_events('PARTICIPANT', experiment_name, variant)
def conversions(self, experiment_name, variant):
"""
The number of conversions for a certain variant.
Returns an integer.
"""
return self._total_events('CONVERSION', experiment_name, variant)
|
ryanpetrello/cleaver | cleaver/backend/db/__init__.py | SQLAlchemyBackend.get_variant | python | def get_variant(self, identity, experiment_name):
try:
match = model.Participant.query.join(
model.Experiment
).filter(and_(
model.Participant.identity == identity,
model.Experiment.name == experiment_name
)).first()
return match.variant.name if match else None
finally:
self.Session.close() | Retrieve the variant for a specific user and experiment (if it exists).
:param identity a unique user identifier
:param experiment_name the string name of the experiment
Returns a ``String`` or `None` | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/backend/db/__init__.py#L110-L128 | null | class SQLAlchemyBackend(CleaverBackend):
"""
Provides an interface for persisting and retrieving A/B test results
to a SQLAlchemy-supported database.
"""
def __init__(self, dburi='sqlite://', engine_options={}):
self.dburi = dburi
self.engine_options = engine_options
self.Session = session_for(
dburi=self.dburi,
**self.engine_options
)
def experiment_factory(self, experiment):
if experiment is None:
return None
return CleaverExperiment(
backend=self,
name=experiment.name,
started_on=experiment.started_on,
variants=tuple(v.name for v in experiment.variants)
)
def all_experiments(self):
"""
Retrieve every available experiment.
Returns a list of ``cleaver.experiment.Experiment``s
"""
try:
return [
self.experiment_factory(e)
for e in model.Experiment.query.all()
]
finally:
self.Session.close()
def get_experiment(self, name, variants):
"""
Retrieve an experiment by its name and variants (assuming it exists).
:param name a unique string name for the experiment
:param variants a list of strings, each with a unique variant name
Returns a ``cleaver.experiment.Experiment`` or ``None``
"""
try:
return self.experiment_factory(model.Experiment.get_by(name=name))
finally:
self.Session.close()
def save_experiment(self, name, variants):
"""
Persist an experiment and its variants (unless they already exist).
:param name a unique string name for the experiment
:param variants a list of strings, each with a unique variant name
"""
try:
model.Experiment(
name=name,
started_on=datetime.utcnow(),
variants=[
model.Variant(name=v, order=i)
for i, v in enumerate(variants)
]
)
self.Session.commit()
finally:
self.Session.close()
def is_verified_human(self, identity):
try:
return model.VerifiedHuman.get_by(identity=identity) is not None
finally:
self.Session.close()
def mark_human(self, identity):
try:
if model.VerifiedHuman.get_by(identity=identity) is None:
model.VerifiedHuman(identity=identity)
self.Session.commit()
finally:
self.Session.close()
def set_variant(self, identity, experiment_name, variant_name):
"""
Set the variant for a specific user.
:param identity a unique user identifier
:param experiment_name the string name of the experiment
:param variant_name the string name of the variant
"""
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant and model.Participant.query.filter(and_(
model.Participant.identity == identity,
model.Participant.experiment_id == experiment.id,
model.Participant.variant_id == variant.id
)).count() == 0:
model.Participant(
identity=identity,
experiment=experiment,
variant=variant
)
self.Session.commit()
finally:
self.Session.close()
def _mark_event(self, type, experiment_name, variant_name):
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant and model.TrackedEvent.query.filter(and_(
model.TrackedEvent.type == type,
model.TrackedEvent.experiment_id == experiment.id,
model.TrackedEvent.variant_id == variant.id
)).first() is None:
model.TrackedEvent(
type=type,
experiment=experiment,
variant=variant
)
self.Session.commit()
finally:
self.Session.close()
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant:
self.Session.execute(
'UPDATE %s SET total = total + 1 '
'WHERE experiment_id = :experiment_id '
'AND variant_id = :variant_id '
'AND `type` = :type' % (
model.TrackedEvent.__tablename__
),
{
'experiment_id': experiment.id,
'variant_id': variant.id,
'type': type
}
)
self.Session.commit()
finally:
self.Session.close()
def mark_participant(self, experiment_name, variant):
"""
Mark a participation for a specific experiment variant.
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
self._mark_event('PARTICIPANT', experiment_name, variant)
def mark_conversion(self, experiment_name, variant):
"""
Mark a conversion for a specific experiment variant.
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
self._mark_event('CONVERSION', experiment_name, variant)
def _total_events(self, type, experiment_name, variant):
try:
row = model.TrackedEvent.query.join(
model.Experiment
).join(
model.Variant
).filter(and_(
model.TrackedEvent.type == type,
model.TrackedEvent.experiment_id == model.Experiment.id,
model.TrackedEvent.variant_id == model.Variant.id,
model.Experiment.name == experiment_name,
model.Variant.name == variant
)).first()
return row.total if row else 0
finally:
self.Session.close()
def participants(self, experiment_name, variant):
"""
The number of participants for a certain variant.
Returns an integer.
"""
return self._total_events('PARTICIPANT', experiment_name, variant)
def conversions(self, experiment_name, variant):
    """Return the conversion count (an integer) for one variant."""
    return self._total_events('CONVERSION', experiment_name, variant)
|
# ryanpetrello/cleaver | cleaver/backend/db/__init__.py | SQLAlchemyBackend.set_variant | python
def set_variant(self, identity, experiment_name, variant_name):
    """Set the variant for a specific user.

    A no-op when the assignment already exists or the experiment/variant
    cannot be found.

    :param identity: a unique user identifier
    :param experiment_name: the string name of the experiment
    :param variant_name: the string name of the variant
    """
    try:
        experiment = model.Experiment.get_by(name=experiment_name)
        variant = model.Variant.get_by(name=variant_name)
        if experiment and variant and model.Participant.query.filter(and_(
            model.Participant.identity == identity,
            model.Participant.experiment_id == experiment.id,
            model.Participant.variant_id == variant.id
        )).count() == 0:
            # Creating the model instance registers the new assignment;
            # commit() persists it (no explicit add() in this codebase).
            model.Participant(
                identity=identity,
                experiment=experiment,
                variant=variant
            )
            self.Session.commit()
    finally:
        self.Session.close()
:param identity: a unique user identifier
:param experiment_name: the string name of the experiment
# :param variant_name: the string name of the variant | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/backend/db/__init__.py#L130-L153 | null
class SQLAlchemyBackend(CleaverBackend):
    """
    Persist and retrieve A/B test results in any SQLAlchemy-supported
    database.
    """
    # NOTE(review): the source dump lost all indentation; commit() placement
    # inside conditional blocks was reconstructed — verify against upstream.

    # NOTE(review): ``engine_options={}`` is a mutable default argument; it is
    # only read (never mutated) here, but a None default would be safer.
    def __init__(self, dburi='sqlite://', engine_options={}):
        self.dburi = dburi
        self.engine_options = engine_options
        self.Session = session_for(
            dburi=self.dburi,
            **self.engine_options
        )

    def experiment_factory(self, experiment):
        """Wrap a model row in a ``CleaverExperiment`` (pass None through)."""
        if experiment is None:
            return None
        return CleaverExperiment(
            backend=self,
            name=experiment.name,
            started_on=experiment.started_on,
            variants=tuple(v.name for v in experiment.variants)
        )

    def all_experiments(self):
        """
        Retrieve every available experiment.

        Returns a list of ``cleaver.experiment.Experiment``s
        """
        try:
            rows = model.Experiment.query.all()
            return [self.experiment_factory(row) for row in rows]
        finally:
            self.Session.close()

    def get_experiment(self, name, variants):
        """
        Retrieve an experiment by its name and variants (assuming it exists).

        :param name: a unique string name for the experiment
        :param variants: a list of strings, each with a unique variant name

        Returns a ``cleaver.experiment.Experiment`` or ``None``
        """
        try:
            return self.experiment_factory(model.Experiment.get_by(name=name))
        finally:
            self.Session.close()

    def save_experiment(self, name, variants):
        """
        Persist an experiment and its variants (unless they already exist).

        :param name: a unique string name for the experiment
        :param variants: a list of strings, each with a unique variant name
        """
        try:
            model.Experiment(
                name=name,
                started_on=datetime.utcnow(),
                variants=[
                    model.Variant(name=label, order=position)
                    for position, label in enumerate(variants)
                ]
            )
            self.Session.commit()
        finally:
            self.Session.close()

    def is_verified_human(self, identity):
        """True when *identity* has previously been marked as human."""
        try:
            return model.VerifiedHuman.get_by(identity=identity) is not None
        finally:
            self.Session.close()

    def mark_human(self, identity):
        """Record *identity* as a verified human (idempotent)."""
        try:
            if model.VerifiedHuman.get_by(identity=identity) is None:
                model.VerifiedHuman(identity=identity)
                self.Session.commit()
        finally:
            self.Session.close()

    def get_variant(self, identity, experiment_name):
        """
        Retrieve the variant for a specific user and experiment (if it exists).

        :param identity: a unique user identifier
        :param experiment_name: the string name of the experiment

        Returns a ``String`` or ``None``
        """
        try:
            match = model.Participant.query.join(
                model.Experiment
            ).filter(and_(
                model.Participant.identity == identity,
                model.Experiment.name == experiment_name
            )).first()
            return match.variant.name if match else None
        finally:
            self.Session.close()

    def _mark_event(self, type, experiment_name, variant_name):
        # Phase 1: make sure a counter row exists for this
        # (type, experiment, variant) combination.
        try:
            experiment = model.Experiment.get_by(name=experiment_name)
            variant = model.Variant.get_by(name=variant_name)
            if experiment and variant and model.TrackedEvent.query.filter(and_(
                model.TrackedEvent.type == type,
                model.TrackedEvent.experiment_id == experiment.id,
                model.TrackedEvent.variant_id == variant.id
            )).first() is None:
                model.TrackedEvent(
                    type=type,
                    experiment=experiment,
                    variant=variant
                )
                self.Session.commit()
        finally:
            self.Session.close()
        # Phase 2: bump the counter atomically in SQL, avoiding a
        # read-modify-write race between concurrent requests.
        try:
            experiment = model.Experiment.get_by(name=experiment_name)
            variant = model.Variant.get_by(name=variant_name)
            if experiment and variant:
                self.Session.execute(
                    'UPDATE %s SET total = total + 1 '
                    'WHERE experiment_id = :experiment_id '
                    'AND variant_id = :variant_id '
                    'AND `type` = :type' % (
                        model.TrackedEvent.__tablename__
                    ),
                    {
                        'experiment_id': experiment.id,
                        'variant_id': variant.id,
                        'type': type
                    }
                )
                self.Session.commit()
        finally:
            self.Session.close()

    def mark_participant(self, experiment_name, variant):
        """
        Mark a participation for a specific experiment variant.

        :param experiment_name: the string name of the experiment
        :param variant: the string name of the variant
        """
        self._mark_event('PARTICIPANT', experiment_name, variant)

    def mark_conversion(self, experiment_name, variant):
        """
        Mark a conversion for a specific experiment variant.

        :param experiment_name: the string name of the experiment
        :param variant: the string name of the variant
        """
        self._mark_event('CONVERSION', experiment_name, variant)

    def _total_events(self, type, experiment_name, variant):
        """Return the stored total for one (type, experiment, variant), else 0."""
        try:
            row = model.TrackedEvent.query.join(
                model.Experiment
            ).join(
                model.Variant
            ).filter(and_(
                model.TrackedEvent.type == type,
                model.TrackedEvent.experiment_id == model.Experiment.id,
                model.TrackedEvent.variant_id == model.Variant.id,
                model.Experiment.name == experiment_name,
                model.Variant.name == variant
            )).first()
            return row.total if row else 0
        finally:
            self.Session.close()

    def participants(self, experiment_name, variant):
        """
        The number of participants for a certain variant.

        Returns an integer.
        """
        return self._total_events('PARTICIPANT', experiment_name, variant)

    def conversions(self, experiment_name, variant):
        """
        The number of conversions for a certain variant.

        Returns an integer.
        """
        return self._total_events('CONVERSION', experiment_name, variant)
|
# ryanpetrello/cleaver | cleaver/reports/web/bottle.py | validate | python
def validate(**vkargs):
    """(deprecated) Validate/convert keyword arguments via user-defined callables.

    Missing parameters and ValueError from a converter are both reported
    as HTTPError(403) through abort().
    """
    depr('Use route wildcard filters instead.')
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kargs):
            for key, converter in vkargs.items():
                if key not in kargs:
                    abort(403, 'Missing parameter: %s' % key)
                try:
                    kargs[key] = converter(kargs[key])
                except ValueError:
                    abort(403, 'Wrong parameter format for: %s' % key)
            return func(*args, **kargs)
        return wrapper
    return decorator
# | Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403). | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/reports/web/bottle.py#L2248-L2266 | [
"def depr(message):\n warnings.warn(message, DeprecationWarning, stacklevel=3)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2011, Marcel Hellkamp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2011, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.11.dev'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server and _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, urllib, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
try: from json import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3,0,0)
py25 = py < (2,6,0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" dilemma.
_stdout, _stderr = sys.stdout.write, sys.stderr.write
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, parse_qsl, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from cgi import parse_qsl
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from urlparse import parse_qsl
from collections import MutableMapping as DictMixin
json_loads = json_lds
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
    """Coerce *s* to bytes, encoding text strings with *enc*."""
    if isinstance(s, unicode):
        return s.encode(enc)
    return bytes(s)
def touni(s, enc='utf8', err='strict'):
    """Coerce *s* to a text string, decoding byte strings with *enc*/*err*."""
    if isinstance(s, bytes):
        return s.decode(enc, err)
    return unicode(s)
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
NCTextIOWrapper = None
if (3,0,0) < py < (3,2,0):
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
    """functools.update_wrapper, but tolerant of AttributeError (e.g. bound methods)."""
    try:
        functools.update_wrapper(wrapper, wrapped, *a, **ka)
    except AttributeError:
        pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message):
    """Emit a DeprecationWarning attributed to the caller of depr()'s caller."""
    warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just to handy
    """Coerce *data* to a list: containers are copied, truthy scalars wrapped, falsy -> []."""
    if isinstance(data, (tuple, list, set, dict)):
        return list(data)
    return [data] if data else []
class DictProperty(object):
    ''' A property whose value is stored under a key of a dict-valued
        attribute of the owning instance. Values are computed lazily by the
        decorated getter and cached in that dict. '''

    def __init__(self, attr, key=None, read_only=False):
        self.attr = attr
        self.key = key
        self.read_only = read_only

    def __call__(self, func):
        # Used as a decorator: remember the getter and default the key
        # to the function's name.
        functools.update_wrapper(self, func, updated=[])
        self.getter = func
        self.key = self.key or func.__name__
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        storage = getattr(obj, self.attr)
        if self.key not in storage:
            storage[self.key] = self.getter(obj)
        return storage[self.key]

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        getattr(obj, self.attr)[self.key] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        del getattr(obj, self.attr)[self.key]
class cached_property(object):
    ''' A property computed once per instance and then stored in the
        instance ``__dict__`` as a plain attribute (which shadows this
        descriptor). Deleting the attribute resets the property. '''

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
class lazy_attribute(object):
    ''' A class-level property computed once, then stored on the class
        itself (shadowing this descriptor for all later accesses). '''

    def __init__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter = func

    def __get__(self, obj, cls):
        result = self.getter(cls)
        setattr(cls, self.__name__, result)
        return result
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
    """ A base class for exceptions used by bottle. """
#TODO: This should subclass BaseRequest
class HTTPResponse(BottleException):
    """ Raised to break execution and immediately finish the response. """

    def __init__(self, output='', status=200, header=None):
        super(BottleException, self).__init__("HTTP Response %d" % status)
        self.status = int(status)
        self.output = output
        # Headers are optional; only wrap them when some were supplied.
        self.headers = HeaderDict(header) if header else None

    def apply(self, response):
        # Copy any accumulated headers and the status onto the real
        # response object.
        if self.headers:
            for name, value in self.headers.allitems():
                response.headers[name] = value
        response.status = self.status
class HTTPError(HTTPResponse):
    """ An HTTPResponse used to generate an error page. """

    def __init__(self, code=500, output='Unknown Error', exception=None,
                 traceback=None, header=None):
        super(HTTPError, self).__init__(output, code, header)
        self.exception = exception
        self.traceback = traceback

    def __repr__(self):
        # Rendered through the module-level error-page template.
        return tonat(template(ERROR_PAGE_TEMPLATE, e=self))
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
    """ This is a base class for all routing related exceptions """


class RouteReset(BottleException):
    """ If raised by a plugin or request handler, the route is reset and all
        plugins are re-applied. """


class RouterUnknownModeError(RouteError):
    """ The router was asked for a wildcard mode it does not know. """


class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router """


class RouteBuildError(RouteError):
    """ The route could not be built """
class Router(object):
    ''' A Router is an ordered collection of route->target pairs. It is used to
        efficiently match WSGI requests against a number of routes and return
        the first target that satisfies the request. The target may be anything,
        usually a string, ID or callable object. A route consists of a path-rule
        and a HTTP method.

        The path-rule is either a static path (e.g. `/contact`) or a dynamic
        path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
        and details on the matching order are described in docs:`routing`.
    '''

    default_pattern = '[^/]+'
    default_filter = 're'
    #: Sorry for the mess. It works. Trust me.
    # Matches both the old ``:name#re#`` and the new ``<name:filter:conf>``
    # wildcard syntaxes; group 0 counts escaping backslashes.
    rule_syntax = re.compile('(\\\\*)'\
        '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
        '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
        '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')

    def __init__(self, strict=False):
        self.rules = {}    # A {rule: Rule} mapping
        self.builder = {}  # A rule/name->build_info mapping
        self.static = {}   # Cache for static routes: {path: {method: target}}
        self.dynamic = []  # Cache for dynamic routes. See _compile()
        #: If true, static routes are no longer checked first.
        self.strict_order = strict
        self.filters = {'re': self.re_filter, 'int': self.int_filter,
                        'float': self.float_filter, 'path': self.path_filter}

    # Built-in wildcard filters. Each returns a (regexp, to_python, to_url)
    # triple; ``None`` means "no conversion".
    def re_filter(self, conf):
        return conf or self.default_pattern, None, None

    def int_filter(self, conf):
        return r'-?\d+', int, lambda x: str(int(x))

    def float_filter(self, conf):
        return r'-?[\d.]+', float, lambda x: str(float(x))

    def path_filter(self, conf):
        return r'.+?', None, None

    def add_filter(self, name, func):
        ''' Add a filter. The provided function is called with the configuration
        string as parameter and must return a (regexp, to_python, to_url) tuple.
        The first element is a string, the last two are callables or None. '''
        self.filters[name] = func

    def parse_rule(self, rule):
        ''' Parses a rule into a (name, filter, conf) token stream. If mode is
            None, name contains a static rule part. '''
        offset, prefix = 0, ''
        for match in self.rule_syntax.finditer(rule):
            prefix += rule[offset:match.start()]
            g = match.groups()
            if len(g[0])%2: # Escaped wildcard
                prefix += match.group(0)[len(g[0]):]
                offset = match.end()
                continue
            if prefix: yield prefix, None, None
            # Old-syntax wildcards fill groups 1-3, new-syntax ones 4-6.
            name, filtr, conf = g[1:4] if not g[2] is None else g[4:7]
            if not filtr: filtr = self.default_filter
            yield name, filtr, conf or None
            offset, prefix = match.end(), ''
        # Trailing static text after the last wildcard.
        if offset <= len(rule) or prefix:
            yield prefix+rule[offset:], None, None

    def add(self, rule, method, target, name=None):
        ''' Add a new route or replace the target for an existing route. '''
        if rule in self.rules:
            # Known rule: just register the target for this method.
            self.rules[rule][method] = target
            if name: self.builder[name] = self.builder[rule]
            return

        target = self.rules[rule] = {method: target}

        # Build pattern and other structures for dynamic routes
        anons = 0     # Number of anonymous wildcards
        pattern = ''  # Regular expression pattern
        filters = []  # Lists of wildcard input filters
        builder = []  # Data structure for the URL builder
        is_static = True
        for key, mode, conf in self.parse_rule(rule):
            if mode:
                is_static = False
                mask, in_filter, out_filter = self.filters[mode](conf)
                if key:
                    pattern += '(?P<%s>%s)' % (key, mask)
                else:
                    pattern += '(?:%s)' % mask
                    key = 'anon%d' % anons; anons += 1
                if in_filter: filters.append((key, in_filter))
                builder.append((key, out_filter or str))
            elif key:
                pattern += re.escape(key)
                builder.append((None, key))
        self.builder[rule] = builder
        if name: self.builder[name] = builder

        if is_static and not self.strict_order:
            # Fully static rule: cache the literal path for O(1) lookup.
            self.static[self.build(rule)] = target
            return

        # Strip named groups so several rules can share one combined regex
        # without group-name collisions.
        def fpat_sub(m):
            return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
        flat_pattern = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, pattern)

        try:
            re_match = re.compile('^(%s)$' % pattern).match
        except re.error:
            raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))

        def match(path):
            """ Return an url-argument dictionary. """
            url_args = re_match(path).groupdict()
            for name, wildcard_filter in filters:
                try:
                    url_args[name] = wildcard_filter(url_args[name])
                except ValueError:
                    raise HTTPError(400, 'Path has wrong format.')
            return url_args

        try:
            # Extend the last combined regex; when the regex engine runs out
            # of groups it raises (AssertionError) and we start a new one.
            combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, flat_pattern)
            self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
            self.dynamic[-1][1].append((match, target))
        except (AssertionError, IndexError): # AssertionError: Too many groups
            self.dynamic.append((re.compile('(^%s$)' % flat_pattern),
                                [(match, target)]))
        return match

    def build(self, _name, *anons, **query):
        ''' Build an URL by filling the wildcards in a rule. '''
        builder = self.builder.get(_name)
        if not builder: raise RouteBuildError("No route with that name.", _name)
        try:
            for i, value in enumerate(anons): query['anon%d'%i] = value
            # Leftover query entries become the URL's query string.
            url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
            return url if not query else url+'?'+urlencode(query)
        except KeyError:
            raise RouteBuildError('Missing URL argument: %r' % _e().args[0])

    def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
        path, targets, urlargs = environ['PATH_INFO'] or '/', None, {}
        if path in self.static:
            targets = self.static[path]
        else:
            # ``match.lastindex`` identifies which alternative of the combined
            # regex matched, and thereby the corresponding rule.
            for combined, rules in self.dynamic:
                match = combined.match(path)
                if not match: continue
                getargs, targets = rules[match.lastindex - 1]
                urlargs = getargs(path) if getargs else {}
                break

        if not targets:
            raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
        method = environ['REQUEST_METHOD'].upper()
        if method in targets:
            return targets[method], urlargs
        # HEAD falls back to GET; 'ANY' matches every method.
        if method == 'HEAD' and 'GET' in targets:
            return targets['GET'], urlargs
        if 'ANY' in targets:
            return targets['ANY'], urlargs
        allowed = [verb for verb in targets if verb != 'ANY']
        if 'GET' in allowed and 'HEAD' not in allowed:
            allowed.append('HEAD')
        raise HTTPError(405, "Method not allowed.",
                        header=[('Allow',",".join(allowed))])
class Route(object):
    ''' This class wraps a route callback along with route specific metadata and
        configuration and applies Plugins on demand. It is also responsible for
        turning an URL path rule into a regular expression usable by the Router.
    '''

    def __init__(self, app, rule, method, callback, name=None,
                 plugins=None, skiplist=None, **config):
        #: The application this route is installed to.
        self.app = app
        #: The path-rule string (e.g. ``/wiki/:page``).
        self.rule = rule
        #: The HTTP method as a string (e.g. ``GET``).
        self.method = method
        #: The original callback with no plugins applied. Useful for introspection.
        self.callback = callback
        #: The name of the route (if specified) or ``None``.
        self.name = name or None
        #: A list of route-specific plugins (see :meth:`Bottle.route`).
        self.plugins = plugins or []
        #: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
        self.skiplist = skiplist or []
        #: Additional keyword arguments passed to the :meth:`Bottle.route`
        #: decorator are stored in this dictionary. Used for route-specific
        #: plugin configuration and meta-data.
        self.config = ConfigDict(config)

    def __call__(self, *a, **ka):
        # Deprecated: callers should use :attr:`call` instead.
        depr("Some APIs changed to return Route() instances instead of"\
             " callables. Make sure to use the Route.call method and not to"\
             " call Route instances directly.")
        return self.call(*a, **ka)

    @cached_property
    def call(self):
        ''' The route callback with all plugins applied. This property is
            created on demand and then cached to speed up subsequent requests.'''
        return self._make_callback()

    def reset(self):
        ''' Forget any cached values. The next time :attr:`call` is accessed,
            all plugins are re-applied. '''
        # Removing the cached_property result forces re-computation.
        self.__dict__.pop('call', None)

    def prepare(self):
        ''' Do all on-demand work immediately (useful for debugging).'''
        self.call

    @property
    def _context(self):
        # Legacy (Plugin API v1) context dictionary.
        depr('Switch to Plugin API v2 and access the Route object directly.')
        return dict(rule=self.rule, method=self.method, callback=self.callback,
                    name=self.name, app=self.app, config=self.config,
                    apply=self.plugins, skip=self.skiplist)

    def all_plugins(self):
        ''' Yield all Plugins affecting this route. '''
        # Route-specific plugins win over app-level ones; the skiplist may
        # contain ``True`` (skip all), names, instances or plugin types.
        unique = set()
        for p in reversed(self.app.plugins + self.plugins):
            if True in self.skiplist: break
            name = getattr(p, 'name', False)
            if name and (name in self.skiplist or name in unique): continue
            if p in self.skiplist or type(p) in self.skiplist: continue
            if name: unique.add(name)
            yield p

    def _make_callback(self):
        # Apply each plugin (API v1 callables or v2 objects with .apply) to
        # the raw callback, restarting from scratch on RouteReset.
        callback = self.callback
        for plugin in self.all_plugins():
            try:
                if hasattr(plugin, 'apply'):
                    api = getattr(plugin, 'api', 1)
                    context = self if api > 1 else self._context
                    callback = plugin.apply(callback, context)
                else:
                    callback = plugin(callback)
            except RouteReset: # Try again with changed configuration.
                return self._make_callback()
            if not callback is self.callback:
                update_wrapper(callback, self.callback)
        return callback

    def __repr__(self):
        return '<%s %r %r>' % (self.method, self.rule, self.callback)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
self.catchall = catchall
#: A :cls:`ResourceManager` for application files
self.resources = ResourceManager()
#: A :cls:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config.autojson = autojson
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.hooks = HooksPlugin()
self.install(self.hooks)
if self.config.autojson:
self.install(JSONPlugin())
self.install(TemplatePlugin())
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
prefix, app = app, prefix
depr('Parameter order of Bottle.mount() changed.') # 0.10
parts = [p for p in prefix.split('/') if p]
if not parts: raise ValueError('Empty path prefix.')
path_depth = len(parts)
options.setdefault('skip', True)
options.setdefault('method', 'ANY')
@self.route('/%s/:#.*#' % '/'.join(parts), **options)
def mountpoint():
try:
request.path_shift(path_depth)
rs = BaseResponse([], 200)
def start_response(status, header):
rs.status = status
for name, value in header: rs.add_header(name, value)
return rs.body.append
rs.body = itertools.chain(rs.body, app(request.environ, start_response))
return HTTPResponse(rs.body, rs.status_code, rs.headers)
finally:
request.path_shift(-path_depth)
if not prefix.endswith('/'):
self.route('/' + '/'.join(parts), callback=mountpoint, **options)
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def run(self, **kwargs):
''' Calls :func:`run` with the same parameters. '''
run(self, **kwargs)
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.hooks.trigger('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
    """ Shortcut for :meth:`route` with a ``POST`` method parameter. """
    return self.route(path=path, method=method, **options)
def put(self, path=None, method='PUT', **options):
    """ Shortcut for :meth:`route` with a ``PUT`` method parameter. """
    return self.route(path=path, method=method, **options)
def delete(self, path=None, method='DELETE', **options):
    """ Shortcut for :meth:`route` with a ``DELETE`` method parameter. """
    return self.route(path=path, method=method, **options)
def error(self, code=500):
    """ Decorator: Register an output handler for a HTTP error code"""
    def decorator(callback):
        # Map the numeric status code to the handler and hand the callable
        # back unchanged so it can still be called or stacked with other
        # decorators.
        self.error_handler[int(code)] = callback
        return callback
    return decorator
def hook(self, name):
    """ Return a decorator that attaches a callback to a hook. Three hooks
        are currently implemented:

        - before_request: Executed once before each request
        - after_request: Executed once after each request
        - app_reset: Called whenever :meth:`reset` is called.
    """
    def decorator(callback):
        # Register with the hooks plugin, then return the callable untouched.
        self.hooks.add(name, callback)
        return callback
    return decorator
def handle(self, path, method='GET'):
    """ (deprecated) Execute the first matching route callback and return
        the result. :exc:`HTTPResponse` exceptions are caught and returned.
        If :attr:`Bottle.catchall` is true, other exceptions are caught as
        well and returned as :exc:`HTTPError` instances (500).
    """
    depr("This method will change semantics in 0.10. Try to avoid it.")
    # Accept either a full environ dict or a bare path + method pair and
    # normalize both into a call to the real handler.
    if isinstance(path, dict):
        return self._handle(path)
    return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
def _handle(self, environ):
    # Core request dispatcher: bind the thread-local request/response,
    # match a route and invoke its callback. Returns the callback result
    # or an HTTPResponse/HTTPError instance (exceptions are converted to
    # return values here, not propagated).
    try:
        environ['bottle.app'] = self
        request.bind(environ)
        response.bind()
        route, args = self.router.match(environ)
        environ['route.handle'] = environ['bottle.route'] = route
        environ['route.url_args'] = args
        return route.call(**args)
    except HTTPResponse:
        # HTTPResponse is used for control flow (redirects, aborts):
        # return the raised instance as the result.
        return _e()
    except RouteReset:
        # A plugin asked for a re-apply of the route; reset and retry.
        route.reset()
        return self._handle(environ)
    except (KeyboardInterrupt, SystemExit, MemoryError):
        # Never swallow fatal conditions.
        raise
    except Exception:
        if not self.catchall: raise
        stacktrace = format_exc(10)
        environ['wsgi.errors'].write(stacktrace)
        return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
    """ Try to convert the parameter into something WSGI compatible and set
    correct HTTP headers when possible.
    Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
    iterable of strings and iterable of unicodes
    """
    # Empty output is done here
    if not out:
        response['Content-Length'] = 0
        return []
    # Join lists of byte or unicode strings. Mixed lists are NOT supported
    if isinstance(out, (tuple, list))\
    and isinstance(out[0], (bytes, unicode)):
        out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
    # Encode unicode strings
    if isinstance(out, unicode):
        out = out.encode(response.charset)
    # Byte Strings are just returned
    if isinstance(out, bytes):
        response['Content-Length'] = len(out)
        return [out]
    # HTTPError or HTTPException (recursive, because they may wrap anything)
    # TODO: Handle these explicitly in handle() or make them iterable.
    if isinstance(out, HTTPError):
        out.apply(response)
        # The error handler output is cast recursively, so handlers may
        # return anything this method supports.
        out = self.error_handler.get(out.status, repr)(out)
        if isinstance(out, HTTPResponse):
            depr('Error handlers must not return :exc:`HTTPResponse`.') #0.9
        return self._cast(out)
    if isinstance(out, HTTPResponse):
        out.apply(response)
        return self._cast(out.output)
    # File-like objects.
    if hasattr(out, 'read'):
        # Prefer the server-provided wrapper (may use sendfile()).
        if 'wsgi.file_wrapper' in request.environ:
            return request.environ['wsgi.file_wrapper'](out)
        elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
            return WSGIFileWrapper(out)
    # Handle Iterables. We peek into them to detect their inner type.
    try:
        out = iter(out)
        first = next(out)
        # Skip leading falsy chunks so `first` reflects the real item type.
        while not first:
            first = next(out)
    except StopIteration:
        return self._cast('')
    except HTTPResponse:
        first = _e()
    except (KeyboardInterrupt, SystemExit, MemoryError):
        raise
    except Exception:
        if not self.catchall: raise
        first = HTTPError(500, 'Unhandled exception', _e(), format_exc(10))
    # These are the inner types allowed in iterator or generator objects.
    if isinstance(first, HTTPResponse):
        return self._cast(first)
    if isinstance(first, bytes):
        # Re-attach the consumed first element to the rest of the iterator.
        return itertools.chain([first], out)
    if isinstance(first, unicode):
        return imap(lambda x: x.encode(response.charset),
                              itertools.chain([first], out))
    return self._cast(HTTPError(500, 'Unsupported response type: %s'\
                                     % type(first)))
def wsgi(self, environ, start_response):
    """ The bottle WSGI-interface. """
    try:
        out = self._cast(self._handle(environ))
        # rfc2616 section 4.3
        # These responses must not carry a message body; close any
        # file-like output before discarding it.
        if response._status_code in (100, 101, 204, 304)\
        or request.method == 'HEAD':
            if hasattr(out, 'close'): out.close()
            out = []
        if isinstance(response._status_line, unicode):
            response._status_line = str(response._status_line)
        start_response(response._status_line, list(response.iter_headers()))
        return out
    except (KeyboardInterrupt, SystemExit, MemoryError):
        raise
    except Exception:
        # Last-resort error page: anything that escapes _handle/_cast
        # (e.g. a broken error handler) ends up here.
        if not self.catchall: raise
        err = '<h1>Critical error while processing request: %s</h1>' \
              % html_escape(environ.get('PATH_INFO', '/'))
        if DEBUG:
            err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
                   '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
                   % (html_escape(repr(_e())), html_escape(format_exc(10)))
        environ['wsgi.errors'].write(err)
        headers = [('Content-Type', 'text/html; charset=UTF-8')]
        start_response('500 INTERNAL SERVER ERROR', headers)
        return [tob(err)]
def __call__(self, environ, start_response):
    """ Each instance of :class:'Bottle' is a WSGI application. """
    # Delegate straight to the WSGI entry point.
    wsgi_app = self.wsgi
    return wsgi_app(environ, start_response)
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
    """ A wrapper for WSGI environment dictionaries that adds a lot of
        convenient access methods and properties. Most of them are read-only.

        Adding new attributes to a request actually adds them to the environ
        dictionary (as 'bottle.request.ext.<name>'). This is the recommended
        way to store and access request-specific data.
    """
    # __slots__ keeps instances lightweight; the environ dict is the single
    # source of truth for all state.
    __slots__ = ('environ')

    #: Maximum size of memory buffer for :attr:`body` in bytes.
    MEMFILE_MAX = 102400
    #: Maximum number of GET or POST parameters per request.
    MAX_PARAMS = 100

    def __init__(self, environ=None):
        """ Wrap a WSGI environ dictionary. """
        #: The wrapped WSGI environ dictionary. This is the only real attribute.
        #: All other attributes actually are read-only properties.
        self.environ = {} if environ is None else environ
        self.environ['bottle.request'] = self

    @DictProperty('environ', 'bottle.app', read_only=True)
    def app(self):
        ''' Bottle application handling this request. '''
        # Only reached when no app bound the request; DictProperty caches
        # the value set by the dispatcher otherwise.
        raise RuntimeError('This request is not connected to an application.')

    @property
    def path(self):
        ''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
            broken clients and avoid the "empty path" edge case). '''
        return '/' + self.environ.get('PATH_INFO','').lstrip('/')

    @property
    def method(self):
        ''' The ``REQUEST_METHOD`` value as an uppercase string. '''
        return self.environ.get('REQUEST_METHOD', 'GET').upper()

    @DictProperty('environ', 'bottle.request.headers', read_only=True)
    def headers(self):
        ''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
            HTTP request headers. '''
        return WSGIHeaderDict(self.environ)

    def get_header(self, name, default=None):
        ''' Return the value of a request header, or a given default value. '''
        return self.headers.get(name, default)

    @DictProperty('environ', 'bottle.request.cookies', read_only=True)
    def cookies(self):
        """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
            decoded. Use :meth:`get_cookie` if you expect signed cookies. """
        cookies = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
        # MAX_PARAMS caps the number of cookies parsed per request.
        cookies = list(cookies.values())[:self.MAX_PARAMS]
        return FormsDict((c.key, c.value) for c in cookies)

    def get_cookie(self, key, default=None, secret=None):
        """ Return the content of a cookie. To read a `Signed Cookie`, the
            `secret` must match the one used to create the cookie (see
            :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
            cookie or wrong signature), return a default value. """
        value = self.cookies.get(key)
        if secret and value:
            dec = cookie_decode(value, secret) # (key, value) tuple or None
            return dec[1] if dec and dec[0] == key else default
        return value or default

    @DictProperty('environ', 'bottle.request.query', read_only=True)
    def query(self):
        ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters", but
            not to be confused with "URL wildcards" as they are provided by the
            :class:`Router`. '''
        pairs = parse_qsl(self.query_string, keep_blank_values=True)
        get = self.environ['bottle.get'] = FormsDict()
        for key, value in pairs[:self.MAX_PARAMS]:
            get[key] = value
        return get

    @DictProperty('environ', 'bottle.request.forms', read_only=True)
    def forms(self):
        """ Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is retuned as a
            :class:`FormsDict`. All keys and values are strings. File uploads
            are stored separately in :attr:`files`. """
        forms = FormsDict()
        # Items with a `filename` attribute are uploads; skip them here.
        for name, item in self.POST.allitems():
            if not hasattr(item, 'filename'):
                forms[name] = item
        return forms

    @DictProperty('environ', 'bottle.request.params', read_only=True)
    def params(self):
        """ A :class:`FormsDict` with the combined values of :attr:`query` and
            :attr:`forms`. File uploads are stored in :attr:`files`. """
        params = FormsDict()
        # Form values are added last, so they shadow query values on
        # plain (non-allitems) access.
        for key, value in self.query.allitems():
            params[key] = value
        for key, value in self.forms.allitems():
            params[key] = value
        return params

    @DictProperty('environ', 'bottle.request.files', read_only=True)
    def files(self):
        """ File uploads parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The values are instances of
            :class:`cgi.FieldStorage`. The most important attributes are:

            filename
                The filename, if specified; otherwise None; this is the client
                side filename, *not* the file name on which it is stored (that's
                a temporary file you don't deal with)
            file
                The file(-like) object from which you can read the data.
            value
                The value as a *string*; for file uploads, this transparently
                reads the file every time you request the value. Do not do this
                on big files.
        """
        files = FormsDict()
        for name, item in self.POST.allitems():
            if hasattr(item, 'filename'):
                files[name] = item
        return files

    @DictProperty('environ', 'bottle.request.json', read_only=True)
    def json(self):
        ''' If the ``Content-Type`` header is ``application/json``, this
            property holds the parsed content of the request body. Only requests
            smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
            exhaustion. '''
        if 'application/json' in self.environ.get('CONTENT_TYPE', '') \
        and 0 < self.content_length < self.MEMFILE_MAX:
            return json_loads(self.body.read(self.MEMFILE_MAX))
        return None

    @DictProperty('environ', 'bottle.request.body', read_only=True)
    def _body(self):
        # Buffer the raw wsgi.input stream into a seekable object. Small
        # bodies go to memory, large ones to a temporary file.
        maxread = max(0, self.content_length)
        stream = self.environ['wsgi.input']
        body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
        while maxread > 0:
            part = stream.read(min(maxread, self.MEMFILE_MAX))
            if not part: break
            body.write(part)
            maxread -= len(part)
        # Replace wsgi.input so later consumers read from the buffer.
        self.environ['wsgi.input'] = body
        body.seek(0)
        return body

    @property
    def body(self):
        """ The HTTP request body as a seek-able file-like object. Depending on
            :attr:`MEMFILE_MAX`, this is either a temporary file or a
            :class:`io.BytesIO` instance. Accessing this property for the first
            time reads and replaces the ``wsgi.input`` environ variable.
            Subsequent accesses just do a `seek(0)` on the file object. """
        self._body.seek(0)
        return self._body

    #: An alias for :attr:`query`.
    GET = query

    @DictProperty('environ', 'bottle.request.post', read_only=True)
    def POST(self):
        """ The values of :attr:`forms` and :attr:`files` combined into a single
            :class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`cgi.FieldStorage` (file uploads).
        """
        post = FormsDict()
        safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ: safe_env[key] = self.environ[key]
        # NCTextIOWrapper is only set on Python versions where cgi needs a
        # text wrapper around the binary body.
        if NCTextIOWrapper:
            fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
        else:
            fb = self.body
        data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
        for item in (data.list or [])[:self.MAX_PARAMS]:
            post[item.name] = item if item.filename else item.value
        return post

    @property
    def COOKIES(self):
        ''' Alias for :attr:`cookies` (deprecated). '''
        depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
        return self.cookies

    @property
    def url(self):
        """ The full request URI including hostname and scheme. If your app
            lives behind a reverse proxy or load balancer and you get confusing
            results, make sure that the ``X-Forwarded-Host`` header is set
            correctly. """
        return self.urlparts.geturl()

    @DictProperty('environ', 'bottle.request.urlparts', read_only=True)
    def urlparts(self):
        ''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
            The tuple contains (scheme, host, path, query_string and fragment),
            but the fragment is always empty because it is not visible to the
            server. '''
        env = self.environ
        http = env.get('wsgi.url_scheme', 'http')
        host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
        if not host:
            # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
            host = env.get('SERVER_NAME', '127.0.0.1')
            port = env.get('SERVER_PORT')
            # Omit the port only when it is the default for the scheme.
            if port and port != ('80' if http == 'http' else '443'):
                host += ':' + port
        path = urlquote(self.fullpath)
        return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')

    @property
    def fullpath(self):
        """ Request path including :attr:`script_name` (if present). """
        return urljoin(self.script_name, self.path.lstrip('/'))

    @property
    def query_string(self):
        """ The raw :attr:`query` part of the URL (everything in between ``?``
            and ``#``) as a string. """
        return self.environ.get('QUERY_STRING', '')

    @property
    def script_name(self):
        ''' The initial portion of the URL's `path` that was removed by a higher
            level (server or routing middleware) before the application was
            called. This script path is returned with leading and tailing
            slashes. '''
        script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
        return '/' + script_name + '/' if script_name else '/'

    def path_shift(self, shift=1):
        ''' Shift path segments from :attr:`path` to :attr:`script_name` and
            vice versa.

            :param shift: The number of path segments to shift. May be negative
              to change the shift direction. (default: 1)
        '''
        script = self.environ.get('SCRIPT_NAME','/')
        self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)

    @property
    def content_length(self):
        ''' The request body length as an integer. The client is responsible to
            set this header. Otherwise, the real length of the body is unknown
            and -1 is returned. In this case, :attr:`body` will be empty. '''
        return int(self.environ.get('CONTENT_LENGTH') or -1)

    @property
    def is_xhr(self):
        ''' True if the request was triggered by a XMLHttpRequest. This only
            works with JavaScript libraries that support the `X-Requested-With`
            header (most of the popular libraries do). '''
        requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
        return requested_with.lower() == 'xmlhttprequest'

    @property
    def is_ajax(self):
        ''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
        return self.is_xhr

    @property
    def auth(self):
        """ HTTP authentication data as a (user, password) tuple. This
            implementation currently supports basic (not digest) authentication
            only. If the authentication happened at a higher level (e.g. in the
            front web-server or a middleware), the password field is None, but
            the user field is looked up from the ``REMOTE_USER`` environ
            variable. On any errors, None is returned. """
        basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
        if basic: return basic
        ruser = self.environ.get('REMOTE_USER')
        if ruser: return (ruser, None)
        return None

    @property
    def remote_route(self):
        """ A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This does only
            work if all proxies support the ```X-Forwarded-For`` header. Note
            that this information can be forged by malicious clients. """
        proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
        if proxy: return [ip.strip() for ip in proxy.split(',')]
        remote = self.environ.get('REMOTE_ADDR')
        return [remote] if remote else []

    @property
    def remote_addr(self):
        """ The client IP as a string. Note that this information can be forged
            by malicious clients. """
        route = self.remote_route
        return route[0] if route else None

    def copy(self):
        """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """
        return Request(self.environ.copy())

    # Minimal dict-like interface, delegating to the environ dictionary.
    def get(self, value, default=None): return self.environ.get(value, default)
    def __getitem__(self, key): return self.environ[key]
    def __delitem__(self, key): self[key] = ""; del(self.environ[key])
    def __iter__(self): return iter(self.environ)
    def __len__(self): return len(self.environ)
    def keys(self): return self.environ.keys()
    def __setitem__(self, key, value):
        """ Change an environ value and clear all caches that depend on it. """

        if self.environ.get('bottle.request.readonly'):
            raise KeyError('The environ dictionary is read-only.')

        self.environ[key] = value
        todelete = ()

        # Invalidate the cached DictProperty values derived from this key.
        if key == 'wsgi.input':
            todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
        elif key == 'QUERY_STRING':
            todelete = ('query', 'params')
        elif key.startswith('HTTP_'):
            todelete = ('headers', 'cookies')

        for key in todelete:
            self.environ.pop('bottle.request.'+key, None)

    def __repr__(self):
        return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)

    def __getattr__(self, name):
        ''' Search in self.environ for additional user defined attributes. '''
        try:
            var = self.environ['bottle.request.ext.%s'%name]
            # Honor the descriptor protocol for stored values.
            return var.__get__(self) if hasattr(var, '__get__') else var
        except KeyError:
            raise AttributeError('Attribute %r not defined.' % name)

    def __setattr__(self, name, value):
        if name == 'environ': return object.__setattr__(self, name, value)
        self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
    """ Normalize a header name: Title-Case words, underscores to dashes. """
    titled = s.title()
    return titled.replace('_', '-')
class HeaderProperty(object):
    """ Descriptor that exposes a single header of ``obj.headers`` as a
        typed attribute. `reader` converts on access, `writer` on
        assignment; `default` is returned for missing/empty values. """

    def __init__(self, name, reader=None, writer=str, default=''):
        self.name = name
        self.reader = reader
        self.writer = writer
        self.default = default
        self.__doc__ = 'Current value of the %r header.' % name.title()

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        value = obj.headers.get(self.name)
        if value and self.reader:
            return self.reader(value)
        return value or self.default

    def __set__(self, obj, value):
        if self.writer:
            value = self.writer(value)
        obj.headers[self.name] = value

    def __delete__(self, obj):
        # Deleting a header that is not set is a silent no-op.
        if self.name in obj.headers:
            del obj.headers[self.name]
class BaseResponse(object):
    """ Storage class for a response body as well as headers and cookies.

        This class does support dict-like case-insensitive item-access to
        headers, but is NOT a dict. Most notably, iterating over a response
        yields parts of the body and not the headers.
    """

    default_status = 200
    default_content_type = 'text/html; charset=UTF-8'

    # Header blacklist for specific response codes
    # (rfc2616 section 10.2.3 and 10.3.5)
    bad_headers = {
        204: set(('Content-Type',)),
        304: set(('Allow', 'Content-Encoding', 'Content-Language',
                  'Content-Length', 'Content-Range', 'Content-Type',
                  'Content-Md5', 'Last-Modified'))}

    def __init__(self, body='', status=None, **headers):
        self._status_line = None
        self._status_code = None
        self._cookies = None
        # Headers are stored as {Title-Cased-Name: [values]} lists.
        self._headers = {'Content-Type': [self.default_content_type]}
        self.body = body
        self.status = status or self.default_status
        if headers:
            for name, value in headers.items():
                self[name] = value

    def copy(self):
        ''' Returns a copy of self. '''
        copy = Response()
        copy.status = self.status
        # Copy the value lists too so the copies do not share state.
        copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
        return copy

    def __iter__(self):
        # Iterating a response yields body parts, not headers.
        return iter(self.body)

    def close(self):
        if hasattr(self.body, 'close'):
            self.body.close()

    @property
    def status_line(self):
        ''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
        return self._status_line

    @property
    def status_code(self):
        ''' The HTTP status code as an integer (e.g. 404).'''
        return self._status_code

    def _set_status(self, status):
        # Accept either an int code or a full "code reason" string and keep
        # _status_code and _status_line consistent.
        if isinstance(status, int):
            code, status = status, _HTTP_STATUS_LINES.get(status)
        elif ' ' in status:
            status = status.strip()
            code = int(status.split()[0])
        else:
            raise ValueError('String status line without a reason phrase.')
        if not 100 <= code <= 999: raise ValueError('Status code out of range.')
        self._status_code = code
        self._status_line = status or ('%d Unknown' % code)

    def _get_status(self):
        return self._status_line

    status = property(_get_status, _set_status, None,
        ''' A writeable property to change the HTTP response status. It accepts
            either a numeric code (100-999) or a string with a custom reason
            phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
            :data:`status_code` are updated accordingly. The return value is
            always a status string. ''')
    # Remove the helpers from the class namespace; only the property stays.
    del _get_status, _set_status

    @property
    def headers(self):
        ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
            view on the response headers. '''
        self.__dict__['headers'] = hdict = HeaderDict()
        # The view shares the underlying storage; writes go to _headers.
        hdict.dict = self._headers
        return hdict

    # Case-insensitive dict-style header access (names pass through _hkey).
    def __contains__(self, name): return _hkey(name) in self._headers
    def __delitem__(self, name): del self._headers[_hkey(name)]
    def __getitem__(self, name): return self._headers[_hkey(name)][-1]
    def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]

    def get_header(self, name, default=None):
        ''' Return the value of a previously defined header. If there is no
            header with that name, return a default value. '''
        return self._headers.get(_hkey(name), [default])[-1]

    def set_header(self, name, value, append=False):
        ''' Create a new response header, replacing any previously defined
            headers with the same name. '''
        if append:
            self.add_header(name, value)
        else:
            self._headers[_hkey(name)] = [str(value)]

    def add_header(self, name, value):
        ''' Add an additional response header, not removing duplicates. '''
        self._headers.setdefault(_hkey(name), []).append(str(value))

    def iter_headers(self):
        ''' Yield (header, value) tuples, skipping headers that are not
            allowed with the current response status code. '''
        headers = self._headers.items()
        bad_headers = self.bad_headers.get(self._status_code)
        if bad_headers:
            headers = [h for h in headers if h[0] not in bad_headers]
        for name, values in headers:
            for value in values:
                yield name, value
        if self._cookies:
            for c in self._cookies.values():
                yield 'Set-Cookie', c.OutputString()

    def wsgiheader(self):
        depr('The wsgiheader method is deprecated. See headerlist.') #0.10
        return self.headerlist

    @property
    def headerlist(self):
        ''' WSGI conform list of (header, value) tuples. '''
        return list(self.iter_headers())

    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int)

    @property
    def charset(self):
        """ Return the charset specified in the content-type header (default: utf8). """
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return 'UTF-8'

    @property
    def COOKIES(self):
        """ A dict-like SimpleCookie instance. This should not be used directly.
            See :meth:`set_cookie`. """
        depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
        if not self._cookies:
            self._cookies = SimpleCookie()
        return self._cookies

    def set_cookie(self, name, value, secret=None, **options):
        ''' Create a new cookie or replace an old one. If the `secret` parameter is
            set, create a `Signed Cookie` (described below).

            :param name: the name of the cookie.
            :param value: the value of the cookie.
            :param secret: a signature key required for signed cookies.

            Additionally, this method accepts all RFC 2109 attributes that are
            supported by :class:`cookie.Morsel`, including:

            :param max_age: maximum age in seconds. (default: None)
            :param expires: a datetime object or UNIX timestamp. (default: None)
            :param domain: the domain that is allowed to read the cookie.
              (default: current domain)
            :param path: limits the cookie to a given path (default: current path)
            :param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side javascript to read this cookie
              (default: off, requires Python 2.6 or newer).

            If neither `expires` nor `max_age` is set (default), the cookie will
            expire at the end of the browser session (as soon as the browser
            window is closed).

            Signed cookies may store any pickle-able object and are
            cryptographically signed to prevent manipulation. Keep in mind that
            cookies are limited to 4kb in most browsers.

            Warning: Signed cookies are not encrypted (the client can still see
            the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            save, not to store secret information at client side.
        '''
        if not self._cookies:
            self._cookies = SimpleCookie()
        if secret:
            value = touni(cookie_encode((name, value), secret))
        elif not isinstance(value, basestring):
            raise TypeError('Secret key missing for non-string Cookie.')

        # NOTE(review): error message typo ("to long") kept as-is here; a
        # doc-only change must not alter runtime strings.
        if len(value) > 4096: raise ValueError('Cookie value to long.')
        self._cookies[name] = value

        for key, value in options.items():
            if key == 'max_age':
                if isinstance(value, timedelta):
                    value = value.seconds + value.days * 24 * 3600
            if key == 'expires':
                # Normalize datetime/date or numeric timestamps into the
                # RFC 1123 date format expected by cookies.
                if isinstance(value, (datedate, datetime)):
                    value = value.timetuple()
                elif isinstance(value, (int, float)):
                    value = time.gmtime(value)
                value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
            # Morsel keys use dashes instead of underscores.
            self._cookies[name][key.replace('_', '-')] = value

    def delete_cookie(self, key, **kwargs):
        ''' Delete a cookie. Be sure to use the same `domain` and `path`
            settings as used to create the cookie. '''
        kwargs['max_age'] = -1
        kwargs['expires'] = 0
        self.set_cookie(key, '', **kwargs)

    def __repr__(self):
        out = ''
        for name, value in self.headerlist:
            out += '%s: %s\n' % (name.title(), value.strip())
        return out
#: Thread-local storage for :class:`LocalRequest` and :class:`LocalResponse`
#: attributes. Each thread sees its own set of values.
_lctx = threading.local()
def local_property(name):
    """ Build a property that stores its value in thread-local storage
        (:data:`_lctx`) under the given attribute name. """
    def fget(self):
        return getattr(_lctx, name)
    def fset(self, value):
        setattr(_lctx, name, value)
    def fdel(self):
        delattr(_lctx, name)
    doc = 'Thread-local property stored in :data:`_lctx.%s`' % name
    return property(fget, fset, fdel, doc)
class LocalRequest(BaseRequest):
    ''' A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`request`). If accessed during a
        request/response cycle, this instance always refers to the *current*
        request (even on a multithreaded server). '''
    # Re-binding a request is just re-running the normal initializer.
    bind = BaseRequest.__init__
    # The environ dict lives in thread-local storage, one per thread.
    environ = local_property('request_environ')
class LocalResponse(BaseResponse):
    ''' A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`response`). Its attributes are used
        to build the HTTP response at the end of the request/response cycle.
    '''
    # Re-binding a response is just re-running the normal initializer.
    bind = BaseResponse.__init__
    # All mutable response state is kept per-thread.
    _status_line = local_property('response_status_line')
    _status_code = local_property('response_status_code')
    _cookies = local_property('response_cookies')
    _headers = local_property('response_headers')
    body = local_property('response_body')
#: Backwards-compatible aliases kept for code written against Bottle 0.9.
Response = LocalResponse # BC 0.9
Request = LocalRequest # BC 0.9
###############################################################################
# Plugins ######################################################################
###############################################################################
# Error type for the plugin API.
class PluginError(BottleException): pass
class JSONPlugin(object):
    """ Plugin that serializes dict return values to JSON and sets the
        ``application/json`` content type. Non-dict return values pass
        through untouched. """
    name = 'json'
    api = 2

    def __init__(self, json_dumps=json_dumps):
        self.json_dumps = json_dumps

    def apply(self, callback, context):
        dumps = self.json_dumps
        # Without a serializer, the plugin is a no-op.
        if not dumps:
            return callback
        def wrapper(*a, **ka):
            rv = callback(*a, **ka)
            if not isinstance(rv, dict):
                return rv
            # Serialize first: a serialization failure must not leave a
            # JSON content type on a non-JSON response.
            json_response = dumps(rv)
            response.content_type = 'application/json'
            return json_response
        return wrapper
class HooksPlugin(object):
    """ Plugin that runs registered callbacks before and after each request.
        Route callbacks are only wrapped while at least one request hook is
        installed. """
    name = 'hooks'
    api = 2

    _names = 'before_request', 'after_request', 'app_reset'

    def __init__(self):
        self.hooks = dict((name, []) for name in self._names)
        self.app = None

    def _empty(self):
        # Only the two request hooks matter for wrapping decisions.
        has_any = self.hooks['before_request'] or self.hooks['after_request']
        return not has_any

    def setup(self, app):
        self.app = app

    def add(self, name, func):
        ''' Attach a callback to a hook. '''
        had_none = self._empty()
        self.hooks.setdefault(name, []).append(func)
        # Transitioning from "no hooks" to "some hooks" requires a reset so
        # existing routes get wrapped.
        if self.app and had_none and not self._empty():
            self.app.reset()

    def remove(self, name, func):
        ''' Remove a callback from a hook. '''
        had_some = not self._empty()
        if func in self.hooks.get(name, []):
            self.hooks[name].remove(func)
        # Transitioning back to "no hooks" unwraps the routes again.
        if self.app and had_some and self._empty():
            self.app.reset()

    def trigger(self, name, *a, **ka):
        ''' Trigger a hook and return a list of results. '''
        callbacks = self.hooks[name]
        if ka.pop('reversed', False):
            callbacks = list(reversed(callbacks))
        return [cb(*a, **ka) for cb in callbacks]

    def apply(self, callback, context):
        if self._empty():
            return callback
        def wrapper(*a, **ka):
            self.trigger('before_request')
            rv = callback(*a, **ka)
            # after_request hooks run in reverse registration order.
            self.trigger('after_request', reversed=True)
            return rv
        return wrapper
class TemplatePlugin(object):
    ''' This plugin applies the :func:`view` decorator to all routes with a
        `template` config parameter. If the parameter is a tuple, the second
        element must be a dict with additional options (e.g. `template_engine`)
        or default variables for the template. '''
    name = 'template'
    api = 2

    def apply(self, callback, route):
        conf = route.config.get('template')
        if isinstance(conf, (tuple, list)) and len(conf) == 2:
            # (template_name, options_dict) pair.
            return view(conf[0], **conf[1])(callback)
        elif isinstance(conf, str) and 'template_opts' in route.config:
            # Legacy 0.9 configuration style.
            depr('The `template_opts` parameter is deprecated.') #0.9
            return view(conf, **route.config['template_opts'])(callback)
        elif isinstance(conf, str):
            return view(conf)(callback)
        else:
            # No template configured: leave the callback untouched.
            return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
    # A PEP 302 finder/loader pair: creates a virtual package whose
    # submodule imports are redirected to real modules named by `impmask`.
    def __init__(self, name, impmask):
        ''' Create a virtual package that redirects imports (see PEP 302). '''
        self.name = name
        self.impmask = impmask
        # Reuse an existing module object if the name is already imported.
        self.module = sys.modules.setdefault(name, imp.new_module(name))
        self.module.__dict__.update({'__file__': __file__, '__path__': [],
                                    '__all__': [], '__loader__': self})
        # Register as an import hook.
        sys.meta_path.append(self)

    def find_module(self, fullname, path=None):
        # Only handle direct submodules of the virtual package.
        if '.' not in fullname: return
        packname, modname = fullname.rsplit('.', 1)
        if packname != self.name: return
        return self

    def load_module(self, fullname):
        if fullname in sys.modules: return sys.modules[fullname]
        packname, modname = fullname.rsplit('.', 1)
        # Import the real module and alias it under the virtual name.
        realname = self.impmask % modname
        __import__(realname)
        module = sys.modules[fullname] = sys.modules[realname]
        setattr(self.module, modname, module)
        module.__loader__ = self
        return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
    """ This dict stores multiple values per key, but behaves exactly like a
        normal dict in that it returns only the newest value for any given key.
        There are special methods available to access the full list of values.
    """

    def __init__(self, *a, **k):
        # Internal storage maps each key to a list of values; the last list
        # element is the "current" value.
        self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())

    def __len__(self): return len(self.dict)
    def __iter__(self): return iter(self.dict)
    def __contains__(self, key): return key in self.dict
    def __delitem__(self, key): del self.dict[key]
    def __getitem__(self, key): return self.dict[key][-1]
    def __setitem__(self, key, value): self.append(key, value)
    def keys(self): return self.dict.keys()

    # The iteration API differs between Python 2 and 3; define the matching
    # set of methods for the running interpreter.
    if py3k:
        def values(self): return (v[-1] for v in self.dict.values())
        def items(self): return ((k, v[-1]) for k, v in self.dict.items())
        def allitems(self):
            return ((k, v) for k, vl in self.dict.items() for v in vl)
        iterkeys = keys
        itervalues = values
        iteritems = items
        iterallitems = allitems
    else:
        def values(self): return [v[-1] for v in self.dict.values()]
        def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
        def iterkeys(self): return self.dict.iterkeys()
        def itervalues(self): return (v[-1] for v in self.dict.itervalues())
        def iteritems(self):
            return ((k, v[-1]) for k, v in self.dict.iteritems())
        def iterallitems(self):
            return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
        def allitems(self):
            return [(k, v) for k, vl in self.dict.iteritems() for v in vl]

    def get(self, key, default=None, index=-1, type=None):
        ''' Return the most recent value for a key.

            :param default: The default value to be returned if the key is not
                   present or the type conversion fails.
            :param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                    into a specific type. Exception are suppressed and result in
                    the default value to be returned.
        '''
        try:
            val = self.dict[key][index]
            return type(val) if type else val
        except Exception:
            # Missing key, bad index or failed conversion all fall back to
            # the default (deliberate best-effort behavior).
            pass
        return default

    def append(self, key, value):
        ''' Add a new value to the list of values for this key. '''
        self.dict.setdefault(key, []).append(value)

    def replace(self, key, value):
        ''' Replace the list of values with a single value. '''
        self.dict[key] = [value]

    def getall(self, key):
        ''' Return a (possibly empty) list of values for a key. '''
        return self.dict.get(key) or []

    #: Aliases for WTForms to mimic other multi-dict APIs (Django)
    getone = get
    getlist = getall
class FormsDict(MultiDict):
    ''' This :class:`MultiDict` subclass is used to store request form data.
        Additionally to the normal dict-like item access methods (which return
        unmodified data as native strings), this container also supports
        attribute-like access to its values. Attributes are automatically de-
        or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
        attributes default to an empty string. '''

    #: Encoding used for attribute values.
    input_encoding = 'utf8'
    #: If true (default), unicode strings are first encoded with `latin1`
    #: and then decoded to match :attr:`input_encoding`.
    recode_unicode = True

    def _fix(self, s, encoding=None):
        # WSGI (PEP 3333) delivers text decoded as latin1; re-encode to get
        # the raw bytes back, then decode with the real input encoding.
        if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
            s = s.encode('latin1')
        if isinstance(s, bytes): # Python 2 WSGI
            return s.decode(encoding or self.input_encoding)
        return s

    def decode(self, encoding=None):
        ''' Returns a copy with all keys and values de- or recoded to match
            :attr:`input_encoding`. Some libraries (e.g. WTForms) want a
            unicode dictionary. '''
        copy = FormsDict()
        enc = copy.input_encoding = encoding or self.input_encoding
        # The copy is already decoded, so disable double-recoding on it.
        copy.recode_unicode = False
        for key, value in self.allitems():
            copy.append(self._fix(key, enc), self._fix(value, enc))
        return copy

    def getunicode(self, name, default=None, encoding=None):
        # Like get(), but decoded; decode failures also yield the default.
        try:
            return self._fix(self[name], encoding)
        except (UnicodeError, KeyError):
            return default

    def __getattr__(self, name, default=unicode()):
        # Attribute access never raises: missing keys give an empty string.
        return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
    """ Case-insensitive :class:`MultiDict` for HTTP headers. Item assignment
        replaces any existing values instead of appending to them; all stored
        values are coerced to strings and keys are normalized via `_hkey`. """

    def __init__(self, *a, **ka):
        self.dict = {}
        if a or ka:
            self.update(*a, **ka)

    def __contains__(self, key):
        return _hkey(key) in self.dict

    def __delitem__(self, key):
        del self.dict[_hkey(key)]

    def __getitem__(self, key):
        return self.dict[_hkey(key)][-1]

    def __setitem__(self, key, value):
        self.dict[_hkey(key)] = [str(value)]

    def append(self, key, value):
        self.dict.setdefault(_hkey(key), []).append(str(value))

    def replace(self, key, value):
        self.dict[_hkey(key)] = [str(value)]

    def getall(self, key):
        return self.dict.get(_hkey(key)) or []

    def get(self, key, default=None, index=-1):
        return MultiDict.get(self, _hkey(key), default, index)

    def filter(self, names):
        # Delete every listed header field (case-insensitive).
        for field in (_hkey(n) for n in names):
            if field in self.dict:
                del self.dict[field]
class WSGIHeaderDict(DictMixin):
    ''' This dict-like class wraps a WSGI environ dict and provides convenient
        access to HTTP_* fields. Keys and values are native strings
        (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
        environment contains non-native string values, these are de- or encoded
        using a lossless 'latin1' character set.

        The API will remain stable even on changes to the relevant PEPs.
        Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
        that uses non-native strings.)
    '''
    #: List of keys that do not have a 'HTTP_' prefix.
    cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')

    def __init__(self, environ):
        self.environ = environ

    def _ekey(self, key):
        ''' Translate header field name to CGI/WSGI environ key. '''
        key = key.replace('-','_').upper()
        if key in self.cgikeys:
            return key
        return 'HTTP_' + key

    def raw(self, key, default=None):
        ''' Return the header value as is (may be bytes or unicode). '''
        return self.environ.get(self._ekey(key), default)

    def __getitem__(self, key):
        # latin1 is lossless for any byte value, per the WSGI PEPs.
        return tonat(self.environ[self._ekey(key)], 'latin1')

    def __setitem__(self, key, value):
        raise TypeError("%s is read-only." % self.__class__)

    def __delitem__(self, key):
        raise TypeError("%s is read-only." % self.__class__)

    def __iter__(self):
        # Yield header names in HTTP style ('Content-Type', 'X-Foo', ...).
        for key in self.environ:
            if key[:5] == 'HTTP_':
                yield key[5:].replace('_', '-').title()
            elif key in self.cgikeys:
                yield key.replace('_', '-').title()

    def keys(self): return [x for x in self]
    def __len__(self): return len(self.keys())
    def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
    ''' A dict subclass with attribute-style access to its keys.

        Reading an unknown attribute whose name starts with an upper-case
        letter implicitly creates a nested ConfigDict (a name-space); any
        other unknown attribute yields ``None``. Calling an instance updates
        it from the given arguments and returns the instance itself::

            cfg = ConfigDict()
            cfg.Namespace.value = 5
            cfg.OtherNamespace(a=1, b=2)
            # -> {'Namespace': {'value': 5}, 'OtherNamespace': {'a': 1, 'b': 2}}
    '''

    def __getattr__(self, key):
        # Auto-create upper-case name-spaces on first access.
        if key not in self and key[0].isupper():
            self[key] = ConfigDict()
        return self.get(key)

    def __setattr__(self, key, value):
        # Names that clash with dict methods stay read-only, and an existing
        # non-empty name-space must not be silently overwritten.
        if hasattr(dict, key):
            raise AttributeError('Read-only attribute.')
        if key in self and self[key] and isinstance(self[key], ConfigDict):
            raise AttributeError('Non-empty namespace attribute.')
        self[key] = value

    def __delattr__(self, key):
        if key in self:
            del self[key]

    def __call__(self, *a, **ka):
        for key, value in dict(*a, **ka).items():
            setattr(self, key, value)
        return self
class AppStack(list):
    """ A stack of applications stored as a list. Calling the stack returns
        the application on top (the current default application). """

    def __call__(self):
        """ Return the current default application. """
        return self[-1]

    def push(self, value=None):
        """ Push a :class:`Bottle` instance onto the stack and return it.
            Anything that is not already a Bottle app (including None) is
            replaced by a freshly created one. """
        app = value if isinstance(value, Bottle) else Bottle()
        self.append(app)
        return app
class WSGIFileWrapper(object):
    ''' Wrap a file-like object for use as a WSGI response body: iterating
        the wrapper yields fixed-size chunks, and common file methods of the
        wrapped object are re-exported on the wrapper itself. '''

    def __init__(self, fp, buffer_size=1024*64):
        self.fp, self.buffer_size = fp, buffer_size
        # Mirror the wrapped object's file API where available.
        for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
            if hasattr(fp, attr):
                setattr(self, attr, getattr(fp, attr))

    def __iter__(self):
        read, size = self.read, self.buffer_size
        while True:
            chunk = read(size)
            if not chunk:
                return
            yield chunk
class ResourceManager(object):
    ''' This class manages a list of search paths and helps to find and open
        application-bound resources (files).

        :param base: default value for the same-named :meth:`add_path` parameter.
        :param opener: callable used to open resources (defaults to :func:`open`).
        :param cachemode: controls which lookups are cached. One of 'all',
                          'found' or 'none'.
    '''

    def __init__(self, base='./', opener=open, cachemode='all'):
        # bugfix: honor the user-supplied opener (was hard-coded to `open`).
        self.opener = opener
        self.base = base
        self.cachemode = cachemode
        #: A list of search paths. See :meth:`add_path` for details.
        self.path = []
        #: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
        self.cache = {}

    def add_path(self, path, base=None, index=None, create=False):
        ''' Add a new path to the list of search paths. Return False if the
            path does not exist.

            :param path: The new search path. Relative paths are turned into an
                absolute and normalized form. If the path looks like a file (not
                ending in `/`), the filename is stripped off.
            :param base: Path used to absolutize relative search paths.
                Defaults to :attr:`base` which defaults to ``./``.
            :param index: Position within the list of search paths. Defaults to
                last index (appends to the list).
            :param create: Create non-existent search paths. Off by default.

            The `base` parameter makes it easy to reference files installed
            along with a python module or package::

                res.add_path('./resources/', __file__)
        '''
        base = os.path.abspath(os.path.dirname(base or self.base))
        path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
        path += os.sep
        if path in self.path:
            self.path.remove(path)
        if create and not os.path.isdir(path):
            os.makedirs(path)  # bugfix: os.mkdirs() does not exist
        if index is None:
            self.path.append(path)
        else:
            self.path.insert(index, path)
        self.cache.clear()
        # bugfix: the docstring promised a False return for missing paths,
        # but the method always returned None. Report existence explicitly.
        return os.path.exists(path)

    def __iter__(self):
        ''' Iterate over all existing files in all registered paths. '''
        search = self.path[:]
        while search:
            path = search.pop()
            if not os.path.isdir(path): continue
            for name in os.listdir(path):
                full = os.path.join(path, name)
                # Sub-directories are searched depth-first.
                if os.path.isdir(full): search.append(full)
                else: yield full

    def lookup(self, name):
        ''' Search for a resource and return an absolute file path, or `None`.

            The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
            future lookups. '''
        if name not in self.cache or DEBUG:
            for path in self.path:
                fpath = os.path.join(path, name)
                if os.path.isfile(fpath):
                    if self.cachemode in ('all', 'found'):
                        self.cache[name] = fpath
                    return fpath
            if self.cachemode == 'all':
                self.cache[name] = None
        return self.cache[name]

    def open(self, name, mode='r', *args, **kwargs):
        ''' Find a resource and return a file object, or raise IOError. '''
        fname = self.lookup(name)
        if not fname: raise IOError("Resource %r not found." % name)
        # bugfix: open the resolved absolute path, not the raw resource name.
        return self.opener(fname, mode=mode, *args, **kwargs)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error: Application stopped.'):
    """ Abort request handling by raising an :exc:`HTTPError` with the given
        status code and error text. This function never returns normally. """
    raise HTTPError(code, text)
def redirect(url, code=None):
    """ Abort execution and trigger an HTTP redirect to `url`, which is
        resolved relative to the current request URL. The status defaults to
        303 for HTTP/1.1 clients and 302 otherwise. """
    if code is None:
        # 303 (See Other) only exists since HTTP/1.1; older clients get 302.
        code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
    target = urljoin(request.url, url)
    raise HTTPResponse("", status=code, header=dict(Location=target))
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False):
    """ Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 206, 304, 403, 404 or 416. Set Content-Type, Content-Encoding,
        Content-Length and Last-Modified header. Obey If-Modified-Since header
        and HEAD requests.
    """
    root = os.path.abspath(root) + os.sep
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    header = dict()

    # Path-traversal guard: the resolved filename must stay below root.
    if not filename.startswith(root):
        return HTTPError(403, "Access denied.")
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(404, "File does not exist.")
    if not os.access(filename, os.R_OK):
        return HTTPError(403, "You do not have permission to access this file.")

    if mimetype == 'auto':
        mimetype, encoding = mimetypes.guess_type(filename)
        if mimetype: header['Content-Type'] = mimetype
        if encoding: header['Content-Encoding'] = encoding
    elif mimetype:
        header['Content-Type'] = mimetype

    if download:
        # download=True uses the file's own basename; a string overrides it.
        download = os.path.basename(filename if download == True else download)
        header['Content-Disposition'] = 'attachment; filename="%s"' % download

    stats = os.stat(filename)
    header['Content-Length'] = clen = stats.st_size
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    header['Last-Modified'] = lm

    # Conditional GET: answer 304 when the client copy is still fresh.
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = parse_date(ims.split(";")[0].strip())
    if ims is not None and ims >= int(stats.st_mtime):
        header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
        return HTTPResponse(status=304, header=header)

    # HEAD requests get headers only; the body is never opened/read.
    body = '' if request.method == 'HEAD' else open(filename, 'rb')

    header["Accept-Ranges"] = "bytes"
    ranges = request.environ.get('HTTP_RANGE')
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
        if not ranges:
            return HTTPError(416, "Requested Range Not Satisfiable")
        # Only the first requested range is served (no multipart ranges).
        offset, end = ranges[0]
        header["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
        header["Content-Length"] = str(end-offset)
        if body: body = _file_iter_range(body, offset, end-offset)
        return HTTPResponse(body, header=header, status=206)
    return HTTPResponse(body, header=header)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
    """ Change the debug level.
        There is only one debug level supported at the moment.

        Sets the module-global ``DEBUG`` flag (coerced to bool). """
    global DEBUG
    DEBUG = bool(mode)
def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps. Return seconds since
        the UTC epoch, or None if the string cannot be parsed. """
    try:
        parts = email.utils.parsedate_tz(ims)
        # mktime() treats the tuple as local time (DST forced off); removing
        # the local offset and the parsed zone offset yields UTC seconds.
        return time.mktime(parts[:8] + (0,)) - (parts[9] or 0) - time.timezone
    except (TypeError, ValueError, IndexError, OverflowError):
        # parsedate_tz() returns None for garbage -> TypeError above.
        return None
def parse_auth(header):
    """ Parse an rfc2617 HTTP Basic authentication header string and return a
        (user, password) tuple, or None if the header cannot be parsed or does
        not use the basic scheme. """
    try:
        scheme, data = header.split(None, 1)
        if scheme.lower() == 'basic':
            decoded = touni(base64.b64decode(tob(data)))
            user, pwd = decoded.split(':', 1)
            return user, pwd
    except (KeyError, ValueError):
        return None
def parse_range_header(header, maxlen=0):
    ''' Yield (start, end) byte-range pairs parsed from an HTTP Range header.
        Unsatisfiable ranges are skipped silently. The end index is exclusive. '''
    if not header or header[:6] != 'bytes=':
        return
    for spec in header[6:].split(','):
        if '-' not in spec:
            continue
        first, last = spec.split('-', 1)
        try:
            if not first:       # bytes=-100 -> last 100 bytes
                begin, stop = max(0, maxlen - int(last)), maxlen
            elif not last:      # bytes=100- -> everything from offset 100
                begin, stop = int(first), maxlen
            else:               # bytes=100-200 -> inclusive range 100..200
                begin, stop = int(first), min(int(last) + 1, maxlen)
            if 0 <= begin < stop <= maxlen:
                yield begin, stop
        except ValueError:
            pass
def _lscmp(a, b):
''' Compares two strings in a cryptographically save way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
    ''' Encode and sign a pickle-able object. Return a (byte) string.

        The payload is a base64-encoded pickle; the signature is an HMAC-MD5
        over that payload. MD5 is kept for wire-compatibility with cookies
        issued by older versions of this code.
    '''
    import hashlib  # local import keeps the fix self-contained
    msg = base64.b64encode(pickle.dumps(data, -1))
    # bugfix: Python 3.8 removed the implicit MD5 default of hmac.new(), so
    # the digest algorithm must now be passed explicitly.
    sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
    return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
    ''' Verify and decode an encoded string. Return an object or None.'''
    import hashlib  # local import keeps the fix self-contained
    data = tob(data)
    if cookie_is_encoded(data):
        sig, msg = data.split(tob('?'), 1)
        # bugfix: Python 3.8 removed the implicit MD5 default of hmac.new();
        # pass the digest algorithm explicitly (MD5 for wire compatibility).
        expected = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
        # Constant-time comparison; sig[1:] skips the leading '!' marker.
        if _lscmp(sig[1:], expected):
            # NOTE(review): pickle.loads on cookie data is only acceptable
            # because the HMAC signature is verified first; the key must be
            # kept secret.
            return pickle.loads(base64.b64decode(msg))
    return None
def cookie_is_encoded(data):
    ''' Return True if `data` looks like a signed cookie as produced by
        :func:`cookie_encode` (leading '!' marker and a '?' separator). '''
    marker, sep = tob('!'), tob('?')
    return bool(data.startswith(marker) and sep in data)
def html_escape(string):
    ''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
    # bugfix: the replacement strings had lost their HTML entities (each
    # character mapped to itself, a no-op); restore the entity references.
    # '&' must be replaced first so later entities are not double-escaped.
    return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
                 .replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
    ''' Escape and quote a string to be used as an HTTP attribute.'''
    # bugfix: the numeric character references for newline, carriage return
    # and tab were garbled ('%#10;' and literal CR/TAB chars); restore them
    # so header/attribute values cannot break out of the quoted string.
    return '"%s"' % html_escape(string).replace('\n', '&#10;')\
                    .replace('\r', '&#13;').replace('\t', '&#9;')
def yieldroutes(func):
    """ Return a generator for routes that match the signature (name, args)
        of the func parameter. This may yield more than one route if the
        function takes optional keyword arguments. The output is best
        described by example::

            a()          -> '/a'
            b(x, y)      -> '/b/:x/:y'
            c(x, y=5)    -> '/c/:x' and '/c/:x/:y'
            d(x=5, y=6)  -> '/d' and '/d/:x' and '/d/:x/:y'
    """
    import inspect # Expensive module. Only import if necessary.
    path = '/' + func.__name__.replace('__','/').lstrip('/')
    # bugfix: inspect.getargspec() was removed in Python 3.11. Prefer
    # getfullargspec() where available; both store positional argument names
    # at index 0 and their defaults at index 3.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    spec = getspec(func)
    argc = len(spec[0]) - len(spec[3] or [])
    # Required arguments become mandatory path segments...
    path += ('/:%s' * argc) % tuple(spec[0][:argc])
    yield path
    # ...and each defaulted argument adds one progressively longer variant.
    for arg in spec[0][argc:]:
        path += '/:%s' % arg
        yield path
def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :return: The modified (script_name, path_info) paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative
                      to change the shift direction. (default: 1)
    '''
    if shift == 0:
        return script_name, path_info
    frags = path_info.strip('/').split('/')
    scripts = script_name.strip('/').split('/')
    # splitting an empty string yields [''] -- normalize that to [].
    if frags and frags[0] == '': frags = []
    if scripts and scripts[0] == '': scripts = []
    if 0 < shift <= len(frags):
        # Move leading PATH_INFO fragments onto the end of SCRIPT_NAME.
        moved = frags[:shift]
        scripts = scripts + moved
        frags = frags[shift:]
    elif 0 > shift >= -len(scripts):
        # Move trailing SCRIPT_NAME fragments back in front of PATH_INFO.
        moved = scripts[shift:]
        frags = moved + frags
        scripts = scripts[:shift]
    else:
        empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % empty)
    new_script_name = '/' + '/'.join(scripts)
    new_path_info = '/' + '/'.join(frags)
    # Preserve a trailing slash on PATH_INFO if one was present.
    if path_info.endswith('/') and frags: new_path_info += '/'
    return new_script_name, new_path_info
def auth_basic(check, realm="private", text="Access denied"):
    ''' Callback decorator to require HTTP basic authentication.

        :param check: callable(user, password) that returns True for valid
                      credentials.
        :param realm: realm reported in the WWW-Authenticate challenge.
        :param text: error text of the 401 response.

        TODO: Add route(check_auth=...) parameter. '''
    def decorator(func):
        # bugfix: preserve the wrapped callback's name/docstring so route
        # auto-naming and introspection keep working.
        @functools.wraps(func)
        def wrapper(*a, **ka):
            user, password = request.auth or (None, None)
            if user is None or not check(user, password):
                response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
                return HTTPError(401, text)
            return func(*a, **ka)
        return wrapper
    return decorator
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
    ''' Return a callable that relays calls to the current default app. '''
    @functools.wraps(getattr(Bottle, name))
    def relay(*a, **ka):
        # Resolve the default application at call time, not definition time.
        method = getattr(app(), name)
        return method(*a, **ka)
    return relay
# Module-level shortcut functions: each one relays to the same-named method
# of the current default application (the top of the application stack).
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')  # note: 'url' maps to Bottle.get_url
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
    ''' Base class for server adapters. Stores the host, port and any extra
        keyword options; subclasses override :meth:`run` to start the actual
        server with a WSGI handler. '''
    quiet = False

    def __init__(self, host='127.0.0.1', port=8080, **config):
        self.options = config
        self.host = host
        self.port = int(port)

    def run(self, handler): # pragma: no cover
        pass

    def __repr__(self):
        kv = ', '.join('%s=%s' % (k, repr(v)) for k, v in self.options.items())
        return "%s(%s)" % (self.__class__.__name__, kv)
class CGIServer(ServerAdapter):
    ''' Run the application once per request through wsgiref's CGI handler. '''
    quiet = True
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        def fixed_environ(environ, start_response):
            # Some CGI environments omit PATH_INFO entirely; default it.
            environ.setdefault('PATH_INFO', '')
            return handler(environ, start_response)
        CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
    ''' FastCGI server via flup; binds to (host, port) unless an explicit
        bindAddress option (e.g. a unix socket path) is supplied. '''
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        self.options.setdefault('bindAddress', (self.host, self.port))
        flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
    ''' Single-threaded reference server from the standard library (default). '''
    def run(self, handler): # pragma: no cover
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        if self.quiet:
            # Suppress per-request logging to stderr.
            class QuietHandler(WSGIRequestHandler):
                def log_request(*args, **kw): pass
            self.options['handler_class'] = QuietHandler
        srv = make_server(self.host, self.port, handler, **self.options)
        srv.serve_forever()
class CherryPyServer(ServerAdapter):
    ''' Multi-threaded production-grade server from the CherryPy project. '''
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
        try:
            server.start()
        finally:
            # Make sure worker threads are shut down even on errors/Ctrl-C.
            server.stop()
class WaitressServer(ServerAdapter):
    ''' Pure-python multi-threaded server from the Pylons project. '''
    def run(self, handler):
        from waitress import serve
        serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
    ''' Multi-threaded server from the Paste project. '''
    def run(self, handler): # pragma: no cover
        from paste import httpserver
        if not self.quiet:
            # Wrap the app in an access-log middleware unless quiet.
            from paste.translogger import TransLogger
            handler = TransLogger(handler)
        httpserver.serve(handler, host=self.host, port=str(self.port),
                         **self.options)
class MeinheldServer(ServerAdapter):
    ''' Asynchronous server based on the meinheld C extension. '''
    def run(self, handler):
        from meinheld import server
        server.listen((self.host, self.port))
        server.run(handler)
class FapwsServer(ServerAdapter):
    """ Extremely fast webserver using libev. See http://www.fapws.org/ """
    def run(self, handler): # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base, config
        port = self.port
        if float(config.SERVER_IDENT[-2:]) > 0.4:
            # fapws3 silently changed its API in 0.5 (port became a string)
            port = str(port)
        evwsgi.start(self.host, port)
        # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
        if 'BOTTLE_CHILD' in os.environ and not self.quiet:
            _stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
            _stderr("         (Fapws3 breaks python thread support)\n")
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            # fapws runs single-process; advertise that to the WSGI app.
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('', app))
        evwsgi.run()
class TornadoServer(ServerAdapter):
    """ The super hyped asynchronous server by facebook. Untested. """
    def run(self, handler): # pragma: no cover
        import tornado.wsgi, tornado.httpserver, tornado.ioloop
        # Wrap the WSGI app for Tornado's async HTTP server and block on
        # the IOLoop.
        container = tornado.wsgi.WSGIContainer(handler)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(port=self.port)
        tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
    """ Adapter for Google App Engine. """
    quiet = True
    def run(self, handler):
        from google.appengine.ext.webapp import util
        # A main() function in the handler script enables 'App Caching'.
        # Lets makes sure it is there. This _really_ improves performance.
        module = sys.modules.get('__main__')
        if module and not hasattr(module, 'main'):
            module.main = lambda: util.run_wsgi_app(handler)
        util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        # WSGI apps are synchronous; Twisted runs them on a thread pool.
        thread_pool = ThreadPool()
        thread_pool.start()
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
        reactor.listenTCP(self.port, factory, interface=self.host)
        reactor.run()
class DieselServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from diesel.protocols.wsgi import WSGIApplication
        app = WSGIApplication(handler, port=self.port)
        app.run()
class GeventServer(ServerAdapter):
    """ Untested. Options:

        * `monkey` (default: True) fixes the stdlib to use greenthreads.
        * `fast` (default: False) uses libevent's http server, but has some
          issues: No streaming, no pipelining, no SSL.
    """
    def run(self, handler):
        from gevent import wsgi as wsgi_fast, pywsgi, monkey, local
        if self.options.get('monkey', True):
            # Only monkey-patch if gevent's greenlet-locals are not installed yet.
            if not threading.local is local.local: monkey.patch_all()
        wsgi = wsgi_fast if self.options.get('fast') else pywsgi
        log = None if self.quiet else 'default'
        wsgi.WSGIServer((self.host, self.port), handler, log=log).serve_forever()
class GunicornServer(ServerAdapter):
    """ Untested. See http://gunicorn.org/configure.html for options. """
    def run(self, handler):
        from gunicorn.app.base import Application

        config = {'bind': "%s:%d" % (self.host, int(self.port))}
        config.update(self.options)

        # Gunicorn expects an Application subclass that supplies its own
        # configuration and WSGI callable.
        class GunicornApplication(Application):
            def init(self, parser, opts, args):
                return config

            def load(self):
                return handler

        GunicornApplication().run()
class EventletServer(ServerAdapter):
    """ Untested """
    def run(self, handler):
        from eventlet import wsgi, listen
        try:
            wsgi.server(listen((self.host, self.port)), handler,
                        log_output=(not self.quiet))
        except TypeError:
            # Fallback, if we have old version of eventlet
            # (without the log_output keyword argument).
            wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from rocket import Rocket
        server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
        server.start()
class BjoernServer(ServerAdapter):
    """ Fast server written in C: https://github.com/jonashaag/bjoern """
    def run(self, handler):
        from bjoern import run
        run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
    """ Try each adapter in :attr:`adapters` order and run the first one
        whose backing library can be imported. """
    adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]

    def run(self, handler):
        for candidate in self.adapters:
            try:
                return candidate(self.host, self.port, **self.options).run(handler)
            except ImportError:
                # Backing library not installed; try the next adapter.
                pass
# Maps the server names accepted by run(server=...) to adapter classes.
server_names = {
    'cgi': CGIServer,
    'flup': FlupFCGIServer,
    'wsgiref': WSGIRefServer,
    'waitress': WaitressServer,
    'cherrypy': CherryPyServer,
    'paste': PasteServer,
    'fapws3': FapwsServer,
    'tornado': TornadoServer,
    'gae': AppEngineServer,
    'twisted': TwistedServer,
    'diesel': DieselServer,
    'meinheld': MeinheldServer,
    'gunicorn': GunicornServer,
    'eventlet': EventletServer,
    'gevent': GeventServer,
    'rocket': RocketServer,
    'bjoern' : BjoernServer,
    'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
    """ Import a module or fetch an object from a module.

        * ``package.module`` returns `module` as a module object.
        * ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
        * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.

        The last form accepts not only function calls, but any type of
        expression. Keyword arguments passed to this function are available as
        local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
    """
    module, target = target.split(":", 1) if ':' in target else (target, None)
    if module not in sys.modules: __import__(module)
    if not target: return sys.modules[module]
    # Simple identifiers are resolved as attributes; anything else is
    # treated as an expression and evaluated.
    if target.isalnum(): return getattr(sys.modules[module], target)
    package_name = module.split('.')[0]
    namespace[package_name] = sys.modules[package_name]
    # NOTE(review): the expression form is eval()'d -- never pass untrusted
    # strings to load().
    return eval('%s.%s' % (module, target), namespace)
def load_app(target):
    """ Load a bottle application from a module and make sure that the import
        does not affect the current default application, but returns a separate
        application object. See :func:`load` for the target parameter. """
    # Temporarily set NORUN so run() calls executed during the import are
    # no-ops; restore the previous value afterwards.
    global NORUN; NORUN, nr_old = True, NORUN
    try:
        tmp = default_app.push() # Create a new "default application"
        rv = load(target) # Import the target module
        # Prefer an explicitly returned app; fall back to the one the module
        # (implicitly) populated via the default-app stack.
        return rv if callable(rv) else tmp
    finally:
        default_app.remove(tmp) # Remove the temporary added default application
        NORUN = nr_old
_debug = debug  # Keep a reference: run() shadows `debug` with a parameter name.
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, plugins=None,
        debug=False, **kargs):
    """ Start a server instance. This method blocks until the server terminates.

        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param options: Options passed to the server adapter.
     """
    if NORUN: return
    # Reloader parent process: spawn child processes (BOTTLE_CHILD=true) in a
    # loop and keep a lockfile's mtime fresh as an "I am alive" heartbeat.
    # A child exit code of 3 means "source changed, restart me".
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        try:
            lockfile = None
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd) # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None: # Busy wait...
                    os.utime(lockfile, None) # I am alive!
                    time.sleep(interval)
                if p.poll() != 3:
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return

    try:
        _debug(debug)
        app = app or default_app()
        # Accept a "module:app" target string as well as an app object.
        if isinstance(app, basestring):
            app = load_app(app)
        if not callable(app):
            raise ValueError("Application is not callable: %r" % app)

        for plugin in plugins or []:
            app.install(plugin)

        # Resolve the server argument: name -> class -> instance.
        if server in server_names:
            server = server_names.get(server)
        if isinstance(server, basestring):
            server = load(server)
        if isinstance(server, type):
            server = server(host=host, port=port, **kargs)
        if not isinstance(server, ServerAdapter):
            raise ValueError("Unknown or unsupported server: %r" % server)

        server.quiet = server.quiet or quiet
        if not server.quiet:
            _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
            _stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
            _stderr("Hit Ctrl-C to quit.\n\n")

        if reloader:
            # Reloader child process: watch module files in the background and
            # exit with code 3 to request a restart when one changes.
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        if not reloader: raise
        if not getattr(server, 'quiet', quiet):
            print_exc()
        time.sleep(interval)
        sys.exit(3)
class FileCheckerThread(threading.Thread):
    ''' Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. '''

    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None

    def run(self):
        exists = os.path.exists
        mtime = lambda path: os.stat(path).st_mtime
        files = dict()

        # Snapshot the mtimes of all currently loaded module files. For
        # compiled files (.pyc/.pyo) watch the corresponding .py source.
        for module in list(sys.modules.values()):
            path = getattr(module, '__file__', '')
            if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
            if path and exists(path): files[path] = mtime(path)

        while not self.status:
            # A missing or stale lockfile means the parent process died.
            if not exists(self.lockfile)\
            or mtime(self.lockfile) < time.time() - self.interval - 5:
                self.status = 'error'
                thread.interrupt_main()
            for path, lmtime in list(files.items()):
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 'reload'
                    thread.interrupt_main()
                    break
            time.sleep(self.interval)

    def __enter__(self):
        self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.status: self.status = 'exit' # silent exit
        self.join()
        # Swallow the KeyboardInterrupt raised by interrupt_main().
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup]
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if os.path.isfile(name): return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (*args)
or directly, as keywords (**kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTALTemplate(BaseTemplate):
''' Deprecated, do not use. '''
def prepare(self, **options):
depr('The SimpleTAL template handler is deprecated'\
' and will be removed in 0.12')
from simpletal import simpleTAL
if self.source:
self.tpl = simpleTAL.compileHTMLTemplate(self.source)
else:
with open(self.filename, 'rb') as fp:
self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))
def render(self, *args, **kwargs):
from simpletal import simpleTALES
for dictarg in args: kwargs.update(dictarg)
context = simpleTALES.Context()
for k,v in self.defaults.items():
context.addGlobal(k, v)
for k,v in kwargs.items():
context.addGlobal(k, v)
output = StringIO()
self.tpl.expand(context, output)
return output.getvalue()
class SimpleTemplate(BaseTemplate):
blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
'with', 'def', 'class')
dedent_blocks = ('elif', 'else', 'except', 'finally')
@lazy_attribute
def re_pytokens(cls):
''' This matches comments and all kinds of quoted strings but does
NOT match comments (#...) within quoted strings. (trust me) '''
return re.compile(r'''
(''(?!')|""(?!")|'{6}|"{6} # Empty strings (all 4 types)
|'(?:[^\\']|\\.)+?' # Single quotes (')
|"(?:[^\\"]|\\.)+?" # Double quotes (")
|'{3}(?:[^\\]|\\.|\n)+?'{3} # Triple-quoted strings (')
|"{3}(?:[^\\]|\\.|\n)+?"{3} # Triple-quoted strings (")
|\#.* # Comments
)''', re.VERBOSE)
def prepare(self, escape_func=html_escape, noescape=False, **kwargs):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
if noescape:
self._str, self._escape = self._escape, self._str
@classmethod
def split_comment(cls, code):
""" Removes comments (#...) from python code. """
if '#' not in code: return code
#: Remove comments only (leave quoted strings as they are)
subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
return re.sub(cls.re_pytokens, subf, code)
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
stack = [] # Current Code indentation
lineno = 0 # Current line of code
ptrbuffer = [] # Buffer for printable strings and token tuple instances
codebuffer = [] # Buffer for generated python code
multiline = dedent = oneline = False
template = self.source or open(self.filename, 'rb').read()
def yield_tokens(line):
for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
if i % 2:
if part.startswith('!'): yield 'RAW', part[1:]
else: yield 'CMD', part
else: yield 'TXT', part
def flush(): # Flush the ptrbuffer
if not ptrbuffer: return
cline = ''
for line in ptrbuffer:
for token, value in line:
if token == 'TXT': cline += repr(value)
elif token == 'RAW': cline += '_str(%s)' % value
elif token == 'CMD': cline += '_escape(%s)' % value
cline += ', '
cline = cline[:-2] + '\\\n'
cline = cline[:-2]
if cline[:-1].endswith('\\\\\\\\\\n'):
cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
cline = '_printlist([' + cline + '])'
del ptrbuffer[:] # Do this before calling code() again
code(cline)
def code(stmt):
for line in stmt.splitlines():
codebuffer.append(' ' * len(stack) + line.strip())
for line in template.splitlines(True):
lineno += 1
line = touni(line, self.encoding)
sline = line.lstrip()
if lineno <= 2:
m = re.match(r"%\s*#.*coding[:=]\s*([-\w.]+)", sline)
if m: self.encoding = m.group(1)
if m: line = line.replace('coding','coding (removed)')
if sline and sline[0] == '%' and sline[:2] != '%%':
line = line.split('%',1)[1].lstrip() # Full line following the %
cline = self.split_comment(line).strip()
cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
flush() # You are actually reading this? Good luck, it's a mess :)
if cmd in self.blocks or multiline:
cmd = multiline or cmd
dedent = cmd in self.dedent_blocks # "else:"
if dedent and not oneline and not multiline:
cmd = stack.pop()
code(line)
oneline = not cline.endswith(':') # "if 1: pass"
multiline = cmd if cline.endswith('\\') else False
if not oneline and not multiline:
stack.append(cmd)
elif cmd == 'end' and stack:
code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
elif cmd == 'include':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
elif p:
code("_=_include(%s, _stdout)" % repr(p[0]))
else: # Empty %include -> reverse of %rebase
code("_printlist(_base)")
elif cmd == 'rebase':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
elif p:
code("globals()['_rebase']=(%s, {})" % repr(p[0]))
else:
code(line)
else: # Line starting with text (not '%') or '%%' (escaped)
if line.strip().startswith('%%'):
line = line.replace('%%', '%', 1)
ptrbuffer.append(yield_tokens(line))
flush()
return '\n'.join(codebuffer) + '\n'
def subtemplate(self, _name, _stdout, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(_stdout, kwargs)
def execute(self, _stdout, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
env = self.defaults.copy()
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'_include': self.subtemplate, '_str': self._str,
'_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__})
env.update(kwargs)
eval(self.co, env)
if '_rebase' in env:
subtpl, rargs = env['_rebase']
rargs['_base'] = _stdout[:] #copy stdout
del _stdout[:] # clear stdout
return self.subtemplate(subtpl,_stdout,rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
for dictarg in args: kwargs.update(dictarg)
stdout = []
self.execute(stdout, kwargs)
return ''.join(stdout)
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
if tpl not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
if isinstance(tpl, template_adapter):
TEMPLATES[tpl] = tpl
if settings: TEMPLATES[tpl].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tpl]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tpl].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
%%status_name = HTTP_CODES.get(e.status, 'Unknown').title()
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error {{e.status}}: {{status_name}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error {{e.status}}: {{status_name}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.output}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect(__name__+'.ext', 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host:
host, port = host.rsplit(':', 1)
run(args[0], host=host, port=port, server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
|
ryanpetrello/cleaver | cleaver/reports/web/bottle.py | Bottle.mount | python | def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
prefix, app = app, prefix
depr('Parameter order of Bottle.mount() changed.') # 0.10
parts = [p for p in prefix.split('/') if p]
if not parts: raise ValueError('Empty path prefix.')
path_depth = len(parts)
options.setdefault('skip', True)
options.setdefault('method', 'ANY')
@self.route('/%s/:#.*#' % '/'.join(parts), **options)
def mountpoint():
try:
request.path_shift(path_depth)
rs = BaseResponse([], 200)
def start_response(status, header):
rs.status = status
for name, value in header: rs.add_header(name, value)
return rs.body.append
rs.body = itertools.chain(rs.body, app(request.environ, start_response))
return HTTPResponse(rs.body, rs.status_code, rs.headers)
finally:
request.path_shift(-path_depth)
if not prefix.endswith('/'):
self.route('/' + '/'.join(parts), callback=mountpoint, **options) | Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/reports/web/bottle.py#L581-L618 | [
"def depr(message):\n warnings.warn(message, DeprecationWarning, stacklevel=3)\n",
"def route(self, path=None, method='GET', callback=None, name=None,\n apply=None, skip=None, **config):\n \"\"\" A decorator to bind a function to a request URL. Example::\n\n @app.route('/hello/:name')\n ... | class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
self.catchall = catchall
#: A :cls:`ResourceManager` for application files
self.resources = ResourceManager()
#: A :cls:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config.autojson = autojson
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.hooks = HooksPlugin()
self.install(self.hooks)
if self.config.autojson:
self.install(JSONPlugin())
self.install(TemplatePlugin())
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def run(self, **kwargs):
''' Calls :func:`run` with the same parameters. '''
run(self, **kwargs)
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.hooks.trigger('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. Three hooks
are currently implemented:
- before_request: Executed once before each request
- after_request: Executed once after each request
- app_reset: Called whenever :meth:`reset` is called.
"""
def wrapper(func):
self.hooks.add(name, func)
return func
return wrapper
def handle(self, path, method='GET'):
""" (deprecated) Execute the first matching route callback and return
the result. :exc:`HTTPResponse` exceptions are caught and returned.
If :attr:`Bottle.catchall` is true, other exceptions are caught as
well and returned as :exc:`HTTPError` instances (500).
"""
depr("This method will change semantics in 0.10. Try to avoid it.")
if isinstance(path, dict):
return self._handle(path)
return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
def _handle(self, environ):
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
route, args = self.router.match(environ)
environ['route.handle'] = environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc(10)
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status, repr)(out)
if isinstance(out, HTTPResponse):
depr('Error handlers must not return :exc:`HTTPResponse`.') #0.9
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.output)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
out = iter(out)
first = next(out)
while not first:
first = next(out)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc(10))
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
if isinstance(first, bytes):
return itertools.chain([first], out)
if isinstance(first, unicode):
return imap(lambda x: x.encode(response.charset),
itertools.chain([first], out))
return self._cast(HTTPError(500, 'Unsupported response type: %s'\
% type(first)))
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or request.method == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
if isinstance(response._status_line, unicode):
response._status_line = str(response._status_line)
start_response(response._status_line, list(response.iter_headers()))
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc(10)))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers)
return [tob(err)]
def __call__(self, environ, start_response):
''' Each instance of :class:'Bottle' is a WSGI application. '''
return self.wsgi(environ, start_response)
|
ryanpetrello/cleaver | cleaver/reports/web/bottle.py | BaseRequest.query | python | def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. '''
pairs = parse_qsl(self.query_string, keep_blank_values=True)
get = self.environ['bottle.get'] = FormsDict()
for key, value in pairs[:self.MAX_PARAMS]:
get[key] = value
return get | The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/reports/web/bottle.py#L988-L997 | null | class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
#: Maximum number pr GET or POST parameters per request
MAX_PARAMS = 100
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
cookies = list(cookies.values())[:self.MAX_PARAMS]
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is retuned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not hasattr(item, 'filename'):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The values are instances of
:class:`cgi.FieldStorage`. The most important attributes are:
filename
The filename, if specified; otherwise None; this is the client
side filename, *not* the file name on which it is stored (that's
a temporary file you don't deal with)
file
The file(-like) object from which you can read the data.
value
The value as a *string*; for file uploads, this transparently
reads the file every time you request the value. Do not do this
on big files.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if hasattr(item, 'filename'):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if 'application/json' in self.environ.get('CONTENT_TYPE', '') \
and 0 < self.content_length < self.MEMFILE_MAX:
return json_loads(self.body.read(self.MEMFILE_MAX))
return None
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, self.MEMFILE_MAX))
if not part: break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in (data.list or [])[:self.MAX_PARAMS]:
post[item.name] = item if item.filename else item.value
return post
@property
def COOKIES(self):
''' Alias for :attr:`cookies` (deprecated). '''
depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
return self.cookies
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and tailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
''' The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This does only
work if all proxies support the ```X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
''' Search in self.environ for additional user defined attributes. '''
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
|
ryanpetrello/cleaver | cleaver/reports/web/bottle.py | BaseRequest.forms | python | def forms(self):
forms = FormsDict()
for name, item in self.POST.allitems():
if not hasattr(item, 'filename'):
forms[name] = item
return forms | Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is retuned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/reports/web/bottle.py#L1000-L1009 | null | class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
#: Maximum number pr GET or POST parameters per request
MAX_PARAMS = 100
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
cookies = list(cookies.values())[:self.MAX_PARAMS]
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. '''
pairs = parse_qsl(self.query_string, keep_blank_values=True)
get = self.environ['bottle.get'] = FormsDict()
for key, value in pairs[:self.MAX_PARAMS]:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The values are instances of
:class:`cgi.FieldStorage`. The most important attributes are:
filename
The filename, if specified; otherwise None; this is the client
side filename, *not* the file name on which it is stored (that's
a temporary file you don't deal with)
file
The file(-like) object from which you can read the data.
value
The value as a *string*; for file uploads, this transparently
reads the file every time you request the value. Do not do this
on big files.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if hasattr(item, 'filename'):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if 'application/json' in self.environ.get('CONTENT_TYPE', '') \
and 0 < self.content_length < self.MEMFILE_MAX:
return json_loads(self.body.read(self.MEMFILE_MAX))
return None
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, self.MEMFILE_MAX))
if not part: break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in (data.list or [])[:self.MAX_PARAMS]:
post[item.name] = item if item.filename else item.value
return post
@property
def COOKIES(self):
''' Alias for :attr:`cookies` (deprecated). '''
depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
return self.cookies
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and tailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
''' The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This does only
work if all proxies support the ```X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
''' Search in self.environ for additional user defined attributes. '''
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
|
ryanpetrello/cleaver | cleaver/reports/web/bottle.py | BaseRequest.files | python | def files(self):
files = FormsDict()
for name, item in self.POST.allitems():
if hasattr(item, 'filename'):
files[name] = item
return files | File uploads parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The values are instances of
:class:`cgi.FieldStorage`. The most important attributes are:
filename
The filename, if specified; otherwise None; this is the client
side filename, *not* the file name on which it is stored (that's
a temporary file you don't deal with)
file
The file(-like) object from which you can read the data.
value
The value as a *string*; for file uploads, this transparently
reads the file every time you request the value. Do not do this
on big files. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/reports/web/bottle.py#L1023-L1043 | null | class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
#: Maximum number pr GET or POST parameters per request
MAX_PARAMS = 100
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
cookies = list(cookies.values())[:self.MAX_PARAMS]
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. '''
pairs = parse_qsl(self.query_string, keep_blank_values=True)
get = self.environ['bottle.get'] = FormsDict()
for key, value in pairs[:self.MAX_PARAMS]:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is retuned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not hasattr(item, 'filename'):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if 'application/json' in self.environ.get('CONTENT_TYPE', '') \
and 0 < self.content_length < self.MEMFILE_MAX:
return json_loads(self.body.read(self.MEMFILE_MAX))
return None
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, self.MEMFILE_MAX))
if not part: break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in (data.list or [])[:self.MAX_PARAMS]:
post[item.name] = item if item.filename else item.value
return post
@property
def COOKIES(self):
''' Alias for :attr:`cookies` (deprecated). '''
depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
return self.cookies
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and tailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
''' The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This does only
work if all proxies support the ```X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
''' Search in self.environ for additional user defined attributes. '''
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
|
ryanpetrello/cleaver | cleaver/reports/web/bottle.py | BaseResponse.iter_headers | python | def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
headers = self._headers.items()
bad_headers = self.bad_headers.get(self._status_code)
if bad_headers:
headers = [h for h in headers if h[0] not in bad_headers]
for name, values in headers:
for value in values:
yield name, value
if self._cookies:
for c in self._cookies.values():
yield 'Set-Cookie', c.OutputString() | Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/reports/web/bottle.py#L1393-L1405 | null | class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, **headers):
self._status_line = None
self._status_code = None
self._cookies = None
self._headers = {'Content-Type': [self.default_content_type]}
self.body = body
self.status = status or self.default_status
if headers:
for name, value in headers.items():
self[name] = value
def copy(self):
''' Returns a copy of self. '''
copy = Response()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
return self._status_line
@property
def status_code(self):
''' The HTTP status code as an integer (e.g. 404).'''
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = status or ('%d Unknown' % code)
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
self.__dict__['headers'] = hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value, append=False):
''' Create a new response header, replacing any previously defined
headers with the same name. '''
if append:
self.add_header(name, value)
else:
self._headers[_hkey(name)] = [str(value)]
def add_header(self, name, value):
''' Add an additional response header, not removing duplicates. '''
self._headers.setdefault(_hkey(name), []).append(str(value))
def wsgiheader(self):
depr('The wsgiheader method is deprecated. See headerlist.') #0.10
return self.headerlist
@property
def headerlist(self):
''' WSGI conform list of (header, value) tuples. '''
return list(self.iter_headers())
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
@property
def charset(self):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return 'UTF-8'
@property
def COOKIES(self):
""" A dict-like SimpleCookie instance. This should not be used directly.
See :meth:`set_cookie`. """
depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
if not self._cookies:
self._cookies = SimpleCookie()
return self._cookies
def set_cookie(self, name, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side javascript to read this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
save, not to store secret information at client side.
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
if len(value) > 4096: raise ValueError('Cookie value to long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
|
ryanpetrello/cleaver | cleaver/reports/web/bottle.py | ResourceManager.add_path | python | def add_path(self, path, base=None, index=None, create=False):
''' Add a new path to the list of search paths. Return False if it does
not exist.
:param path: The new search path. Relative paths are turned into an
absolute and normalized form. If the path looks like a file (not
ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to `:attr:base` which defaults to ``./``.
:param index: Position within the list of search paths. Defaults to
last index (appends to the list).
:param create: Create non-existent search paths. Off by default.
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
'''
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.mkdirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear() | Add a new path to the list of search paths. Return False if it does
not exist.
:param path: The new search path. Relative paths are turned into an
absolute and normalized form. If the path looks like a file (not
ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to `:attr:base` which defaults to ``./``.
:param index: Position within the list of search paths. Defaults to
last index (appends to the list).
:param create: Create non-existent search paths. Off by default.
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__) | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/reports/web/bottle.py#L1948-L1977 | null | class ResourceManager(object):
''' This class manages a list of search paths and helps to find and open
aplication-bound resources (files).
:param base: default value for same-named :meth:`add_path` parameter.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
'''
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = open
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. `res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
''' Add a new path to the list of search paths. Return False if it does
not exist.
:param path: The new search path. Relative paths are turned into an
absolute and normalized form. If the path looks like a file (not
ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to `:attr:base` which defaults to ``./``.
:param index: Position within the list of search paths. Defaults to
last index (appends to the list).
:param create: Create non-existent search paths. Off by default.
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
'''
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.mkdirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
def __iter__(self):
''' Iterate over all existing files in all registered paths. '''
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
''' Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returend. Symlinks are followed. The result is cached to speed up
future lookups. '''
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
''' Find a resource and return a file object, or raise IOError. '''
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(name, mode=mode, *args, **kwargs)
|
ryanpetrello/cleaver | cleaver/compat/abc.py | ABCMeta.register | python | def register(cls, subclass):
if not isinstance(cls, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 | Register a virtual subclass of an ABC. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/compat/abc.py#L102-L114 | null | class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = set(name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False))
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = set()
cls._abc_cache = set()
cls._abc_negative_cache = set()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
# Invalidate negative cache
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print >> file, "%s: %r" % (name, value)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking when it's simple.
subclass = getattr(instance, '__class__', None)
if subclass in cls._abc_cache:
return True
subtype = type(instance)
# Old-style instances
if subtype is _InstanceType:
subtype = subclass
if subtype is subclass or subclass is None:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subtype in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or
cls.__subclasscheck__(subtype))
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = set()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
|
ryanpetrello/cleaver | cleaver/middleware.py | SplitMiddleware._copy_body_to_tempfile | python | def _copy_body_to_tempfile(cls, environ):
try:
length = int(environ.get('CONTENT_LENGTH', 0))
except ValueError:
length = 0
try:
fileobj = tempfile.SpooledTemporaryFile(1024*1024)
except AttributeError: # pragma: nocover
fileobj = tempfile.TemporaryFile() # py25 fallback
if length:
remaining = length
while remaining > 0:
data = environ['wsgi.input'].read(min(remaining, 65536))
if not data:
raise IOError(
"Client disconnected (%s more bytes were expected)"
% remaining
)
fileobj.write(data)
remaining -= len(data)
fileobj.seek(0)
environ['wsgi.input'] = fileobj
return fileobj, length | Copy wsgi.input to a tempfile so it can be reused. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/middleware.py#L161-L188 | null | class SplitMiddleware(object):
def __init__(self, app, identity, backend, environ_key='cleaver',
allow_override=False, count_humans_only=False,
human_callback_token='__cleaver_human_verification__'):
"""
Makes a Cleaver instance available every request under
``environ['cleaver']``.
:param identity any implementation of
``identity.CleaverIdentityProvider`` or
a callable that emulates
``identity.CleaverIdentityProvider.get_identity``.
:param backend any implementation of
``cleaver.backend.CleaverBackend``
:param environ_key location where the Cleaver instance will be keyed in
the WSGI environ
:param allow_override when True, specific variants can be overriden via
the request query string, e.g.,
http://mypythonapp.com?cleaver:button_size=small
Especially useful for tests and QA.
:param count_humans_only when False, every request (including those
originating from bots and web crawlers) is
treated as a unique visit (defaults to False).
:param human_callback_token when ``count_humans_only`` is True, this
token in the URL will trigger a simple
verification process for humans.
"""
self.app = app
if not isinstance(identity, CleaverIdentityProvider) and \
not callable(identity):
raise RuntimeError(
'%s must be callable or implement '
'cleaver.identity.CleaverIdentityProvider' % identity
)
if not isinstance(backend, CleaverBackend):
raise RuntimeError(
'%s must implement cleaver.backend.CleaverBackend' % backend
)
self._identity = identity
self._backend = backend
self.environ_key = environ_key
self.allow_override = allow_override
self.count_humans_only = count_humans_only
self.human_callback_token = human_callback_token
def __call__(self, environ, start_response):
cleaver = Cleaver(
environ,
self._identity,
self._backend,
count_humans_only=self.count_humans_only
)
environ[self.environ_key] = cleaver
if self.allow_override:
self._handle_variant_overrides(environ)
#
# If human verification is required and this request represents
# a valid AJAX callback (which bots aren't generally capable of), then
# mark the visitor as human.
#
if self.count_humans_only and \
environ.get('REQUEST_METHOD', '') == 'POST' and \
self.human_callback_token in environ.get('PATH_INFO', ''):
fp, length = SplitMiddleware._copy_body_to_tempfile(environ)
environ.setdefault('CONTENT_LENGTH', length)
fs = cgi.FieldStorage(
fp=fp,
environ=environ,
keep_blank_values=True
)
try:
try:
x = int(fs.getlist('x')[0])
except (IndexError, ValueError):
x = 0
try:
y = int(fs.getlist('y')[0])
except (IndexError, ValueError):
y = 0
try:
z = int(fs.getlist('z')[0])
except (IndexError, ValueError):
z = 0
# The AJAX call will include three POST arguments, X, Y, and Z
#
# Part of the "not a robot test" is validating that X + Y = Z
# (most web crawlers won't perform complicated Javascript
# execution like math and HTTP callbacks, because it's just too
# expensive at scale)
if x and y and z and x + y == z:
# Mark the visitor as a human
self._backend.mark_human(cleaver.identity)
# If the visitor has been assigned any experiment variants,
# tally their participation.
for e in self._backend.all_experiments():
variant = self._backend.get_variant(
cleaver.identity,
e.name
)
if variant:
self._backend.mark_participant(e.name, variant)
start_response(
'204 No Content',
[('Content-Type', 'text/plain')]
)
return []
except (KeyError, ValueError):
pass
start_response(
'401 Unauthorized',
[('Content-Type', 'text/plain')]
)
return []
return self.app(environ, start_response)
def _handle_variant_overrides(self, environ):
# Parse the QUERY_STRING into a dictionary, and make an editable copy
parsed = dict(parse_qsl(environ.get('QUERY_STRING', '')))
qs = parsed.copy()
# For each key that starts with cleaver: ...
for k in parsed:
if k.startswith('cleaver:'):
# Store the key -> value in ``environ['cleaver.override']``
# and remove it from the editable ``qs`` copy.
environ.setdefault('cleaver.override', {})[
k.split('cleaver:')[1]
] = qs.pop(k)
# If any overriden variables were changed, re-encode QUERY_STRING so
# that the next WSGI layer doesn't see the parsed ``cleaver:``
# arguments.
if 'cleaver.override' in environ:
environ['QUERY_STRING'] = urlencode(qs)
@classmethod
|
ryanpetrello/cleaver | cleaver/experiment.py | VariantStat.conversion_rate | python | def conversion_rate(self):
participants = self.participant_count
if participants == 0:
return 0.0
return self.experiment.conversions_for(self.name) / float(participants) | The percentage of participants that have converted for this variant.
Returns a > 0 float representing a percentage rate. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/experiment.py#L116-L125 | null | class VariantStat(object):
"""
Used to calculate statistics related to Experiment variants.
"""
def __init__(self, name, experiment):
self.name = name
self.experiment = experiment
@property
def participant_count(self):
"""
The number of participants for this variant.
Returns a > 0 integer.
"""
return self.experiment.participants_for(self.name)
@property
@property
def z_score(self):
"""
Calculate the Z-Score between this alternative and the project control.
Statistical formulas based on:
http://20bits.com/article/statistical-analysis-and-ab-testing
"""
control = VariantStat(self.experiment.control, self.experiment)
alternative = self
if control.name == alternative.name:
return 'N/A'
conv_c = control.conversion_rate
conv_a = alternative.conversion_rate
num_c = control.participant_count
num_a = alternative.participant_count
if conv_c == 0 or conv_a == 0:
return 0
numerator = conv_a - conv_c
frac_c = (conv_c * (1 - conv_c)) / float(num_c)
frac_a = (conv_a * (1 - conv_a)) / float(num_a)
if frac_c + frac_a == 0:
# square root of 0 is 0, so no need to calculate
return 0
elif frac_c + frac_a < 0:
# can't take a square root of a negative number,
# so return 'Invalid'
return 'Invalid'
return numerator / math.sqrt((frac_c + frac_a))
@property
def confidence_level(self):
"""
Based on the variant's Z-Score, returns a human-readable string that
describes the confidence with which we can say the results are
statistically significant.
"""
z = self.z_score
if isinstance(z, string_types):
return z
z = abs(round(z, 3))
if z == 0.0:
return "No Change"
elif z < 1.65:
return "No Confidence"
elif z < 2.33:
return "95% Confidence"
elif z < 3.08:
return "99% Confidence"
return "99.9% Confidence"
|
ryanpetrello/cleaver | cleaver/experiment.py | VariantStat.z_score | python | def z_score(self):
control = VariantStat(self.experiment.control, self.experiment)
alternative = self
if control.name == alternative.name:
return 'N/A'
conv_c = control.conversion_rate
conv_a = alternative.conversion_rate
num_c = control.participant_count
num_a = alternative.participant_count
if conv_c == 0 or conv_a == 0:
return 0
numerator = conv_a - conv_c
frac_c = (conv_c * (1 - conv_c)) / float(num_c)
frac_a = (conv_a * (1 - conv_a)) / float(num_a)
if frac_c + frac_a == 0:
# square root of 0 is 0, so no need to calculate
return 0
elif frac_c + frac_a < 0:
# can't take a square root of a negative number,
# so return 'Invalid'
return 'Invalid'
return numerator / math.sqrt((frac_c + frac_a)) | Calculate the Z-Score between this alternative and the project control.
Statistical formulas based on:
http://20bits.com/article/statistical-analysis-and-ab-testing | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/experiment.py#L128-L164 | null | class VariantStat(object):
"""
Used to calculate statistics related to Experiment variants.
"""
def __init__(self, name, experiment):
self.name = name
self.experiment = experiment
@property
def participant_count(self):
"""
The number of participants for this variant.
Returns a > 0 integer.
"""
return self.experiment.participants_for(self.name)
@property
def conversion_rate(self):
"""
The percentage of participants that have converted for this variant.
Returns a > 0 float representing a percentage rate.
"""
participants = self.participant_count
if participants == 0:
return 0.0
return self.experiment.conversions_for(self.name) / float(participants)
@property
@property
def confidence_level(self):
"""
Based on the variant's Z-Score, returns a human-readable string that
describes the confidence with which we can say the results are
statistically significant.
"""
z = self.z_score
if isinstance(z, string_types):
return z
z = abs(round(z, 3))
if z == 0.0:
return "No Change"
elif z < 1.65:
return "No Confidence"
elif z < 2.33:
return "95% Confidence"
elif z < 3.08:
return "99% Confidence"
return "99.9% Confidence"
|
ryanpetrello/cleaver | cleaver/experiment.py | VariantStat.confidence_level | python | def confidence_level(self):
z = self.z_score
if isinstance(z, string_types):
return z
z = abs(round(z, 3))
if z == 0.0:
return "No Change"
elif z < 1.65:
return "No Confidence"
elif z < 2.33:
return "95% Confidence"
elif z < 3.08:
return "99% Confidence"
return "99.9% Confidence" | Based on the variant's Z-Score, returns a human-readable string that
describes the confidence with which we can say the results are
statistically significant. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/experiment.py#L167-L187 | null | class VariantStat(object):
"""
Used to calculate statistics related to Experiment variants.
"""
def __init__(self, name, experiment):
self.name = name
self.experiment = experiment
@property
def participant_count(self):
"""
The number of participants for this variant.
Returns a > 0 integer.
"""
return self.experiment.participants_for(self.name)
@property
def conversion_rate(self):
"""
The percentage of participants that have converted for this variant.
Returns a > 0 float representing a percentage rate.
"""
participants = self.participant_count
if participants == 0:
return 0.0
return self.experiment.conversions_for(self.name) / float(participants)
@property
def z_score(self):
"""
Calculate the Z-Score between this alternative and the project control.
Statistical formulas based on:
http://20bits.com/article/statistical-analysis-and-ab-testing
"""
control = VariantStat(self.experiment.control, self.experiment)
alternative = self
if control.name == alternative.name:
return 'N/A'
conv_c = control.conversion_rate
conv_a = alternative.conversion_rate
num_c = control.participant_count
num_a = alternative.participant_count
if conv_c == 0 or conv_a == 0:
return 0
numerator = conv_a - conv_c
frac_c = (conv_c * (1 - conv_c)) / float(num_c)
frac_a = (conv_a * (1 - conv_a)) / float(num_a)
if frac_c + frac_a == 0:
# square root of 0 is 0, so no need to calculate
return 0
elif frac_c + frac_a < 0:
# can't take a square root of a negative number,
# so return 'Invalid'
return 'Invalid'
return numerator / math.sqrt((frac_c + frac_a))
@property
|
ryanpetrello/cleaver | cleaver/backend/__init__.py | CleaverBackend.participate | python | def participate(self, identity, experiment_name, variant):
self.set_variant(identity, experiment_name, variant)
if self.is_verified_human(identity):
self.mark_participant(experiment_name, variant) | Set the variant for a specific user and mark a participation for the
experiment.
Participation will *only* be marked for visitors who have been verified
as humans (to avoid skewing reports with requests from bots and web
crawlers). | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/backend/__init__.py#L104-L115 | [
"def is_verified_human(self, identity):\n return # pragma: nocover\n",
"def set_variant(self, identity, experiment_name, variant):\n \"\"\"\n Set the variant for a specific user.\n\n :param identity a unique user identifier\n :param experiment_name the string name of the experiment\n :param var... | class CleaverBackend(object):
"""
Provides an interface for persisting and retrieving A/B test results,
generally a database, cache, or file on disk.
Generally speaking, base implementations need to:
* Provide a list of all experiments and the datetime they started.
* Save and retrieve an experiment and its ordered list of variants.
* Save and retrieve a mapping between unique user identifiers and
experiment/variant pairs those users were served.
* Remember whether a certain unique visitor has been verified as
a human (defaulting to False to prevent robots from skewing
reporting).
* Provide the ability to score a conversion for a certain (experiment,
variant) pair.
* Provide a running tally of participants and conversions for any
(experiment, variant) pair.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def all_experiments(self):
"""
Retrieve every available experiment.
Returns a list of ``cleaver.experiment.Experiment``s
"""
return # pragma: nocover
@abc.abstractmethod
def get_experiment(self, name, variants):
"""
Retrieve an experiment by its name and variants (assuming it exists).
:param name a unique string name for the experiment
:param variants a list of strings, each with a unique variant name
Returns a ``cleaver.experiment.Experiment`` or ``None``
"""
return # pragma: nocover
@abc.abstractmethod
def save_experiment(self, name, variants):
"""
Persist an experiment and its variants (unless they already exist).
Variants should be stored in such a way that *order can be guaranteed*
on retrieval.
:param name a unique string name for the experiment
:param variants a list of strings, each with a unique variant name
"""
return # pragma: nocover
@abc.abstractmethod
def is_verified_human(self, identity):
return # pragma: nocover
@abc.abstractmethod
def mark_human(self, identity):
return # pragma: nocover
@abc.abstractmethod
def get_variant(self, identity, experiment_name):
"""
Retrieve the variant for a specific user and experiment (if it exists).
:param identity a unique user identifier
:param experiment_name the string name of the experiment
Returns a ``String`` or `None`
"""
return # pragma: nocover
@abc.abstractmethod
def set_variant(self, identity, experiment_name, variant):
"""
Set the variant for a specific user.
:param identity a unique user identifier
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
return # pragma: nocover
@abc.abstractmethod
def mark_participant(self, experiment_name, variant):
"""
Mark a participation for a specific experiment variant.
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
return # pragma: nocover
@abc.abstractmethod
def mark_conversion(self, experiment_name, variant):
"""
Mark a conversion for a specific experiment variant.
:param experiment_name the string name of the experiment
:param variant the string name of the variant
"""
return # pragma: nocover
@abc.abstractmethod
def participants(self, experiment_name, variant):
"""
The number of participants for a certain variant.
Returns an integer.
"""
return # pragma: nocover
@abc.abstractmethod
def conversions(self, experiment_name, variant):
"""
The number of conversions for a certain variant.
Returns an integer.
"""
return # pragma: nocover
|
ryanpetrello/cleaver | cleaver/base.py | Cleaver.identity | python | def identity(self):
if hasattr(self._identity, 'get_identity'):
return self._identity.get_identity(self._environ)
return self._identity(self._environ) | A unique identifier for the current visitor. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/base.py#L54-L60 | null | class Cleaver(object):
def __init__(self, environ, identity, backend,
count_humans_only=False,
human_callback_token='__cleaver_human_verification__'):
"""
Create a new Cleaver instance.
Not generally instantiated directly, but established automatically by
``cleaver.SplitMiddleware`` and used within a WSGI application via
``request.environ['cleaver']``.
:param environ the WSGI environ dictionary for the current request
:param identity any implementation of
``identity.CleaverIdentityProvider`` or
a callable that emulates
``identity.CleaverIdentityProvider.get_identity``.
:param backend any implementation of
``backend.CleaverBackend``
:param count_humans_only when False, every request (including those
originating from bots and web crawlers) is
treated as a unique visit (defaults to False).
:param human_callback_token when ``count_humans_only`` is True, this
token in the URL will trigger a simple
verification process for humans.
"""
if not isinstance(identity, CleaverIdentityProvider) and \
not callable(identity):
raise RuntimeError(
'%s must be callable or implement '
'cleaver.identity.CleaverIdentityProvider' % identity
)
if not isinstance(backend, CleaverBackend):
raise RuntimeError(
'%s must implement cleaver.backend.CleaverBackend' % backend
)
self._identity = identity
self._backend = backend
self._environ = environ
self.count_humans_only = count_humans_only
self.human_callback_token = human_callback_token
def __call__(self, *args):
return self.split(*args)
@property
@property
def human(self):
return self._backend.is_verified_human(self.identity)
def split(self, experiment_name, *variants):
"""
Used to split and track user experience amongst one or more variants.
:param experiment_name a unique string name for the experiment
:param *variants can take many forms, depending on usage.
Variants should be provided as arbitrary tuples in the
format ('unique_string_label', any_value), ... e.g.,
>>> split('text_color', ('red', '#F00'), ('blue', '#00F'))
...where the first variant (in this example, 'red') represents the
control and any additional variants represent alternatives.
By default, variants are chosen with equal weight. You can tip the
scales if you like by passing a proportional *integer* weight as
the third element in each variant tuple:
>>> split('text_color', ('red', '#F00', 2), ('blue', '#00F', 4))
Optionally, variants may be excluded entirely to fall back to
a simple True/False 50/50 split, where True is the control and
False is the experiment, e.g.,
>>> sidebar() if split('include_sidebar') else empty()
"""
# Perform some minimal type checking
if not isinstance(experiment_name, string_types):
raise RuntimeError(
'Invalid experiment name: %s must be a string.' %
experiment_name
)
keys, values, weights = self._parse_variants(variants)
b = self._backend
# Record the experiment if it doesn't exist already
experiment = b.get_experiment(experiment_name, keys)
# If the current visitor hasn't been verified as a human, and we've not
# required human verification, go ahead and mark them as a human.
if self.count_humans_only is False and self.human is not True:
b.mark_human(self.identity)
if experiment is None:
b.save_experiment(experiment_name, keys)
experiment = b.get_experiment(experiment_name, keys)
else:
if set(experiment.variants) != set(keys):
raise RuntimeError(
'An experiment named %s already exists with different '
'variants.' % experiment_name
)
# Retrieve the variant assigned to the current user
if experiment.name in self._environ.get('cleaver.override', {}):
variant = self._environ['cleaver.override'][experiment.name]
else:
variant = b.get_variant(self.identity, experiment.name)
if variant is None:
# ...or choose (and store) one randomly if it doesn't exist yet
variant = next(util.random_variant(keys, weights))
b.participate(self.identity, experiment.name, variant)
return dict(zip(keys, values))[variant]
def score(self, experiment_name):
"""
Used to mark the current user's experiment variant as "converted" e.g.,
"Suzy, who was shown the large button, just signed up."
Conversions will *only* be marked for visitors who have been verified
as humans (to avoid skewing reports with requests from bots and web
crawlers).
:param experiment_name the string name of the experiment
"""
if self._backend.get_variant(self.identity, experiment_name) and \
self.human is True:
self._backend.mark_conversion(
experiment_name,
self._backend.get_variant(self.identity, experiment_name)
)
def _parse_variants(self, variants):
if not len(variants):
variants = [('True', True), ('False', False)]
if len(variants) == 1:
raise RuntimeError(
'Experiments must have at least two variants '
'(a control and an alternative).'
)
def add_defaults(v):
if len(v) < 3:
v = tuple(
list(v) + (
[None, 1] if len(v) == 1 else [1]
)
)
# Perform some minimal type checking
if not isinstance(v[0], string_types):
raise RuntimeError(
'Invalid variant name: %s must be a string.' %
v[0]
)
if not isinstance(v[2], int):
raise RuntimeError(
'Invalid variant weight: %s must be an integer.' %
v[2]
)
return v
variants = map(
add_defaults,
variants
)
return zip_longest(*variants)
def humanizing_javascript(self):
if self.human:
return ''
return """
<script type="text/javascript">
var x = Math.floor(Math.random()*100);
var y = Math.floor(Math.random()*100);
var url = "%s";
var params = "x="+x+"&y="+y+"&z="+(x+y);
var h;
if (window.XMLHttpRequest){
h = new XMLHttpRequest();
} else {
h = new ActiveXObject("Microsoft.XMLHTTP");
}
h.open("POST", url, true);
h.setRequestHeader("Content-Type", "%s");
h.setRequestHeader("Connection", "close");
h.send(params);
</script>
""" % (self.human_callback_token, "application/x-www-form-urlencoded")
|
ryanpetrello/cleaver | cleaver/base.py | Cleaver.split | python | def split(self, experiment_name, *variants):
# Perform some minimal type checking
if not isinstance(experiment_name, string_types):
raise RuntimeError(
'Invalid experiment name: %s must be a string.' %
experiment_name
)
keys, values, weights = self._parse_variants(variants)
b = self._backend
# Record the experiment if it doesn't exist already
experiment = b.get_experiment(experiment_name, keys)
# If the current visitor hasn't been verified as a human, and we've not
# required human verification, go ahead and mark them as a human.
if self.count_humans_only is False and self.human is not True:
b.mark_human(self.identity)
if experiment is None:
b.save_experiment(experiment_name, keys)
experiment = b.get_experiment(experiment_name, keys)
else:
if set(experiment.variants) != set(keys):
raise RuntimeError(
'An experiment named %s already exists with different '
'variants.' % experiment_name
)
# Retrieve the variant assigned to the current user
if experiment.name in self._environ.get('cleaver.override', {}):
variant = self._environ['cleaver.override'][experiment.name]
else:
variant = b.get_variant(self.identity, experiment.name)
if variant is None:
# ...or choose (and store) one randomly if it doesn't exist yet
variant = next(util.random_variant(keys, weights))
b.participate(self.identity, experiment.name, variant)
return dict(zip(keys, values))[variant] | Used to split and track user experience amongst one or more variants.
:param experiment_name a unique string name for the experiment
:param *variants can take many forms, depending on usage.
Variants should be provided as arbitrary tuples in the
format ('unique_string_label', any_value), ... e.g.,
>>> split('text_color', ('red', '#F00'), ('blue', '#00F'))
...where the first variant (in this example, 'red') represents the
control and any additional variants represent alternatives.
By default, variants are chosen with equal weight. You can tip the
scales if you like by passing a proportional *integer* weight as
the third element in each variant tuple:
>>> split('text_color', ('red', '#F00', 2), ('blue', '#00F', 4))
Optionally, variants may be excluded entirely to fall back to
a simple True/False 50/50 split, where True is the control and
False is the experiment, e.g.,
>>> sidebar() if split('include_sidebar') else empty() | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/base.py#L66-L132 | [
"def random_variant(variants, weights):\n \"\"\"\n A generator that, given a list of variants and a corresponding list of\n weights, returns one random weighted selection.\n \"\"\"\n total = 0\n accumulator = []\n for w in weights:\n total += w\n accumulator.append(total)\n\n r... | class Cleaver(object):
def __init__(self, environ, identity, backend,
count_humans_only=False,
human_callback_token='__cleaver_human_verification__'):
"""
Create a new Cleaver instance.
Not generally instantiated directly, but established automatically by
``cleaver.SplitMiddleware`` and used within a WSGI application via
``request.environ['cleaver']``.
:param environ the WSGI environ dictionary for the current request
:param identity any implementation of
``identity.CleaverIdentityProvider`` or
a callable that emulates
``identity.CleaverIdentityProvider.get_identity``.
:param backend any implementation of
``backend.CleaverBackend``
:param count_humans_only when False, every request (including those
originating from bots and web crawlers) is
treated as a unique visit (defaults to False).
:param human_callback_token when ``count_humans_only`` is True, this
token in the URL will trigger a simple
verification process for humans.
"""
if not isinstance(identity, CleaverIdentityProvider) and \
not callable(identity):
raise RuntimeError(
'%s must be callable or implement '
'cleaver.identity.CleaverIdentityProvider' % identity
)
if not isinstance(backend, CleaverBackend):
raise RuntimeError(
'%s must implement cleaver.backend.CleaverBackend' % backend
)
self._identity = identity
self._backend = backend
self._environ = environ
self.count_humans_only = count_humans_only
self.human_callback_token = human_callback_token
def __call__(self, *args):
return self.split(*args)
@property
def identity(self):
"""
A unique identifier for the current visitor.
"""
if hasattr(self._identity, 'get_identity'):
return self._identity.get_identity(self._environ)
return self._identity(self._environ)
@property
def human(self):
return self._backend.is_verified_human(self.identity)
def score(self, experiment_name):
"""
Used to mark the current user's experiment variant as "converted" e.g.,
"Suzy, who was shown the large button, just signed up."
Conversions will *only* be marked for visitors who have been verified
as humans (to avoid skewing reports with requests from bots and web
crawlers).
:param experiment_name the string name of the experiment
"""
if self._backend.get_variant(self.identity, experiment_name) and \
self.human is True:
self._backend.mark_conversion(
experiment_name,
self._backend.get_variant(self.identity, experiment_name)
)
def _parse_variants(self, variants):
if not len(variants):
variants = [('True', True), ('False', False)]
if len(variants) == 1:
raise RuntimeError(
'Experiments must have at least two variants '
'(a control and an alternative).'
)
def add_defaults(v):
if len(v) < 3:
v = tuple(
list(v) + (
[None, 1] if len(v) == 1 else [1]
)
)
# Perform some minimal type checking
if not isinstance(v[0], string_types):
raise RuntimeError(
'Invalid variant name: %s must be a string.' %
v[0]
)
if not isinstance(v[2], int):
raise RuntimeError(
'Invalid variant weight: %s must be an integer.' %
v[2]
)
return v
variants = map(
add_defaults,
variants
)
return zip_longest(*variants)
def humanizing_javascript(self):
if self.human:
return ''
return """
<script type="text/javascript">
var x = Math.floor(Math.random()*100);
var y = Math.floor(Math.random()*100);
var url = "%s";
var params = "x="+x+"&y="+y+"&z="+(x+y);
var h;
if (window.XMLHttpRequest){
h = new XMLHttpRequest();
} else {
h = new ActiveXObject("Microsoft.XMLHTTP");
}
h.open("POST", url, true);
h.setRequestHeader("Content-Type", "%s");
h.setRequestHeader("Connection", "close");
h.send(params);
</script>
""" % (self.human_callback_token, "application/x-www-form-urlencoded")
|
ryanpetrello/cleaver | cleaver/base.py | Cleaver.score | python | def score(self, experiment_name):
if self._backend.get_variant(self.identity, experiment_name) and \
self.human is True:
self._backend.mark_conversion(
experiment_name,
self._backend.get_variant(self.identity, experiment_name)
) | Used to mark the current user's experiment variant as "converted" e.g.,
"Suzy, who was shown the large button, just signed up."
Conversions will *only* be marked for visitors who have been verified
as humans (to avoid skewing reports with requests from bots and web
crawlers).
:param experiment_name the string name of the experiment | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/base.py#L134-L151 | [
"def get_variant(self, identity, experiment_name):\n pass # pragma: nocover\n"
] | class Cleaver(object):
def __init__(self, environ, identity, backend,
count_humans_only=False,
human_callback_token='__cleaver_human_verification__'):
"""
Create a new Cleaver instance.
Not generally instantiated directly, but established automatically by
``cleaver.SplitMiddleware`` and used within a WSGI application via
``request.environ['cleaver']``.
:param environ the WSGI environ dictionary for the current request
:param identity any implementation of
``identity.CleaverIdentityProvider`` or
a callable that emulates
``identity.CleaverIdentityProvider.get_identity``.
:param backend any implementation of
``backend.CleaverBackend``
:param count_humans_only when False, every request (including those
originating from bots and web crawlers) is
treated as a unique visit (defaults to False).
:param human_callback_token when ``count_humans_only`` is True, this
token in the URL will trigger a simple
verification process for humans.
"""
if not isinstance(identity, CleaverIdentityProvider) and \
not callable(identity):
raise RuntimeError(
'%s must be callable or implement '
'cleaver.identity.CleaverIdentityProvider' % identity
)
if not isinstance(backend, CleaverBackend):
raise RuntimeError(
'%s must implement cleaver.backend.CleaverBackend' % backend
)
self._identity = identity
self._backend = backend
self._environ = environ
self.count_humans_only = count_humans_only
self.human_callback_token = human_callback_token
def __call__(self, *args):
return self.split(*args)
@property
def identity(self):
"""
A unique identifier for the current visitor.
"""
if hasattr(self._identity, 'get_identity'):
return self._identity.get_identity(self._environ)
return self._identity(self._environ)
@property
def human(self):
return self._backend.is_verified_human(self.identity)
def split(self, experiment_name, *variants):
"""
Used to split and track user experience amongst one or more variants.
:param experiment_name a unique string name for the experiment
:param *variants can take many forms, depending on usage.
Variants should be provided as arbitrary tuples in the
format ('unique_string_label', any_value), ... e.g.,
>>> split('text_color', ('red', '#F00'), ('blue', '#00F'))
...where the first variant (in this example, 'red') represents the
control and any additional variants represent alternatives.
By default, variants are chosen with equal weight. You can tip the
scales if you like by passing a proportional *integer* weight as
the third element in each variant tuple:
>>> split('text_color', ('red', '#F00', 2), ('blue', '#00F', 4))
Optionally, variants may be excluded entirely to fall back to
a simple True/False 50/50 split, where True is the control and
False is the experiment, e.g.,
>>> sidebar() if split('include_sidebar') else empty()
"""
# Perform some minimal type checking
if not isinstance(experiment_name, string_types):
raise RuntimeError(
'Invalid experiment name: %s must be a string.' %
experiment_name
)
keys, values, weights = self._parse_variants(variants)
b = self._backend
# Record the experiment if it doesn't exist already
experiment = b.get_experiment(experiment_name, keys)
# If the current visitor hasn't been verified as a human, and we've not
# required human verification, go ahead and mark them as a human.
if self.count_humans_only is False and self.human is not True:
b.mark_human(self.identity)
if experiment is None:
b.save_experiment(experiment_name, keys)
experiment = b.get_experiment(experiment_name, keys)
else:
if set(experiment.variants) != set(keys):
raise RuntimeError(
'An experiment named %s already exists with different '
'variants.' % experiment_name
)
# Retrieve the variant assigned to the current user
if experiment.name in self._environ.get('cleaver.override', {}):
variant = self._environ['cleaver.override'][experiment.name]
else:
variant = b.get_variant(self.identity, experiment.name)
if variant is None:
# ...or choose (and store) one randomly if it doesn't exist yet
variant = next(util.random_variant(keys, weights))
b.participate(self.identity, experiment.name, variant)
return dict(zip(keys, values))[variant]
def _parse_variants(self, variants):
if not len(variants):
variants = [('True', True), ('False', False)]
if len(variants) == 1:
raise RuntimeError(
'Experiments must have at least two variants '
'(a control and an alternative).'
)
def add_defaults(v):
if len(v) < 3:
v = tuple(
list(v) + (
[None, 1] if len(v) == 1 else [1]
)
)
# Perform some minimal type checking
if not isinstance(v[0], string_types):
raise RuntimeError(
'Invalid variant name: %s must be a string.' %
v[0]
)
if not isinstance(v[2], int):
raise RuntimeError(
'Invalid variant weight: %s must be an integer.' %
v[2]
)
return v
variants = map(
add_defaults,
variants
)
return zip_longest(*variants)
def humanizing_javascript(self):
if self.human:
return ''
return """
<script type="text/javascript">
var x = Math.floor(Math.random()*100);
var y = Math.floor(Math.random()*100);
var url = "%s";
var params = "x="+x+"&y="+y+"&z="+(x+y);
var h;
if (window.XMLHttpRequest){
h = new XMLHttpRequest();
} else {
h = new ActiveXObject("Microsoft.XMLHTTP");
}
h.open("POST", url, true);
h.setRequestHeader("Content-Type", "%s");
h.setRequestHeader("Connection", "close");
h.send(params);
</script>
""" % (self.human_callback_token, "application/x-www-form-urlencoded")
|
ryanpetrello/cleaver | cleaver/util.py | random_variant | python | def random_variant(variants, weights):
total = 0
accumulator = []
for w in weights:
total += w
accumulator.append(total)
r = randint(0, total - 1)
yield variants[bisect(accumulator, r)] | A generator that, given a list of variants and a corresponding list of
weights, returns one random weighted selection. | train | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/util.py#L7-L19 | null | from random import randint
from bisect import bisect
__all__ = ['random_variant']
|
timstaley/voeventdb | voeventdb/server/bin/voeventdb_create.py | handle_args | python | def handle_args():
default_database_name = dbconfig.testdb_corpus_url.database
parser = argparse.ArgumentParser(
prog=os.path.basename(__file__),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('dbname',
nargs='?',
default=str(default_database_name),
help='Database name',
)
return parser.parse_args() | Default values are defined here. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/bin/voeventdb_create.py#L23-L37 | null | #!/usr/bin/env python
import sys
import os
import argparse
import logging
from sqlalchemy.engine.url import make_url
from sqlalchemy import create_engine
from voeventdb.server.database import db_utils
import voeventdb.server.database.config as dbconfig
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("create_db")
def database_url(s):
return make_url(s)
def main():
args = handle_args()
dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, args.dbname)
if not db_utils.check_database_exists(dburl):
db_utils.create_empty_database(dbconfig.default_admin_db_url,
args.dbname)
logger.info('Database "{}" created.'.format(dburl.database))
engine = create_engine(dburl)
db_utils.create_tables_and_indexes(engine.connect())
return 0
if __name__ == '__main__':
sys.exit(main())
|
timstaley/voeventdb | voeventdb/server/bin/voeventdb_ingest_packet.py | handle_args | python | def handle_args():
default_database_name = os.environ.get(
'VOEVENTDB_DBNAME',
dbconfig.testdb_corpus_url.database)
default_logfile_path = os.path.expanduser("~/voeventdb_packet_ingest.log")
parser = argparse.ArgumentParser(
prog=os.path.basename(__file__),
formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
parser.description = """
Ingest a packet from stdin and attempt to ingest into a voeventdb database.
Usage:
cat test.xml | voeventdb_ingest_packet.py -d mydb -l /tmp/my.log
"""
parser.add_argument('-d', '--dbname', nargs='?',
default=str(default_database_name),
help='Database name')
parser.add_argument('-l', '--logfile_path', nargs='?',
default=default_logfile_path,
)
return parser.parse_args() | Default values are defined here. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/bin/voeventdb_ingest_packet.py#L19-L49 | null | #!/usr/bin/env python
import argparse
import logging
import logging.handlers
import os
import sys
import six
import voeventparse
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
import voeventdb.server.database.config as dbconfig
import voeventdb.server.database.convenience as conv
from voeventdb.server.database import db_utils
def setup_logging(logfile_path):
"""
Set up basic (INFO level) and debug logfiles
"""
date_fmt = "%y-%m-%d (%a) %H:%M:%S"
std_formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s',
date_fmt)
# Get to the following size before splitting log into multiple files:
log_chunk_bytesize = 5e6
info_logfile = logging.handlers.RotatingFileHandler(logfile_path,
maxBytes=log_chunk_bytesize,
backupCount=10)
info_logfile.setFormatter(std_formatter)
info_logfile.setLevel(logging.DEBUG)
stdout_log = logging.StreamHandler()
stdout_log.setLevel(logging.DEBUG)
stdout_log.setFormatter(std_formatter)
# Set up root logger
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.DEBUG)
logger.addHandler(info_logfile)
logger.addHandler(stdout_log)
logging.getLogger('iso8601').setLevel(
logging.ERROR) # Suppress iso8601 debug log.
return logger
def main():
args = handle_args()
logger = setup_logging(args.logfile_path)
dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, args.dbname)
if not db_utils.check_database_exists(dburl):
raise RuntimeError("Database not found")
if six.PY3:
stdin = sys.stdin.buffer.read()
else:
stdin = sys.stdin.read() # Py2
v = voeventparse.loads(stdin)
session = Session(bind=create_engine(dburl))
try:
conv.safe_insert_voevent(session, v)
session.commit()
except:
logger.exception("Could not insert packet with ivorn {} into {}".format(
v.attrib['ivorn'], args.dbname))
logger.info("Loaded packet with ivorn {} into {}".format(
v.attrib['ivorn'], args.dbname))
return 0
if __name__ == '__main__':
sys.exit(main())
|
timstaley/voeventdb | voeventdb/server/bin/voeventdb_ingest_packet.py | setup_logging | python | def setup_logging(logfile_path):
date_fmt = "%y-%m-%d (%a) %H:%M:%S"
std_formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s',
date_fmt)
# Get to the following size before splitting log into multiple files:
log_chunk_bytesize = 5e6
info_logfile = logging.handlers.RotatingFileHandler(logfile_path,
maxBytes=log_chunk_bytesize,
backupCount=10)
info_logfile.setFormatter(std_formatter)
info_logfile.setLevel(logging.DEBUG)
stdout_log = logging.StreamHandler()
stdout_log.setLevel(logging.DEBUG)
stdout_log.setFormatter(std_formatter)
# Set up root logger
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.DEBUG)
logger.addHandler(info_logfile)
logger.addHandler(stdout_log)
logging.getLogger('iso8601').setLevel(
logging.ERROR) # Suppress iso8601 debug log.
return logger | Set up basic (INFO level) and debug logfiles | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/bin/voeventdb_ingest_packet.py#L52-L82 | null | #!/usr/bin/env python
import argparse
import logging
import logging.handlers
import os
import sys
import six
import voeventparse
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
import voeventdb.server.database.config as dbconfig
import voeventdb.server.database.convenience as conv
from voeventdb.server.database import db_utils
def handle_args():
"""
Default values are defined here.
"""
default_database_name = os.environ.get(
'VOEVENTDB_DBNAME',
dbconfig.testdb_corpus_url.database)
default_logfile_path = os.path.expanduser("~/voeventdb_packet_ingest.log")
parser = argparse.ArgumentParser(
prog=os.path.basename(__file__),
formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
parser.description = """
Ingest a packet from stdin and attempt to ingest into a voeventdb database.
Usage:
cat test.xml | voeventdb_ingest_packet.py -d mydb -l /tmp/my.log
"""
parser.add_argument('-d', '--dbname', nargs='?',
default=str(default_database_name),
help='Database name')
parser.add_argument('-l', '--logfile_path', nargs='?',
default=default_logfile_path,
)
return parser.parse_args()
def main():
args = handle_args()
logger = setup_logging(args.logfile_path)
dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, args.dbname)
if not db_utils.check_database_exists(dburl):
raise RuntimeError("Database not found")
if six.PY3:
stdin = sys.stdin.buffer.read()
else:
stdin = sys.stdin.read() # Py2
v = voeventparse.loads(stdin)
session = Session(bind=create_engine(dburl))
try:
conv.safe_insert_voevent(session, v)
session.commit()
except:
logger.exception("Could not insert packet with ivorn {} into {}".format(
v.attrib['ivorn'], args.dbname))
logger.info("Loaded packet with ivorn {} into {}".format(
v.attrib['ivorn'], args.dbname))
return 0
if __name__ == '__main__':
sys.exit(main())
|
timstaley/voeventdb | voeventdb/server/restapi/v1/definitions.py | _list_class_vars | python | def _list_class_vars(cls, exclude=None):
cvars = {k: v for k, v in vars(cls).items()
if not k.startswith('_')}
cvars = deepcopy(cvars)
if exclude is not None:
for e in exclude:
cvars.pop(e)
return cvars | Return a dict of all 'regular' (i.e. not prefixed ``_``) class attributes. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/restapi/v1/definitions.py#L4-L14 | null | from copy import deepcopy
from voeventdb.server.database.models import (Voevent,Cite,Coord)
def add_value_list_attribute(cls):
"""
Adds ``_value_list`` attribute to a class, listing other attribute values.
Below, I extensively (ab)use classes for namespacing, to define
various string literals in a way that is both easy to refactor and enables
autocompletion (given a suitable REPL or IDE).
This decorator collects the attribute-values of such a class and gathers
them into a list for easy cross-checking. The list is assigned to the
class as the ``_value_list`` attribute.
"""
cls._value_list = _list_class_vars(cls).values()
return cls
@add_value_list_attribute
class OrderValues:
"""
Values that may be used in a querystring with the 'order' key.
E.g. By specifying ``order=author_datetime`` in a querystring,
results are returned ordered by author_datetime (ascending, i.e. oldest
first). By default, results are returned in database-id (ascending) order,
which in effect means that the first results to be loaded into the database
are returned first.
Each value has a pairing with a '-' prefix, implying reverse
(descending) ordering.
"""
author_datetime = 'author_datetime'
author_datetime_desc = '-author_datetime'
"""
Order results by author_datetime (timestamp from the *Who* section).
Default ('ascending') implies oldest-first.
"""
id = 'id'
id_desc = '-id'
"""
Order results by database-id (assigned as events are added to the database).
This is the default setting.
"""
ivorn = 'ivorn'
ivorn_desc = '-ivorn'
"""
Order results by ivorn (alphabetically).
"""
order_by_string_to_col_map = {
OrderValues.author_datetime: Voevent.author_datetime,
OrderValues.id : Voevent.id,
OrderValues.ivorn : Voevent.ivorn,
None : Voevent.id,
}
@add_value_list_attribute
class PaginationKeys:
"""
These query-keys control the ordering and subset ('page') of results.
Use *limit* and *offset* values in your querystring to control slicing,
i.e. the subset of an ordered list returned in the current query.
(Only applies to list views, e.g. the IVORN listing endpoint.)
The keywords are adopted from SQL,
cf http://www.postgresql.org/docs/9.3/static/queries-limit.html
Note that if no values are supplied, a default limit value is applied.
(You can still check what it was, by inspecting the relevant value in the
:ref:`result-dict <returned-content>`.)
"""
# These hardly need soft-defining, but we include them for completeness.
limit = 'limit'
"""
The maximum number of results returned for a single request.
"""
offset = 'offset'
"""
The number of rows 'skipped' before returning results for a request.
"""
order = 'order'
"""
Controls the ordering of results, before limit and offset are applied.
Valid values are enumerated by the :class:`.OrderValues` class.
"""
class ResultKeys:
"""
Most :ref:`endpoints <apiv1_endpoints>` return a JSON-encoded dictionary.
[#ApartFromXml]_
At the top level, the dictionary will contain some or all of the following
keys:
.. [#ApartFromXml] (The exception is the XML-retrieval endpoint, obviously.)
.. note::
The key-strings can be imported and used in autocomplete-friendly
fashion, for example::
from voeventdb.server.restapi.v1 import ResultKeys as rkeys
print rkeys.querystring
"""
endpoint = 'endpoint'
"The endpoint the query was made against."
limit = 'limit'
"""
The maximum number of entries returned in a single request
(Only applies to list-view endpoints.)
"""
querystring = 'querystring'
"""
A dictionary displaying the query-string values applied.
(With urlencode-decoding applied as necessary.)
Note that each entry contains a list, as a query-key may be applied
multiple times.
"""
result = 'result'
"""
The data returned by your query, either in dictionary
or list format according to the endpoint.
See :ref:`endpoint listings <apiv1_endpoints>` for detail.
"""
url = 'url'
"The complete URL the query was made against."
|
timstaley/voeventdb | voeventdb/server/bin/voeventdb_dump_tarball.py | handle_args | python | def handle_args():
default_database_name = dbconfig.testdb_corpus_url.database
parser = argparse.ArgumentParser(
prog=os.path.basename(__file__),
# formatter_class=argparse.ArgumentDefaultsHelpFormatter,
formatter_class=MyFormatter,
description=textwrap.dedent("""\
Dump the raw VOEvents as XML files, collected into bzip2'd tarballs.
If start or end times are specified, then the range is start-inclusive
end-exclusive, i.e.
start <= author_datetime < end
NB when writing compressed tarballs in Python, the entire file is
composed in memory before writing to file. This means that setting
`nsplit` too large will result in very high memory usage! The default
value of 5000 seems to peak at <250MB of RAM (though this varies
according to the size of the VOEvent packets, and assumes
`prefetch=False`). Some quick tests suggest typical RAM usage
~= 40MB + 30MB*(nsplit/1000) .
"""),
)
parser.add_argument('tarfile_pathstem',
help='Path to tarball to create, e.g. `foobar`. '
'Suffix ``.tar.bz2`` will be appended.'
)
parser.add_argument('-d', '--dbname', nargs='?',
default=str(default_database_name),
help='Database name')
parser.add_argument('-n', '--nsplit',
type=int,
default=5000,
help=
"Output multiple files, `NSPLIT` packets per tarball."
"Naming convention is `<stem>.001.tar.bz2, <stem>.002.tar.bz2, ...`"
)
parser.add_argument('-s', '--start',
type=iso8601.parse_date,
default=None,
help="Filter events by author_date>=`START`")
parser.add_argument('-e', '--end',
type=iso8601.parse_date,
default=datetime.datetime.now(tz=pytz.UTC),
help=
"Filter events by author_date<`END`")
parser.add_argument('-p', '--prefetch', action='store_true', default=False,
help=
"Bulk-fetch XML packets from DB (~3x faster, but "
"uses considerably more RAM, depending on value of "
"`nsplit`.)"
)
parser.add_argument('-a', '--all', action='store_true', default=False,
help=
"Ignore any datetime filters, dump **all** VOEvents."
"(This option is provided to allow dumping of VOEvents"
"with author_datetime=Null, which are otherwise ignored.)"
)
return parser.parse_args() | Default values are defined here. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/bin/voeventdb_dump_tarball.py#L32-L97 | null | #!/usr/bin/env python
import argparse
import datetime
import logging
import os
import sys
import textwrap
import iso8601
import pytz
import voeventdb.server.database.config as dbconfig
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from voeventdb.server.database import db_utils
from voeventdb.server.database.models import Voevent
from voeventdb.server.utils.filestore import (
write_tarball,
)
logging.basicConfig(level=logging.INFO)
logging.getLogger('iso8601').setLevel(
logging.ERROR) # Suppress iso8601 debug log.
logger = logging.getLogger("voeventdb-dump")
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
def main():
args = handle_args()
dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, args.dbname)
if not db_utils.check_database_exists(dburl):
raise RuntimeError("Database not found")
filecount = 1
n_packets_written = 0
def get_tarfile_path():
if args.nsplit:
suffix = '.{0:03d}.tar.bz2'.format(filecount)
else:
suffix = '.tar.bz2'
return args.tarfile_pathstem + suffix
session = Session(bind=create_engine(dburl))
if args.prefetch:
qry = session.query(Voevent.ivorn, Voevent.xml)
else:
qry = session.query(Voevent)
if args.all:
logger.info("Dumping **all** packets currently in database")
else:
qry = qry.filter(Voevent.author_datetime < args.end)
if args.start is not None:
qry = qry.filter(Voevent.author_datetime >= args.start)
logger.info("Fetching packets from {}".format(args.start))
else:
logger.info("Fetching packets from beginning of time")
logger.info("...until: {}".format(args.end))
qry = qry.order_by(Voevent.id)
n_matching = qry.count()
logger.info("Dumping {} packets".format(n_matching))
start_time = datetime.datetime.now()
while n_packets_written < n_matching:
logger.debug("Fetching batch of up to {} packets".format(args.nsplit))
voevents = qry.limit(args.nsplit).offset(n_packets_written).all()
n_packets_written += write_tarball(voevents,
get_tarfile_path())
elapsed = (datetime.datetime.now() - start_time).total_seconds()
logger.info(
"{} packets dumped so far, in {} ({:.0f} kilopacket/s)".format(
n_packets_written,
elapsed,
n_packets_written / elapsed
))
filecount += 1
session.close()
logger.info("Wrote {} packets".format(n_packets_written))
return 0
if __name__ == '__main__':
sys.exit(main())
|
timstaley/voeventdb | voeventdb/server/restapi/v1/views.py | apiv1_root_view | python | def apiv1_root_view():
docs_url = current_app.config.get('DOCS_URL',
'http://' + request.host + '/docs')
message = "Welcome to the voeventdb REST API, " \
"interface version '{}'.".format(
apiv1.name)
api_details = {
'message': message,
'api_version': apiv1.name,
'git_sha': package_version_dict['full-revisionid'][:8],
'version_tag': package_version_dict['version'],
'endpoints': [str(r) for r in get_apiv1_rules()],
'docs_url': docs_url
}
if 'text/html' in request.headers.get("Accept", ""):
return render_template('landing.html',
**api_details
)
else:
return jsonify(api_details) | API root url. Shows a list of active endpoints. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/restapi/v1/views.py#L82-L105 | [
"def get_apiv1_rules():\n rules = [r for r in sorted(current_app.url_map.iter_rules(),\n key=lambda x: str(x))\n if r.endpoint.startswith('apiv1')]\n endpoints_listed = set()\n pruned_rules = []\n for r in rules:\n if r.endpoint not in endpoints_listed:\n... | from __future__ import absolute_import
from flask import (
Blueprint, request, make_response, render_template, current_app,
jsonify, url_for
)
from voeventdb.server import __versiondict__ as package_version_dict
from voeventdb.server.restapi.annotate import lookup_relevant_urls
from voeventdb.server.database import session_registry as db_session
from voeventdb.server.database.models import Voevent, Cite, Coord
import voeventdb.server.database.convenience as convenience
import voeventdb.server.restapi.v1.apierror as apierror
import voeventdb.server.database.query as query
from voeventdb.server.restapi.v1.viewbase import (
QueryView, ListQueryView, _add_to_blueprint, make_response_dict
)
import six
if six.PY3:
from urllib.parse import unquote
else:
from urllib import unquote
# This import may look unused, but activates the filter registry -
# Do not delete!
# noinspection PyUnresolvedReferences
import voeventdb.server.restapi.v1.filters
apiv1 = Blueprint('apiv1', __name__,
url_prefix='/apiv1')
# First define a few helper functions...
def add_to_apiv1(queryview_class):
"""
Partially bind the 'add_to_blueprint' wrapper so we can use it as a decorator.
"""
return _add_to_blueprint(queryview_class, apiv1)
def get_apiv1_rules():
rules = [r for r in sorted(current_app.url_map.iter_rules(),
key=lambda x: str(x))
if r.endpoint.startswith('apiv1')]
endpoints_listed = set()
pruned_rules = []
for r in rules:
if r.endpoint not in endpoints_listed:
pruned_rules.append(r)
endpoints_listed.add(r.endpoint)
return pruned_rules
def error_to_dict(error):
return {
'error': {
'code': error.code,
'description': error.description,
'message': str(error).replace('\n', '').strip()
}
}
def validate_ivorn(url_encoded_ivorn):
if url_encoded_ivorn and current_app.config.get('APACHE_NODECODE'):
ivorn = unquote(url_encoded_ivorn)
else:
ivorn = url_encoded_ivorn
if ivorn is None:
raise apierror.IvornNotSupplied(
suggested_ivorn_url=url_for(apiv1.name + '.' + ListIvorn.view_name))
if not convenience.ivorn_present(db_session, ivorn):
raise apierror.IvornNotFound(
ivorn,
suggested_ivorn_url=url_for(apiv1.name + '.' + ListIvorn.view_name))
return ivorn
# Now root url, error handlers:
@apiv1.route('/')
@apiv1.errorhandler(apierror.LimitMaxExceeded)
@apiv1.errorhandler(apierror.InvalidQueryString)
@apiv1.errorhandler(apierror.IvornNotFound)
@apiv1.errorhandler(apierror.IvornNotSupplied)
def ivorn_error(error):
if 'text/html' in request.headers.get("Accept", ""):
return render_template('errorbase.html',
error=error
), error.code
else:
return jsonify(error_to_dict(error)), error.code
@apiv1.app_errorhandler(404)
def page_not_found(abort_error):
if 'text/html' in request.headers.get("Accept", ""):
docs_url = current_app.config.get('DOCS_URL',
'http://' + request.host + '/docs')
return render_template('404.html',
rules=get_apiv1_rules(),
docs_url=docs_url,
error=abort_error,
endpoints=[str(r) for r in get_apiv1_rules()],
), abort_error.code
else:
return jsonify(error_to_dict(abort_error)), abort_error.code
# -----------------------------------------------
# Alphabetically ordered endpoints from here on
# -----------------------------------------------
@add_to_apiv1
class Count(QueryView):
"""
Result (int):
Number of packets matching querystring.
Returns total number of packets in database if the querystring is blank.
"""
view_name = 'count'
def get_query(self):
return db_session.query(Voevent)
def process_query(self, q):
return q.count()
@add_to_apiv1
class ListIvorn(ListQueryView):
"""
Result (list of strings):
``[ ivorn1, ivorn2, ... ]``
List of ivorns matching querystring. Number returned is limited by the
``limit`` parameter, which defaults to 100 (see :ref:`pagination`).
"""
view_name = 'list/ivorn'
def get_query(self):
return db_session.query(Voevent.ivorn)
def process_query(self, query):
"""
Grab the first entry from every tuple as a single list.
"""
raw_results = query.all()
if len(raw_results):
return list(zip(*raw_results))[0]
else:
return raw_results
@add_to_apiv1
class ListIvornReferenceCount(ListQueryView):
"""
Result (list of 2-element lists):
``[[ivorn, n_refs], ...]``
Get rows containing reference counts. Row entries are
- IVORN of packet
- Number of references to other packets, in this packet.
"""
view_name = 'list/ivorn_nrefs'
def get_query(self):
return query.ivorn_cites_to_others_count_q(db_session)
def process_query(self, query):
return [tuple(r) for r in query.all()]
@add_to_apiv1
class ListIvornCitedCount(ListQueryView):
"""
Result (list of 2-element lists):
``[[ivorn, n_cited], ...]``
Get rows containing citation counts. Row entries are:
- IVORN of packet
- Number of times this packet is cited by others
"""
view_name = 'list/ivorn_ncites'
def get_query(self):
return query.ivorn_cited_from_others_count_q(db_session)
def process_query(self, query):
return [tuple(r) for r in query.all()]
@add_to_apiv1
class MapAuthoredMonthCount(QueryView):
"""
Result:
Dict: Mapping month -> packet counts per-month.
Here, 'month' refers to the month of the 'authoring' DateTime,
i.e. the ``Who.Date`` element of the VOEvent. NB, may be None.
"""
view_name = 'map/authored_month_count'
def get_query(self):
return query.authored_month_counts_q(db_session)
def process_query(self, q):
raw_results = q.all()
converted_results = []
for r in raw_results:
if r.month_id:
newrow = (r.month_id.date().isoformat()[:-3], r.month_count)
else:
newrow = r
converted_results.append(newrow)
return dict(converted_results)
@add_to_apiv1
class MapRoleCount(QueryView):
"""
Result:
Dict: Mapping role -> packet counts per-role.
"""
view_name = 'map/role_count'
def get_query(self):
return query.role_counts_q(db_session)
def process_query(self, q):
return dict(q.all())
@add_to_apiv1
class MapStreamCount(QueryView):
"""
Result:
Dict: Mapping stream -> packet counts per-stream.
"""
view_name = 'map/stream_count'
def get_query(self):
return query.stream_counts_q(db_session)
def process_query(self, q):
return dict(q.all())
@add_to_apiv1
class MapStreamRoleCount(QueryView):
"""
Result:
Nested dict: Mapping stream -> role -> packet counts per-stream-and-role.
"""
view_name = 'map/stream_role_count'
def get_query(self):
return query.stream_counts_role_breakdown_q(db_session)
def process_query(self, q):
return convenience.to_nested_dict(q.all())
@apiv1.route('/packet/synopsis/')
@apiv1.route('/packet/synopsis/<path:url_encoded_ivorn>')
def packet_synopsis(url_encoded_ivorn=None):
"""
Result:
Nested dict providing key details, e.g.::
{"coords": [
{
"dec": 10.9712,
"error": 0.05,
"ra": 233.7307,
"time": "2015-10-01T15:04:22.930000+00:00"
},
...
],
"refs": [
{
"cite_type": u"followup",
"description": "This is the XRT Position ...",
"ref_ivorn": "ivo://nasa.gsfc.gcn/SWIFT#BAT_..."
},
...
],
"voevent": {
"author_datetime": "2015-10-01T15:04:46+00:00",
"author_ivorn": "ivo://nasa.gsfc.tan/gcn",
"ivorn": "ivo://nasa.gsfc.gcn/SWIFT#BAT_GRB_Pos_657286-112",
"received": "2015-11-19T20:41:38.226431+00:00",
"role": "observation",
"stream": "nasa.gsfc.gcn/SWIFT",
"version": "2.0"
}
"relevant_urls": [ "http://address1.foo.bar",
"http://address2.foo.bar"
]
}
Returns some key details for the packet specified by IVORN.
The required IVORN should be appended to the URL after ``/synopsis/``
in :ref:`URL-encoded <url-encoding>` form.
"""
ivorn = validate_ivorn(url_encoded_ivorn)
voevent_row = db_session.query(Voevent).filter(
Voevent.ivorn == ivorn).one()
cites = db_session.query(Cite). \
filter(Cite.voevent_id == voevent_row.id).all()
coords = db_session.query(Coord). \
filter(Coord.voevent_id == voevent_row.id).all()
v_dict = voevent_row.to_odict(exclude=('id', 'xml'))
cite_list = [c.to_odict(exclude=('id', 'voevent_id')) for c in cites]
coord_list = [c.to_odict(exclude=('id', 'voevent_id')) for c in coords]
relevant_urls = lookup_relevant_urls(voevent_row, cites)
result = {'voevent': v_dict,
'refs': cite_list,
'coords': coord_list,
'relevant_urls': relevant_urls,
}
return jsonify(make_response_dict(result))
@apiv1.route('/packet/xml/')
@apiv1.route('/packet/xml/<path:url_encoded_ivorn>')
def packet_xml(url_encoded_ivorn=None):
"""
Returns the XML packet contents stored for a given IVORN.
The required IVORN should be appended to the URL after ``/xml/``
in :ref:`URL-encoded <url-encoding>` form.
"""
# Handle Apache / Debug server difference...
# Apache conf must include the setting::
# AllowEncodedSlashes NoDecode
# otherwise urlencoded paths have
# double-slashes ('//') replaced with single-slashes ('/').
# However, the werkzeug simple-server decodes these by default,
# resulting in differing dev / production behaviour, which we handle here.
ivorn = validate_ivorn(url_encoded_ivorn)
xml = db_session.query(Voevent.xml).filter(
Voevent.ivorn == ivorn
).scalar()
r = make_response(xml)
r.mimetype = 'text/xml'
return r
|
timstaley/voeventdb | voeventdb/server/restapi/v1/views.py | packet_synopsis | python | def packet_synopsis(url_encoded_ivorn=None):
ivorn = validate_ivorn(url_encoded_ivorn)
voevent_row = db_session.query(Voevent).filter(
Voevent.ivorn == ivorn).one()
cites = db_session.query(Cite). \
filter(Cite.voevent_id == voevent_row.id).all()
coords = db_session.query(Coord). \
filter(Coord.voevent_id == voevent_row.id).all()
v_dict = voevent_row.to_odict(exclude=('id', 'xml'))
cite_list = [c.to_odict(exclude=('id', 'voevent_id')) for c in cites]
coord_list = [c.to_odict(exclude=('id', 'voevent_id')) for c in coords]
relevant_urls = lookup_relevant_urls(voevent_row, cites)
result = {'voevent': v_dict,
'refs': cite_list,
'coords': coord_list,
'relevant_urls': relevant_urls,
}
return jsonify(make_response_dict(result)) | Result:
Nested dict providing key details, e.g.::
{"coords": [
{
"dec": 10.9712,
"error": 0.05,
"ra": 233.7307,
"time": "2015-10-01T15:04:22.930000+00:00"
},
...
],
"refs": [
{
"cite_type": u"followup",
"description": "This is the XRT Position ...",
"ref_ivorn": "ivo://nasa.gsfc.gcn/SWIFT#BAT_..."
},
...
],
"voevent": {
"author_datetime": "2015-10-01T15:04:46+00:00",
"author_ivorn": "ivo://nasa.gsfc.tan/gcn",
"ivorn": "ivo://nasa.gsfc.gcn/SWIFT#BAT_GRB_Pos_657286-112",
"received": "2015-11-19T20:41:38.226431+00:00",
"role": "observation",
"stream": "nasa.gsfc.gcn/SWIFT",
"version": "2.0"
}
"relevant_urls": [ "http://address1.foo.bar",
"http://address2.foo.bar"
]
}
Returns some key details for the packet specified by IVORN.
The required IVORN should be appended to the URL after ``/synopsis/``
in :ref:`URL-encoded <url-encoding>` form. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/restapi/v1/views.py#L298-L364 | [
"def lookup_relevant_urls(voevent_row, cite_rows):\n assert isinstance(voevent_row, Voevent)\n urls = []\n urls.extend(lookup_swift_urls(voevent_row,cite_rows))\n return urls\n",
"def make_response_dict(result):\n resultdict = {\n ResultKeys.endpoint: request.url_rule.rule,\n ResultKe... | from __future__ import absolute_import
from flask import (
Blueprint, request, make_response, render_template, current_app,
jsonify, url_for
)
from voeventdb.server import __versiondict__ as package_version_dict
from voeventdb.server.restapi.annotate import lookup_relevant_urls
from voeventdb.server.database import session_registry as db_session
from voeventdb.server.database.models import Voevent, Cite, Coord
import voeventdb.server.database.convenience as convenience
import voeventdb.server.restapi.v1.apierror as apierror
import voeventdb.server.database.query as query
from voeventdb.server.restapi.v1.viewbase import (
QueryView, ListQueryView, _add_to_blueprint, make_response_dict
)
import six
if six.PY3:
from urllib.parse import unquote
else:
from urllib import unquote
# This import may look unused, but activates the filter registry -
# Do not delete!
# noinspection PyUnresolvedReferences
import voeventdb.server.restapi.v1.filters
apiv1 = Blueprint('apiv1', __name__,
url_prefix='/apiv1')
# First define a few helper functions...
def add_to_apiv1(queryview_class):
"""
Partially bind the 'add_to_blueprint' wrapper so we can use it as a decorator.
"""
return _add_to_blueprint(queryview_class, apiv1)
def get_apiv1_rules():
rules = [r for r in sorted(current_app.url_map.iter_rules(),
key=lambda x: str(x))
if r.endpoint.startswith('apiv1')]
endpoints_listed = set()
pruned_rules = []
for r in rules:
if r.endpoint not in endpoints_listed:
pruned_rules.append(r)
endpoints_listed.add(r.endpoint)
return pruned_rules
def error_to_dict(error):
return {
'error': {
'code': error.code,
'description': error.description,
'message': str(error).replace('\n', '').strip()
}
}
def validate_ivorn(url_encoded_ivorn):
if url_encoded_ivorn and current_app.config.get('APACHE_NODECODE'):
ivorn = unquote(url_encoded_ivorn)
else:
ivorn = url_encoded_ivorn
if ivorn is None:
raise apierror.IvornNotSupplied(
suggested_ivorn_url=url_for(apiv1.name + '.' + ListIvorn.view_name))
if not convenience.ivorn_present(db_session, ivorn):
raise apierror.IvornNotFound(
ivorn,
suggested_ivorn_url=url_for(apiv1.name + '.' + ListIvorn.view_name))
return ivorn
# Now root url, error handlers:
@apiv1.route('/')
def apiv1_root_view():
"""
API root url. Shows a list of active endpoints.
"""
docs_url = current_app.config.get('DOCS_URL',
'http://' + request.host + '/docs')
message = "Welcome to the voeventdb REST API, " \
"interface version '{}'.".format(
apiv1.name)
api_details = {
'message': message,
'api_version': apiv1.name,
'git_sha': package_version_dict['full-revisionid'][:8],
'version_tag': package_version_dict['version'],
'endpoints': [str(r) for r in get_apiv1_rules()],
'docs_url': docs_url
}
if 'text/html' in request.headers.get("Accept", ""):
return render_template('landing.html',
**api_details
)
else:
return jsonify(api_details)
@apiv1.errorhandler(apierror.LimitMaxExceeded)
@apiv1.errorhandler(apierror.InvalidQueryString)
@apiv1.errorhandler(apierror.IvornNotFound)
@apiv1.errorhandler(apierror.IvornNotSupplied)
def ivorn_error(error):
if 'text/html' in request.headers.get("Accept", ""):
return render_template('errorbase.html',
error=error
), error.code
else:
return jsonify(error_to_dict(error)), error.code
@apiv1.app_errorhandler(404)
def page_not_found(abort_error):
if 'text/html' in request.headers.get("Accept", ""):
docs_url = current_app.config.get('DOCS_URL',
'http://' + request.host + '/docs')
return render_template('404.html',
rules=get_apiv1_rules(),
docs_url=docs_url,
error=abort_error,
endpoints=[str(r) for r in get_apiv1_rules()],
), abort_error.code
else:
return jsonify(error_to_dict(abort_error)), abort_error.code
# -----------------------------------------------
# Alphabetically ordered endpoints from here on
# -----------------------------------------------
@add_to_apiv1
class Count(QueryView):
"""
Result (int):
Number of packets matching querystring.
Returns total number of packets in database if the querystring is blank.
"""
view_name = 'count'
def get_query(self):
return db_session.query(Voevent)
def process_query(self, q):
return q.count()
@add_to_apiv1
class ListIvorn(ListQueryView):
"""
Result (list of strings):
``[ ivorn1, ivorn2, ... ]``
List of ivorns matching querystring. Number returned is limited by the
``limit`` parameter, which defaults to 100 (see :ref:`pagination`).
"""
view_name = 'list/ivorn'
def get_query(self):
return db_session.query(Voevent.ivorn)
def process_query(self, query):
"""
Grab the first entry from every tuple as a single list.
"""
raw_results = query.all()
if len(raw_results):
return list(zip(*raw_results))[0]
else:
return raw_results
@add_to_apiv1
class ListIvornReferenceCount(ListQueryView):
"""
Result (list of 2-element lists):
``[[ivorn, n_refs], ...]``
Get rows containing reference counts. Row entries are
- IVORN of packet
- Number of references to other packets, in this packet.
"""
view_name = 'list/ivorn_nrefs'
def get_query(self):
return query.ivorn_cites_to_others_count_q(db_session)
def process_query(self, query):
return [tuple(r) for r in query.all()]
@add_to_apiv1
class ListIvornCitedCount(ListQueryView):
"""
Result (list of 2-element lists):
``[[ivorn, n_cited], ...]``
Get rows containing citation counts. Row entries are:
- IVORN of packet
- Number of times this packet is cited by others
"""
view_name = 'list/ivorn_ncites'
def get_query(self):
return query.ivorn_cited_from_others_count_q(db_session)
def process_query(self, query):
return [tuple(r) for r in query.all()]
@add_to_apiv1
class MapAuthoredMonthCount(QueryView):
"""
Result:
Dict: Mapping month -> packet counts per-month.
Here, 'month' refers to the month of the 'authoring' DateTime,
i.e. the ``Who.Date`` element of the VOEvent. NB, may be None.
"""
view_name = 'map/authored_month_count'
def get_query(self):
return query.authored_month_counts_q(db_session)
def process_query(self, q):
raw_results = q.all()
converted_results = []
for r in raw_results:
if r.month_id:
newrow = (r.month_id.date().isoformat()[:-3], r.month_count)
else:
newrow = r
converted_results.append(newrow)
return dict(converted_results)
@add_to_apiv1
class MapRoleCount(QueryView):
"""
Result:
Dict: Mapping role -> packet counts per-role.
"""
view_name = 'map/role_count'
def get_query(self):
return query.role_counts_q(db_session)
def process_query(self, q):
return dict(q.all())
@add_to_apiv1
class MapStreamCount(QueryView):
"""
Result:
Dict: Mapping stream -> packet counts per-stream.
"""
view_name = 'map/stream_count'
def get_query(self):
return query.stream_counts_q(db_session)
def process_query(self, q):
return dict(q.all())
@add_to_apiv1
class MapStreamRoleCount(QueryView):
"""
Result:
Nested dict: Mapping stream -> role -> packet counts per-stream-and-role.
"""
view_name = 'map/stream_role_count'
def get_query(self):
return query.stream_counts_role_breakdown_q(db_session)
def process_query(self, q):
return convenience.to_nested_dict(q.all())
@apiv1.route('/packet/synopsis/')
@apiv1.route('/packet/synopsis/<path:url_encoded_ivorn>')
@apiv1.route('/packet/xml/')
@apiv1.route('/packet/xml/<path:url_encoded_ivorn>')
def packet_xml(url_encoded_ivorn=None):
"""
Returns the XML packet contents stored for a given IVORN.
The required IVORN should be appended to the URL after ``/xml/``
in :ref:`URL-encoded <url-encoding>` form.
"""
# Handle Apache / Debug server difference...
# Apache conf must include the setting::
# AllowEncodedSlashes NoDecode
# otherwise urlencoded paths have
# double-slashes ('//') replaced with single-slashes ('/').
# However, the werkzeug simple-server decodes these by default,
# resulting in differing dev / production behaviour, which we handle here.
ivorn = validate_ivorn(url_encoded_ivorn)
xml = db_session.query(Voevent.xml).filter(
Voevent.ivorn == ivorn
).scalar()
r = make_response(xml)
r.mimetype = 'text/xml'
return r
|
timstaley/voeventdb | voeventdb/server/restapi/v1/views.py | packet_xml | python | def packet_xml(url_encoded_ivorn=None):
# Handle Apache / Debug server difference...
# Apache conf must include the setting::
# AllowEncodedSlashes NoDecode
# otherwise urlencoded paths have
# double-slashes ('//') replaced with single-slashes ('/').
# However, the werkzeug simple-server decodes these by default,
# resulting in differing dev / production behaviour, which we handle here.
ivorn = validate_ivorn(url_encoded_ivorn)
xml = db_session.query(Voevent.xml).filter(
Voevent.ivorn == ivorn
).scalar()
r = make_response(xml)
r.mimetype = 'text/xml'
return r | Returns the XML packet contents stored for a given IVORN.
The required IVORN should be appended to the URL after ``/xml/``
in :ref:`URL-encoded <url-encoding>` form. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/restapi/v1/views.py#L369-L390 | [
"def validate_ivorn(url_encoded_ivorn):\n if url_encoded_ivorn and current_app.config.get('APACHE_NODECODE'):\n ivorn = unquote(url_encoded_ivorn)\n else:\n ivorn = url_encoded_ivorn\n if ivorn is None:\n raise apierror.IvornNotSupplied(\n suggested_ivorn_url=url_for(apiv1.n... | from __future__ import absolute_import
from flask import (
Blueprint, request, make_response, render_template, current_app,
jsonify, url_for
)
from voeventdb.server import __versiondict__ as package_version_dict
from voeventdb.server.restapi.annotate import lookup_relevant_urls
from voeventdb.server.database import session_registry as db_session
from voeventdb.server.database.models import Voevent, Cite, Coord
import voeventdb.server.database.convenience as convenience
import voeventdb.server.restapi.v1.apierror as apierror
import voeventdb.server.database.query as query
from voeventdb.server.restapi.v1.viewbase import (
QueryView, ListQueryView, _add_to_blueprint, make_response_dict
)
import six
if six.PY3:
from urllib.parse import unquote
else:
from urllib import unquote
# This import may look unused, but activates the filter registry -
# Do not delete!
# noinspection PyUnresolvedReferences
import voeventdb.server.restapi.v1.filters
apiv1 = Blueprint('apiv1', __name__,
url_prefix='/apiv1')
# First define a few helper functions...
def add_to_apiv1(queryview_class):
"""
Partially bind the 'add_to_blueprint' wrapper so we can use it as a decorator.
"""
return _add_to_blueprint(queryview_class, apiv1)
def get_apiv1_rules():
rules = [r for r in sorted(current_app.url_map.iter_rules(),
key=lambda x: str(x))
if r.endpoint.startswith('apiv1')]
endpoints_listed = set()
pruned_rules = []
for r in rules:
if r.endpoint not in endpoints_listed:
pruned_rules.append(r)
endpoints_listed.add(r.endpoint)
return pruned_rules
def error_to_dict(error):
return {
'error': {
'code': error.code,
'description': error.description,
'message': str(error).replace('\n', '').strip()
}
}
def validate_ivorn(url_encoded_ivorn):
if url_encoded_ivorn and current_app.config.get('APACHE_NODECODE'):
ivorn = unquote(url_encoded_ivorn)
else:
ivorn = url_encoded_ivorn
if ivorn is None:
raise apierror.IvornNotSupplied(
suggested_ivorn_url=url_for(apiv1.name + '.' + ListIvorn.view_name))
if not convenience.ivorn_present(db_session, ivorn):
raise apierror.IvornNotFound(
ivorn,
suggested_ivorn_url=url_for(apiv1.name + '.' + ListIvorn.view_name))
return ivorn
# Now root url, error handlers:
@apiv1.route('/')
def apiv1_root_view():
"""
API root url. Shows a list of active endpoints.
"""
docs_url = current_app.config.get('DOCS_URL',
'http://' + request.host + '/docs')
message = "Welcome to the voeventdb REST API, " \
"interface version '{}'.".format(
apiv1.name)
api_details = {
'message': message,
'api_version': apiv1.name,
'git_sha': package_version_dict['full-revisionid'][:8],
'version_tag': package_version_dict['version'],
'endpoints': [str(r) for r in get_apiv1_rules()],
'docs_url': docs_url
}
if 'text/html' in request.headers.get("Accept", ""):
return render_template('landing.html',
**api_details
)
else:
return jsonify(api_details)
@apiv1.errorhandler(apierror.LimitMaxExceeded)
@apiv1.errorhandler(apierror.InvalidQueryString)
@apiv1.errorhandler(apierror.IvornNotFound)
@apiv1.errorhandler(apierror.IvornNotSupplied)
def ivorn_error(error):
    """
    Render an API error as HTML or JSON, depending on the Accept header.
    """
    wants_html = 'text/html' in request.headers.get("Accept", "")
    if wants_html:
        return render_template('errorbase.html', error=error), error.code
    return jsonify(error_to_dict(error)), error.code
@apiv1.app_errorhandler(404)
def page_not_found(abort_error):
    """
    App-wide 404 handler: HTML page for browsers, JSON for API clients.
    """
    if 'text/html' not in request.headers.get("Accept", ""):
        return jsonify(error_to_dict(abort_error)), abort_error.code
    docs_url = current_app.config.get('DOCS_URL',
                                      'http://' + request.host + '/docs')
    rules = get_apiv1_rules()
    return render_template('404.html',
                           rules=rules,
                           docs_url=docs_url,
                           error=abort_error,
                           endpoints=[str(r) for r in rules],
                           ), abort_error.code
# -----------------------------------------------
# Alphabetically ordered endpoints from here on
# -----------------------------------------------
@add_to_apiv1
class Count(QueryView):
    """
    Result (int):
        Number of packets matching querystring.
        Returns total number of packets in database if the querystring is blank.
    """
    view_name = 'count'

    def get_query(self):
        # Base query over every stored VOEvent row; querystring filters are
        # applied upstream by the QueryView machinery.
        return db_session.query(Voevent)

    def process_query(self, q):
        # Collapse the (possibly filtered) query to a single row-count.
        n_packets = q.count()
        return n_packets
@add_to_apiv1
class ListIvorn(ListQueryView):
    """
    Result (list of strings):
        ``[ ivorn1, ivorn2, ... ]``
    List of ivorns matching querystring. Number returned is limited by the
    ``limit`` parameter, which defaults to 100 (see :ref:`pagination`).
    """
    view_name = 'list/ivorn'

    def get_query(self):
        return db_session.query(Voevent.ivorn)

    def process_query(self, query):
        """
        Grab the first entry from every tuple as a single list.
        """
        rows = query.all()
        if not rows:
            return rows
        # Each row is a 1-tuple ``(ivorn,)``: transpose the rowset and take
        # the first (only) column.
        return list(zip(*rows))[0]
@add_to_apiv1
class ListIvornReferenceCount(ListQueryView):
    """
    Result (list of 2-element lists):
        ``[[ivorn, n_refs], ...]``
    Get rows containing reference counts. Row entries are
    - IVORN of packet
    - Number of references to other packets, in this packet.
    """
    view_name = 'list/ivorn_nrefs'

    def get_query(self):
        return query.ivorn_cites_to_others_count_q(db_session)

    def process_query(self, query):
        # Convert SQLAlchemy row objects to plain tuples for serialization.
        return list(map(tuple, query.all()))
@add_to_apiv1
class ListIvornCitedCount(ListQueryView):
    """
    Result (list of 2-element lists):
        ``[[ivorn, n_cited], ...]``
    Get rows containing citation counts. Row entries are:
    - IVORN of packet
    - Number of times this packet is cited by others
    """
    view_name = 'list/ivorn_ncites'

    def get_query(self):
        return query.ivorn_cited_from_others_count_q(db_session)

    def process_query(self, query):
        # Convert SQLAlchemy row objects to plain tuples for serialization.
        return list(map(tuple, query.all()))
@add_to_apiv1
class MapAuthoredMonthCount(QueryView):
    """
    Result:
        Dict: Mapping month -> packet counts per-month.
    Here, 'month' refers to the month of the 'authoring' DateTime,
    i.e. the ``Who.Date`` element of the VOEvent. NB, may be None.
    """
    view_name = 'map/authored_month_count'

    def get_query(self):
        return query.authored_month_counts_q(db_session)

    def process_query(self, q):
        month_counts = {}
        for row in q.all():
            if row.month_id:
                # Truncate the ISO date 'YYYY-MM-DD' down to 'YYYY-MM'.
                key = row.month_id.date().isoformat()[:-3]
                month_counts[key] = row.month_count
            else:
                # Packets with no authoring date are keyed under None.
                month_counts[row.month_id] = row.month_count
        return month_counts
@add_to_apiv1
class MapRoleCount(QueryView):
    """
    Result:
        Dict: Mapping role -> packet counts per-role.
    """
    view_name = 'map/role_count'

    def get_query(self):
        return query.role_counts_q(db_session)

    def process_query(self, q):
        # Rows are (role, count) pairs; build the mapping directly.
        role_counts = dict(q.all())
        return role_counts
@add_to_apiv1
class MapStreamCount(QueryView):
    """
    Result:
        Dict: Mapping stream -> packet counts per-stream.
    """
    view_name = 'map/stream_count'

    def get_query(self):
        return query.stream_counts_q(db_session)

    def process_query(self, q):
        # Rows are (stream, count) pairs; build the mapping directly.
        stream_counts = dict(q.all())
        return stream_counts
@add_to_apiv1
class MapStreamRoleCount(QueryView):
    """
    Result:
        Nested dict: Mapping stream -> role -> packet counts per-stream-and-role.
    """
    view_name = 'map/stream_role_count'

    def get_query(self):
        return query.stream_counts_role_breakdown_q(db_session)

    def process_query(self, q):
        # Re-shape (stream, role, count) rows into stream -> role -> count.
        return convenience.to_nested_dict(q.all())
@apiv1.route('/packet/synopsis/')
@apiv1.route('/packet/synopsis/<path:url_encoded_ivorn>')
def packet_synopsis(url_encoded_ivorn=None):
    """
    Result:
        Nested dict providing key details, e.g.::

            {"coords": [
                {
                  "dec": 10.9712,
                  "error": 0.05,
                  "ra": 233.7307,
                  "time": "2015-10-01T15:04:22.930000+00:00"
                },
                ...
                ],
            "refs": [
                {
                  "cite_type": u"followup",
                  "description": "This is the XRT Position ...",
                  "ref_ivorn": "ivo://nasa.gsfc.gcn/SWIFT#BAT_..."
                },
                ...
                ],
            "voevent": {
                "author_datetime": "2015-10-01T15:04:46+00:00",
                "author_ivorn": "ivo://nasa.gsfc.tan/gcn",
                "ivorn": "ivo://nasa.gsfc.gcn/SWIFT#BAT_GRB_Pos_657286-112",
                "received": "2015-11-19T20:41:38.226431+00:00",
                "role": "observation",
                "stream": "nasa.gsfc.gcn/SWIFT",
                "version": "2.0"
                }
            "relevant_urls": [ "http://address1.foo.bar",
                               "http://address2.foo.bar"
                ]
            }

    Returns some key details for the packet specified by IVORN.
    The required IVORN should be appended to the URL after ``/synopsis/``
    in :ref:`URL-encoded <url-encoding>` form.
    """
    # Raises (-> 4xx response) if the IVORN is missing or not in the database.
    ivorn = validate_ivorn(url_encoded_ivorn)
    # 'ivorn' is a unique column, so .one() matches exactly one row.
    voevent_row = db_session.query(Voevent).filter(
        Voevent.ivorn == ivorn).one()
    # Child rows: references made by this packet, and its parsed coordinates.
    cites = db_session.query(Cite). \
        filter(Cite.voevent_id == voevent_row.id).all()
    coords = db_session.query(Coord). \
        filter(Coord.voevent_id == voevent_row.id).all()
    # Serialise ORM rows to plain dicts, dropping internal ids / raw XML.
    v_dict = voevent_row.to_odict(exclude=('id', 'xml'))
    cite_list = [c.to_odict(exclude=('id', 'voevent_id')) for c in cites]
    coord_list = [c.to_odict(exclude=('id', 'voevent_id')) for c in coords]
    relevant_urls = lookup_relevant_urls(voevent_row, cites)
    result = {'voevent': v_dict,
              'refs': cite_list,
              'coords': coord_list,
              'relevant_urls': relevant_urls,
              }
    return jsonify(make_response_dict(result))
@apiv1.route('/packet/xml/')
@apiv1.route('/packet/xml/<path:url_encoded_ivorn>')
|
def apply_filters(query, args):
    """
    Apply all QueryFilters, validating the querystring in the process.
    """
    pre_joins = []
    for qs_key, qs_value in args.items(multi=True):
        if qs_key in filter_registry:
            # Known filter key: let the registered filter modify the query.
            # (pre_joins is shared so prerequisite tables join only once.)
            query = filter_registry[qs_key].apply_filter(query, args, pre_joins)
        elif qs_key not in PaginationKeys._value_list:
            # Neither a registered filter nor a pagination key: reject.
            raise InvalidQueryString(qs_key, qs_value)
    return query
from voeventdb.server.restapi.v1.apierror import InvalidQueryString
from voeventdb.server.restapi.v1.definitions import PaginationKeys
"""
Define the underlying machinery we'll use to implement query filters.
"""
# Global mapping of querystring-key -> QueryFilter singleton instance.
filter_registry = {}


def add_to_filter_registry(cls):
    """
    To be used as a class decorator: registers a singleton instance of
    *cls* under its querystring key, then returns the class unchanged.
    """
    filter_registry[cls.querystring_key] = cls()
    return cls
class QueryFilter(object):
    """
    Provides an interface example for QueryFilter classes to follow, and
    centralises some common code.

    .. Note::
        In earlier iterations, query-filtering was simply performed in one big
        function, which was simpler in terms of using basic language constructs.
        However, it was starting to get very messy, with effectively a
        many-entry 'case' statement, some tricky repeated syntax around
        generating multi-clause filters, etc. Documenting the possible
        query-string filter-keys was also a bit awkward.
        By adopting a 'class-registry' pattern, we require a bit more fussy
        set-up code, but the result is a set of clearly defined filters
        with regular docstrings and self-documenting examples.
        It also allows us to easily check whether a given query-string key is
        valid, and return the relevant HTTP-error if not.
        As a bonus, we can use the registry to create a unit-test matrix via
        pytest's fixture generation, which is neat.
    """
    querystring_key = None    # Querystring key this filter responds to.
    example_values = None     # Example values (docs / test-matrix generation).
    simplejoin_tables = None  # Tables to join before filtering, if any.

    def combinator(self, filters):
        """
        Function used to combine multiple filter clauses.
        By default, if a query-key is passed multiple times we just use the
        first value (see below).
        Alternatively, a QueryFilter class may set this equal to SQLAlchemy's
        :py:class:`sqlalchemy,or_` or :py:class:`sqlalchemy.and_` functions.
        """
        return list(filters)[0]

    def filter(self, filter_value):
        """
        Map a single querystring value to an SQLAlchemy filter clause.

        Must be overridden by subclasses.
        """
        # Bugfix: this previously *returned* the NotImplementedError class,
        # so a subclass that forgot to override would silently pass the
        # exception class into ``query.filter()``. Raising makes it loud.
        raise NotImplementedError

    def generate_filter_set(self, args):
        # Build one clause per supplied value, then merge via ``combinator``.
        return self.combinator(
            self.filter(filter_value)
            for filter_value in args.getlist(self.querystring_key)
        )

    def apply_filter(self, query, args, pre_joins):
        # Join any prerequisite tables exactly once; ``pre_joins`` is shared
        # across filters so repeated joins are avoided.
        if self.simplejoin_tables:
            for tbl in self.simplejoin_tables:
                if tbl not in pre_joins:
                    query = query.join(tbl)
                    pre_joins.append(tbl)
        query = query.filter(self.generate_filter_set(args))
        return query
|
timstaley/voeventdb | voeventdb/server/database/convenience.py | ivorn_present | python | def ivorn_present(session, ivorn):
return bool(
session.query(Voevent.id).filter(Voevent.ivorn == ivorn).count()) | Predicate, returns whether the IVORN is in the database. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/convenience.py#L11-L16 | null | from __future__ import absolute_import
from voeventdb.server.database.models import Voevent
from sqlalchemy import func
import voeventdb.server.database.query as query
import logging
logger = logging.getLogger(__name__)
def ivorn_prefix_present(session, ivorn_prefix):
    """
    Predicate, returns whether there is an entry in the database with matching
    IVORN prefix.
    """
    # SQL LIKE with a trailing wildcard implements the prefix match.
    like_pattern = '{}%'.format(ivorn_prefix)
    n_matches = session.query(Voevent.ivorn).filter(
        Voevent.ivorn.like(like_pattern)).count()
    return bool(n_matches)
def safe_insert_voevent(session, etree):
    """
    Insert a VOEvent, or skip with a warning if it's a duplicate.

    NB XML contents are checked to confirm duplication - if there's a mismatch,
    we raise a ValueError.
    """
    candidate = Voevent.from_etree(etree)
    if not ivorn_present(session, candidate.ivorn):
        session.add(candidate)
        return
    # Duplicate IVORN: acceptable only when the stored XML matches exactly.
    stored_xml = session.query(Voevent.xml).filter(
        Voevent.ivorn == candidate.ivorn).scalar()
    if stored_xml != candidate.xml:
        raise ValueError('Tried to load a VOEvent with duplicate IVORN,'
                         'but XML contents differ - not clear what to do.')
    logger.warning('Skipping insert for packet with duplicate IVORN, '
                   'XML matches OK.')
def to_nested_dict(bi_grouped_rowset):
    """
    Convert (outer_key, inner_key, value) rows into a two-level nested dict.
    """
    nested = {}
    for row in bi_grouped_rowset:
        # setdefault creates the inner dict on first sight of an outer key.
        nested.setdefault(row[0], {})[row[1]] = row[2]
    return nested
|
timstaley/voeventdb | voeventdb/server/database/convenience.py | ivorn_prefix_present | python | def ivorn_prefix_present(session, ivorn_prefix):
n_matches = session.query(Voevent.ivorn).filter(
Voevent.ivorn.like('{}%'.format(ivorn_prefix))).count()
return bool(n_matches) | Predicate, returns whether there is an entry in the database with matching
IVORN prefix. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/convenience.py#L18-L25 | null | from __future__ import absolute_import
from voeventdb.server.database.models import Voevent
from sqlalchemy import func
import voeventdb.server.database.query as query
import logging
logger = logging.getLogger(__name__)
def ivorn_present(session, ivorn):
    """
    Predicate, returns whether the IVORN is in the database.
    """
    # Select only the primary key (cheap) and collapse the row-count to bool.
    return bool(
        session.query(Voevent.id).filter(Voevent.ivorn == ivorn).count())
def safe_insert_voevent(session, etree):
"""
Insert a VOEvent, or skip with a warning if it's a duplicate.
NB XML contents are checked to confirm duplication - if there's a mismatch,
we raise a ValueError.
"""
new_row = Voevent.from_etree(etree)
if not ivorn_present(session, new_row.ivorn):
session.add(new_row)
else:
old_xml = session.query(Voevent.xml).filter(
Voevent.ivorn == new_row.ivorn).scalar()
if old_xml != new_row.xml:
raise ValueError('Tried to load a VOEvent with duplicate IVORN,'
'but XML contents differ - not clear what to do.')
else:
logger.warning('Skipping insert for packet with duplicate IVORN, '
'XML matches OK.')
def to_nested_dict(bi_grouped_rowset):
nested = {}
for r in bi_grouped_rowset:
if r[0] not in nested:
nested[r[0]]={}
nested[r[0]][r[1]] = r[2]
return nested
|
timstaley/voeventdb | voeventdb/server/database/convenience.py | safe_insert_voevent | python | def safe_insert_voevent(session, etree):
new_row = Voevent.from_etree(etree)
if not ivorn_present(session, new_row.ivorn):
session.add(new_row)
else:
old_xml = session.query(Voevent.xml).filter(
Voevent.ivorn == new_row.ivorn).scalar()
if old_xml != new_row.xml:
raise ValueError('Tried to load a VOEvent with duplicate IVORN,'
'but XML contents differ - not clear what to do.')
else:
logger.warning('Skipping insert for packet with duplicate IVORN, '
'XML matches OK.') | Insert a VOEvent, or skip with a warning if it's a duplicate.
NB XML contents are checked to confirm duplication - if there's a mismatch,
we raise a ValueError. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/convenience.py#L28-L46 | [
"def ivorn_present(session, ivorn):\n \"\"\"\n Predicate, returns whether the IVORN is in the database.\n \"\"\"\n return bool(\n session.query(Voevent.id).filter(Voevent.ivorn == ivorn).count())\n",
"def from_etree(root, received=pytz.UTC.localize(datetime.utcnow())):\n \"\"\"\n Init a V... | from __future__ import absolute_import
from voeventdb.server.database.models import Voevent
from sqlalchemy import func
import voeventdb.server.database.query as query
import logging
logger = logging.getLogger(__name__)
def ivorn_present(session, ivorn):
"""
Predicate, returns whether the IVORN is in the database.
"""
return bool(
session.query(Voevent.id).filter(Voevent.ivorn == ivorn).count())
def ivorn_prefix_present(session, ivorn_prefix):
"""
Predicate, returns whether there is an entry in the database with matching
IVORN prefix.
"""
n_matches = session.query(Voevent.ivorn).filter(
Voevent.ivorn.like('{}%'.format(ivorn_prefix))).count()
return bool(n_matches)
def to_nested_dict(bi_grouped_rowset):
nested = {}
for r in bi_grouped_rowset:
if r[0] not in nested:
nested[r[0]]={}
nested[r[0]][r[1]] = r[2]
return nested
|
timstaley/voeventdb | voeventdb/server/database/models.py | _grab_xpath | python | def _grab_xpath(root, xpath, converter=lambda x: x):
elements = root.xpath(xpath)
if elements:
return converter(str(elements[0]))
else:
return None | XML convenience - grabs the first element at xpath if present, else returns None. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/models.py#L18-L26 | null | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (backref, deferred, relationship,
)
from sqlalchemy import Column, ForeignKey, Index, func
import sqlalchemy as sql
import voeventparse as vp
from datetime import datetime
import iso8601
import pytz
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
Base = declarative_base()
def _has_bad_coords(root, stream):
    """
    Predicate function encapsulating 'data clean up' filter code.
    Currently minimal, but these sort of functions tend to grow over time.

    Problem 1:
        Some of the GCN packets have an RA /Dec equal to (0,0) in the WhereWhen,
        and a flag in the What signifying that those are actually dummy co-ords.
        (This is used for time-stamping an event which is not localised).
        So, we don't load those positions, to avoid muddying the database
        corpus.

    Problem 2:
        com.dc3/dc3.broker#BrokerTest packets have dummy RA/Dec values,
        with no units specified.
        (They're also marked role=test, so it's not such a big deal,
        but it generates a lot of debug-log churn.)
    """
    # Problem 2: the dc3 broker test stream always carries dummy coords.
    if stream == "com.dc3/dc3.broker":
        return True
    # Problem 1 only applies to GCN streams.
    if stream.split('/')[0] != 'nasa.gsfc.gcn':
        return False
    toplevel_params = vp.get_toplevel_params(root)
    if "Coords_String" in toplevel_params:
        coords_flag = toplevel_params["Coords_String"]['value']
        if coords_flag == "unavailable/inappropriate":
            return True
    return False
class OdictMixin(object):
    def to_odict(self, exclude=None):
        """
        Return an OrderedDict of {column-name: value} for this table row.

        Columns named in *exclude* (an iterable of names) are omitted.
        """
        excluded = tuple() if exclude is None else exclude
        pairs = []
        for column in self.__table__.columns:
            if column.name in excluded:
                continue
            pairs.append((column.name, getattr(self, column.name)))
        return OrderedDict(pairs)
class Voevent(Base, OdictMixin):
    """
    Define the core VOEvent table.

    .. NOTE::
        On datetimes:
        We store datetimes 'with timezone' even though we'll use the convention
        of storing UTC throughout (and VOEvents are UTC too).
        This helps to make explicit what convention we're using and avoid
        any possible timezone-naive mixups down the line.
        However, if this ever gets used at (really large!) scale, then may
        need to be wary of issues with partitioning really large datasets, cf:
        http://justatheory.com/computers/databases/postgresql/use-timestamptz.html
        http://www.postgresql.org/docs/9.1/static/ddl-partitioning.html
    """
    __tablename__ = 'voevent'
    # Basics: Attributes or associated metadata present for almost every VOEvent:
    id = Column(sql.Integer, primary_key=True)
    received = Column(
        sql.DateTime(timezone=True), nullable=False,
        doc="Records when the packet was loaded into the database"
    )
    ivorn = Column(sql.String, nullable=False, unique=True, index=True)
    stream = Column(sql.String, index=True)
    role = Column(sql.Enum(vp.definitions.roles.observation,
                           vp.definitions.roles.prediction,
                           vp.definitions.roles.utility,
                           vp.definitions.roles.test,
                           name="roles_enum",
                           ),
                  index=True
                  )
    version = Column(sql.String)
    # Who
    author_ivorn = Column(sql.String)
    author_datetime = Column(sql.DateTime(timezone=True))
    # Finally, the raw XML. Mark this for lazy-loading, cf:
    # http://docs.sqlalchemy.org/en/latest/orm/loading_columns.html
    xml = deferred(Column(sql.LargeBinary))
    cites = relationship("Cite", backref=backref('voevent', order_by=id),
                         cascade="all, delete, delete-orphan")
    coords = relationship('Coord', backref=backref('voevent', order_by=id),
                          cascade="all, delete, delete-orphan")

    @staticmethod
    def from_etree(root, received=None):
        """
        Init a Voevent row from an LXML etree loaded with voevent-parse.

        ``received`` is the timezone-aware ingest timestamp; it defaults to
        the current UTC time.
        """
        # Bugfix: the default used to be evaluated once at import time
        # (``received=pytz.UTC.localize(datetime.utcnow())`` in the
        # signature), so every packet loaded by a long-running process was
        # stamped with the same stale timestamp. Evaluate per-call instead;
        # passing an explicit ``received`` behaves exactly as before.
        if received is None:
            received = pytz.UTC.localize(datetime.utcnow())
        ivorn = root.attrib['ivorn']
        # Stream: everything before the '#' separator,
        # with the prefix 'ivo://' removed:
        stream = ivorn.split('#')[0][6:]
        row = Voevent(ivorn=ivorn,
                      role=root.attrib['role'],
                      version=root.attrib['version'],
                      stream=stream,
                      xml=vp.dumps(root),
                      received=received,
                      )
        row.author_datetime = _grab_xpath(root, 'Who/Date',
                                          converter=iso8601.parse_date)
        row.author_ivorn = _grab_xpath(root, 'Who/AuthorIVORN')
        row.cites = Cite.from_etree(root)
        if not _has_bad_coords(root, stream):
            try:
                row.coords = Coord.from_etree(root)
            except:
                # Best-effort: a coord-parsing failure shouldn't block ingest.
                logger.exception(
                    'Error loading coords for ivorn {}, coords dropped.'.format(
                        ivorn)
                )
        return row

    def _reformatted_prettydict(self, valformat=str):
        # NOTE(review): ``prettydict`` is not defined on this class, so this
        # helper raises AttributeError if called - looks like dead code;
        # possibly ``to_odict`` was intended. Confirm before use.
        pd = self.prettydict()
        return '\n'.join(
            ("{}={}".format(k, valformat(v)) for k, v in pd.items()))

    def __repr__(self):
        od = self.to_odict()
        # Bugfix: items() instead of the Python-2-only iteritems().
        content = ',\n'.join(
            ("{}={}".format(k, repr(v)) for k, v in od.items()))
        return """<Voevent({})>""".format(content)

    def __str__(self):
        od = self.to_odict()
        od.pop('xml')  # The raw XML blob is too noisy for display.
        # Bugfix: items() instead of the Python-2-only iteritems().
        content = ',\n '.join(
            ("{}={}".format(k, str(v)) for k, v in od.items()))
        return """<Voevent({})>""".format(content)
class Cite(Base, OdictMixin):
    """
    Record the references ('Cites') contained in each VOEvent.
    Relationship is one Voevent -> Many Cites.

    .. note:: On naming
        `Reference` would be a more appropriate class name,
        since in the conventions of bibliometrics, 'references are made,
        and citations are received'.
        However, 'references' is a reserved Postgres word, cf
        http://www.postgresql.org/docs/9.3/static/sql-keywords-appendix.html .
        Grammatically speaking, `cite` is a valid noun form, in addition to
        verb: http://www.grammarphobia.com/blog/2011/10/cite.html And it's much
        shorter than 'citation'.

    .. note:: On store-by-value vs store-by-reference
        NB, we store the ref_ivorn string values in the Cite table rows. This is
        quite inefficient compared to referencing the ID of a Voevent that has
        been previously loaded (in the case that one IVORN is cited by many
        Voevents). However, it's necessary, since we may see an IVORN cited for
        which we don't have the primary entry. If this inefficiency ever becomes
        an issue, I can imagine various schemes where e.g. a Voevent is created
        with just a bare IVORN and no other data if it's cited but not ingested,
        with a flag-bit set accordingly. Or we could create a separate 'cited
        IVORNS' table. But probably you ain't gonna need it.

    .. note:: On descriptions
        Note that technically there's a slight model mismatch here: What we're
        really modelling are the EventIVORN entries in the Citations section of
        the VOEvent, which typically share a description between them. This may
        result in duplicated descriptions (but most packets only have a single
        reference anyway).
    """
    __tablename__ = 'cite'
    id = Column(sql.Integer, primary_key=True)
    voevent_id = Column(sql.Integer, ForeignKey(Voevent.id))
    ref_ivorn = Column(sql.String, nullable=False, index=True)
    cite_type = Column(sql.Enum(vp.definitions.cite_types.followup,
                                vp.definitions.cite_types.retraction,
                                vp.definitions.cite_types.supersedes,
                                name="cite_types_enum",
                                ),
                       nullable=False
                       )
    description = Column(sql.String)

    @staticmethod
    def from_etree(root):
        """
        Load up the citations, if present, for initializing with the Voevent.
        """
        cite_list = []
        citations = root.xpath('Citations/EventIVORN')
        if citations:
            # The (optional) description is shared by all EventIVORN entries.
            description = root.xpath('Citations/Description')
            if description:
                description_text = description[0].text
            else:
                description_text = None
            for entry in root.Citations.EventIVORN:
                if entry.text:
                    cite_list.append(
                        Cite(ref_ivorn=entry.text,
                             cite_type=entry.attrib['cite'],
                             description=description_text)
                    )
                else:
                    # Some real-world packets contain empty EventIVORN tags.
                    logger.info(
                        'Ignoring empty citation in {}'.format(
                            root.attrib['ivorn']))
        return cite_list

    def __repr__(self):
        od = self.to_odict()
        # Bugfix: items() instead of the Python-2-only iteritems(), which
        # raises AttributeError under Python 3.
        content = ',\n'.join(
            ("{}={}".format(k, repr(v)) for k, v in od.items()))
        return """<Cite({})>""".format(content)
class Coord(Base, OdictMixin):
    """
    Represents a co-ordinate position.
    I.e. an entry in the WhereWhen section of a VOEvent.

    For these entries to be of any use, we must choose a single standard format
    from the wide array of possible VOEvent / STC recommended co-ordinate
    systems and representations. See
    http://www.ivoa.net/documents/REC/DM/STC-20071030.html
    for reference.

    Nominally, we will adopt UTC as the time-system, ICRS decimal degrees as the
    celestial system / representation, and GEO as the reference position.
    In practice, we take a relaxed attitude where GEO / TOPO are assumed
    approximately equal, as are FK5/ICRS, and hence any matching substitutes are
    loaded into the database without further co-ordinate transformation.

    Additional transformation code may be implemented in future as requirements
    and developer time dictate. As a fallback, the client can always request the
    XML packet and inspect the native VOEvent representation for themselves,
    assuming that other fields / naively parsed co-ordinates can be used to
    restrict the number of plausibly relevant packets.
    """
    __tablename__ = 'coord'
    id = Column(sql.Integer, primary_key=True)
    voevent_id = Column(sql.Integer, ForeignKey(Voevent.id))
    # RA / Dec are stored in decimal degrees (see class docstring).
    ra = Column(sql.Float, nullable=False, index=True)
    dec = Column(sql.Float, nullable=False, index=True)
    error = Column(
        sql.Float,
        doc="Error-circle radius associated with coordinate-position (degrees)"
    )
    time = Column(
        sql.DateTime(timezone=True), nullable=True,
        doc="Records timestamp associated with co-ordinate position of event"
    )

    @staticmethod
    def from_etree(root):
        """
        Load up the coords, if present, for initializing with the Voevent.

        .. note::
            Current implementation is quite slack with regard to co-ordinate
            systems - it is assumed that, for purposes of searching the database
            using spatial queries, the FK5 / ICRS reference systems and and
            geocentric/barycentric reference frames are sufficiently similar
            that we can just take the RA/Dec and insert it 'as-is' into the
            database.
            This is partly justified on the assumption that anyone in
            need of ultra-high precision co-ordinates will need to take
            account of mission specific properties anyway (e.g. position
            of GAIA at time of detection) and so will only be using the
            spatial query for a coarse search, then parsing packets
            to determine precise locations.
        """
        # Systems accepted verbatim: FK5~ICRS and GEO/TOPO/BARY are treated
        # as interchangeable for coarse searching (see docstring above).
        acceptable_coord_systems = (
            vp.definitions.sky_coord_system.utc_fk5_geo,
            vp.definitions.sky_coord_system.utc_fk5_topo,
            vp.definitions.sky_coord_system.utc_icrs_geo,
            vp.definitions.sky_coord_system.utc_icrs_topo,
            vp.definitions.sky_coord_system.tdb_fk5_bary,
            vp.definitions.sky_coord_system.tdb_icrs_bary,
        )
        position_list = []
        astrocoords = root.xpath(
            'WhereWhen/ObsDataLocation/ObservationLocation/AstroCoords'
        )
        if astrocoords:
            # One Coord row per AstroCoords entry in the packet.
            for idx, entry in enumerate(astrocoords):
                posn = vp.get_event_position(root,idx)
                if posn.system not in acceptable_coord_systems:
                    raise NotImplementedError(
                        "Loading position from coord-sys "
                        "is not yet implemented: {} ".format(
                            posn.system
                        )
                    )
                if posn.units != vp.definitions.units.degrees:
                    raise NotImplementedError(
                        "Loading positions in formats other than degrees "
                        "is not yet implemented."
                    )
                try:
                    isotime = vp.get_event_time_as_utc(root,idx)
                except:
                    # Broad catch is deliberate best-effort: a malformed
                    # timestamp should not discard the position itself.
                    logger.warning(
                        "Error pulling event time for ivorn {}, "
                        "setting to NULL".format(root.attrib['ivorn'])
                    )
                    isotime = None
                position_list.append(
                    Coord(ra = posn.ra,
                          dec = posn.dec,
                          error = posn.err,
                          time = isotime)
                    )
        return position_list
# Q3C indexes for spatial queries:
Index('q3c_coord_idx', func.q3c_ang2ipix(Coord.ra, Coord.dec)) |
timstaley/voeventdb | voeventdb/server/database/models.py | _has_bad_coords | python | def _has_bad_coords(root, stream):
if stream == "com.dc3/dc3.broker":
return True
if not stream.split('/')[0] == 'nasa.gsfc.gcn':
return False
toplevel_params = vp.get_toplevel_params(root)
if "Coords_String" in toplevel_params:
if (toplevel_params["Coords_String"]['value'] ==
"unavailable/inappropriate"):
return True
return False | Predicate function encapsulating 'data clean up' filter code.
Currently minimal, but these sort of functions tend to grow over time.
Problem 1:
Some of the GCN packets have an RA /Dec equal to (0,0) in the WhereWhen,
and a flag in the What signifying that those are actually dummy co-ords.
(This is used for time-stamping an event which is not localised).
So, we don't load those positions, to avoid muddying the database
corpus.
Problem 2:
com.dc3/dc3.broker#BrokerTest packets have dummy RA/Dec values,
with no units specified.
(They're also marked role=test, so it's not such a big deal,
but it generates a lot of debug-log churn.) | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/models.py#L28-L56 | null | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (backref, deferred, relationship,
)
from sqlalchemy import Column, ForeignKey, Index, func
import sqlalchemy as sql
import voeventparse as vp
from datetime import datetime
import iso8601
import pytz
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
Base = declarative_base()
def _grab_xpath(root, xpath, converter=lambda x: x):
"""
XML convenience - grabs the first element at xpath if present, else returns None.
"""
elements = root.xpath(xpath)
if elements:
return converter(str(elements[0]))
else:
return None
class OdictMixin(object):
def to_odict(self, exclude=None):
"""
Returns an OrderedDict representation of the SQLalchemy table row.
"""
if exclude is None:
exclude = tuple()
colnames = [c.name for c in self.__table__.columns
if c.name not in exclude]
return OrderedDict(((col, getattr(self, col)) for col in colnames))
class Voevent(Base, OdictMixin):
"""
Define the core VOEvent table.
.. NOTE::
On datetimes:
We store datetimes 'with timezone' even though we'll use the convention
of storing UTC throughout (and VOEvents are UTC too).
This helps to make explicit what convention we're using and avoid
any possible timezone-naive mixups down the line.
However, if this ever gets used at (really large!) scale, then may
need to be wary of issues with partitioning really large datasets, cf:
http://justatheory.com/computers/databases/postgresql/use-timestamptz.html
http://www.postgresql.org/docs/9.1/static/ddl-partitioning.html
"""
__tablename__ = 'voevent'
# Basics: Attributes or associated metadata present for almost every VOEvent:
id = Column(sql.Integer, primary_key=True)
received = Column(
sql.DateTime(timezone=True), nullable=False,
doc="Records when the packet was loaded into the database"
)
ivorn = Column(sql.String, nullable=False, unique=True, index=True)
stream = Column(sql.String, index=True)
role = Column(sql.Enum(vp.definitions.roles.observation,
vp.definitions.roles.prediction,
vp.definitions.roles.utility,
vp.definitions.roles.test,
name="roles_enum",
),
index=True
)
version = Column(sql.String)
# Who
author_ivorn = Column(sql.String)
author_datetime = Column(sql.DateTime(timezone=True))
# Finally, the raw XML. Mark this for lazy-loading, cf:
# http://docs.sqlalchemy.org/en/latest/orm/loading_columns.html
xml = deferred(Column(sql.LargeBinary))
cites = relationship("Cite", backref=backref('voevent', order_by=id),
cascade="all, delete, delete-orphan")
coords = relationship('Coord', backref=backref('voevent', order_by=id),
cascade="all, delete, delete-orphan")
@staticmethod
def from_etree(root, received=pytz.UTC.localize(datetime.utcnow())):
"""
Init a Voevent row from an LXML etree loaded with voevent-parse
"""
ivorn = root.attrib['ivorn']
# Stream- Everything except before the '#' separator,
# with the prefix 'ivo://' removed:
stream = ivorn.split('#')[0][6:]
row = Voevent(ivorn=ivorn,
role=root.attrib['role'],
version=root.attrib['version'],
stream=stream,
xml=vp.dumps(root),
received=received,
)
row.author_datetime = _grab_xpath(root, 'Who/Date',
converter=iso8601.parse_date)
row.author_ivorn = _grab_xpath(root, 'Who/AuthorIVORN')
row.cites = Cite.from_etree(root)
if not _has_bad_coords(root, stream):
try:
row.coords = Coord.from_etree(root)
except:
logger.exception(
'Error loading coords for ivorn {}, coords dropped.'.format(
ivorn)
)
return row
def _reformatted_prettydict(self, valformat=str):
pd = self.prettydict()
return '\n'.join(
("{}={}".format(k, valformat(v)) for k, v in pd.iteritems()))
def __repr__(self):
od = self.to_odict()
content = ',\n'.join(
("{}={}".format(k, repr(v)) for k, v in od.iteritems()))
return """<Voevent({})>""".format(content)
def __str__(self):
od = self.to_odict()
od.pop('xml')
content = ',\n '.join(
("{}={}".format(k, str(v)) for k, v in od.iteritems()))
return """<Voevent({})>""".format(content)
class Cite(Base, OdictMixin):
    """
    Record the references ('Cites') contained in each VOEvent.

    Relationship is one Voevent -> Many Cites.

    .. note:: On naming
        `Reference` would be a more appropriate class name,
        since in the conventions of bibliometrics, 'references are made,
        and citations are received'.
        However, 'references' is a reserved Postgres word, cf
        http://www.postgresql.org/docs/9.3/static/sql-keywords-appendix.html .
        Grammatically speaking, `cite` is a valid noun form, in addition to
        verb: http://www.grammarphobia.com/blog/2011/10/cite.html And it's much
        shorter than 'citation'.

    .. note:: On store-by-value vs store-by-reference
        NB, we store the ref_ivorn string values in the Cite table rows. This is
        quite inefficient compared to referencing the ID of a Voevent that has
        been previously loaded (in the case that one IVORN is cited by many
        Voevents). However, it's necessary, since we may see an IVORN cited for
        which we don't have the primary entry. If this inefficiency ever becomes
        an issue, I can imagine various schemes where e.g. a Voevent is created
        with just a bare IVORN and no other data if it's cited but not ingested,
        with a flag-bit set accordingly. Or we could create a separate 'cited
        IVORNS' table. But probably you ain't gonna need it.

    .. note:: On descriptions
        Note that technically there's a slight model mismatch here: What we're
        really modelling are the EventIVORN entries in the Citations section of
        the VOEvent, which typically share a description between them. This may
        result in duplicated descriptions (but most packets only have a single
        reference anyway).
    """
    __tablename__ = 'cite'
    id = Column(sql.Integer, primary_key=True)
    # Many-to-one link back to the citing Voevent row.
    voevent_id = Column(sql.Integer, ForeignKey(Voevent.id))
    # IVORN string of the *cited* packet, stored by value (see class docstring).
    ref_ivorn = Column(sql.String, nullable=False, index=True)
    # Closed set of citation relations allowed by the VOEvent spec.
    cite_type = Column(sql.Enum(vp.definitions.cite_types.followup,
                                vp.definitions.cite_types.retraction,
                                vp.definitions.cite_types.supersedes,
                                name="cite_types_enum",
                                ),
                       nullable=False
                       )
    # Free-text description shared by all EventIVORN entries of one packet.
    description = Column(sql.String)
    @staticmethod
    def from_etree(root):
        """
        Load up the citations, if present, for initializing with the Voevent.

        Args:
            root: lxml etree root of a VOEvent packet (voeventparse-loaded).

        Returns:
            list: One Cite instance per non-empty ``Citations/EventIVORN``
            entry; empty list when the packet carries no citations.
        """
        cite_list = []
        citations = root.xpath('Citations/EventIVORN')
        if citations:
            # A single optional Description element is shared by every
            # EventIVORN entry in the packet (see class docstring).
            description = root.xpath('Citations/Description')
            if description:
                description_text = description[0].text
            else:
                description_text = None
            for entry in root.Citations.EventIVORN:
                if entry.text:
                    cite_list.append(
                        Cite(ref_ivorn=entry.text,
                             cite_type=entry.attrib['cite'],
                             description=description_text)
                    )
                else:
                    # An empty EventIVORN element carries no reference - skip it.
                    logger.info(
                        'Ignoring empty citation in {}'.format(
                            root.attrib['ivorn']))
        return cite_list
    def __repr__(self):
        # NOTE(review): od.iteritems() is Python 2 only - raises AttributeError
        # on Python 3 dicts; confirm target interpreter (or use items()).
        od = self.to_odict()
        content = ',\n'.join(
            ("{}={}".format(k, repr(v)) for k, v in od.iteritems()))
        return """<Cite({})>""".format(content)
class Coord(Base, OdictMixin):
    """
    Represents a co-ordinate position.

    I.e. an entry in the WhereWhen section of a VOEvent.

    For these entries to be of any use, we must choose a single standard format
    from the wide array of possible VOEvent / STC recommended co-ordinate
    systems and representations. See
    http://www.ivoa.net/documents/REC/DM/STC-20071030.html
    for reference.

    Nominally, we will adopt UTC as the time-system, ICRS decimal degrees as the
    celestial system / representation, and GEO as the reference position.
    In practice, we take a relaxed attitude where GEO / TOPO are assumed
    approximately equal, as are FK5/ICRS, and hence any matching substitutes are
    loaded into the database without further co-ordinate transformation.

    Additional transformation code may be implemented in future as requirements
    and developer time dictate. As a fallback, the client can always request the
    XML packet and inspect the native VOEvent representation for themselves,
    assuming that other fields / naively parsed co-ordinates can be used to
    restrict the number of plausibly relevant packets.
    """
    __tablename__ = 'coord'
    id = Column(sql.Integer, primary_key=True)
    # Many-to-one link back to the owning Voevent row.
    voevent_id = Column(sql.Integer, ForeignKey(Voevent.id))
    # Position in decimal degrees (see class docstring on reference systems).
    ra = Column(sql.Float, nullable=False, index=True)
    dec = Column(sql.Float, nullable=False, index=True)
    error = Column(
        sql.Float,
        doc="Error-circle radius associated with coordinate-position (degrees)"
    )
    time = Column(
        sql.DateTime(timezone=True), nullable=True,
        doc="Records timestamp associated with co-ordinate position of event"
    )
    @staticmethod
    def from_etree(root):
        """
        Load up the coords, if present, for initializing with the Voevent.

        .. note::
            Current implementation is quite slack with regard to co-ordinate
            systems - it is assumed that, for purposes of searching the database
            using spatial queries, the FK5 / ICRS reference systems and and
            geocentric/barycentric reference frames are sufficiently similar
            that we can just take the RA/Dec and insert it 'as-is' into the
            database.

            This is partly justified on the assumption that anyone in
            need of ultra-high precision co-ordinates will need to take
            account of mission specific properties anyway (e.g. position
            of GAIA at time of detection) and so will only be using the
            spatial query for a coarse search, then parsing packets
            to determine precise locations.
        """
        # Co-ordinate systems treated as interchangeable for naive RA/Dec
        # ingest (see the note above).
        acceptable_coord_systems = (
            vp.definitions.sky_coord_system.utc_fk5_geo,
            vp.definitions.sky_coord_system.utc_fk5_topo,
            vp.definitions.sky_coord_system.utc_icrs_geo,
            vp.definitions.sky_coord_system.utc_icrs_topo,
            vp.definitions.sky_coord_system.tdb_fk5_bary,
            vp.definitions.sky_coord_system.tdb_icrs_bary,
        )
        position_list = []
        astrocoords = root.xpath(
            'WhereWhen/ObsDataLocation/ObservationLocation/AstroCoords'
        )
        if astrocoords:
            for idx, entry in enumerate(astrocoords):
                posn = vp.get_event_position(root,idx)
                if posn.system not in acceptable_coord_systems:
                    raise NotImplementedError(
                        "Loading position from coord-sys "
                        "is not yet implemented: {} ".format(
                            posn.system
                        )
                    )
                if posn.units != vp.definitions.units.degrees:
                    raise NotImplementedError(
                        "Loading positions in formats other than degrees "
                        "is not yet implemented."
                    )
                # NOTE(review): bare except - any failure (including e.g.
                # KeyboardInterrupt) downgrades the timestamp to NULL;
                # consider narrowing to Exception.
                try:
                    isotime = vp.get_event_time_as_utc(root,idx)
                except:
                    logger.warning(
                        "Error pulling event time for ivorn {}, "
                        "setting to NULL".format(root.attrib['ivorn'])
                    )
                    isotime = None
                position_list.append(
                    Coord(ra = posn.ra,
                          dec = posn.dec,
                          error = posn.err,
                          time = isotime)
                )
        return position_list
# Q3C indexes for spatial queries:
Index('q3c_coord_idx', func.q3c_ang2ipix(Coord.ra, Coord.dec)) |
timstaley/voeventdb | voeventdb/server/database/models.py | OdictMixin.to_odict | python | def to_odict(self, exclude=None):
if exclude is None:
exclude = tuple()
colnames = [c.name for c in self.__table__.columns
if c.name not in exclude]
return OrderedDict(((col, getattr(self, col)) for col in colnames)) | Returns an OrderedDict representation of the SQLalchemy table row. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/models.py#L62-L70 | null | class OdictMixin(object):
|
timstaley/voeventdb | voeventdb/server/database/models.py | Voevent.from_etree | python | def from_etree(root, received=pytz.UTC.localize(datetime.utcnow())):
ivorn = root.attrib['ivorn']
# Stream- Everything except before the '#' separator,
# with the prefix 'ivo://' removed:
stream = ivorn.split('#')[0][6:]
row = Voevent(ivorn=ivorn,
role=root.attrib['role'],
version=root.attrib['version'],
stream=stream,
xml=vp.dumps(root),
received=received,
)
row.author_datetime = _grab_xpath(root, 'Who/Date',
converter=iso8601.parse_date)
row.author_ivorn = _grab_xpath(root, 'Who/AuthorIVORN')
row.cites = Cite.from_etree(root)
if not _has_bad_coords(root, stream):
try:
row.coords = Coord.from_etree(root)
except:
logger.exception(
'Error loading coords for ivorn {}, coords dropped.'.format(
ivorn)
)
return row | Init a Voevent row from an LXML etree loaded with voevent-parse | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/models.py#L122-L150 | [
"def _grab_xpath(root, xpath, converter=lambda x: x):\n \"\"\"\n XML convenience - grabs the first element at xpath if present, else returns None.\n \"\"\"\n elements = root.xpath(xpath)\n if elements:\n return converter(str(elements[0]))\n else:\n return None\n",
"def _has_bad_coo... | class Voevent(Base, OdictMixin):
"""
Define the core VOEvent table.
.. NOTE::
On datetimes:
We store datetimes 'with timezone' even though we'll use the convention
of storing UTC throughout (and VOEvents are UTC too).
This helps to make explicit what convention we're using and avoid
any possible timezone-naive mixups down the line.
However, if this ever gets used at (really large!) scale, then may
need to be wary of issues with partitioning really large datasets, cf:
http://justatheory.com/computers/databases/postgresql/use-timestamptz.html
http://www.postgresql.org/docs/9.1/static/ddl-partitioning.html
"""
__tablename__ = 'voevent'
# Basics: Attributes or associated metadata present for almost every VOEvent:
id = Column(sql.Integer, primary_key=True)
received = Column(
sql.DateTime(timezone=True), nullable=False,
doc="Records when the packet was loaded into the database"
)
ivorn = Column(sql.String, nullable=False, unique=True, index=True)
stream = Column(sql.String, index=True)
role = Column(sql.Enum(vp.definitions.roles.observation,
vp.definitions.roles.prediction,
vp.definitions.roles.utility,
vp.definitions.roles.test,
name="roles_enum",
),
index=True
)
version = Column(sql.String)
# Who
author_ivorn = Column(sql.String)
author_datetime = Column(sql.DateTime(timezone=True))
# Finally, the raw XML. Mark this for lazy-loading, cf:
# http://docs.sqlalchemy.org/en/latest/orm/loading_columns.html
xml = deferred(Column(sql.LargeBinary))
cites = relationship("Cite", backref=backref('voevent', order_by=id),
cascade="all, delete, delete-orphan")
coords = relationship('Coord', backref=backref('voevent', order_by=id),
cascade="all, delete, delete-orphan")
@staticmethod
def _reformatted_prettydict(self, valformat=str):
pd = self.prettydict()
return '\n'.join(
("{}={}".format(k, valformat(v)) for k, v in pd.iteritems()))
def __repr__(self):
od = self.to_odict()
content = ',\n'.join(
("{}={}".format(k, repr(v)) for k, v in od.iteritems()))
return """<Voevent({})>""".format(content)
def __str__(self):
od = self.to_odict()
od.pop('xml')
content = ',\n '.join(
("{}={}".format(k, str(v)) for k, v in od.iteritems()))
return """<Voevent({})>""".format(content)
|
timstaley/voeventdb | voeventdb/server/database/models.py | Cite.from_etree | python | def from_etree(root):
cite_list = []
citations = root.xpath('Citations/EventIVORN')
if citations:
description = root.xpath('Citations/Description')
if description:
description_text = description[0].text
else:
description_text = None
for entry in root.Citations.EventIVORN:
if entry.text:
cite_list.append(
Cite(ref_ivorn=entry.text,
cite_type=entry.attrib['cite'],
description=description_text)
)
else:
logger.info(
'Ignoring empty citation in {}'.format(
root.attrib['ivorn']))
return cite_list | Load up the citations, if present, for initializing with the Voevent. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/models.py#L225-L248 | null | class Cite(Base, OdictMixin):
"""
Record the references ('Cites') contained in each VOEvent.
Relationship is one Voevent -> Many Cites.
.. note:: On naming
`Reference` would be a more appropriate class name,
since in the conventions of bibliometrics, 'references are made,
and citations are received'.
However, 'references' is a reserved Postgres word, cf
http://www.postgresql.org/docs/9.3/static/sql-keywords-appendix.html .
Grammatically speaking, `cite` is a valid noun form, in addition to
verb: http://www.grammarphobia.com/blog/2011/10/cite.html And it's much
shorter than 'citation'.
.. note:: On store-by-value vs store-by-reference
NB, we store the ref_ivorn string values in the Cite table rows. This is
quite inefficient compared to referencing the ID of a Voevent that has
been previously loaded (in the case that one IVORN is cited by many
Voevents). However, it's necessary, since we may see an IVORN cited for
which we don't have the primary entry. If this inefficiency ever becomes
an issue, I can imagine various schemes where e.g. a Voevent is created
with just a bare IVORN and no other data if it's cited but not ingested,
with a flag-bit set accordingly. Or we could create a separate 'cited
IVORNS' table. But probably you ain't gonna need it.
.. note:: On descriptions
Note that technically there's a slight model mismatch here: What we're
really modelling are the EventIVORN entries in the Citations section of
the VOEvent, which typically share a description between them. This may
result in duplicated descriptions (but most packets only have a single
reference anyway).
"""
__tablename__ = 'cite'
id = Column(sql.Integer, primary_key=True)
voevent_id = Column(sql.Integer, ForeignKey(Voevent.id))
ref_ivorn = Column(sql.String, nullable=False, index=True)
cite_type = Column(sql.Enum(vp.definitions.cite_types.followup,
vp.definitions.cite_types.retraction,
vp.definitions.cite_types.supersedes,
name="cite_types_enum",
),
nullable=False
)
description = Column(sql.String)
@staticmethod
def __repr__(self):
od = self.to_odict()
content = ',\n'.join(
("{}={}".format(k, repr(v)) for k, v in od.iteritems()))
return """<Cite({})>""".format(content)
|
timstaley/voeventdb | voeventdb/server/database/models.py | Coord.from_etree | python | def from_etree(root):
acceptable_coord_systems = (
vp.definitions.sky_coord_system.utc_fk5_geo,
vp.definitions.sky_coord_system.utc_fk5_topo,
vp.definitions.sky_coord_system.utc_icrs_geo,
vp.definitions.sky_coord_system.utc_icrs_topo,
vp.definitions.sky_coord_system.tdb_fk5_bary,
vp.definitions.sky_coord_system.tdb_icrs_bary,
)
position_list = []
astrocoords = root.xpath(
'WhereWhen/ObsDataLocation/ObservationLocation/AstroCoords'
)
if astrocoords:
for idx, entry in enumerate(astrocoords):
posn = vp.get_event_position(root,idx)
if posn.system not in acceptable_coord_systems:
raise NotImplementedError(
"Loading position from coord-sys "
"is not yet implemented: {} ".format(
posn.system
)
)
if posn.units != vp.definitions.units.degrees:
raise NotImplementedError(
"Loading positions in formats other than degrees "
"is not yet implemented."
)
try:
isotime = vp.get_event_time_as_utc(root,idx)
except:
logger.warning(
"Error pulling event time for ivorn {}, "
"setting to NULL".format(root.attrib['ivorn'])
)
isotime = None
position_list.append(
Coord(ra = posn.ra,
dec = posn.dec,
error = posn.err,
time = isotime)
)
return position_list | Load up the coords, if present, for initializing with the Voevent.
.. note::
Current implementation is quite slack with regard to co-ordinate
systems - it is assumed that, for purposes of searching the database
using spatial queries, the FK5 / ICRS reference systems and and
geocentric/barycentric reference frames are sufficiently similar
that we can just take the RA/Dec and insert it 'as-is' into the
database.
This is partly justified on the assumption that anyone in
need of ultra-high precision co-ordinates will need to take
account of mission specific properties anyway (e.g. position
of GAIA at time of detection) and so will only be using the
spatial query for a coarse search, then parsing packets
to determine precise locations. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/models.py#L298-L362 | null | class Coord(Base, OdictMixin):
"""
Represents a co-ordinate position.
I.e. an entry in the WhereWhen section of a VOEvent.
For these entries to be of any use, we must choose a single standard format
from the wide array of possible VOEvent / STC recommended co-ordinate
systems and representations. See
http://www.ivoa.net/documents/REC/DM/STC-20071030.html
for reference.
Nominally, we will adopt UTC as the time-system, ICRS decimal degrees as the
celestial system / representation, and GEO as the reference position.
In practice, we take a relaxed attitude where GEO / TOPO are assumed
approximately equal, as are FK5/ICRS, and hence any matching substitutes are
loaded into the database without further co-ordinate transformation.
Additional transformation code may be implemented in future as requirements
and developer time dictate. As a fallback, the client can always request the
XML packet and inspect the native VOEvent representation for themselves,
assuming that other fields / naively parsed co-ordinates can be used to
restrict the number of plausibly relevant packets.
"""
__tablename__ = 'coord'
id = Column(sql.Integer, primary_key=True)
voevent_id = Column(sql.Integer, ForeignKey(Voevent.id))
ra = Column(sql.Float, nullable=False, index=True)
dec = Column(sql.Float, nullable=False, index=True)
error = Column(
sql.Float,
doc="Error-circle radius associated with coordinate-position (degrees)"
)
time = Column(
sql.DateTime(timezone=True), nullable=True,
doc="Records timestamp associated with co-ordinate position of event"
)
@staticmethod
|
timstaley/voeventdb | voeventdb/server/utils/filestore.py | write_tarball | python | def write_tarball(voevents, filepath):
tuple_gen = ( (v.ivorn, v.xml) for v in voevents)
return write_tarball_from_ivorn_xml_tuples(tuple_gen,
filepath) | Iterate over voevent models / dbrows and write to bz'd tarball.
Args:
voevents (iterable): An iterable (e.g. list) of e.g. Voevent db-rows,
with access to the 'ivorn' and 'xml' attributes.
filepath (string): Path to the new tarball to create. Typically of form
'/path/to/foo.tar.bz2'
Returns
packet_count (int): Number of packets written to tarball | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/utils/filestore.py#L46-L60 | [
"def write_tarball_from_ivorn_xml_tuples(ivorn_xml_tuples, filepath):\n \"\"\"\n Iterate over a series of ivorn / xml bstring tuples and write to bz'd tarball.\n\n Args:\n ivorn_xml_tuples (iterable): [(ivorn,xml)]\n An iterable (e.g. list) of tuples containing two entries -\n ... | import logging
import tarfile
import voeventparse
from collections import namedtuple
from io import BytesIO
import six
logger = logging.getLogger(__name__)
def bytestring_to_tar_tuple(filename, bytes):
    """
    Take a string + filename, return a (tarinfo, stringbuf) tuple for insertion.

    Args:
        bytes (bstring): Bytestring representation of the filedata.
        filename (string): Filepath relative to tarfile root.
    Returns:
        tuple: (tarfile.TarInfo,io.BytesIO).
        This can be passed directly to TarFile.addfile().
    """
    tarinfo = tarfile.TarInfo(filename)
    tarinfo.size = len(bytes)
    filebuf = BytesIO(bytes)
    return tarinfo, filebuf
def filename_from_ivorn(ivorn):
    """
    Derive a sensible folder / filename path from ivorn.

    Args:
        ivorn (string): IVORN identifier including 'ivo://' prefix.
    Returns:
        string: relative path for xml output file.
    """
    # Drop the 'ivo:' scheme portion, then map the '#' local-part separator
    # onto a directory boundary.
    relative = ivorn.split('//')[1]
    return relative.replace('#', '/') + '.xml'
def voevent_etree_to_ivorn_xml_tuple(voevent):
    """
    Convert a loaded VOEvent etree to an (ivorn, xml-bytestring) pair.

    Args:
        voevent (etree): Root of an lxml.etree loaded with voeventparse.
    """
    ivorn = voevent.attrib['ivorn']
    xml = voeventparse.dumps(voevent)
    return ivorn, xml
def write_tarball_from_ivorn_xml_tuples(ivorn_xml_tuples, filepath):
    """
    Iterate over a series of ivorn / xml bstring tuples and write to bz'd tarball.

    Args:
        ivorn_xml_tuples (iterable): [(ivorn,xml)]
            An iterable (e.g. list) of tuples containing two entries -
            an ivorn string and an xml bytestring.
        filepath (string): Path to the new tarball to create. Typically of form
            '/path/to/foo.tar.bz2'
    Returns
        packet_count (int): Number of packets written to tarball
    """
    packet_count = 0
    out = tarfile.open(filepath, mode='w:bz2')
    logger.info("Writing packets to tarball at " + filepath)
    try:
        for ivorn, xml in ivorn_xml_tuples:
            # Each packet becomes one member file, pathed by its IVORN.
            member_info, member_buf = bytestring_to_tar_tuple(
                filename_from_ivorn(ivorn),
                xml
            )
            out.addfile(member_info, member_buf)
            packet_count += 1
    finally:
        # Always close so a partial tarball is at least well-formed on disk.
        out.close()
    return packet_count
class TarXML(namedtuple('TarXML', 'name xml')):
    """
    A namedtuple pairing a tarball member filename with its raw XML bytes.

    Attributes:
        name (str): Filename from the tarball
        xml (builtins.bytes): Bytestring containing the raw XML data.
    """
def tarfile_xml_generator(fname):
    """
    Generator for iterating through xml files in a tarball.

    Yields TarXML namedtuples (member filename, raw XML bytestring).

    Example usage::

        xmlgen = tarfile_xml_generator(fname)
        xml0 = next(xmlgen)
        for pkt in xmlgen:
            foo(pkt)
    """
    tf = tarfile.open(fname, mode='r')
    try:
        while True:
            tarinf = tf.next()
            if tarinf is None:
                break
            if tarinf.isfile() and tarinf.name.endswith('.xml'):
                fbuf = tf.extractfile(tarinf)
                yield TarXML(name=tarinf.name, xml=fbuf.read())
            # Kludge around tarfile memory leak, cf
            # http://blogs.it.ox.ac.uk/inapickle/2011/06/20/high-memory-usage-when-using-pythons-tarfile-module/
            tf.members = []
    finally:
        tf.close()
|
timstaley/voeventdb | voeventdb/server/utils/filestore.py | write_tarball_from_ivorn_xml_tuples | python | def write_tarball_from_ivorn_xml_tuples(ivorn_xml_tuples, filepath):
out = tarfile.open(filepath, mode='w:bz2')
logger.info("Writing packets to tarball at " + filepath)
packet_count = 0
try:
for (ivorn, xml) in ivorn_xml_tuples:
out.addfile(*bytestring_to_tar_tuple(
filename_from_ivorn(ivorn),
xml
))
packet_count += 1
finally:
out.close()
return packet_count | Iterate over a series of ivorn / xml bstring tuples and write to bz'd tarball.
Args:
ivorn_xml_tuples (iterable): [(ivorn,xml)]
An iterable (e.g. list) of tuples containing two entries -
an ivorn string and an xml bytestring.
filepath (string): Path to the new tarball to create. Typically of form
'/path/to/foo.tar.bz2'
Returns
packet_count (int): Number of packets written to tarball | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/utils/filestore.py#L63-L88 | [
"def bytestring_to_tar_tuple(filename, bytes):\n \"\"\"\n Take a string + filename, return a (tarinfo, stringbuf) tuple for insertion.\n\n Args:\n bytes (bstring): Bytestring representation of the filedata.\n filename (string): Filepath relative to tarfile root.\n Returns:\n tuple: ... | import logging
import tarfile
import voeventparse
from collections import namedtuple
from io import BytesIO
import six
logger = logging.getLogger(__name__)
def bytestring_to_tar_tuple(filename, bytes):
"""
Take a string + filename, return a (tarinfo, stringbuf) tuple for insertion.
Args:
bytes (bstring): Bytestring representation of the filedata.
filename (string): Filepath relative to tarfile root.
Returns:
tuple: (tarfile.TarInfo,io.BytesIO).
This can be passed directly to TarFile.addfile().
"""
info = tarfile.TarInfo(filename)
info.size = len(bytes)
return info, BytesIO(bytes)
def filename_from_ivorn(ivorn):
"""
Derive a sensible folder / filename path from ivorn.
Args:
ivorn (string): IVORN identifier including 'ivo://' prefix.
Returns:
string: relative path for xml output file.
"""
return ivorn.split('//')[1].replace('#', '/') + '.xml'
def voevent_etree_to_ivorn_xml_tuple(voevent):
"""
Args:
voevent (etree): Root of an lxml.etree loaded with voeventparse.
"""
return (voevent.attrib['ivorn'], voeventparse.dumps(voevent))
def write_tarball(voevents, filepath):
    """
    Iterate over voevent models / dbrows and write to bz'd tarball.

    Args:
        voevents (iterable): An iterable (e.g. list) of e.g. Voevent db-rows,
            with access to the 'ivorn' and 'xml' attributes.
        filepath (string): Path to the new tarball to create. Typically of form
            '/path/to/foo.tar.bz2'
    Returns
        packet_count (int): Number of packets written to tarball
    """
    # Adapt the row objects to the (ivorn, xml) pairs the writer expects,
    # lazily so we never hold all packets in memory at once.
    ivorn_xml_pairs = ((row.ivorn, row.xml) for row in voevents)
    return write_tarball_from_ivorn_xml_tuples(ivorn_xml_pairs, filepath)
class TarXML(namedtuple('TarXML', 'name xml')):
"""
A namedtuple for pairing a filename and XML bytestring
Attributes:
name (str): Filename from the tarball
xml (builtins.bytes): Bytestring containing the raw XML data.
"""
pass # Just wrapping a namedtuple so we can assign a docstring.
def tarfile_xml_generator(fname):
"""
Generator for iterating through xml files in a tarball.
Returns strings.
Example usage::
xmlgen = tarfile_xml_generator(fname)
xml0 = next(xmlgen)
for pkt in xmlgen:
foo(pkt)
"""
tf = tarfile.open(fname, mode='r')
try:
tarinf = tf.next()
while tarinf is not None:
if tarinf.isfile() and tarinf.name[-4:] == '.xml':
fbuf = tf.extractfile(tarinf)
yield TarXML(name=tarinf.name, xml=fbuf.read())
tarinf = tf.next()
# Kludge around tarfile memory leak, cf
# http://blogs.it.ox.ac.uk/inapickle/2011/06/20/high-memory-usage-when-using-pythons-tarfile-module/
tf.members = []
finally:
tf.close()
|
timstaley/voeventdb | voeventdb/server/utils/filestore.py | tarfile_xml_generator | python | def tarfile_xml_generator(fname):
tf = tarfile.open(fname, mode='r')
try:
tarinf = tf.next()
while tarinf is not None:
if tarinf.isfile() and tarinf.name[-4:] == '.xml':
fbuf = tf.extractfile(tarinf)
yield TarXML(name=tarinf.name, xml=fbuf.read())
tarinf = tf.next()
# Kludge around tarfile memory leak, cf
# http://blogs.it.ox.ac.uk/inapickle/2011/06/20/high-memory-usage-when-using-pythons-tarfile-module/
tf.members = []
finally:
tf.close() | Generator for iterating through xml files in a tarball.
Returns strings.
Example usage::
xmlgen = tarfile_xml_generator(fname)
xml0 = next(xmlgen)
for pkt in xmlgen:
foo(pkt) | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/utils/filestore.py#L104-L131 | null | import logging
import tarfile
import voeventparse
from collections import namedtuple
from io import BytesIO
import six
logger = logging.getLogger(__name__)
def bytestring_to_tar_tuple(filename, bytes):
"""
Take a string + filename, return a (tarinfo, stringbuf) tuple for insertion.
Args:
bytes (bstring): Bytestring representation of the filedata.
filename (string): Filepath relative to tarfile root.
Returns:
tuple: (tarfile.TarInfo,io.BytesIO).
This can be passed directly to TarFile.addfile().
"""
info = tarfile.TarInfo(filename)
info.size = len(bytes)
return info, BytesIO(bytes)
def filename_from_ivorn(ivorn):
"""
Derive a sensible folder / filename path from ivorn.
Args:
ivorn (string): IVORN identifier including 'ivo://' prefix.
Returns:
string: relative path for xml output file.
"""
return ivorn.split('//')[1].replace('#', '/') + '.xml'
def voevent_etree_to_ivorn_xml_tuple(voevent):
"""
Args:
voevent (etree): Root of an lxml.etree loaded with voeventparse.
"""
return (voevent.attrib['ivorn'], voeventparse.dumps(voevent))
def write_tarball(voevents, filepath):
"""
Iterate over voevent models / dbrows and write to bz'd tarball.
Args:
voevents (iterable): An iterable (e.g. list) of e.g. Voevent db-rows,
with access to the 'ivorn' and 'xml' attributes.
filepath (string): Path to the new tarball to create. Typically of form
'/path/to/foo.tar.bz2'
Returns
packet_count (int): Number of packets written to tarball
"""
tuple_gen = ( (v.ivorn, v.xml) for v in voevents)
return write_tarball_from_ivorn_xml_tuples(tuple_gen,
filepath)
def write_tarball_from_ivorn_xml_tuples(ivorn_xml_tuples, filepath):
"""
Iterate over a series of ivorn / xml bstring tuples and write to bz'd tarball.
Args:
ivorn_xml_tuples (iterable): [(ivorn,xml)]
An iterable (e.g. list) of tuples containing two entries -
an ivorn string and an xml bytestring.
filepath (string): Path to the new tarball to create. Typically of form
'/path/to/foo.tar.bz2'
Returns
packet_count (int): Number of packets written to tarball
"""
out = tarfile.open(filepath, mode='w:bz2')
logger.info("Writing packets to tarball at " + filepath)
packet_count = 0
try:
for (ivorn, xml) in ivorn_xml_tuples:
out.addfile(*bytestring_to_tar_tuple(
filename_from_ivorn(ivorn),
xml
))
packet_count += 1
finally:
out.close()
return packet_count
class TarXML(namedtuple('TarXML', 'name xml')):
"""
A namedtuple for pairing a filename and XML bytestring
Attributes:
name (str): Filename from the tarball
xml (builtins.bytes): Bytestring containing the raw XML data.
"""
pass # Just wrapping a namedtuple so we can assign a docstring.
|
timstaley/voeventdb | voeventdb/server/database/ingest.py | load_from_tarfile | python | def load_from_tarfile(session, tarfile_path, check_for_duplicates,
pkts_per_commit=1000):
tf_stream = tarfile_xml_generator(tarfile_path)
logger.info("Loading: " + tarfile_path)
n_parsed = 0
n_loaded = 0
for tarinf in tf_stream:
try:
v = vp.loads(tarinf.xml, check_version=False)
if v.attrib['version'] != '2.0':
logger.debug(
'Packet: {} is not VO-schema version 2.0.'.format(
tarinf.name))
n_parsed += 1
except:
logger.exception('Error loading file {}, skipping'.format(
tarinf.name))
continue
try:
new_row = Voevent.from_etree(v)
if check_for_duplicates:
if ivorn_present(session, new_row.ivorn):
logger.debug(
"Ignoring duplicate ivorn: {} in file {}".format(
new_row.ivorn, tarinf.name))
continue
session.add(new_row)
n_loaded += 1
except:
logger.exception(
'Error converting file {} to database row, skipping'.
format(tarinf.name))
continue
if n_loaded % pkts_per_commit == 0:
session.commit()
session.commit()
logger.info("Successfully parsed {} packets, of which loaded {}.".format(n_parsed, n_loaded))
return n_parsed, n_loaded | Iterate through xml files in a tarball and attempt to load into database.
.. warning::
Very slow with duplicate checking enabled.
Returns:
tuple: (n_parsed, n_loaded) - Total number of packets parsed from
tarbar, and number successfully loaded. | train | https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/ingest.py#L13-L62 | [
"def ivorn_present(session, ivorn):\n \"\"\"\n Predicate, returns whether the IVORN is in the database.\n \"\"\"\n return bool(\n session.query(Voevent.id).filter(Voevent.ivorn == ivorn).count())\n",
"def tarfile_xml_generator(fname):\n \"\"\"\n Generator for iterating through xml files i... | from __future__ import absolute_import
import voeventparse as vp
from voeventdb.server.utils.filestore import tarfile_xml_generator
from voeventdb.server.database.models import Voevent
from voeventdb.server.database.convenience import ivorn_present
import sys
import logging
logger = logging.getLogger(__name__)
|
jorahn/icy | icy/ml/metrics.py | ae | python | def ae(actual, predicted):
return np.abs(np.array(actual)-np.array(predicted)) | Computes the absolute error.
This function computes the absolute error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The absolute error between actual and predicted | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ml/metrics.py#L5-L25 | null | import numpy as np
from sklearn.metrics import make_scorer
from scipy.stats import sem
def ce(actual, predicted):
    """
    Compute the classification error.

    The classification error is the fraction of positions at which the
    two lists disagree.

    Parameters
    ----------
    actual : list
        A list of the true classes
    predicted : list
        A list of the predicted classes

    Returns
    -------
    score : double
        The classification error between actual and predicted
    """
    mismatches = sum(1.0 for truth, guess in zip(actual, predicted)
                     if truth != guess)
    return mismatches / len(actual)
def mae(actual, predicted):
    """
    Compute the mean absolute error.

    Averages the element-wise absolute errors between two lists of
    numbers (or numpy arrays).

    Parameters
    ----------
    actual : list of numbers, numpy array
        The ground truth values
    predicted : same type as actual
        The predicted values

    Returns
    -------
    score : double
        The mean absolute error between actual and predicted
    """
    absolute_errors = ae(actual, predicted)
    return np.mean(absolute_errors)
def mse(actual, predicted):
    """
    Compute the mean squared error.

    Averages the element-wise squared errors between two lists of
    numbers (or numpy arrays).

    Parameters
    ----------
    actual : list of numbers, numpy array
        The ground truth values
    predicted : same type as actual
        The predicted values

    Returns
    -------
    score : double
        The mean squared error between actual and predicted
    """
    squared_errors = se(actual, predicted)
    return np.mean(squared_errors)
def msle(actual, predicted):
    """
    Compute the mean squared log error.

    Averages the element-wise squared log errors between two lists of
    numbers (or numpy arrays).

    Parameters
    ----------
    actual : list of numbers, numpy array
        The ground truth values
    predicted : same type as actual
        The predicted values

    Returns
    -------
    score : double
        The mean squared log error between actual and predicted
    """
    squared_log_errors = sle(actual, predicted)
    return np.mean(squared_log_errors)
def rmse(actual, predicted):
    """
    Compute the root mean squared error.

    Square root of the mean squared error between two lists of numbers
    (or numpy arrays).

    Parameters
    ----------
    actual : list of numbers, numpy array
        The ground truth values
    predicted : same type as actual
        The predicted values

    Returns
    -------
    score : double
        The root mean squared error between actual and predicted
    """
    mean_squared = mse(actual, predicted)
    return np.sqrt(mean_squared)
def rmsle(actual, predicted):
    """
    Compute the root mean squared log error.

    Square root of the mean squared log error between two lists of
    numbers (or numpy arrays).

    Parameters
    ----------
    actual : list of numbers, numpy array
        The ground truth values
    predicted : same type as actual
        The predicted values

    Returns
    -------
    score : double
        The root mean squared log error between actual and predicted
    """
    mean_squared_log = msle(actual, predicted)
    return np.sqrt(mean_squared_log)
def se(actual, predicted):
    """
    Compute the squared error.

    Works on a pair of numbers, or element-wise on a pair of lists /
    numpy arrays.

    Parameters
    ----------
    actual : int, float, list of numbers, numpy array
        The ground truth value
    predicted : same type as actual
        The predicted value

    Returns
    -------
    score : double or list of doubles
        The squared error between actual and predicted
    """
    residual = np.array(actual) - np.array(predicted)
    return residual ** 2
def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2))
def ll(actual, predicted):
"""
Computes the log likelihood.
This function computes the log likelihood between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The log likelihood error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
err = np.seterr(all='ignore')
score = -(actual*np.log(predicted)+(1-actual)*np.log(1-predicted))
np.seterr(divide=err['divide'], over=err['over'],
under=err['under'], invalid=err['invalid'])
if type(score)==np.ndarray:
score[np.isnan(score)] = 0
else:
if np.isnan(score):
score = 0
return score
def log_loss(actual, predicted):
"""
Computes the log loss.
This function computes the log loss between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The log loss between actual and predicted
"""
return np.mean(ll(actual, predicted))
def rmspe(actual, predicted):
"""
Computes the root mean square percentage error between two lists
of numbers. Ignores elements where actual[n] == 0.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared percentage error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
zeros = actual == 0
actual = actual[~zeros]
predicted = predicted[~zeros]
return np.sqrt(np.sum(np.power((actual - predicted) / actual, 2)) / len(actual))
def scorer(func, greater_is_better=True):
return make_scorer(eval(func), greater_is_better=greater_is_better)
def display_scores(params, scores, append_star=False):
"""Format the mean score +/- std error for params"""
params = ", ".join("{0}={1}".format(k, v)
for k, v in params.items())
line = "{0}:\t\t{1:.3f} (+/-{2:.3f})".format(
params, np.mean(scores), sem(scores))
if append_star:
line += " *"
return line
def display_grid_scores(grid_scores, top=None):
"""Helper function to format a report on a grid of scores"""
grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
if top is not None:
grid_scores = grid_scores[:top]
# Compute a threshold for staring models with overlapping stderr:
_, best_mean, best_scores = grid_scores[0]
threshold = best_mean - 2 * sem(best_scores)
for params, mean_score, scores in grid_scores:
append_star = mean_score + 2 * sem(scores) > threshold
print(display_scores(params, scores, append_star=append_star))
|
jorahn/icy | icy/ml/metrics.py | ce | python | def ce(actual, predicted):
return (sum([1.0 for x,y in zip(actual,predicted) if x != y]) /
len(actual)) | Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ml/metrics.py#L27-L47 | null | import numpy as np
from sklearn.metrics import make_scorer
from scipy.stats import sem
def ae(actual, predicted):
"""
Computes the absolute error.
This function computes the absolute error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The absolute error between actual and predicted
"""
return np.abs(np.array(actual)-np.array(predicted))
def mae(actual, predicted):
"""
Computes the mean absolute error.
This function computes the mean absolute error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean absolute error between actual and predicted
"""
return np.mean(ae(actual, predicted))
def mse(actual, predicted):
"""
Computes the mean squared error.
This function computes the mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared error between actual and predicted
"""
return np.mean(se(actual, predicted))
def msle(actual, predicted):
"""
Computes the mean squared log error.
This function computes the mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared log error between actual and predicted
"""
return np.mean(sle(actual, predicted))
def rmse(actual, predicted):
"""
Computes the root mean squared error.
This function computes the root mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared error between actual and predicted
"""
return np.sqrt(mse(actual, predicted))
def rmsle(actual, predicted):
"""
Computes the root mean squared log error.
This function computes the root mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared log error between actual and predicted
"""
return np.sqrt(msle(actual, predicted))
def se(actual, predicted):
"""
Computes the squared error.
This function computes the squared error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared error between actual and predicted
"""
return np.power(np.array(actual)-np.array(predicted), 2)
def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2))
def ll(actual, predicted):
"""
Computes the log likelihood.
This function computes the log likelihood between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The log likelihood error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
err = np.seterr(all='ignore')
score = -(actual*np.log(predicted)+(1-actual)*np.log(1-predicted))
np.seterr(divide=err['divide'], over=err['over'],
under=err['under'], invalid=err['invalid'])
if type(score)==np.ndarray:
score[np.isnan(score)] = 0
else:
if np.isnan(score):
score = 0
return score
def log_loss(actual, predicted):
"""
Computes the log loss.
This function computes the log loss between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The log loss between actual and predicted
"""
return np.mean(ll(actual, predicted))
def rmspe(actual, predicted):
"""
Computes the root mean square percentage error between two lists
of numbers. Ignores elements where actual[n] == 0.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared percentage error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
zeros = actual == 0
actual = actual[~zeros]
predicted = predicted[~zeros]
return np.sqrt(np.sum(np.power((actual - predicted) / actual, 2)) / len(actual))
def scorer(func, greater_is_better=True):
return make_scorer(eval(func), greater_is_better=greater_is_better)
def display_scores(params, scores, append_star=False):
"""Format the mean score +/- std error for params"""
params = ", ".join("{0}={1}".format(k, v)
for k, v in params.items())
line = "{0}:\t\t{1:.3f} (+/-{2:.3f})".format(
params, np.mean(scores), sem(scores))
if append_star:
line += " *"
return line
def display_grid_scores(grid_scores, top=None):
"""Helper function to format a report on a grid of scores"""
grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
if top is not None:
grid_scores = grid_scores[:top]
# Compute a threshold for staring models with overlapping stderr:
_, best_mean, best_scores = grid_scores[0]
threshold = best_mean - 2 * sem(best_scores)
for params, mean_score, scores in grid_scores:
append_star = mean_score + 2 * sem(scores) > threshold
print(display_scores(params, scores, append_star=append_star))
|
jorahn/icy | icy/ml/metrics.py | se | python | def se(actual, predicted):
return np.power(np.array(actual)-np.array(predicted), 2) | Computes the squared error.
This function computes the squared error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared error between actual and predicted | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ml/metrics.py#L159-L179 | null | import numpy as np
from sklearn.metrics import make_scorer
from scipy.stats import sem
def ae(actual, predicted):
"""
Computes the absolute error.
This function computes the absolute error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The absolute error between actual and predicted
"""
return np.abs(np.array(actual)-np.array(predicted))
def ce(actual, predicted):
"""
Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted
"""
return (sum([1.0 for x,y in zip(actual,predicted) if x != y]) /
len(actual))
def mae(actual, predicted):
"""
Computes the mean absolute error.
This function computes the mean absolute error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean absolute error between actual and predicted
"""
return np.mean(ae(actual, predicted))
def mse(actual, predicted):
"""
Computes the mean squared error.
This function computes the mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared error between actual and predicted
"""
return np.mean(se(actual, predicted))
def msle(actual, predicted):
"""
Computes the mean squared log error.
This function computes the mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared log error between actual and predicted
"""
return np.mean(sle(actual, predicted))
def rmse(actual, predicted):
"""
Computes the root mean squared error.
This function computes the root mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared error between actual and predicted
"""
return np.sqrt(mse(actual, predicted))
def rmsle(actual, predicted):
"""
Computes the root mean squared log error.
This function computes the root mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared log error between actual and predicted
"""
return np.sqrt(msle(actual, predicted))
def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2))
def ll(actual, predicted):
"""
Computes the log likelihood.
This function computes the log likelihood between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The log likelihood error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
err = np.seterr(all='ignore')
score = -(actual*np.log(predicted)+(1-actual)*np.log(1-predicted))
np.seterr(divide=err['divide'], over=err['over'],
under=err['under'], invalid=err['invalid'])
if type(score)==np.ndarray:
score[np.isnan(score)] = 0
else:
if np.isnan(score):
score = 0
return score
def log_loss(actual, predicted):
"""
Computes the log loss.
This function computes the log loss between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The log loss between actual and predicted
"""
return np.mean(ll(actual, predicted))
def rmspe(actual, predicted):
"""
Computes the root mean square percentage error between two lists
of numbers. Ignores elements where actual[n] == 0.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared percentage error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
zeros = actual == 0
actual = actual[~zeros]
predicted = predicted[~zeros]
return np.sqrt(np.sum(np.power((actual - predicted) / actual, 2)) / len(actual))
def scorer(func, greater_is_better=True):
return make_scorer(eval(func), greater_is_better=greater_is_better)
def display_scores(params, scores, append_star=False):
"""Format the mean score +/- std error for params"""
params = ", ".join("{0}={1}".format(k, v)
for k, v in params.items())
line = "{0}:\t\t{1:.3f} (+/-{2:.3f})".format(
params, np.mean(scores), sem(scores))
if append_star:
line += " *"
return line
def display_grid_scores(grid_scores, top=None):
"""Helper function to format a report on a grid of scores"""
grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
if top is not None:
grid_scores = grid_scores[:top]
# Compute a threshold for staring models with overlapping stderr:
_, best_mean, best_scores = grid_scores[0]
threshold = best_mean - 2 * sem(best_scores)
for params, mean_score, scores in grid_scores:
append_star = mean_score + 2 * sem(scores) > threshold
print(display_scores(params, scores, append_star=append_star))
|
jorahn/icy | icy/ml/metrics.py | sle | python | def sle(actual, predicted):
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2)) | Computes the squared log error.
This function computes the squared log error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ml/metrics.py#L181-L202 | null | import numpy as np
from sklearn.metrics import make_scorer
from scipy.stats import sem
def ae(actual, predicted):
"""
Computes the absolute error.
This function computes the absolute error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The absolute error between actual and predicted
"""
return np.abs(np.array(actual)-np.array(predicted))
def ce(actual, predicted):
"""
Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted
"""
return (sum([1.0 for x,y in zip(actual,predicted) if x != y]) /
len(actual))
def mae(actual, predicted):
"""
Computes the mean absolute error.
This function computes the mean absolute error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean absolute error between actual and predicted
"""
return np.mean(ae(actual, predicted))
def mse(actual, predicted):
"""
Computes the mean squared error.
This function computes the mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared error between actual and predicted
"""
return np.mean(se(actual, predicted))
def msle(actual, predicted):
"""
Computes the mean squared log error.
This function computes the mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared log error between actual and predicted
"""
return np.mean(sle(actual, predicted))
def rmse(actual, predicted):
"""
Computes the root mean squared error.
This function computes the root mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared error between actual and predicted
"""
return np.sqrt(mse(actual, predicted))
def rmsle(actual, predicted):
"""
Computes the root mean squared log error.
This function computes the root mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared log error between actual and predicted
"""
return np.sqrt(msle(actual, predicted))
def se(actual, predicted):
"""
Computes the squared error.
This function computes the squared error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared error between actual and predicted
"""
return np.power(np.array(actual)-np.array(predicted), 2)
def ll(actual, predicted):
"""
Computes the log likelihood.
This function computes the log likelihood between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The log likelihood error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
err = np.seterr(all='ignore')
score = -(actual*np.log(predicted)+(1-actual)*np.log(1-predicted))
np.seterr(divide=err['divide'], over=err['over'],
under=err['under'], invalid=err['invalid'])
if type(score)==np.ndarray:
score[np.isnan(score)] = 0
else:
if np.isnan(score):
score = 0
return score
def log_loss(actual, predicted):
"""
Computes the log loss.
This function computes the log loss between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The log loss between actual and predicted
"""
return np.mean(ll(actual, predicted))
def rmspe(actual, predicted):
"""
Computes the root mean square percentage error between two lists
of numbers. Ignores elements where actual[n] == 0.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared percentage error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
zeros = actual == 0
actual = actual[~zeros]
predicted = predicted[~zeros]
return np.sqrt(np.sum(np.power((actual - predicted) / actual, 2)) / len(actual))
def scorer(func, greater_is_better=True):
return make_scorer(eval(func), greater_is_better=greater_is_better)
def display_scores(params, scores, append_star=False):
"""Format the mean score +/- std error for params"""
params = ", ".join("{0}={1}".format(k, v)
for k, v in params.items())
line = "{0}:\t\t{1:.3f} (+/-{2:.3f})".format(
params, np.mean(scores), sem(scores))
if append_star:
line += " *"
return line
def display_grid_scores(grid_scores, top=None):
"""Helper function to format a report on a grid of scores"""
grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
if top is not None:
grid_scores = grid_scores[:top]
# Compute a threshold for staring models with overlapping stderr:
_, best_mean, best_scores = grid_scores[0]
threshold = best_mean - 2 * sem(best_scores)
for params, mean_score, scores in grid_scores:
append_star = mean_score + 2 * sem(scores) > threshold
print(display_scores(params, scores, append_star=append_star))
|
jorahn/icy | icy/ml/metrics.py | ll | python | def ll(actual, predicted):
actual = np.array(actual)
predicted = np.array(predicted)
err = np.seterr(all='ignore')
score = -(actual*np.log(predicted)+(1-actual)*np.log(1-predicted))
np.seterr(divide=err['divide'], over=err['over'],
under=err['under'], invalid=err['invalid'])
if type(score)==np.ndarray:
score[np.isnan(score)] = 0
else:
if np.isnan(score):
score = 0
return score | Computes the log likelihood.
This function computes the log likelihood between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The log likelihood error between actual and predicted | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ml/metrics.py#L204-L235 | null | import numpy as np
from sklearn.metrics import make_scorer
from scipy.stats import sem
def ae(actual, predicted):
"""
Computes the absolute error.
This function computes the absolute error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The absolute error between actual and predicted
"""
return np.abs(np.array(actual)-np.array(predicted))
def ce(actual, predicted):
"""
Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted
"""
return (sum([1.0 for x,y in zip(actual,predicted) if x != y]) /
len(actual))
def mae(actual, predicted):
"""
Computes the mean absolute error.
This function computes the mean absolute error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean absolute error between actual and predicted
"""
return np.mean(ae(actual, predicted))
def mse(actual, predicted):
"""
Computes the mean squared error.
This function computes the mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared error between actual and predicted
"""
return np.mean(se(actual, predicted))
def msle(actual, predicted):
"""
Computes the mean squared log error.
This function computes the mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared log error between actual and predicted
"""
return np.mean(sle(actual, predicted))
def rmse(actual, predicted):
"""
Computes the root mean squared error.
This function computes the root mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared error between actual and predicted
"""
return np.sqrt(mse(actual, predicted))
def rmsle(actual, predicted):
"""
Computes the root mean squared log error.
This function computes the root mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared log error between actual and predicted
"""
return np.sqrt(msle(actual, predicted))
def se(actual, predicted):
"""
Computes the squared error.
This function computes the squared error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared error between actual and predicted
"""
return np.power(np.array(actual)-np.array(predicted), 2)
def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2))
def log_loss(actual, predicted):
"""
Computes the log loss.
This function computes the log loss between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The log loss between actual and predicted
"""
return np.mean(ll(actual, predicted))
def rmspe(actual, predicted):
"""
Computes the root mean square percentage error between two lists
of numbers. Ignores elements where actual[n] == 0.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared percentage error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
zeros = actual == 0
actual = actual[~zeros]
predicted = predicted[~zeros]
return np.sqrt(np.sum(np.power((actual - predicted) / actual, 2)) / len(actual))
def scorer(func, greater_is_better=True):
return make_scorer(eval(func), greater_is_better=greater_is_better)
def display_scores(params, scores, append_star=False):
"""Format the mean score +/- std error for params"""
params = ", ".join("{0}={1}".format(k, v)
for k, v in params.items())
line = "{0}:\t\t{1:.3f} (+/-{2:.3f})".format(
params, np.mean(scores), sem(scores))
if append_star:
line += " *"
return line
def display_grid_scores(grid_scores, top=None):
"""Helper function to format a report on a grid of scores"""
grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
if top is not None:
grid_scores = grid_scores[:top]
# Compute a threshold for staring models with overlapping stderr:
_, best_mean, best_scores = grid_scores[0]
threshold = best_mean - 2 * sem(best_scores)
for params, mean_score, scores in grid_scores:
append_star = mean_score + 2 * sem(scores) > threshold
print(display_scores(params, scores, append_star=append_star))
|
jorahn/icy | icy/ml/metrics.py | rmspe | python | def rmspe(actual, predicted):
actual = np.array(actual)
predicted = np.array(predicted)
zeros = actual == 0
actual = actual[~zeros]
predicted = predicted[~zeros]
return np.sqrt(np.sum(np.power((actual - predicted) / actual, 2)) / len(actual)) | Computes the root mean square percentage error between two lists
of numbers. Ignores elements where actual[n] == 0.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared percentage error between actual and predicted | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ml/metrics.py#L259-L283 | null | import numpy as np
from sklearn.metrics import make_scorer
from scipy.stats import sem
def ae(actual, predicted):
"""
Computes the absolute error.
This function computes the absolute error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The absolute error between actual and predicted
"""
return np.abs(np.array(actual)-np.array(predicted))
def ce(actual, predicted):
"""
Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted
"""
return (sum([1.0 for x,y in zip(actual,predicted) if x != y]) /
len(actual))
def mae(actual, predicted):
"""
Computes the mean absolute error.
This function computes the mean absolute error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean absolute error between actual and predicted
"""
return np.mean(ae(actual, predicted))
def mse(actual, predicted):
"""
Computes the mean squared error.
This function computes the mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared error between actual and predicted
"""
return np.mean(se(actual, predicted))
def msle(actual, predicted):
"""
Computes the mean squared log error.
This function computes the mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared log error between actual and predicted
"""
return np.mean(sle(actual, predicted))
def rmse(actual, predicted):
"""
Computes the root mean squared error.
This function computes the root mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared error between actual and predicted
"""
return np.sqrt(mse(actual, predicted))
def rmsle(actual, predicted):
"""
Computes the root mean squared log error.
This function computes the root mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared log error between actual and predicted
"""
return np.sqrt(msle(actual, predicted))
def se(actual, predicted):
"""
Computes the squared error.
This function computes the squared error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared error between actual and predicted
"""
return np.power(np.array(actual)-np.array(predicted), 2)
def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2))
def ll(actual, predicted):
"""
Computes the log likelihood.
This function computes the log likelihood between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The log likelihood error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
err = np.seterr(all='ignore')
score = -(actual*np.log(predicted)+(1-actual)*np.log(1-predicted))
np.seterr(divide=err['divide'], over=err['over'],
under=err['under'], invalid=err['invalid'])
if type(score)==np.ndarray:
score[np.isnan(score)] = 0
else:
if np.isnan(score):
score = 0
return score
def log_loss(actual, predicted):
"""
Computes the log loss.
This function computes the log loss between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The log loss between actual and predicted
"""
return np.mean(ll(actual, predicted))
def rmspe(actual, predicted):
"""
Computes the root mean square percentage error between two lists
of numbers. Ignores elements where actual[n] == 0.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared percentage error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
zeros = actual == 0
actual = actual[~zeros]
predicted = predicted[~zeros]
return np.sqrt(np.sum(np.power((actual - predicted) / actual, 2)) / len(actual))
def scorer(func, greater_is_better=True):
return make_scorer(eval(func), greater_is_better=greater_is_better)
def display_scores(params, scores, append_star=False):
"""Format the mean score +/- std error for params"""
params = ", ".join("{0}={1}".format(k, v)
for k, v in params.items())
line = "{0}:\t\t{1:.3f} (+/-{2:.3f})".format(
params, np.mean(scores), sem(scores))
if append_star:
line += " *"
return line
def display_grid_scores(grid_scores, top=None):
"""Helper function to format a report on a grid of scores"""
grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
if top is not None:
grid_scores = grid_scores[:top]
# Compute a threshold for staring models with overlapping stderr:
_, best_mean, best_scores = grid_scores[0]
threshold = best_mean - 2 * sem(best_scores)
for params, mean_score, scores in grid_scores:
append_star = mean_score + 2 * sem(scores) > threshold
print(display_scores(params, scores, append_star=append_star))
|
jorahn/icy | icy/ml/metrics.py | display_scores | python | def display_scores(params, scores, append_star=False):
params = ", ".join("{0}={1}".format(k, v)
for k, v in params.items())
line = "{0}:\t\t{1:.3f} (+/-{2:.3f})".format(
params, np.mean(scores), sem(scores))
if append_star:
line += " *"
return line | Format the mean score +/- std error for params | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ml/metrics.py#L288-L296 | null | import numpy as np
from sklearn.metrics import make_scorer
from scipy.stats import sem
def ae(actual, predicted):
"""
Computes the absolute error.
This function computes the absolute error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The absolute error between actual and predicted
"""
return np.abs(np.array(actual)-np.array(predicted))
def ce(actual, predicted):
"""
Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted
"""
return (sum([1.0 for x,y in zip(actual,predicted) if x != y]) /
len(actual))
def mae(actual, predicted):
"""
Computes the mean absolute error.
This function computes the mean absolute error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean absolute error between actual and predicted
"""
return np.mean(ae(actual, predicted))
def mse(actual, predicted):
"""
Computes the mean squared error.
This function computes the mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared error between actual and predicted
"""
return np.mean(se(actual, predicted))
def msle(actual, predicted):
"""
Computes the mean squared log error.
This function computes the mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared log error between actual and predicted
"""
return np.mean(sle(actual, predicted))
def rmse(actual, predicted):
"""
Computes the root mean squared error.
This function computes the root mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared error between actual and predicted
"""
return np.sqrt(mse(actual, predicted))
def rmsle(actual, predicted):
"""
Computes the root mean squared log error.
This function computes the root mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared log error between actual and predicted
"""
return np.sqrt(msle(actual, predicted))
def se(actual, predicted):
"""
Computes the squared error.
This function computes the squared error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared error between actual and predicted
"""
return np.power(np.array(actual)-np.array(predicted), 2)
def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2))
def ll(actual, predicted):
"""
Computes the log likelihood.
This function computes the log likelihood between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The log likelihood error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
err = np.seterr(all='ignore')
score = -(actual*np.log(predicted)+(1-actual)*np.log(1-predicted))
np.seterr(divide=err['divide'], over=err['over'],
under=err['under'], invalid=err['invalid'])
if type(score)==np.ndarray:
score[np.isnan(score)] = 0
else:
if np.isnan(score):
score = 0
return score
def log_loss(actual, predicted):
"""
Computes the log loss.
This function computes the log loss between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The log loss between actual and predicted
"""
return np.mean(ll(actual, predicted))
def rmspe(actual, predicted):
"""
Computes the root mean square percentage error between two lists
of numbers. Ignores elements where actual[n] == 0.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared percentage error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
zeros = actual == 0
actual = actual[~zeros]
predicted = predicted[~zeros]
return np.sqrt(np.sum(np.power((actual - predicted) / actual, 2)) / len(actual))
def scorer(func, greater_is_better=True):
return make_scorer(eval(func), greater_is_better=greater_is_better)
def display_grid_scores(grid_scores, top=None):
"""Helper function to format a report on a grid of scores"""
grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
if top is not None:
grid_scores = grid_scores[:top]
# Compute a threshold for staring models with overlapping stderr:
_, best_mean, best_scores = grid_scores[0]
threshold = best_mean - 2 * sem(best_scores)
for params, mean_score, scores in grid_scores:
append_star = mean_score + 2 * sem(scores) > threshold
print(display_scores(params, scores, append_star=append_star))
|
jorahn/icy | icy/ml/metrics.py | display_grid_scores | python | def display_grid_scores(grid_scores, top=None):
grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
if top is not None:
grid_scores = grid_scores[:top]
# Compute a threshold for staring models with overlapping stderr:
_, best_mean, best_scores = grid_scores[0]
threshold = best_mean - 2 * sem(best_scores)
for params, mean_score, scores in grid_scores:
append_star = mean_score + 2 * sem(scores) > threshold
print(display_scores(params, scores, append_star=append_star)) | Helper function to format a report on a grid of scores | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ml/metrics.py#L298-L310 | [
"def display_scores(params, scores, append_star=False):\n \"\"\"Format the mean score +/- std error for params\"\"\"\n params = \", \".join(\"{0}={1}\".format(k, v)\n for k, v in params.items())\n line = \"{0}:\\t\\t{1:.3f} (+/-{2:.3f})\".format(\n params, np.mean(scores), sem(s... | import numpy as np
from sklearn.metrics import make_scorer
from scipy.stats import sem
def ae(actual, predicted):
"""
Computes the absolute error.
This function computes the absolute error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The absolute error between actual and predicted
"""
return np.abs(np.array(actual)-np.array(predicted))
def ce(actual, predicted):
"""
Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted
"""
return (sum([1.0 for x,y in zip(actual,predicted) if x != y]) /
len(actual))
def mae(actual, predicted):
"""
Computes the mean absolute error.
This function computes the mean absolute error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean absolute error between actual and predicted
"""
return np.mean(ae(actual, predicted))
def mse(actual, predicted):
"""
Computes the mean squared error.
This function computes the mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared error between actual and predicted
"""
return np.mean(se(actual, predicted))
def msle(actual, predicted):
"""
Computes the mean squared log error.
This function computes the mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The mean squared log error between actual and predicted
"""
return np.mean(sle(actual, predicted))
def rmse(actual, predicted):
"""
Computes the root mean squared error.
This function computes the root mean squared error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared error between actual and predicted
"""
return np.sqrt(mse(actual, predicted))
def rmsle(actual, predicted):
"""
Computes the root mean squared log error.
This function computes the root mean squared log error between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared log error between actual and predicted
"""
return np.sqrt(msle(actual, predicted))
def se(actual, predicted):
"""
Computes the squared error.
This function computes the squared error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared error between actual and predicted
"""
return np.power(np.array(actual)-np.array(predicted), 2)
def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2))
def ll(actual, predicted):
"""
Computes the log likelihood.
This function computes the log likelihood between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The log likelihood error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
err = np.seterr(all='ignore')
score = -(actual*np.log(predicted)+(1-actual)*np.log(1-predicted))
np.seterr(divide=err['divide'], over=err['over'],
under=err['under'], invalid=err['invalid'])
if type(score)==np.ndarray:
score[np.isnan(score)] = 0
else:
if np.isnan(score):
score = 0
return score
def log_loss(actual, predicted):
"""
Computes the log loss.
This function computes the log loss between two lists
of numbers.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The log loss between actual and predicted
"""
return np.mean(ll(actual, predicted))
def rmspe(actual, predicted):
"""
Computes the root mean square percentage error between two lists
of numbers. Ignores elements where actual[n] == 0.
Parameters
----------
actual : list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double
The root mean squared percentage error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
zeros = actual == 0
actual = actual[~zeros]
predicted = predicted[~zeros]
return np.sqrt(np.sum(np.power((actual - predicted) / actual, 2)) / len(actual))
def scorer(func, greater_is_better=True):
return make_scorer(eval(func), greater_is_better=greater_is_better)
def display_scores(params, scores, append_star=False):
"""Format the mean score +/- std error for params"""
params = ", ".join("{0}={1}".format(k, v)
for k, v in params.items())
line = "{0}:\t\t{1:.3f} (+/-{2:.3f})".format(
params, np.mean(scores), sem(scores))
if append_star:
line += " *"
return line
def display_grid_scores(grid_scores, top=None):
"""Helper function to format a report on a grid of scores"""
grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
if top is not None:
grid_scores = grid_scores[:top]
# Compute a threshold for staring models with overlapping stderr:
_, best_mean, best_scores = grid_scores[0]
threshold = best_mean - 2 * sem(best_scores)
for params, mean_score, scores in grid_scores:
append_star = mean_score + 2 * sem(scores) > threshold
print(display_scores(params, scores, append_star=append_star))
|
jorahn/icy | icy/icy.py | _path_to_objs | python | def _path_to_objs(path, include=['*', '.*'], exclude=['.*', '_*']):
if '://' in path:
# don't modify when path is in uri-notation, except for local files
if path.startswith('file://'):
path = path[7:]
else:
return [path]
path = os.path.abspath(os.path.expanduser(path))
if os.path.isfile(path):
if not path.lower().endswith(('.xlsx', '.xls')) and zipfile.is_zipfile(path):
# zipfile misidentifies xlsx as archive of xml files
with zipfile.ZipFile(path) as myzip:
zipped = []
for z in myzip.namelist():
z_fn = os.path.basename(z)
if z_fn != '' and any([fnmatch(z_fn, i) for i in include]) and \
not any([fnmatch(z_fn, e) for e in exclude]):
zipped.append(z)
return [myzip.open(z) for z in zipped]
else:
return [path]
elif os.path.isdir(path):
cands = [os.path.abspath(os.path.join(path, p)) for p in os.listdir(path)]
dirname = path
else:
cands = []
dirname = os.path.dirname(path)
include = list(chain.from_iterable(glob(os.path.join(dirname, i)) for i in include))
exclude = list(chain.from_iterable(glob(os.path.join(dirname, e)) for e in exclude))
objs = []
if cands == []:
cands = glob(path)
for p in cands:
if os.path.isfile(p) and p in include and not p in exclude:
objs.append(p)
zipped = [zipfile.is_zipfile(o) and not o.lower().endswith(('.xlsx', '.xls')) \
for o in objs]
toappend = []
todelete = []
for ix, o in enumerate(objs):
# if zipfile in objs replace zipfile with its contents
if zipped[ix]:
for new_o in _path_to_objs(o):
toappend.append(new_o)
todelete.append(ix)
shiftindex = 0
for d in todelete:
del objs[d - shiftindex]
shiftindex += 1
for new_o in toappend:
objs.append(new_o)
return objs | Turn path with opt. globbing into valid list of files respecting
include and exclude patterns.
Parameters
----------
path : str
Path to process. Can be location of a file, folder or glob.
Can be in uri-notation, can be relative or absolute or start with ~.
include : list, optional
Globbing patterns to require in result, defaults to ['*', '.*'].
exclude : list, optional
Globbing patterns to exclude from result, defaults to ['.*', '_*'].
Returns
-------
objs : list
List of valid files
Notes
-----
- Doesn't show hidden files starting with '.' by default. To enable hidden files, make sure '.*' is in `include` and '.*' is not in `exclude`.
- Doesn't show files starting with '_' by default. To enable these files, make sure '_*' is not in `exclude`. | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/icy.py#L45-L129 | null | """
icy: Python 3 data wrangling glue code
--------------------------------------
saving time handling multiple different data sources
"""
import os
import sys
import re
import zipfile
import pandas as pd
import numpy as np
import yaml
from odo import odo
from glob import glob
from fnmatch import fnmatch
from datetime import datetime
from copy import deepcopy
from itertools import chain
from collections import namedtuple
from importlib.util import find_spec
examples = {
'artists': ('artists.zip', 'artists_read.yml', {}),
'babynames': ('babynames.zip', 'babynames_read.yml', {}),
'bank': ('bank.zip', 'bank_read.yml', {}),
'caterpillar': ('caterpillar.zip', 'caterpillar_read.yml', {}),
'churn': ('churn.zip', 'churn_read.yml', {}),
'comunio': ('comunio', {}, {}),
'crossdevice': ('crossdevice.zip', {}, {}),
'crunchbase': ('crunchbase', {}, {}),
'egg': ('egg', 'egg_read.yml', {}),
# 'fed': ('fed.zip', {}, {}),
'formats': ('formats', {}, {}),
'lahman': ('lahman.zip', 'lahman_read.yml', {}),
'nyse1': ('nyse_1.zip', 'nyse_1_read.yml', {}),
'nyse2': ('nyse_2.tsv.gz', 'nyse_2_read.yml', {}),
'nyt_title': ('nyt_title.zip', 'nyt_title_read.yml', {}),
'otto': ('otto.zip', {}, {}),
'spam': ('sms_spam.zip', 'sms_spam_read.yml', {}),
'titanic': ('titanic.zip', {}, {}),
'wikipedia': ('wikipedia_langs.zip', 'wikipedia_read.yml', {})
}
def _path_to_objs(path, include=['*', '.*'], exclude=['.*', '_*']):
"""Turn path with opt. globbing into valid list of files respecting
include and exclude patterns.
Parameters
----------
path : str
Path to process. Can be location of a file, folder or glob.
Can be in uri-notation, can be relative or absolute or start with ~.
include : list, optional
Globbing patterns to require in result, defaults to ['*', '.*'].
exclude : list, optional
Globbing patterns to exclude from result, defaults to ['.*', '_*'].
Returns
-------
objs : list
List of valid files
Notes
-----
- Doesn't show hidden files starting with '.' by default. To enable hidden files, make sure '.*' is in `include` and '.*' is not in `exclude`.
- Doesn't show files starting with '_' by default. To enable these files, make sure '_*' is not in `exclude`.
"""
if '://' in path:
# don't modify when path is in uri-notation, except for local files
if path.startswith('file://'):
path = path[7:]
else:
return [path]
path = os.path.abspath(os.path.expanduser(path))
if os.path.isfile(path):
if not path.lower().endswith(('.xlsx', '.xls')) and zipfile.is_zipfile(path):
# zipfile misidentifies xlsx as archive of xml files
with zipfile.ZipFile(path) as myzip:
zipped = []
for z in myzip.namelist():
z_fn = os.path.basename(z)
if z_fn != '' and any([fnmatch(z_fn, i) for i in include]) and \
not any([fnmatch(z_fn, e) for e in exclude]):
zipped.append(z)
return [myzip.open(z) for z in zipped]
else:
return [path]
elif os.path.isdir(path):
cands = [os.path.abspath(os.path.join(path, p)) for p in os.listdir(path)]
dirname = path
else:
cands = []
dirname = os.path.dirname(path)
include = list(chain.from_iterable(glob(os.path.join(dirname, i)) for i in include))
exclude = list(chain.from_iterable(glob(os.path.join(dirname, e)) for e in exclude))
objs = []
if cands == []:
cands = glob(path)
for p in cands:
if os.path.isfile(p) and p in include and not p in exclude:
objs.append(p)
zipped = [zipfile.is_zipfile(o) and not o.lower().endswith(('.xlsx', '.xls')) \
for o in objs]
toappend = []
todelete = []
for ix, o in enumerate(objs):
# if zipfile in objs replace zipfile with its contents
if zipped[ix]:
for new_o in _path_to_objs(o):
toappend.append(new_o)
todelete.append(ix)
shiftindex = 0
for d in todelete:
del objs[d - shiftindex]
shiftindex += 1
for new_o in toappend:
objs.append(new_o)
return objs
def to_df(obj, cfg={}, raise_on_error=True, silent=False, verbose=False):
"""Convert obj to pandas.DataFrame, determine parser from filename.
Falls back to odo, esp. for database uri's.
"""
if type(obj) == str:
name = obj
else:
name = obj.name
name = name[name.rfind('/') + 1:]
if not raise_on_error:
try:
return to_df(obj=obj, cfg=cfg, raise_on_error=True)
except (pd.parser.CParserError, AttributeError, ValueError, TypeError, IOError) as e:
if not silent:
print('WARNING in {}: {} {}'.format(name, e.__class__, e))
return None
except:
if not silent:
print('WARNING in {}: {}'.format(name, sys.exc_info()[0]))
return None
params = {}
if 'default' in cfg:
params = deepcopy(cfg['default'])
if name in cfg:
for e in cfg[name]:
params[e] = deepcopy(cfg[name][e])
if 'custom_date_parser' in params:
params['date_parser'] = DtParser(params['custom_date_parser']).parse
del params['custom_date_parser']
if verbose:
print(name, params)
if name.lower().startswith('s3:'):
if not find_spec('boto'):
raise ImportError('reading from aws-s3 requires the boto package to be installed')
if '.csv' in name.lower():
# name can be .csv.gz or .csv.bz2
return pd.read_csv(obj, **params)
elif '.tsv' in name.lower() or '.txt' in name.lower():
# name can be .tsv.gz or .txt.bz2
return pd.read_table(obj, **params)
elif name.lower().endswith(('.htm', '.html')):
if not find_spec('lxml'):
params['flavor'] = 'bs4'
if not find_spec('bs4') and not find_spec('html5lib'):
raise ImportError('reading html requires the lxml or bs4 + html5lib packages to be installed')
if 'nrows' in params:
del params['nrows']
if type(obj) == zipfile.ZipExtFile:
obj = obj.read()
data = pd.read_html(obj, **params)
data = {str(i): data[i] for i in range(len(data))}
return data
elif name.lower().endswith('.xml'):
if 'nrows' in params:
del params['nrows']
from icy.utils import xml_to_json
with open(obj) as f:
json = xml_to_json(str(f.read()))
return pd.read_json(json, **params)
elif name.lower().endswith('.json'):
if 'nrows' in params:
del params['nrows']
return pd.read_json(obj, **params)
elif name.lower().endswith(('.xls', '.xlsx')):
if not find_spec('xlrd'):
raise ImportError('reading excel files requires the xlrd package to be installed')
if 'nrows' in params:
del params['nrows']
data = {}
xls = pd.ExcelFile(obj)
for key in xls.sheet_names:
data[key] = xls.parse(key, **params)
return data
elif name.lower().endswith(('.h5', '.hdf5')):
if not find_spec('tables'):
raise ImportError('reading hdf5 files requires the pytables package to be installed')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with pd.HDFStore(obj) as store:
data = {}
for key in store.keys():
data[key[1:]] = store[key]
return data
elif name.lower().endswith(('.sqlite', '.sql', '.db')):
import sqlite3
if type(obj) != str:
raise IOError('sqlite-database must be decompressed before import')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with sqlite3.connect(obj) as con:
data = {}
cursor = con.cursor()
cursor.execute('SELECT name FROM sqlite_master WHERE type="table"')
tables = [t[0] for t in cursor.fetchall()]
for t in tables:
sql = 'SELECT * FROM ' + t
data[t] = pd.read_sql_query(sql, con, **params)
return data
else:
try:
data = {name: odo(obj, pd.DataFrame)}
if type(data[name]) == pd.DataFrame:
return data
except NotImplementedError:
pass
raise NotImplementedError('Error creating DataFrame from object', obj)
def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False):
    """Create a dict of pandas.DataFrames from multiple sources (wraps pandas.IO & odo).

    Parameters
    ----------
    path : str
        File, folder or zip-archive to parse; may contain globbing and
        URI-notation (http://, ftp://, s3://, ...) or an odo-supported
        database URI. The parser is selected from the file extension.
    cfg : dict or str, optional
        kwargs handed to the pandas parsers, or path to a YAML file holding
        them. Special keys: 'filters' (extension whitelist), 'default'
        (kwargs for every file), 'custom_date_parser' (strptime pattern);
        per-filename keys override 'default'.
    raise_on_error : boolean, optional
        Raise instead of warning when a file cannot be parsed.
    silent : boolean, optional
        If True, don't print to stdout.
    verbose : boolean, optional
        If True, print the parsing arguments used for each file.
    return_errors : boolean, optional
        If True, return (data, errors) with errors the list of failed files.

    Returns
    -------
    data : dict
        Parsed pandas.DataFrames keyed by file name.
    """
    if isinstance(cfg, str):
        cfg = os.path.abspath(os.path.expanduser(cfg))
        yml = _read_yaml(cfg)
        if yml is None:
            # no config found -> write a minimal draft to local/read.yml
            if not silent:
                print('creating read.yml config file draft ...')
            cfg = {'filters': ['.csv'], 'default': {'sep': ',', 'parse_dates': []}}
            with open('local/read.yml', 'xt') as f:
                yaml.dump(cfg, f)
            yml = _read_yaml('local/read.yml')
        if 'filters' in yml:
            filters = yml['filters']
            if isinstance(filters, str):
                filters = [filters]
            del yml['filters']
        else:
            filters = []
        # NOTE(review): `filters` is collected but never applied below --
        # confirm whether extension filtering was meant to happen here.
        cfg = yml
    data = {}
    errors = []
    if not silent:
        print('processing', path, '...')
    for f in _path_to_objs(path):
        if isinstance(f, str):
            fname = os.path.basename(f)
        elif isinstance(f, zipfile.ZipExtFile):
            fname = f.name
        else:
            raise RuntimeError('_path_to_objs() returned unknown type', f)
        data, errors = _read_append(data=data, errors=errors, path=f, fname=fname, \
            cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
    if raise_on_error and not data:
        raise IOError('path is invalid or empty')
    if not silent:
        print('imported {} DataFrames'.format(len(data)))
        if data:
            print('total memory usage: {}'.format(mem(data)))
        if errors:
            print('import errors in files: {}'.format(', '.join(errors)))
    if return_errors:
        return data, errors
    return data
def _read_append(data, errors, path, fname, cfg, raise_on_error, silent, verbose):
    """Parse one file via to_df() and fold the result into data/errors.

    Multi-table results (dicts) are flattened into '<file>_<table>' keys;
    None or an empty dict records the file name in `errors`.
    """
    key = fname[fname.rfind('/') + 1:]  # strip any leading archive path
    result = to_df(obj=path, cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
    if isinstance(result, dict):
        if not result:
            errors.append(key)
        else:
            for r in result:
                data['_'.join([key, r])] = result[r]
    elif result is None:
        errors.append(key)
    else:
        data[key] = result
    return data, errors
def preview(path, cfg={}, rows=5, silent=True, verbose=False, raise_on_error=False):
    """Parse only the first `rows` rows of every file under `path` and
    print per-file schema/sample summaries. See read() for parameters.
    """
    if isinstance(cfg, str):
        cfg = os.path.abspath(os.path.expanduser(cfg))
        yml = _read_yaml(cfg)
        if yml is None:
            yml = {}
        if 'filters' in yml:
            filters = yml['filters']
            if isinstance(filters, str):
                filters = [filters]
            del yml['filters']
        cfg = yml
    if not isinstance(cfg, dict):
        cfg = {'default': {'nrows': rows}}
    else:
        # work on a copy: the original wrote into the caller's dict and
        # the shared mutable default argument across calls
        cfg = deepcopy(cfg)
        if 'filters' in cfg:
            filters = cfg['filters']
            if isinstance(filters, str):
                filters = [filters]
            del cfg['filters']
        if isinstance(cfg.get('default'), dict):
            cfg['default']['nrows'] = rows
        else:
            cfg['default'] = {'nrows': rows}
    if silent:
        # if not silent, output will be generated from icy.read()
        print('processing', path, '...')
    prev, errors = read(path=path, cfg=cfg, silent=silent, verbose=verbose, \
        raise_on_error=raise_on_error, return_errors=True)
    for key in sorted(prev):
        print('File: {}'.format(key))
        print()
        prev[key].info(verbose=True, memory_usage=True, null_counts=True)
        print()
        print('{:<20} | first {} VALUES'.format('COLUMN', rows))
        print('-' * 40)
        for col in prev[key].columns:
            print('{:<20} | {}'.format(col, str(list(prev[key][col].values)[:rows])))
        print('=' * 40)
    print('Successfully parsed first {} rows of {} files:'.format(rows, len(prev)))
    print(', '.join(sorted(prev)))
    if errors and silent:
        print()
        print('Errors parsing files: {}'.format(', '.join(errors)))
        print()
        print('Try icy.preview(path, cfg, silent=False) for a more verbose output.')
    return
def mem(data):
"""Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.).
"""
if type(data) == dict:
num = sum([data[k].memory_usage(index=True).sum() for k in data])
else:
num = data.memory_usage(index=True).sum()
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB')
def _read_yaml(path):
    """Return the parsed YAML content of `path`, or None if it is not a file."""
    if not os.path.isfile(path):
        return None
    with open(path) as f:
        return yaml.safe_load(f)
def merge(data, cfg=None):
    """WORK IN PROGRESS
    Concat, merge, join, drop keys in a dictionary of pandas.DataFrames
    into one pandas.DataFrame (data) and a pandas.Series (labels).

    Parameters
    ----------
    data : dict of pandas.DataFrames
        Result of icy.read()
    cfg : dict or str, optional
        Dictionary of actions to perform on data,
        or str with path to YAML, that will be parsed.

    Returns
    -------
    data : pandas.DataFrame
        The aggregated dataset
    labels : pandas.Series
        The target variable for analysis of the dataset,
        can have fewer samples than the aggregated dataset
    """
    # go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
    # pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
    # should be easy to iterate from normalized tables to a fully joined set of dataframes
    if isinstance(cfg, str):
        cfg = _read_yaml(cfg)
    if cfg is None:
        cfg = _read_yaml('local/merge.yml')
        if cfg is None:
            print('creating merge.yml config file draft ...')
            # planned merge heuristics (not implemented yet):
            # - concat tables with identical column names along rows (add src col)
            # - concat equal-length tables without duplicate columns along columns
            # - pick a master table, find key-col candidates (uniques == len),
            #   join smaller tables on the candidate with the best unique-overlap
            #   ratio; below a threshold, put the table on an unidentified list
            # draft config: record every table's columns
            cfg = {key: list(data[key].columns) for key in data}
            with open('local/merge.yml', 'xt') as f:
                yaml.dump(cfg, f)
            cfg = _read_yaml('local/merge.yml')
    # TODO: apply cfg (join/concat plan) to data and extract labels
    labels = None
    return data, labels
def _find_key_cols(df):
"""Identify columns in a DataFrame that could be a unique key"""
keys = []
for col in df:
if len(df[col].unique()) == len(df[col]):
keys.append(col)
return keys
def _dtparse(s, pattern):
return datetime.strptime(s, pattern)
class DtParser():
    """Date parser with a fixed strptime pattern, usable on scalars,
    lists and numpy arrays (e.g. as a pandas `date_parser`).
    """

    def __init__(self, pattern):
        # strptime format string (see datetime docs)
        self.pattern = pattern
        # elementwise vectorization of _dtparse for ndarray input
        self.vfunc = np.vectorize(_dtparse)

    def parse(self, s):
        """Parse `s` (str, list of str or np.ndarray of str) into datetime(s).

        Other input types fall through and return None, matching the
        historical behavior.
        """
        kind = type(s)
        if kind == str:
            return _dtparse(s, self.pattern)
        if kind == list:
            return [_dtparse(item, self.pattern) for item in s]
        if kind == np.ndarray:
            return self.vfunc(s, self.pattern)
def run_examples(examples):
    """Run read() on a number of examples, suppress output, generate summary.

    Parameters
    ----------
    examples : dict
        Maps example name -> (path, read_cfg, merge_cfg), with path and
        read_cfg resolved relative to ../local/test_data (merge_cfg is a
        placeholder for now).

    Returns
    -------
    None
        Prints all results to stdout.
    """
    import inspect
    PATH_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath( \
        inspect.getfile(inspect.currentframe()))), '../local/test_data')
    print('running examples ...')
    t0 = datetime.now()
    results = [0, 0, 0]  # success / no import / exception counters
    for ex in sorted(examples):
        t1 = datetime.now()
        src, cfg, _ = examples[ex]
        src = os.path.abspath(os.path.join(PATH_TEST_DATA, src))
        if not os.path.isfile(src) and not os.path.isdir(src):
            print('{} not a file'.format(src))
            break
        if isinstance(cfg, str):
            cfg = os.path.abspath(os.path.join(PATH_TEST_DATA, cfg))
            if not os.path.isfile(cfg):
                print('{} not a file'.format(cfg))
                break
        try:
            data = read(src, cfg=cfg, silent=True)
            n_keys = len(data.keys())
            if n_keys > 0:
                print('data {:<15} [SUCCESS] {:.1f}s, {} dfs, {}'.format(
                    ex, (datetime.now() - t1).total_seconds(), n_keys, mem(data)))
                results[0] += 1
            else:
                print('data {:<15} [NO IMPORT] {:.1f}s'.format(ex, (datetime.now() - t1).total_seconds()))
                results[1] += 1
        except Exception:
            # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
            print('data {:<15} [EXCEPTION] {:.1f}s'.format(ex, (datetime.now() - t1).total_seconds()))
            results[2] += 1
    print()
    print('ran {} tests in {:.1f} seconds'.format(len(examples),
        (datetime.now() - t0).total_seconds()))
    print('{} success / {} no import / {} exception'.format(
        str(results[0]), str(results[1]), str(results[2])))
if __name__ == '__main__':
run_examples(examples)
|
jorahn/icy | icy/icy.py | to_df | python | def to_df(obj, cfg={}, raise_on_error=True, silent=False, verbose=False):
if type(obj) == str:
name = obj
else:
name = obj.name
name = name[name.rfind('/') + 1:]
if not raise_on_error:
try:
return to_df(obj=obj, cfg=cfg, raise_on_error=True)
except (pd.parser.CParserError, AttributeError, ValueError, TypeError, IOError) as e:
if not silent:
print('WARNING in {}: {} {}'.format(name, e.__class__, e))
return None
except:
if not silent:
print('WARNING in {}: {}'.format(name, sys.exc_info()[0]))
return None
params = {}
if 'default' in cfg:
params = deepcopy(cfg['default'])
if name in cfg:
for e in cfg[name]:
params[e] = deepcopy(cfg[name][e])
if 'custom_date_parser' in params:
params['date_parser'] = DtParser(params['custom_date_parser']).parse
del params['custom_date_parser']
if verbose:
print(name, params)
if name.lower().startswith('s3:'):
if not find_spec('boto'):
raise ImportError('reading from aws-s3 requires the boto package to be installed')
if '.csv' in name.lower():
# name can be .csv.gz or .csv.bz2
return pd.read_csv(obj, **params)
elif '.tsv' in name.lower() or '.txt' in name.lower():
# name can be .tsv.gz or .txt.bz2
return pd.read_table(obj, **params)
elif name.lower().endswith(('.htm', '.html')):
if not find_spec('lxml'):
params['flavor'] = 'bs4'
if not find_spec('bs4') and not find_spec('html5lib'):
raise ImportError('reading html requires the lxml or bs4 + html5lib packages to be installed')
if 'nrows' in params:
del params['nrows']
if type(obj) == zipfile.ZipExtFile:
obj = obj.read()
data = pd.read_html(obj, **params)
data = {str(i): data[i] for i in range(len(data))}
return data
elif name.lower().endswith('.xml'):
if 'nrows' in params:
del params['nrows']
from icy.utils import xml_to_json
with open(obj) as f:
json = xml_to_json(str(f.read()))
return pd.read_json(json, **params)
elif name.lower().endswith('.json'):
if 'nrows' in params:
del params['nrows']
return pd.read_json(obj, **params)
elif name.lower().endswith(('.xls', '.xlsx')):
if not find_spec('xlrd'):
raise ImportError('reading excel files requires the xlrd package to be installed')
if 'nrows' in params:
del params['nrows']
data = {}
xls = pd.ExcelFile(obj)
for key in xls.sheet_names:
data[key] = xls.parse(key, **params)
return data
elif name.lower().endswith(('.h5', '.hdf5')):
if not find_spec('tables'):
raise ImportError('reading hdf5 files requires the pytables package to be installed')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with pd.HDFStore(obj) as store:
data = {}
for key in store.keys():
data[key[1:]] = store[key]
return data
elif name.lower().endswith(('.sqlite', '.sql', '.db')):
import sqlite3
if type(obj) != str:
raise IOError('sqlite-database must be decompressed before import')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with sqlite3.connect(obj) as con:
data = {}
cursor = con.cursor()
cursor.execute('SELECT name FROM sqlite_master WHERE type="table"')
tables = [t[0] for t in cursor.fetchall()]
for t in tables:
sql = 'SELECT * FROM ' + t
data[t] = pd.read_sql_query(sql, con, **params)
return data
else:
try:
data = {name: odo(obj, pd.DataFrame)}
if type(data[name]) == pd.DataFrame:
return data
except NotImplementedError:
pass
raise NotImplementedError('Error creating DataFrame from object', obj) | Convert obj to pandas.DataFrame, determine parser from filename.
Falls back to odo, esp. for database uri's. | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/icy.py#L131-L264 | [
"def to_df(obj, cfg={}, raise_on_error=True, silent=False, verbose=False):\n \"\"\"Convert obj to pandas.DataFrame, determine parser from filename.\n Falls back to odo, esp. for database uri's.\n \"\"\"\n\n if type(obj) == str:\n name = obj\n else:\n name = obj.name\n name = name[nam... | """
icy: Python 3 data wrangling glue code
--------------------------------------
saving time handling multiple different data sources
"""
import os
import sys
import re
import zipfile
import pandas as pd
import numpy as np
import yaml
from odo import odo
from glob import glob
from fnmatch import fnmatch
from datetime import datetime
from copy import deepcopy
from itertools import chain
from collections import namedtuple
from importlib.util import find_spec
# Bundled example datasets: name -> (source file/folder under local/test_data,
# cfg for read() (dict or YAML file name), cfg for merge() (placeholder)).
examples = {
'artists': ('artists.zip', 'artists_read.yml', {}),
'babynames': ('babynames.zip', 'babynames_read.yml', {}),
'bank': ('bank.zip', 'bank_read.yml', {}),
'caterpillar': ('caterpillar.zip', 'caterpillar_read.yml', {}),
'churn': ('churn.zip', 'churn_read.yml', {}),
'comunio': ('comunio', {}, {}),
'crossdevice': ('crossdevice.zip', {}, {}),
'crunchbase': ('crunchbase', {}, {}),
'egg': ('egg', 'egg_read.yml', {}),
# 'fed': ('fed.zip', {}, {}),
'formats': ('formats', {}, {}),
'lahman': ('lahman.zip', 'lahman_read.yml', {}),
'nyse1': ('nyse_1.zip', 'nyse_1_read.yml', {}),
'nyse2': ('nyse_2.tsv.gz', 'nyse_2_read.yml', {}),
'nyt_title': ('nyt_title.zip', 'nyt_title_read.yml', {}),
'otto': ('otto.zip', {}, {}),
'spam': ('sms_spam.zip', 'sms_spam_read.yml', {}),
'titanic': ('titanic.zip', {}, {}),
'wikipedia': ('wikipedia_langs.zip', 'wikipedia_read.yml', {})
}
def _path_to_objs(path, include=['*', '.*'], exclude=['.*', '_*']):
"""Turn path with opt. globbing into valid list of files respecting
include and exclude patterns.
Parameters
----------
path : str
Path to process. Can be location of a file, folder or glob.
Can be in uri-notation, can be relative or absolute or start with ~.
include : list, optional
Globbing patterns to require in result, defaults to ['*', '.*'].
exclude : list, optional
Globbing patterns to exclude from result, defaults to ['.*', '_*'].
Returns
-------
objs : list
List of valid files
Notes
-----
- Doesn't show hidden files starting with '.' by default. To enable hidden files, make sure '.*' is in `include` and '.*' is not in `exclude`.
- Doesn't show files starting with '_' by default. To enable these files, make sure '_*' is not in `exclude`.
"""
if '://' in path:
# don't modify when path is in uri-notation, except for local files
if path.startswith('file://'):
path = path[7:]
else:
return [path]
path = os.path.abspath(os.path.expanduser(path))
if os.path.isfile(path):
if not path.lower().endswith(('.xlsx', '.xls')) and zipfile.is_zipfile(path):
# zipfile misidentifies xlsx as archive of xml files
with zipfile.ZipFile(path) as myzip:
zipped = []
for z in myzip.namelist():
z_fn = os.path.basename(z)
if z_fn != '' and any([fnmatch(z_fn, i) for i in include]) and \
not any([fnmatch(z_fn, e) for e in exclude]):
zipped.append(z)
return [myzip.open(z) for z in zipped]
else:
return [path]
elif os.path.isdir(path):
cands = [os.path.abspath(os.path.join(path, p)) for p in os.listdir(path)]
dirname = path
else:
cands = []
dirname = os.path.dirname(path)
include = list(chain.from_iterable(glob(os.path.join(dirname, i)) for i in include))
exclude = list(chain.from_iterable(glob(os.path.join(dirname, e)) for e in exclude))
objs = []
if cands == []:
cands = glob(path)
for p in cands:
if os.path.isfile(p) and p in include and not p in exclude:
objs.append(p)
zipped = [zipfile.is_zipfile(o) and not o.lower().endswith(('.xlsx', '.xls')) \
for o in objs]
toappend = []
todelete = []
for ix, o in enumerate(objs):
# if zipfile in objs replace zipfile with its contents
if zipped[ix]:
for new_o in _path_to_objs(o):
toappend.append(new_o)
todelete.append(ix)
shiftindex = 0
for d in todelete:
del objs[d - shiftindex]
shiftindex += 1
for new_o in toappend:
objs.append(new_o)
return objs
def to_df(obj, cfg={}, raise_on_error=True, silent=False, verbose=False):
    """Convert obj to pandas.DataFrame(s); parser chosen from the file
    extension. Falls back to odo, esp. for database uri's.

    Parameters
    ----------
    obj : str or file-like
        Path / URI, or an open file object (e.g. zipfile.ZipExtFile).
    cfg : dict, optional
        Parser kwargs: 'default' applies to every file, a key matching
        the file name overrides it, 'custom_date_parser' (strptime
        pattern) is turned into a `date_parser` callable.
    raise_on_error : boolean, optional
        If False, parse errors are printed as warnings and None returned.
    silent, verbose : boolean, optional
        Control warning / diagnostic output.

    Returns
    -------
    pandas.DataFrame, dict of DataFrames (multi-table formats), or None
    (suppressed error).
    """
    if isinstance(obj, str):
        name = obj
    else:
        name = obj.name
    name = name[name.rfind('/') + 1:]
    if not raise_on_error:
        # BUGFIX: pandas moved the CSV parser error from pd.parser.CParserError
        # to pd.errors.ParserError; resolve whichever exists so building the
        # except tuple doesn't itself raise AttributeError on modern pandas
        try:
            csv_error = pd.errors.ParserError
        except AttributeError:
            csv_error = pd.parser.CParserError
        try:
            return to_df(obj=obj, cfg=cfg, raise_on_error=True)
        except (csv_error, AttributeError, ValueError, TypeError, IOError) as e:
            if not silent:
                print('WARNING in {}: {} {}'.format(name, e.__class__, e))
            return None
        except Exception:
            # was a bare `except:`; Exception lets KeyboardInterrupt/SystemExit through
            if not silent:
                print('WARNING in {}: {}'.format(name, sys.exc_info()[0]))
            return None
    # assemble parser kwargs: defaults first, per-file overrides second
    params = {}
    if 'default' in cfg:
        params = deepcopy(cfg['default'])
    if name in cfg:
        for e in cfg[name]:
            params[e] = deepcopy(cfg[name][e])
    if 'custom_date_parser' in params:
        params['date_parser'] = DtParser(params['custom_date_parser']).parse
        del params['custom_date_parser']
    if verbose:
        print(name, params)
    lname = name.lower()
    if lname.startswith('s3:'):
        if not find_spec('boto'):
            raise ImportError('reading from aws-s3 requires the boto package to be installed')
    if '.csv' in lname:
        # name can be .csv.gz or .csv.bz2
        return pd.read_csv(obj, **params)
    elif '.tsv' in lname or '.txt' in lname:
        # name can be .tsv.gz or .txt.bz2
        return pd.read_table(obj, **params)
    elif lname.endswith(('.htm', '.html')):
        if not find_spec('lxml'):
            params['flavor'] = 'bs4'
            if not find_spec('bs4') and not find_spec('html5lib'):
                raise ImportError('reading html requires the lxml or bs4 + html5lib packages to be installed')
        if 'nrows' in params:
            del params['nrows']
        if isinstance(obj, zipfile.ZipExtFile):
            obj = obj.read()
        data = pd.read_html(obj, **params)
        # read_html returns a list of tables -> key by position
        return {str(i): data[i] for i in range(len(data))}
    elif lname.endswith('.xml'):
        if 'nrows' in params:
            del params['nrows']
        from icy.utils import xml_to_json
        with open(obj) as f:
            json = xml_to_json(str(f.read()))
        return pd.read_json(json, **params)
    elif lname.endswith('.json'):
        if 'nrows' in params:
            del params['nrows']
        return pd.read_json(obj, **params)
    elif lname.endswith(('.xls', '.xlsx')):
        if not find_spec('xlrd'):
            raise ImportError('reading excel files requires the xlrd package to be installed')
        if 'nrows' in params:
            del params['nrows']
        xls = pd.ExcelFile(obj)
        return {key: xls.parse(key, **params) for key in xls.sheet_names}
    elif lname.endswith(('.h5', '.hdf5')):
        if not find_spec('tables'):
            raise ImportError('reading hdf5 files requires the pytables package to be installed')
        if 'nrows' in params:
            del params['nrows']
        # params['chunksize'] = params.pop('nrows')  # returns iterator
        with pd.HDFStore(obj) as store:
            # store keys are '/name' -> strip the leading slash
            return {key[1:]: store[key] for key in store.keys()}
    elif lname.endswith(('.sqlite', '.sql', '.db')):
        import sqlite3
        if not isinstance(obj, str):
            raise IOError('sqlite-database must be decompressed before import')
        if 'nrows' in params:
            del params['nrows']
        # params['chunksize'] = params.pop('nrows')  # returns iterator
        # NOTE: sqlite3's context manager only wraps a transaction; the
        # connection itself stays open afterwards (as before)
        with sqlite3.connect(obj) as con:
            data = {}
            cursor = con.cursor()
            cursor.execute('SELECT name FROM sqlite_master WHERE type="table"')
            for t in [row[0] for row in cursor.fetchall()]:
                data[t] = pd.read_sql_query('SELECT * FROM ' + t, con, **params)
        return data
    else:
        try:
            data = {name: odo(obj, pd.DataFrame)}
            if isinstance(data[name], pd.DataFrame):
                return data
        except NotImplementedError:
            pass
        raise NotImplementedError('Error creating DataFrame from object', obj)
def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False):
    """Create a dict of pandas.DataFrames from multiple sources (wraps pandas.IO & odo).

    Parameters
    ----------
    path : str
        File, folder or zip-archive to parse; may contain globbing and
        URI-notation (http://, ftp://, s3://, ...) or an odo-supported
        database URI. The parser is selected from the file extension.
    cfg : dict or str, optional
        kwargs handed to the pandas parsers, or path to a YAML file holding
        them. Special keys: 'filters' (extension whitelist), 'default'
        (kwargs for every file), 'custom_date_parser' (strptime pattern);
        per-filename keys override 'default'.
    raise_on_error : boolean, optional
        Raise instead of warning when a file cannot be parsed.
    silent : boolean, optional
        If True, don't print to stdout.
    verbose : boolean, optional
        If True, print the parsing arguments used for each file.
    return_errors : boolean, optional
        If True, return (data, errors) with errors the list of failed files.

    Returns
    -------
    data : dict
        Parsed pandas.DataFrames keyed by file name.
    """
    if isinstance(cfg, str):
        cfg = os.path.abspath(os.path.expanduser(cfg))
        yml = _read_yaml(cfg)
        if yml is None:
            # no config found -> write a minimal draft to local/read.yml
            if not silent:
                print('creating read.yml config file draft ...')
            cfg = {'filters': ['.csv'], 'default': {'sep': ',', 'parse_dates': []}}
            with open('local/read.yml', 'xt') as f:
                yaml.dump(cfg, f)
            yml = _read_yaml('local/read.yml')
        if 'filters' in yml:
            filters = yml['filters']
            if isinstance(filters, str):
                filters = [filters]
            del yml['filters']
        else:
            filters = []
        # NOTE(review): `filters` is collected but never applied below --
        # confirm whether extension filtering was meant to happen here.
        cfg = yml
    data = {}
    errors = []
    if not silent:
        print('processing', path, '...')
    for f in _path_to_objs(path):
        if isinstance(f, str):
            fname = os.path.basename(f)
        elif isinstance(f, zipfile.ZipExtFile):
            fname = f.name
        else:
            raise RuntimeError('_path_to_objs() returned unknown type', f)
        data, errors = _read_append(data=data, errors=errors, path=f, fname=fname, \
            cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
    if raise_on_error and not data:
        raise IOError('path is invalid or empty')
    if not silent:
        print('imported {} DataFrames'.format(len(data)))
        if data:
            print('total memory usage: {}'.format(mem(data)))
        if errors:
            print('import errors in files: {}'.format(', '.join(errors)))
    if return_errors:
        return data, errors
    return data
def _read_append(data, errors, path, fname, cfg, raise_on_error, silent, verbose):
    """Parse one file via to_df() and fold the result into data/errors.

    Multi-table results (dicts) are flattened into '<file>_<table>' keys;
    None or an empty dict records the file name in `errors`.
    """
    key = fname[fname.rfind('/') + 1:]  # strip any leading archive path
    result = to_df(obj=path, cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
    if isinstance(result, dict):
        if not result:
            errors.append(key)
        else:
            for r in result:
                data['_'.join([key, r])] = result[r]
    elif result is None:
        errors.append(key)
    else:
        data[key] = result
    return data, errors
def preview(path, cfg={}, rows=5, silent=True, verbose=False, raise_on_error=False):
    """Parse only the first `rows` rows of every file under `path` and
    print per-file schema/sample summaries. See read() for parameters.
    """
    if isinstance(cfg, str):
        cfg = os.path.abspath(os.path.expanduser(cfg))
        yml = _read_yaml(cfg)
        if yml is None:
            yml = {}
        if 'filters' in yml:
            filters = yml['filters']
            if isinstance(filters, str):
                filters = [filters]
            del yml['filters']
        cfg = yml
    if not isinstance(cfg, dict):
        cfg = {'default': {'nrows': rows}}
    else:
        # work on a copy: the original wrote into the caller's dict and
        # the shared mutable default argument across calls
        cfg = deepcopy(cfg)
        if 'filters' in cfg:
            filters = cfg['filters']
            if isinstance(filters, str):
                filters = [filters]
            del cfg['filters']
        if isinstance(cfg.get('default'), dict):
            cfg['default']['nrows'] = rows
        else:
            cfg['default'] = {'nrows': rows}
    if silent:
        # if not silent, output will be generated from icy.read()
        print('processing', path, '...')
    prev, errors = read(path=path, cfg=cfg, silent=silent, verbose=verbose, \
        raise_on_error=raise_on_error, return_errors=True)
    for key in sorted(prev):
        print('File: {}'.format(key))
        print()
        prev[key].info(verbose=True, memory_usage=True, null_counts=True)
        print()
        print('{:<20} | first {} VALUES'.format('COLUMN', rows))
        print('-' * 40)
        for col in prev[key].columns:
            print('{:<20} | {}'.format(col, str(list(prev[key][col].values)[:rows])))
        print('=' * 40)
    print('Successfully parsed first {} rows of {} files:'.format(rows, len(prev)))
    print(', '.join(sorted(prev)))
    if errors and silent:
        print()
        print('Errors parsing files: {}'.format(', '.join(errors)))
        print()
        print('Try icy.preview(path, cfg, silent=False) for a more verbose output.')
    return
def mem(data):
"""Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.).
"""
if type(data) == dict:
num = sum([data[k].memory_usage(index=True).sum() for k in data])
else:
num = data.memory_usage(index=True).sum()
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB')
def _read_yaml(path):
    """Return the parsed YAML content of `path`, or None if it is not a file."""
    if not os.path.isfile(path):
        return None
    with open(path) as f:
        return yaml.safe_load(f)
def merge(data, cfg=None):
    """WORK IN PROGRESS
    Concat, merge, join, drop keys in a dictionary of pandas.DataFrames
    into one pandas.DataFrame (data) and a pandas.Series (labels).

    Parameters
    ----------
    data : dict of pandas.DataFrames
        Result of icy.read()
    cfg : dict or str, optional
        Dictionary of actions to perform on data,
        or str with path to YAML, that will be parsed.

    Returns
    -------
    data : pandas.DataFrame
        The aggregated dataset
    labels : pandas.Series
        The target variable for analysis of the dataset,
        can have fewer samples than the aggregated dataset
    """
    # go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
    # pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
    # should be easy to iterate from normalized tables to a fully joined set of dataframes
    if isinstance(cfg, str):
        cfg = _read_yaml(cfg)
    if cfg is None:
        cfg = _read_yaml('local/merge.yml')
        if cfg is None:
            print('creating merge.yml config file draft ...')
            # planned merge heuristics (not implemented yet):
            # - concat tables with identical column names along rows (add src col)
            # - concat equal-length tables without duplicate columns along columns
            # - pick a master table, find key-col candidates (uniques == len),
            #   join smaller tables on the candidate with the best unique-overlap
            #   ratio; below a threshold, put the table on an unidentified list
            # draft config: record every table's columns
            cfg = {key: list(data[key].columns) for key in data}
            with open('local/merge.yml', 'xt') as f:
                yaml.dump(cfg, f)
            cfg = _read_yaml('local/merge.yml')
    # TODO: apply cfg (join/concat plan) to data and extract labels
    labels = None
    return data, labels
def _find_key_cols(df):
"""Identify columns in a DataFrame that could be a unique key"""
keys = []
for col in df:
if len(df[col].unique()) == len(df[col]):
keys.append(col)
return keys
def _dtparse(s, pattern):
return datetime.strptime(s, pattern)
class DtParser():
    """Date parser with a fixed strptime pattern, usable on scalars,
    lists and numpy arrays (e.g. as a pandas `date_parser`).
    """

    def __init__(self, pattern):
        # strptime format string (see datetime docs)
        self.pattern = pattern
        # elementwise vectorization of _dtparse for ndarray input
        self.vfunc = np.vectorize(_dtparse)

    def parse(self, s):
        """Parse `s` (str, list of str or np.ndarray of str) into datetime(s).

        Other input types fall through and return None, matching the
        historical behavior.
        """
        kind = type(s)
        if kind == str:
            return _dtparse(s, self.pattern)
        if kind == list:
            return [_dtparse(item, self.pattern) for item in s]
        if kind == np.ndarray:
            return self.vfunc(s, self.pattern)
def run_examples(examples):
    """Run read() on a number of examples, suppress output, generate summary.

    Parameters
    ----------
    examples : dict
        Maps example name -> (path, read_cfg, merge_cfg), with path and
        read_cfg resolved relative to ../local/test_data (merge_cfg is a
        placeholder for now).

    Returns
    -------
    None
        Prints all results to stdout.
    """
    import inspect
    PATH_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath( \
        inspect.getfile(inspect.currentframe()))), '../local/test_data')
    print('running examples ...')
    t0 = datetime.now()
    results = [0, 0, 0]  # success / no import / exception counters
    for ex in sorted(examples):
        t1 = datetime.now()
        src, cfg, _ = examples[ex]
        src = os.path.abspath(os.path.join(PATH_TEST_DATA, src))
        if not os.path.isfile(src) and not os.path.isdir(src):
            print('{} not a file'.format(src))
            break
        if isinstance(cfg, str):
            cfg = os.path.abspath(os.path.join(PATH_TEST_DATA, cfg))
            if not os.path.isfile(cfg):
                print('{} not a file'.format(cfg))
                break
        try:
            data = read(src, cfg=cfg, silent=True)
            n_keys = len(data.keys())
            if n_keys > 0:
                print('data {:<15} [SUCCESS] {:.1f}s, {} dfs, {}'.format(
                    ex, (datetime.now() - t1).total_seconds(), n_keys, mem(data)))
                results[0] += 1
            else:
                print('data {:<15} [NO IMPORT] {:.1f}s'.format(ex, (datetime.now() - t1).total_seconds()))
                results[1] += 1
        except Exception:
            # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
            print('data {:<15} [EXCEPTION] {:.1f}s'.format(ex, (datetime.now() - t1).total_seconds()))
            results[2] += 1
    print()
    print('ran {} tests in {:.1f} seconds'.format(len(examples),
        (datetime.now() - t0).total_seconds()))
    print('{} success / {} no import / {} exception'.format(
        str(results[0]), str(results[1]), str(results[2])))
if __name__ == '__main__':
run_examples(examples)
|
jorahn/icy | icy/icy.py | read | python | def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False):
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
if not silent:
print('creating read.yml config file draft ...')
cfg = {'filters': ['.csv'], 'default': {'sep': ',', 'parse_dates': []}}
with open('local/read.yml', 'xt') as f:
yaml.dump(cfg, f)
yml = _read_yaml('local/read.yml')
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
else:
filters = []
cfg = yml
data = {}
errors = []
if not silent:
print('processing', path, '...')
for f in _path_to_objs(path):
if type(f) == str:
fname = os.path.basename(f)
elif type(f) == zipfile.ZipExtFile:
fname = f.name
else:
raise RuntimeError('_path_to_objs() returned unknown type', f)
data, errors = _read_append(data=data, errors=errors, path=f, fname=fname, \
cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if raise_on_error and data == {}:
raise IOError('path is invalid or empty')
if not silent:
print('imported {} DataFrames'.format(len(data)))
if len(data) > 0:
print('total memory usage: {}'.format(mem(data)))
if len(errors) > 0:
print('import errors in files: {}'.format(', '.join(errors)))
if return_errors:
return data, errors
else:
return data | Wraps pandas.IO & odo to create a dictionary of pandas.DataFrames from multiple different sources
Parameters
----------
path : str
Location of file, folder or zip-file to be parsed. Can include globbing (e.g. `*.csv`).
Can be remote with URI-notation beginning with e.g. http://, https://, file://, ftp://, s3:// and ssh://.
Can be odo-supported database (SQL, MongoDB, Hadoop, Spark) if dependencies are available.
Parser will be selected based on file extension.
cfg : dict or str, optional
Dictionary of kwargs to be provided to the pandas parser (http://pandas.pydata.org/pandas-docs/stable/api.html#input-output)
or str with path to YAML, that will be parsed.
Special keys:
**filters** : str or list of strings, optional. For a file to be processed, it must contain one of the Strings (e.g. ['.csv', '.tsv'])
**default** : kwargs to be used for every file
**custom_date_parser** : strptime-format string (https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior), generates a parser that used as the *date_parser* argument
If filename in keys, use kwargs from that key in addition to or overwriting *default* kwargs.
silent : boolean, optional
If True, doesn't print to stdout.
verbose : boolean, optional
If True, prints parsing arguments for each file processed to stdout.
raise_on_error : boolean, optional
Raise exception or only display warning, if a file cannot be parsed successfully.
return_errors : boolean, optional
If True, read() returns (data, errors) tuple instead of only data, with errors as a list of all files that could not be parsed.
Returns
-------
data : dict
Dictionary of parsed pandas.DataFrames, with file names as keys.
Notes
-----
- Start with basic cfg and tune until the desired parsing result is achieved.
- File extensions are critical to determine the parser, make sure they are *common*.
- Avoid files named 'default' or 'filters'.
- Avoid duplicate file names.
- Subfolders and file names beginning with '.' or '_' are ignored.
- If an https:// URI isn't correctly processed, try http:// instead.
- To connect to a database or s3-bucket, make sure the required dependencies like sqlalchemy, pymongo, pyspark or boto are available in the active environment. | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/icy.py#L266-L362 | [
"def mem(data):\n \"\"\"Total memory used by data\n\n Parameters\n ----------\n data : dict of pandas.DataFrames or pandas.DataFrame\n\n Returns\n -------\n str : str\n Human readable amount of memory used with unit (like KB, MB, GB etc.).\n \"\"\"\n\n if type(data) == dict:\n ... | """
icy: Python 3 data wrangling glue code
--------------------------------------
saving time handling multiple different data sources
"""
import os
import sys
import re
import zipfile
import pandas as pd
import numpy as np
import yaml
from odo import odo
from glob import glob
from fnmatch import fnmatch
from datetime import datetime
from copy import deepcopy
from itertools import chain
from collections import namedtuple
from importlib.util import find_spec
examples = {
'artists': ('artists.zip', 'artists_read.yml', {}),
'babynames': ('babynames.zip', 'babynames_read.yml', {}),
'bank': ('bank.zip', 'bank_read.yml', {}),
'caterpillar': ('caterpillar.zip', 'caterpillar_read.yml', {}),
'churn': ('churn.zip', 'churn_read.yml', {}),
'comunio': ('comunio', {}, {}),
'crossdevice': ('crossdevice.zip', {}, {}),
'crunchbase': ('crunchbase', {}, {}),
'egg': ('egg', 'egg_read.yml', {}),
# 'fed': ('fed.zip', {}, {}),
'formats': ('formats', {}, {}),
'lahman': ('lahman.zip', 'lahman_read.yml', {}),
'nyse1': ('nyse_1.zip', 'nyse_1_read.yml', {}),
'nyse2': ('nyse_2.tsv.gz', 'nyse_2_read.yml', {}),
'nyt_title': ('nyt_title.zip', 'nyt_title_read.yml', {}),
'otto': ('otto.zip', {}, {}),
'spam': ('sms_spam.zip', 'sms_spam_read.yml', {}),
'titanic': ('titanic.zip', {}, {}),
'wikipedia': ('wikipedia_langs.zip', 'wikipedia_read.yml', {})
}
def _path_to_objs(path, include=['*', '.*'], exclude=['.*', '_*']):
"""Turn path with opt. globbing into valid list of files respecting
include and exclude patterns.
Parameters
----------
path : str
Path to process. Can be location of a file, folder or glob.
Can be in uri-notation, can be relative or absolute or start with ~.
include : list, optional
Globbing patterns to require in result, defaults to ['*', '.*'].
exclude : list, optional
Globbing patterns to exclude from result, defaults to ['.*', '_*'].
Returns
-------
objs : list
List of valid files
Notes
-----
- Doesn't show hidden files starting with '.' by default. To enable hidden files, make sure '.*' is in `include` and '.*' is not in `exclude`.
- Doesn't show files starting with '_' by default. To enable these files, make sure '_*' is not in `exclude`.
"""
if '://' in path:
# don't modify when path is in uri-notation, except for local files
if path.startswith('file://'):
path = path[7:]
else:
return [path]
path = os.path.abspath(os.path.expanduser(path))
if os.path.isfile(path):
if not path.lower().endswith(('.xlsx', '.xls')) and zipfile.is_zipfile(path):
# zipfile misidentifies xlsx as archive of xml files
with zipfile.ZipFile(path) as myzip:
zipped = []
for z in myzip.namelist():
z_fn = os.path.basename(z)
if z_fn != '' and any([fnmatch(z_fn, i) for i in include]) and \
not any([fnmatch(z_fn, e) for e in exclude]):
zipped.append(z)
return [myzip.open(z) for z in zipped]
else:
return [path]
elif os.path.isdir(path):
cands = [os.path.abspath(os.path.join(path, p)) for p in os.listdir(path)]
dirname = path
else:
cands = []
dirname = os.path.dirname(path)
include = list(chain.from_iterable(glob(os.path.join(dirname, i)) for i in include))
exclude = list(chain.from_iterable(glob(os.path.join(dirname, e)) for e in exclude))
objs = []
if cands == []:
cands = glob(path)
for p in cands:
if os.path.isfile(p) and p in include and not p in exclude:
objs.append(p)
zipped = [zipfile.is_zipfile(o) and not o.lower().endswith(('.xlsx', '.xls')) \
for o in objs]
toappend = []
todelete = []
for ix, o in enumerate(objs):
# if zipfile in objs replace zipfile with its contents
if zipped[ix]:
for new_o in _path_to_objs(o):
toappend.append(new_o)
todelete.append(ix)
shiftindex = 0
for d in todelete:
del objs[d - shiftindex]
shiftindex += 1
for new_o in toappend:
objs.append(new_o)
return objs
def to_df(obj, cfg={}, raise_on_error=True, silent=False, verbose=False):
"""Convert obj to pandas.DataFrame, determine parser from filename.
Falls back to odo, esp. for database uri's.
"""
if type(obj) == str:
name = obj
else:
name = obj.name
name = name[name.rfind('/') + 1:]
if not raise_on_error:
try:
return to_df(obj=obj, cfg=cfg, raise_on_error=True)
except (pd.parser.CParserError, AttributeError, ValueError, TypeError, IOError) as e:
if not silent:
print('WARNING in {}: {} {}'.format(name, e.__class__, e))
return None
except:
if not silent:
print('WARNING in {}: {}'.format(name, sys.exc_info()[0]))
return None
params = {}
if 'default' in cfg:
params = deepcopy(cfg['default'])
if name in cfg:
for e in cfg[name]:
params[e] = deepcopy(cfg[name][e])
if 'custom_date_parser' in params:
params['date_parser'] = DtParser(params['custom_date_parser']).parse
del params['custom_date_parser']
if verbose:
print(name, params)
if name.lower().startswith('s3:'):
if not find_spec('boto'):
raise ImportError('reading from aws-s3 requires the boto package to be installed')
if '.csv' in name.lower():
# name can be .csv.gz or .csv.bz2
return pd.read_csv(obj, **params)
elif '.tsv' in name.lower() or '.txt' in name.lower():
# name can be .tsv.gz or .txt.bz2
return pd.read_table(obj, **params)
elif name.lower().endswith(('.htm', '.html')):
if not find_spec('lxml'):
params['flavor'] = 'bs4'
if not find_spec('bs4') and not find_spec('html5lib'):
raise ImportError('reading html requires the lxml or bs4 + html5lib packages to be installed')
if 'nrows' in params:
del params['nrows']
if type(obj) == zipfile.ZipExtFile:
obj = obj.read()
data = pd.read_html(obj, **params)
data = {str(i): data[i] for i in range(len(data))}
return data
elif name.lower().endswith('.xml'):
if 'nrows' in params:
del params['nrows']
from icy.utils import xml_to_json
with open(obj) as f:
json = xml_to_json(str(f.read()))
return pd.read_json(json, **params)
elif name.lower().endswith('.json'):
if 'nrows' in params:
del params['nrows']
return pd.read_json(obj, **params)
elif name.lower().endswith(('.xls', '.xlsx')):
if not find_spec('xlrd'):
raise ImportError('reading excel files requires the xlrd package to be installed')
if 'nrows' in params:
del params['nrows']
data = {}
xls = pd.ExcelFile(obj)
for key in xls.sheet_names:
data[key] = xls.parse(key, **params)
return data
elif name.lower().endswith(('.h5', '.hdf5')):
if not find_spec('tables'):
raise ImportError('reading hdf5 files requires the pytables package to be installed')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with pd.HDFStore(obj) as store:
data = {}
for key in store.keys():
data[key[1:]] = store[key]
return data
elif name.lower().endswith(('.sqlite', '.sql', '.db')):
import sqlite3
if type(obj) != str:
raise IOError('sqlite-database must be decompressed before import')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with sqlite3.connect(obj) as con:
data = {}
cursor = con.cursor()
cursor.execute('SELECT name FROM sqlite_master WHERE type="table"')
tables = [t[0] for t in cursor.fetchall()]
for t in tables:
sql = 'SELECT * FROM ' + t
data[t] = pd.read_sql_query(sql, con, **params)
return data
else:
try:
data = {name: odo(obj, pd.DataFrame)}
if type(data[name]) == pd.DataFrame:
return data
except NotImplementedError:
pass
raise NotImplementedError('Error creating DataFrame from object', obj)
def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False):
"""Wraps pandas.IO & odo to create a dictionary of pandas.DataFrames from multiple different sources
Parameters
----------
path : str
Location of file, folder or zip-file to be parsed. Can include globbing (e.g. `*.csv`).
Can be remote with URI-notation beginning with e.g. http://, https://, file://, ftp://, s3:// and ssh://.
Can be odo-supported database (SQL, MongoDB, Hadoop, Spark) if dependencies are available.
Parser will be selected based on file extension.
cfg : dict or str, optional
Dictionary of kwargs to be provided to the pandas parser (http://pandas.pydata.org/pandas-docs/stable/api.html#input-output)
or str with path to YAML, that will be parsed.
Special keys:
**filters** : str or list of strings, optional. For a file to be processed, it must contain one of the Strings (e.g. ['.csv', '.tsv'])
**default** : kwargs to be used for every file
**custom_date_parser** : strptime-format string (https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior), generates a parser that used as the *date_parser* argument
If filename in keys, use kwargs from that key in addition to or overwriting *default* kwargs.
silent : boolean, optional
If True, doesn't print to stdout.
verbose : boolean, optional
If True, prints parsing arguments for each file processed to stdout.
raise_on_error : boolean, optional
Raise exception or only display warning, if a file cannot be parsed successfully.
return_errors : boolean, optional
If True, read() returns (data, errors) tuple instead of only data, with errors as a list of all files that could not be parsed.
Returns
-------
data : dict
Dictionary of parsed pandas.DataFrames, with file names as keys.
Notes
-----
- Start with basic cfg and tune until the desired parsing result is achieved.
- File extensions are critical to determine the parser, make sure they are *common*.
- Avoid files named 'default' or 'filters'.
- Avoid duplicate file names.
- Subfolders and file names beginning with '.' or '_' are ignored.
- If an https:// URI isn't correctly processed, try http:// instead.
- To connect to a database or s3-bucket, make sure the required dependencies like sqlalchemy, pymongo, pyspark or boto are available in the active environment.
"""
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
if not silent:
print('creating read.yml config file draft ...')
cfg = {'filters': ['.csv'], 'default': {'sep': ',', 'parse_dates': []}}
with open('local/read.yml', 'xt') as f:
yaml.dump(cfg, f)
yml = _read_yaml('local/read.yml')
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
else:
filters = []
cfg = yml
data = {}
errors = []
if not silent:
print('processing', path, '...')
for f in _path_to_objs(path):
if type(f) == str:
fname = os.path.basename(f)
elif type(f) == zipfile.ZipExtFile:
fname = f.name
else:
raise RuntimeError('_path_to_objs() returned unknown type', f)
data, errors = _read_append(data=data, errors=errors, path=f, fname=fname, \
cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if raise_on_error and data == {}:
raise IOError('path is invalid or empty')
if not silent:
print('imported {} DataFrames'.format(len(data)))
if len(data) > 0:
print('total memory usage: {}'.format(mem(data)))
if len(errors) > 0:
print('import errors in files: {}'.format(', '.join(errors)))
if return_errors:
return data, errors
else:
return data
def _read_append(data, errors, path, fname, cfg, raise_on_error, silent, verbose):
key = fname[fname.rfind('/') + 1:]
result = to_df(obj=path, cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if type(result) == dict:
if len(result) == 0:
errors.append(key)
# elif len(result) == 1:
# r = next(iter(result))
# data[r] = result[r]
else:
for r in result:
data['_'.join([key, r])] = result[r]
elif type(result) == type(None):
errors.append(key)
else:
data[key] = result
return data, errors
def preview(path, cfg={}, rows=5, silent=True, verbose=False, raise_on_error=False):
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
yml = {}
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
cfg = yml
if type(cfg) != dict:
cfg = {'default': {'nrows': rows}}
else:
if 'filters' in cfg:
filters = cfg['filters']
if type(filters) == str:
filters = [filters]
del cfg['filters']
if 'default' in cfg:
if type(cfg['default']) == dict:
cfg['default']['nrows'] = rows
else:
cfg['default'] = {'nrows': rows}
else:
cfg['default'] = {'nrows': rows}
if silent:
# if not silent, output will be generated from icy.read()
print('processing', path, '...')
prev, errors = read(path=path, cfg=cfg, silent=silent, verbose=verbose, \
raise_on_error=raise_on_error, return_errors=True)
for key in sorted(prev):
print('File: {}'.format(key))
print()
prev[key].info(verbose=True, memory_usage=True, null_counts=True)
print()
print('{:<20} | first {} VALUES'.format('COLUMN', rows))
print('-'*40)
for col in prev[key].columns:
print('{:<20} | {}'.format(col, str(list(prev[key][col].values)[:rows])))
print('='*40)
print('Successfully parsed first {} rows of {} files:'.format(rows, len(prev)))
print(', '.join(sorted(prev)))
if len(errors) > 0 and silent:
print()
print('Errors parsing files: {}'.format(', '.join(errors)))
print()
print('Try icy.preview(path, cfg, silent=False) for a more verbose output.')
return
def mem(data):
"""Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.).
"""
if type(data) == dict:
num = sum([data[k].memory_usage(index=True).sum() for k in data])
else:
num = data.memory_usage(index=True).sum()
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB')
def _read_yaml(path):
if os.path.isfile(path):
with open(path) as f:
return yaml.safe_load(f)
else:
return None
def merge(data, cfg=None):
""" WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
-----
"""
# go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
# pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
# should be easy to iterate from normalized tables to a fully joined set of dataframes
if type(cfg) == str:
cfg = _read_yaml(cfg)
if cfg == None:
cfg = _read_yaml('local/merge.yml')
if cfg == None:
print('creating merge.yml config file draft ...')
cfg = {}
# find all tables with identical column names
# if no common key-col
# concat along rows, add col (src)
# e.g. chimps
# find all tables with same length
# if no duplicate column names
# concat along columns
# find master table (by length?)
# from smalles to biggest table
# find possible key-cols by uniques == len
# find bigger tables with common column names -> cands
# check for highest overlap-ratio of uniques -> cand (prefer smaller table if equal ratio)
# join table on best cand
# if ratio below treshold put table on unidentified list
for key in data:
cfg[key] = list(data[key].columns)
with open('local/merge.yml', 'xt') as f:
yaml.dump(cfg, f)
cfg = _read_yaml('local/merge.yml')
# if cfg == None:
# if not os.path.exists(default_cfg):
# create default_cfg draft
# else:
# join on default_cfg
# report join_result
# else:
# join on cfg
# report join_result
labels = None
return data, labels
def _find_key_cols(df):
"""Identify columns in a DataFrame that could be a unique key"""
keys = []
for col in df:
if len(df[col].unique()) == len(df[col]):
keys.append(col)
return keys
def _dtparse(s, pattern):
return datetime.strptime(s, pattern)
class DtParser():
def __init__(self, pattern):
self.pattern = pattern
self.vfunc = np.vectorize(_dtparse)
def parse(self, s):
if type(s) == str:
return _dtparse(s, self.pattern)
elif type(s) == list:
return [_dtparse(e, self.pattern) for e in s]
elif type(s) == np.ndarray:
return self.vfunc(s, self.pattern)
def run_examples(examples):
"""Run read() on a number of examples, supress output, generate summary.
Parameters
----------
examples : list of tuples of three str elements
Tuples contain the path and cfg argument to the read function,
as well as the cfg argument to the merge function (*TODO*)
e.g. [(path, read_cfg, merge_cfg), (...)]
Returns
-------
None
Prints all results to stdout.
"""
import inspect
PATH_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath( \
inspect.getfile(inspect.currentframe()))), '../local/test_data')
print('running examples ...')
t0 = datetime.now()
results = [0, 0, 0]
for ex in sorted(examples):
t1 = datetime.now()
src, cfg, _ = examples[ex]
src = os.path.abspath(os.path.join(PATH_TEST_DATA, src))
if not os.path.isfile(src) and not os.path.isdir(src):
print('{} not a file'.format(src))
break
if type(cfg) == str:
cfg = os.path.abspath(os.path.join(PATH_TEST_DATA, cfg))
if not os.path.isfile(cfg):
print('{} not a file'.format(cfg))
break
try:
data = read(src, cfg=cfg, silent=True)
n_keys = len(data.keys())
if n_keys > 0:
print('data {:<15} [SUCCESS] {:.1f}s, {} dfs, {}'.format(
ex, (datetime.now()-t1).total_seconds(), n_keys, mem(data)))
results[0] += 1
else:
print('data {:<15} [NO IMPORT] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[1] += 1
except:
print('data {:<15} [EXCEPTION] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[2] += 1
print()
print('ran {} tests in {:.1f} seconds'.format(len(examples),
(datetime.now()-t0).total_seconds()))
print('{} success / {} no import / {} exception'.format(
str(results[0]), str(results[1]), str(results[2])))
if __name__ == '__main__':
run_examples(examples)
|
jorahn/icy | icy/icy.py | mem | python | def mem(data):
if type(data) == dict:
num = sum([data[k].memory_usage(index=True).sum() for k in data])
else:
num = data.memory_usage(index=True).sum()
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB') | Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.). | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/icy.py#L439-L460 | null | """
icy: Python 3 data wrangling glue code
--------------------------------------
saving time handling multiple different data sources
"""
import os
import sys
import re
import zipfile
import pandas as pd
import numpy as np
import yaml
from odo import odo
from glob import glob
from fnmatch import fnmatch
from datetime import datetime
from copy import deepcopy
from itertools import chain
from collections import namedtuple
from importlib.util import find_spec
examples = {
'artists': ('artists.zip', 'artists_read.yml', {}),
'babynames': ('babynames.zip', 'babynames_read.yml', {}),
'bank': ('bank.zip', 'bank_read.yml', {}),
'caterpillar': ('caterpillar.zip', 'caterpillar_read.yml', {}),
'churn': ('churn.zip', 'churn_read.yml', {}),
'comunio': ('comunio', {}, {}),
'crossdevice': ('crossdevice.zip', {}, {}),
'crunchbase': ('crunchbase', {}, {}),
'egg': ('egg', 'egg_read.yml', {}),
# 'fed': ('fed.zip', {}, {}),
'formats': ('formats', {}, {}),
'lahman': ('lahman.zip', 'lahman_read.yml', {}),
'nyse1': ('nyse_1.zip', 'nyse_1_read.yml', {}),
'nyse2': ('nyse_2.tsv.gz', 'nyse_2_read.yml', {}),
'nyt_title': ('nyt_title.zip', 'nyt_title_read.yml', {}),
'otto': ('otto.zip', {}, {}),
'spam': ('sms_spam.zip', 'sms_spam_read.yml', {}),
'titanic': ('titanic.zip', {}, {}),
'wikipedia': ('wikipedia_langs.zip', 'wikipedia_read.yml', {})
}
def _path_to_objs(path, include=['*', '.*'], exclude=['.*', '_*']):
"""Turn path with opt. globbing into valid list of files respecting
include and exclude patterns.
Parameters
----------
path : str
Path to process. Can be location of a file, folder or glob.
Can be in uri-notation, can be relative or absolute or start with ~.
include : list, optional
Globbing patterns to require in result, defaults to ['*', '.*'].
exclude : list, optional
Globbing patterns to exclude from result, defaults to ['.*', '_*'].
Returns
-------
objs : list
List of valid files
Notes
-----
- Doesn't show hidden files starting with '.' by default. To enable hidden files, make sure '.*' is in `include` and '.*' is not in `exclude`.
- Doesn't show files starting with '_' by default. To enable these files, make sure '_*' is not in `exclude`.
"""
if '://' in path:
# don't modify when path is in uri-notation, except for local files
if path.startswith('file://'):
path = path[7:]
else:
return [path]
path = os.path.abspath(os.path.expanduser(path))
if os.path.isfile(path):
if not path.lower().endswith(('.xlsx', '.xls')) and zipfile.is_zipfile(path):
# zipfile misidentifies xlsx as archive of xml files
with zipfile.ZipFile(path) as myzip:
zipped = []
for z in myzip.namelist():
z_fn = os.path.basename(z)
if z_fn != '' and any([fnmatch(z_fn, i) for i in include]) and \
not any([fnmatch(z_fn, e) for e in exclude]):
zipped.append(z)
return [myzip.open(z) for z in zipped]
else:
return [path]
elif os.path.isdir(path):
cands = [os.path.abspath(os.path.join(path, p)) for p in os.listdir(path)]
dirname = path
else:
cands = []
dirname = os.path.dirname(path)
include = list(chain.from_iterable(glob(os.path.join(dirname, i)) for i in include))
exclude = list(chain.from_iterable(glob(os.path.join(dirname, e)) for e in exclude))
objs = []
if cands == []:
cands = glob(path)
for p in cands:
if os.path.isfile(p) and p in include and not p in exclude:
objs.append(p)
zipped = [zipfile.is_zipfile(o) and not o.lower().endswith(('.xlsx', '.xls')) \
for o in objs]
toappend = []
todelete = []
for ix, o in enumerate(objs):
# if zipfile in objs replace zipfile with its contents
if zipped[ix]:
for new_o in _path_to_objs(o):
toappend.append(new_o)
todelete.append(ix)
shiftindex = 0
for d in todelete:
del objs[d - shiftindex]
shiftindex += 1
for new_o in toappend:
objs.append(new_o)
return objs
def to_df(obj, cfg={}, raise_on_error=True, silent=False, verbose=False):
"""Convert obj to pandas.DataFrame, determine parser from filename.
Falls back to odo, esp. for database uri's.
"""
if type(obj) == str:
name = obj
else:
name = obj.name
name = name[name.rfind('/') + 1:]
if not raise_on_error:
try:
return to_df(obj=obj, cfg=cfg, raise_on_error=True)
except (pd.parser.CParserError, AttributeError, ValueError, TypeError, IOError) as e:
if not silent:
print('WARNING in {}: {} {}'.format(name, e.__class__, e))
return None
except:
if not silent:
print('WARNING in {}: {}'.format(name, sys.exc_info()[0]))
return None
params = {}
if 'default' in cfg:
params = deepcopy(cfg['default'])
if name in cfg:
for e in cfg[name]:
params[e] = deepcopy(cfg[name][e])
if 'custom_date_parser' in params:
params['date_parser'] = DtParser(params['custom_date_parser']).parse
del params['custom_date_parser']
if verbose:
print(name, params)
if name.lower().startswith('s3:'):
if not find_spec('boto'):
raise ImportError('reading from aws-s3 requires the boto package to be installed')
if '.csv' in name.lower():
# name can be .csv.gz or .csv.bz2
return pd.read_csv(obj, **params)
elif '.tsv' in name.lower() or '.txt' in name.lower():
# name can be .tsv.gz or .txt.bz2
return pd.read_table(obj, **params)
elif name.lower().endswith(('.htm', '.html')):
if not find_spec('lxml'):
params['flavor'] = 'bs4'
if not find_spec('bs4') and not find_spec('html5lib'):
raise ImportError('reading html requires the lxml or bs4 + html5lib packages to be installed')
if 'nrows' in params:
del params['nrows']
if type(obj) == zipfile.ZipExtFile:
obj = obj.read()
data = pd.read_html(obj, **params)
data = {str(i): data[i] for i in range(len(data))}
return data
elif name.lower().endswith('.xml'):
if 'nrows' in params:
del params['nrows']
from icy.utils import xml_to_json
with open(obj) as f:
json = xml_to_json(str(f.read()))
return pd.read_json(json, **params)
elif name.lower().endswith('.json'):
if 'nrows' in params:
del params['nrows']
return pd.read_json(obj, **params)
elif name.lower().endswith(('.xls', '.xlsx')):
if not find_spec('xlrd'):
raise ImportError('reading excel files requires the xlrd package to be installed')
if 'nrows' in params:
del params['nrows']
data = {}
xls = pd.ExcelFile(obj)
for key in xls.sheet_names:
data[key] = xls.parse(key, **params)
return data
elif name.lower().endswith(('.h5', '.hdf5')):
if not find_spec('tables'):
raise ImportError('reading hdf5 files requires the pytables package to be installed')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with pd.HDFStore(obj) as store:
data = {}
for key in store.keys():
data[key[1:]] = store[key]
return data
elif name.lower().endswith(('.sqlite', '.sql', '.db')):
import sqlite3
if type(obj) != str:
raise IOError('sqlite-database must be decompressed before import')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with sqlite3.connect(obj) as con:
data = {}
cursor = con.cursor()
cursor.execute('SELECT name FROM sqlite_master WHERE type="table"')
tables = [t[0] for t in cursor.fetchall()]
for t in tables:
sql = 'SELECT * FROM ' + t
data[t] = pd.read_sql_query(sql, con, **params)
return data
else:
try:
data = {name: odo(obj, pd.DataFrame)}
if type(data[name]) == pd.DataFrame:
return data
except NotImplementedError:
pass
raise NotImplementedError('Error creating DataFrame from object', obj)
def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False):
"""Wraps pandas.IO & odo to create a dictionary of pandas.DataFrames from multiple different sources
Parameters
----------
path : str
Location of file, folder or zip-file to be parsed. Can include globbing (e.g. `*.csv`).
Can be remote with URI-notation beginning with e.g. http://, https://, file://, ftp://, s3:// and ssh://.
Can be odo-supported database (SQL, MongoDB, Hadoop, Spark) if dependencies are available.
Parser will be selected based on file extension.
cfg : dict or str, optional
Dictionary of kwargs to be provided to the pandas parser (http://pandas.pydata.org/pandas-docs/stable/api.html#input-output)
or str with path to YAML, that will be parsed.
Special keys:
**filters** : str or list of strings, optional. For a file to be processed, it must contain one of the Strings (e.g. ['.csv', '.tsv'])
**default** : kwargs to be used for every file
**custom_date_parser** : strptime-format string (https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior), generates a parser that used as the *date_parser* argument
If filename in keys, use kwargs from that key in addition to or overwriting *default* kwargs.
silent : boolean, optional
If True, doesn't print to stdout.
verbose : boolean, optional
If True, prints parsing arguments for each file processed to stdout.
raise_on_error : boolean, optional
Raise exception or only display warning, if a file cannot be parsed successfully.
return_errors : boolean, optional
If True, read() returns (data, errors) tuple instead of only data, with errors as a list of all files that could not be parsed.
Returns
-------
data : dict
Dictionary of parsed pandas.DataFrames, with file names as keys.
Notes
-----
- Start with basic cfg and tune until the desired parsing result is achieved.
- File extensions are critical to determine the parser, make sure they are *common*.
- Avoid files named 'default' or 'filters'.
- Avoid duplicate file names.
- Subfolders and file names beginning with '.' or '_' are ignored.
- If an https:// URI isn't correctly processed, try http:// instead.
- To connect to a database or s3-bucket, make sure the required dependencies like sqlalchemy, pymongo, pyspark or boto are available in the active environment.
"""
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
if not silent:
print('creating read.yml config file draft ...')
cfg = {'filters': ['.csv'], 'default': {'sep': ',', 'parse_dates': []}}
with open('local/read.yml', 'xt') as f:
yaml.dump(cfg, f)
yml = _read_yaml('local/read.yml')
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
else:
filters = []
cfg = yml
data = {}
errors = []
if not silent:
print('processing', path, '...')
for f in _path_to_objs(path):
if type(f) == str:
fname = os.path.basename(f)
elif type(f) == zipfile.ZipExtFile:
fname = f.name
else:
raise RuntimeError('_path_to_objs() returned unknown type', f)
data, errors = _read_append(data=data, errors=errors, path=f, fname=fname, \
cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if raise_on_error and data == {}:
raise IOError('path is invalid or empty')
if not silent:
print('imported {} DataFrames'.format(len(data)))
if len(data) > 0:
print('total memory usage: {}'.format(mem(data)))
if len(errors) > 0:
print('import errors in files: {}'.format(', '.join(errors)))
if return_errors:
return data, errors
else:
return data
def _read_append(data, errors, path, fname, cfg, raise_on_error, silent, verbose):
key = fname[fname.rfind('/') + 1:]
result = to_df(obj=path, cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if type(result) == dict:
if len(result) == 0:
errors.append(key)
# elif len(result) == 1:
# r = next(iter(result))
# data[r] = result[r]
else:
for r in result:
data['_'.join([key, r])] = result[r]
elif type(result) == type(None):
errors.append(key)
else:
data[key] = result
return data, errors
def preview(path, cfg={}, rows=5, silent=True, verbose=False, raise_on_error=False):
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
yml = {}
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
cfg = yml
if type(cfg) != dict:
cfg = {'default': {'nrows': rows}}
else:
if 'filters' in cfg:
filters = cfg['filters']
if type(filters) == str:
filters = [filters]
del cfg['filters']
if 'default' in cfg:
if type(cfg['default']) == dict:
cfg['default']['nrows'] = rows
else:
cfg['default'] = {'nrows': rows}
else:
cfg['default'] = {'nrows': rows}
if silent:
# if not silent, output will be generated from icy.read()
print('processing', path, '...')
prev, errors = read(path=path, cfg=cfg, silent=silent, verbose=verbose, \
raise_on_error=raise_on_error, return_errors=True)
for key in sorted(prev):
print('File: {}'.format(key))
print()
prev[key].info(verbose=True, memory_usage=True, null_counts=True)
print()
print('{:<20} | first {} VALUES'.format('COLUMN', rows))
print('-'*40)
for col in prev[key].columns:
print('{:<20} | {}'.format(col, str(list(prev[key][col].values)[:rows])))
print('='*40)
print('Successfully parsed first {} rows of {} files:'.format(rows, len(prev)))
print(', '.join(sorted(prev)))
if len(errors) > 0 and silent:
print()
print('Errors parsing files: {}'.format(', '.join(errors)))
print()
print('Try icy.preview(path, cfg, silent=False) for a more verbose output.')
return
def mem(data):
"""Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.).
"""
if type(data) == dict:
num = sum([data[k].memory_usage(index=True).sum() for k in data])
else:
num = data.memory_usage(index=True).sum()
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB')
def _read_yaml(path):
if os.path.isfile(path):
with open(path) as f:
return yaml.safe_load(f)
else:
return None
def merge(data, cfg=None):
""" WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
-----
"""
# go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
# pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
# should be easy to iterate from normalized tables to a fully joined set of dataframes
if type(cfg) == str:
cfg = _read_yaml(cfg)
if cfg == None:
cfg = _read_yaml('local/merge.yml')
if cfg == None:
print('creating merge.yml config file draft ...')
cfg = {}
# find all tables with identical column names
# if no common key-col
# concat along rows, add col (src)
# e.g. chimps
# find all tables with same length
# if no duplicate column names
# concat along columns
# find master table (by length?)
# from smalles to biggest table
# find possible key-cols by uniques == len
# find bigger tables with common column names -> cands
# check for highest overlap-ratio of uniques -> cand (prefer smaller table if equal ratio)
# join table on best cand
# if ratio below treshold put table on unidentified list
for key in data:
cfg[key] = list(data[key].columns)
with open('local/merge.yml', 'xt') as f:
yaml.dump(cfg, f)
cfg = _read_yaml('local/merge.yml')
# if cfg == None:
# if not os.path.exists(default_cfg):
# create default_cfg draft
# else:
# join on default_cfg
# report join_result
# else:
# join on cfg
# report join_result
labels = None
return data, labels
def _find_key_cols(df):
"""Identify columns in a DataFrame that could be a unique key"""
keys = []
for col in df:
if len(df[col].unique()) == len(df[col]):
keys.append(col)
return keys
def _dtparse(s, pattern):
return datetime.strptime(s, pattern)
class DtParser():
def __init__(self, pattern):
self.pattern = pattern
self.vfunc = np.vectorize(_dtparse)
def parse(self, s):
if type(s) == str:
return _dtparse(s, self.pattern)
elif type(s) == list:
return [_dtparse(e, self.pattern) for e in s]
elif type(s) == np.ndarray:
return self.vfunc(s, self.pattern)
def run_examples(examples):
"""Run read() on a number of examples, supress output, generate summary.
Parameters
----------
examples : list of tuples of three str elements
Tuples contain the path and cfg argument to the read function,
as well as the cfg argument to the merge function (*TODO*)
e.g. [(path, read_cfg, merge_cfg), (...)]
Returns
-------
None
Prints all results to stdout.
"""
import inspect
PATH_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath( \
inspect.getfile(inspect.currentframe()))), '../local/test_data')
print('running examples ...')
t0 = datetime.now()
results = [0, 0, 0]
for ex in sorted(examples):
t1 = datetime.now()
src, cfg, _ = examples[ex]
src = os.path.abspath(os.path.join(PATH_TEST_DATA, src))
if not os.path.isfile(src) and not os.path.isdir(src):
print('{} not a file'.format(src))
break
if type(cfg) == str:
cfg = os.path.abspath(os.path.join(PATH_TEST_DATA, cfg))
if not os.path.isfile(cfg):
print('{} not a file'.format(cfg))
break
try:
data = read(src, cfg=cfg, silent=True)
n_keys = len(data.keys())
if n_keys > 0:
print('data {:<15} [SUCCESS] {:.1f}s, {} dfs, {}'.format(
ex, (datetime.now()-t1).total_seconds(), n_keys, mem(data)))
results[0] += 1
else:
print('data {:<15} [NO IMPORT] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[1] += 1
except:
print('data {:<15} [EXCEPTION] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[2] += 1
print()
print('ran {} tests in {:.1f} seconds'.format(len(examples),
(datetime.now()-t0).total_seconds()))
print('{} success / {} no import / {} exception'.format(
str(results[0]), str(results[1]), str(results[2])))
if __name__ == '__main__':
run_examples(examples)
|
jorahn/icy | icy/icy.py | merge | python | def merge(data, cfg=None):
# go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
# pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
# should be easy to iterate from normalized tables to a fully joined set of dataframes
if type(cfg) == str:
cfg = _read_yaml(cfg)
if cfg == None:
cfg = _read_yaml('local/merge.yml')
if cfg == None:
print('creating merge.yml config file draft ...')
cfg = {}
# find all tables with identical column names
# if no common key-col
# concat along rows, add col (src)
# e.g. chimps
# find all tables with same length
# if no duplicate column names
# concat along columns
# find master table (by length?)
# from smalles to biggest table
# find possible key-cols by uniques == len
# find bigger tables with common column names -> cands
# check for highest overlap-ratio of uniques -> cand (prefer smaller table if equal ratio)
# join table on best cand
# if ratio below treshold put table on unidentified list
for key in data:
cfg[key] = list(data[key].columns)
with open('local/merge.yml', 'xt') as f:
yaml.dump(cfg, f)
cfg = _read_yaml('local/merge.yml')
# if cfg == None:
# if not os.path.exists(default_cfg):
# create default_cfg draft
# else:
# join on default_cfg
# report join_result
# else:
# join on cfg
# report join_result
labels = None
return data, labels | WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
----- | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/icy.py#L469-L541 | [
"def _read_yaml(path):\n if os.path.isfile(path):\n with open(path) as f:\n return yaml.safe_load(f)\n else:\n return None\n"
] | """
icy: Python 3 data wrangling glue code
--------------------------------------
saving time handling multiple different data sources
"""
import os
import sys
import re
import zipfile
import pandas as pd
import numpy as np
import yaml
from odo import odo
from glob import glob
from fnmatch import fnmatch
from datetime import datetime
from copy import deepcopy
from itertools import chain
from collections import namedtuple
from importlib.util import find_spec
examples = {
'artists': ('artists.zip', 'artists_read.yml', {}),
'babynames': ('babynames.zip', 'babynames_read.yml', {}),
'bank': ('bank.zip', 'bank_read.yml', {}),
'caterpillar': ('caterpillar.zip', 'caterpillar_read.yml', {}),
'churn': ('churn.zip', 'churn_read.yml', {}),
'comunio': ('comunio', {}, {}),
'crossdevice': ('crossdevice.zip', {}, {}),
'crunchbase': ('crunchbase', {}, {}),
'egg': ('egg', 'egg_read.yml', {}),
# 'fed': ('fed.zip', {}, {}),
'formats': ('formats', {}, {}),
'lahman': ('lahman.zip', 'lahman_read.yml', {}),
'nyse1': ('nyse_1.zip', 'nyse_1_read.yml', {}),
'nyse2': ('nyse_2.tsv.gz', 'nyse_2_read.yml', {}),
'nyt_title': ('nyt_title.zip', 'nyt_title_read.yml', {}),
'otto': ('otto.zip', {}, {}),
'spam': ('sms_spam.zip', 'sms_spam_read.yml', {}),
'titanic': ('titanic.zip', {}, {}),
'wikipedia': ('wikipedia_langs.zip', 'wikipedia_read.yml', {})
}
def _path_to_objs(path, include=['*', '.*'], exclude=['.*', '_*']):
"""Turn path with opt. globbing into valid list of files respecting
include and exclude patterns.
Parameters
----------
path : str
Path to process. Can be location of a file, folder or glob.
Can be in uri-notation, can be relative or absolute or start with ~.
include : list, optional
Globbing patterns to require in result, defaults to ['*', '.*'].
exclude : list, optional
Globbing patterns to exclude from result, defaults to ['.*', '_*'].
Returns
-------
objs : list
List of valid files
Notes
-----
- Doesn't show hidden files starting with '.' by default. To enable hidden files, make sure '.*' is in `include` and '.*' is not in `exclude`.
- Doesn't show files starting with '_' by default. To enable these files, make sure '_*' is not in `exclude`.
"""
if '://' in path:
# don't modify when path is in uri-notation, except for local files
if path.startswith('file://'):
path = path[7:]
else:
return [path]
path = os.path.abspath(os.path.expanduser(path))
if os.path.isfile(path):
if not path.lower().endswith(('.xlsx', '.xls')) and zipfile.is_zipfile(path):
# zipfile misidentifies xlsx as archive of xml files
with zipfile.ZipFile(path) as myzip:
zipped = []
for z in myzip.namelist():
z_fn = os.path.basename(z)
if z_fn != '' and any([fnmatch(z_fn, i) for i in include]) and \
not any([fnmatch(z_fn, e) for e in exclude]):
zipped.append(z)
return [myzip.open(z) for z in zipped]
else:
return [path]
elif os.path.isdir(path):
cands = [os.path.abspath(os.path.join(path, p)) for p in os.listdir(path)]
dirname = path
else:
cands = []
dirname = os.path.dirname(path)
include = list(chain.from_iterable(glob(os.path.join(dirname, i)) for i in include))
exclude = list(chain.from_iterable(glob(os.path.join(dirname, e)) for e in exclude))
objs = []
if cands == []:
cands = glob(path)
for p in cands:
if os.path.isfile(p) and p in include and not p in exclude:
objs.append(p)
zipped = [zipfile.is_zipfile(o) and not o.lower().endswith(('.xlsx', '.xls')) \
for o in objs]
toappend = []
todelete = []
for ix, o in enumerate(objs):
# if zipfile in objs replace zipfile with its contents
if zipped[ix]:
for new_o in _path_to_objs(o):
toappend.append(new_o)
todelete.append(ix)
shiftindex = 0
for d in todelete:
del objs[d - shiftindex]
shiftindex += 1
for new_o in toappend:
objs.append(new_o)
return objs
def to_df(obj, cfg={}, raise_on_error=True, silent=False, verbose=False):
"""Convert obj to pandas.DataFrame, determine parser from filename.
Falls back to odo, esp. for database uri's.
"""
if type(obj) == str:
name = obj
else:
name = obj.name
name = name[name.rfind('/') + 1:]
if not raise_on_error:
try:
return to_df(obj=obj, cfg=cfg, raise_on_error=True)
except (pd.parser.CParserError, AttributeError, ValueError, TypeError, IOError) as e:
if not silent:
print('WARNING in {}: {} {}'.format(name, e.__class__, e))
return None
except:
if not silent:
print('WARNING in {}: {}'.format(name, sys.exc_info()[0]))
return None
params = {}
if 'default' in cfg:
params = deepcopy(cfg['default'])
if name in cfg:
for e in cfg[name]:
params[e] = deepcopy(cfg[name][e])
if 'custom_date_parser' in params:
params['date_parser'] = DtParser(params['custom_date_parser']).parse
del params['custom_date_parser']
if verbose:
print(name, params)
if name.lower().startswith('s3:'):
if not find_spec('boto'):
raise ImportError('reading from aws-s3 requires the boto package to be installed')
if '.csv' in name.lower():
# name can be .csv.gz or .csv.bz2
return pd.read_csv(obj, **params)
elif '.tsv' in name.lower() or '.txt' in name.lower():
# name can be .tsv.gz or .txt.bz2
return pd.read_table(obj, **params)
elif name.lower().endswith(('.htm', '.html')):
if not find_spec('lxml'):
params['flavor'] = 'bs4'
if not find_spec('bs4') and not find_spec('html5lib'):
raise ImportError('reading html requires the lxml or bs4 + html5lib packages to be installed')
if 'nrows' in params:
del params['nrows']
if type(obj) == zipfile.ZipExtFile:
obj = obj.read()
data = pd.read_html(obj, **params)
data = {str(i): data[i] for i in range(len(data))}
return data
elif name.lower().endswith('.xml'):
if 'nrows' in params:
del params['nrows']
from icy.utils import xml_to_json
with open(obj) as f:
json = xml_to_json(str(f.read()))
return pd.read_json(json, **params)
elif name.lower().endswith('.json'):
if 'nrows' in params:
del params['nrows']
return pd.read_json(obj, **params)
elif name.lower().endswith(('.xls', '.xlsx')):
if not find_spec('xlrd'):
raise ImportError('reading excel files requires the xlrd package to be installed')
if 'nrows' in params:
del params['nrows']
data = {}
xls = pd.ExcelFile(obj)
for key in xls.sheet_names:
data[key] = xls.parse(key, **params)
return data
elif name.lower().endswith(('.h5', '.hdf5')):
if not find_spec('tables'):
raise ImportError('reading hdf5 files requires the pytables package to be installed')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with pd.HDFStore(obj) as store:
data = {}
for key in store.keys():
data[key[1:]] = store[key]
return data
elif name.lower().endswith(('.sqlite', '.sql', '.db')):
import sqlite3
if type(obj) != str:
raise IOError('sqlite-database must be decompressed before import')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with sqlite3.connect(obj) as con:
data = {}
cursor = con.cursor()
cursor.execute('SELECT name FROM sqlite_master WHERE type="table"')
tables = [t[0] for t in cursor.fetchall()]
for t in tables:
sql = 'SELECT * FROM ' + t
data[t] = pd.read_sql_query(sql, con, **params)
return data
else:
try:
data = {name: odo(obj, pd.DataFrame)}
if type(data[name]) == pd.DataFrame:
return data
except NotImplementedError:
pass
raise NotImplementedError('Error creating DataFrame from object', obj)
def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False):
"""Wraps pandas.IO & odo to create a dictionary of pandas.DataFrames from multiple different sources
Parameters
----------
path : str
Location of file, folder or zip-file to be parsed. Can include globbing (e.g. `*.csv`).
Can be remote with URI-notation beginning with e.g. http://, https://, file://, ftp://, s3:// and ssh://.
Can be odo-supported database (SQL, MongoDB, Hadoop, Spark) if dependencies are available.
Parser will be selected based on file extension.
cfg : dict or str, optional
Dictionary of kwargs to be provided to the pandas parser (http://pandas.pydata.org/pandas-docs/stable/api.html#input-output)
or str with path to YAML, that will be parsed.
Special keys:
**filters** : str or list of strings, optional. For a file to be processed, it must contain one of the Strings (e.g. ['.csv', '.tsv'])
**default** : kwargs to be used for every file
**custom_date_parser** : strptime-format string (https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior), generates a parser that used as the *date_parser* argument
If filename in keys, use kwargs from that key in addition to or overwriting *default* kwargs.
silent : boolean, optional
If True, doesn't print to stdout.
verbose : boolean, optional
If True, prints parsing arguments for each file processed to stdout.
raise_on_error : boolean, optional
Raise exception or only display warning, if a file cannot be parsed successfully.
return_errors : boolean, optional
If True, read() returns (data, errors) tuple instead of only data, with errors as a list of all files that could not be parsed.
Returns
-------
data : dict
Dictionary of parsed pandas.DataFrames, with file names as keys.
Notes
-----
- Start with basic cfg and tune until the desired parsing result is achieved.
- File extensions are critical to determine the parser, make sure they are *common*.
- Avoid files named 'default' or 'filters'.
- Avoid duplicate file names.
- Subfolders and file names beginning with '.' or '_' are ignored.
- If an https:// URI isn't correctly processed, try http:// instead.
- To connect to a database or s3-bucket, make sure the required dependencies like sqlalchemy, pymongo, pyspark or boto are available in the active environment.
"""
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
if not silent:
print('creating read.yml config file draft ...')
cfg = {'filters': ['.csv'], 'default': {'sep': ',', 'parse_dates': []}}
with open('local/read.yml', 'xt') as f:
yaml.dump(cfg, f)
yml = _read_yaml('local/read.yml')
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
else:
filters = []
cfg = yml
data = {}
errors = []
if not silent:
print('processing', path, '...')
for f in _path_to_objs(path):
if type(f) == str:
fname = os.path.basename(f)
elif type(f) == zipfile.ZipExtFile:
fname = f.name
else:
raise RuntimeError('_path_to_objs() returned unknown type', f)
data, errors = _read_append(data=data, errors=errors, path=f, fname=fname, \
cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if raise_on_error and data == {}:
raise IOError('path is invalid or empty')
if not silent:
print('imported {} DataFrames'.format(len(data)))
if len(data) > 0:
print('total memory usage: {}'.format(mem(data)))
if len(errors) > 0:
print('import errors in files: {}'.format(', '.join(errors)))
if return_errors:
return data, errors
else:
return data
def _read_append(data, errors, path, fname, cfg, raise_on_error, silent, verbose):
key = fname[fname.rfind('/') + 1:]
result = to_df(obj=path, cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if type(result) == dict:
if len(result) == 0:
errors.append(key)
# elif len(result) == 1:
# r = next(iter(result))
# data[r] = result[r]
else:
for r in result:
data['_'.join([key, r])] = result[r]
elif type(result) == type(None):
errors.append(key)
else:
data[key] = result
return data, errors
def preview(path, cfg={}, rows=5, silent=True, verbose=False, raise_on_error=False):
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
yml = {}
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
cfg = yml
if type(cfg) != dict:
cfg = {'default': {'nrows': rows}}
else:
if 'filters' in cfg:
filters = cfg['filters']
if type(filters) == str:
filters = [filters]
del cfg['filters']
if 'default' in cfg:
if type(cfg['default']) == dict:
cfg['default']['nrows'] = rows
else:
cfg['default'] = {'nrows': rows}
else:
cfg['default'] = {'nrows': rows}
if silent:
# if not silent, output will be generated from icy.read()
print('processing', path, '...')
prev, errors = read(path=path, cfg=cfg, silent=silent, verbose=verbose, \
raise_on_error=raise_on_error, return_errors=True)
for key in sorted(prev):
print('File: {}'.format(key))
print()
prev[key].info(verbose=True, memory_usage=True, null_counts=True)
print()
print('{:<20} | first {} VALUES'.format('COLUMN', rows))
print('-'*40)
for col in prev[key].columns:
print('{:<20} | {}'.format(col, str(list(prev[key][col].values)[:rows])))
print('='*40)
print('Successfully parsed first {} rows of {} files:'.format(rows, len(prev)))
print(', '.join(sorted(prev)))
if len(errors) > 0 and silent:
print()
print('Errors parsing files: {}'.format(', '.join(errors)))
print()
print('Try icy.preview(path, cfg, silent=False) for a more verbose output.')
return
def mem(data):
"""Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.).
"""
if type(data) == dict:
num = sum([data[k].memory_usage(index=True).sum() for k in data])
else:
num = data.memory_usage(index=True).sum()
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB')
def _read_yaml(path):
if os.path.isfile(path):
with open(path) as f:
return yaml.safe_load(f)
else:
return None
def merge(data, cfg=None):
""" WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
-----
"""
# go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
# pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
# should be easy to iterate from normalized tables to a fully joined set of dataframes
if type(cfg) == str:
cfg = _read_yaml(cfg)
if cfg == None:
cfg = _read_yaml('local/merge.yml')
if cfg == None:
print('creating merge.yml config file draft ...')
cfg = {}
# find all tables with identical column names
# if no common key-col
# concat along rows, add col (src)
# e.g. chimps
# find all tables with same length
# if no duplicate column names
# concat along columns
# find master table (by length?)
# from smalles to biggest table
# find possible key-cols by uniques == len
# find bigger tables with common column names -> cands
# check for highest overlap-ratio of uniques -> cand (prefer smaller table if equal ratio)
# join table on best cand
# if ratio below treshold put table on unidentified list
for key in data:
cfg[key] = list(data[key].columns)
with open('local/merge.yml', 'xt') as f:
yaml.dump(cfg, f)
cfg = _read_yaml('local/merge.yml')
# if cfg == None:
# if not os.path.exists(default_cfg):
# create default_cfg draft
# else:
# join on default_cfg
# report join_result
# else:
# join on cfg
# report join_result
labels = None
return data, labels
def _find_key_cols(df):
"""Identify columns in a DataFrame that could be a unique key"""
keys = []
for col in df:
if len(df[col].unique()) == len(df[col]):
keys.append(col)
return keys
def _dtparse(s, pattern):
return datetime.strptime(s, pattern)
class DtParser():
def __init__(self, pattern):
self.pattern = pattern
self.vfunc = np.vectorize(_dtparse)
def parse(self, s):
if type(s) == str:
return _dtparse(s, self.pattern)
elif type(s) == list:
return [_dtparse(e, self.pattern) for e in s]
elif type(s) == np.ndarray:
return self.vfunc(s, self.pattern)
def run_examples(examples):
"""Run read() on a number of examples, supress output, generate summary.
Parameters
----------
examples : list of tuples of three str elements
Tuples contain the path and cfg argument to the read function,
as well as the cfg argument to the merge function (*TODO*)
e.g. [(path, read_cfg, merge_cfg), (...)]
Returns
-------
None
Prints all results to stdout.
"""
import inspect
PATH_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath( \
inspect.getfile(inspect.currentframe()))), '../local/test_data')
print('running examples ...')
t0 = datetime.now()
results = [0, 0, 0]
for ex in sorted(examples):
t1 = datetime.now()
src, cfg, _ = examples[ex]
src = os.path.abspath(os.path.join(PATH_TEST_DATA, src))
if not os.path.isfile(src) and not os.path.isdir(src):
print('{} not a file'.format(src))
break
if type(cfg) == str:
cfg = os.path.abspath(os.path.join(PATH_TEST_DATA, cfg))
if not os.path.isfile(cfg):
print('{} not a file'.format(cfg))
break
try:
data = read(src, cfg=cfg, silent=True)
n_keys = len(data.keys())
if n_keys > 0:
print('data {:<15} [SUCCESS] {:.1f}s, {} dfs, {}'.format(
ex, (datetime.now()-t1).total_seconds(), n_keys, mem(data)))
results[0] += 1
else:
print('data {:<15} [NO IMPORT] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[1] += 1
except:
print('data {:<15} [EXCEPTION] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[2] += 1
print()
print('ran {} tests in {:.1f} seconds'.format(len(examples),
(datetime.now()-t0).total_seconds()))
print('{} success / {} no import / {} exception'.format(
str(results[0]), str(results[1]), str(results[2])))
if __name__ == '__main__':
run_examples(examples)
|
jorahn/icy | icy/icy.py | _find_key_cols | python | def _find_key_cols(df):
keys = []
for col in df:
if len(df[col].unique()) == len(df[col]):
keys.append(col)
return keys | Identify columns in a DataFrame that could be a unique key | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/icy.py#L543-L550 | null | """
icy: Python 3 data wrangling glue code
--------------------------------------
saving time handling multiple different data sources
"""
import os
import sys
import re
import zipfile
import pandas as pd
import numpy as np
import yaml
from odo import odo
from glob import glob
from fnmatch import fnmatch
from datetime import datetime
from copy import deepcopy
from itertools import chain
from collections import namedtuple
from importlib.util import find_spec
examples = {
'artists': ('artists.zip', 'artists_read.yml', {}),
'babynames': ('babynames.zip', 'babynames_read.yml', {}),
'bank': ('bank.zip', 'bank_read.yml', {}),
'caterpillar': ('caterpillar.zip', 'caterpillar_read.yml', {}),
'churn': ('churn.zip', 'churn_read.yml', {}),
'comunio': ('comunio', {}, {}),
'crossdevice': ('crossdevice.zip', {}, {}),
'crunchbase': ('crunchbase', {}, {}),
'egg': ('egg', 'egg_read.yml', {}),
# 'fed': ('fed.zip', {}, {}),
'formats': ('formats', {}, {}),
'lahman': ('lahman.zip', 'lahman_read.yml', {}),
'nyse1': ('nyse_1.zip', 'nyse_1_read.yml', {}),
'nyse2': ('nyse_2.tsv.gz', 'nyse_2_read.yml', {}),
'nyt_title': ('nyt_title.zip', 'nyt_title_read.yml', {}),
'otto': ('otto.zip', {}, {}),
'spam': ('sms_spam.zip', 'sms_spam_read.yml', {}),
'titanic': ('titanic.zip', {}, {}),
'wikipedia': ('wikipedia_langs.zip', 'wikipedia_read.yml', {})
}
def _path_to_objs(path, include=['*', '.*'], exclude=['.*', '_*']):
"""Turn path with opt. globbing into valid list of files respecting
include and exclude patterns.
Parameters
----------
path : str
Path to process. Can be location of a file, folder or glob.
Can be in uri-notation, can be relative or absolute or start with ~.
include : list, optional
Globbing patterns to require in result, defaults to ['*', '.*'].
exclude : list, optional
Globbing patterns to exclude from result, defaults to ['.*', '_*'].
Returns
-------
objs : list
List of valid files
Notes
-----
- Doesn't show hidden files starting with '.' by default. To enable hidden files, make sure '.*' is in `include` and '.*' is not in `exclude`.
- Doesn't show files starting with '_' by default. To enable these files, make sure '_*' is not in `exclude`.
"""
if '://' in path:
# don't modify when path is in uri-notation, except for local files
if path.startswith('file://'):
path = path[7:]
else:
return [path]
path = os.path.abspath(os.path.expanduser(path))
if os.path.isfile(path):
if not path.lower().endswith(('.xlsx', '.xls')) and zipfile.is_zipfile(path):
# zipfile misidentifies xlsx as archive of xml files
with zipfile.ZipFile(path) as myzip:
zipped = []
for z in myzip.namelist():
z_fn = os.path.basename(z)
if z_fn != '' and any([fnmatch(z_fn, i) for i in include]) and \
not any([fnmatch(z_fn, e) for e in exclude]):
zipped.append(z)
return [myzip.open(z) for z in zipped]
else:
return [path]
elif os.path.isdir(path):
cands = [os.path.abspath(os.path.join(path, p)) for p in os.listdir(path)]
dirname = path
else:
cands = []
dirname = os.path.dirname(path)
include = list(chain.from_iterable(glob(os.path.join(dirname, i)) for i in include))
exclude = list(chain.from_iterable(glob(os.path.join(dirname, e)) for e in exclude))
objs = []
if cands == []:
cands = glob(path)
for p in cands:
if os.path.isfile(p) and p in include and not p in exclude:
objs.append(p)
zipped = [zipfile.is_zipfile(o) and not o.lower().endswith(('.xlsx', '.xls')) \
for o in objs]
toappend = []
todelete = []
for ix, o in enumerate(objs):
# if zipfile in objs replace zipfile with its contents
if zipped[ix]:
for new_o in _path_to_objs(o):
toappend.append(new_o)
todelete.append(ix)
shiftindex = 0
for d in todelete:
del objs[d - shiftindex]
shiftindex += 1
for new_o in toappend:
objs.append(new_o)
return objs
def to_df(obj, cfg={}, raise_on_error=True, silent=False, verbose=False):
"""Convert obj to pandas.DataFrame, determine parser from filename.
Falls back to odo, esp. for database uri's.
"""
if type(obj) == str:
name = obj
else:
name = obj.name
name = name[name.rfind('/') + 1:]
if not raise_on_error:
try:
return to_df(obj=obj, cfg=cfg, raise_on_error=True)
except (pd.parser.CParserError, AttributeError, ValueError, TypeError, IOError) as e:
if not silent:
print('WARNING in {}: {} {}'.format(name, e.__class__, e))
return None
except:
if not silent:
print('WARNING in {}: {}'.format(name, sys.exc_info()[0]))
return None
params = {}
if 'default' in cfg:
params = deepcopy(cfg['default'])
if name in cfg:
for e in cfg[name]:
params[e] = deepcopy(cfg[name][e])
if 'custom_date_parser' in params:
params['date_parser'] = DtParser(params['custom_date_parser']).parse
del params['custom_date_parser']
if verbose:
print(name, params)
if name.lower().startswith('s3:'):
if not find_spec('boto'):
raise ImportError('reading from aws-s3 requires the boto package to be installed')
if '.csv' in name.lower():
# name can be .csv.gz or .csv.bz2
return pd.read_csv(obj, **params)
elif '.tsv' in name.lower() or '.txt' in name.lower():
# name can be .tsv.gz or .txt.bz2
return pd.read_table(obj, **params)
elif name.lower().endswith(('.htm', '.html')):
if not find_spec('lxml'):
params['flavor'] = 'bs4'
if not find_spec('bs4') and not find_spec('html5lib'):
raise ImportError('reading html requires the lxml or bs4 + html5lib packages to be installed')
if 'nrows' in params:
del params['nrows']
if type(obj) == zipfile.ZipExtFile:
obj = obj.read()
data = pd.read_html(obj, **params)
data = {str(i): data[i] for i in range(len(data))}
return data
elif name.lower().endswith('.xml'):
if 'nrows' in params:
del params['nrows']
from icy.utils import xml_to_json
with open(obj) as f:
json = xml_to_json(str(f.read()))
return pd.read_json(json, **params)
elif name.lower().endswith('.json'):
if 'nrows' in params:
del params['nrows']
return pd.read_json(obj, **params)
elif name.lower().endswith(('.xls', '.xlsx')):
if not find_spec('xlrd'):
raise ImportError('reading excel files requires the xlrd package to be installed')
if 'nrows' in params:
del params['nrows']
data = {}
xls = pd.ExcelFile(obj)
for key in xls.sheet_names:
data[key] = xls.parse(key, **params)
return data
elif name.lower().endswith(('.h5', '.hdf5')):
if not find_spec('tables'):
raise ImportError('reading hdf5 files requires the pytables package to be installed')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with pd.HDFStore(obj) as store:
data = {}
for key in store.keys():
data[key[1:]] = store[key]
return data
elif name.lower().endswith(('.sqlite', '.sql', '.db')):
import sqlite3
if type(obj) != str:
raise IOError('sqlite-database must be decompressed before import')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with sqlite3.connect(obj) as con:
data = {}
cursor = con.cursor()
cursor.execute('SELECT name FROM sqlite_master WHERE type="table"')
tables = [t[0] for t in cursor.fetchall()]
for t in tables:
sql = 'SELECT * FROM ' + t
data[t] = pd.read_sql_query(sql, con, **params)
return data
else:
try:
data = {name: odo(obj, pd.DataFrame)}
if type(data[name]) == pd.DataFrame:
return data
except NotImplementedError:
pass
raise NotImplementedError('Error creating DataFrame from object', obj)
def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False):
"""Wraps pandas.IO & odo to create a dictionary of pandas.DataFrames from multiple different sources
Parameters
----------
path : str
Location of file, folder or zip-file to be parsed. Can include globbing (e.g. `*.csv`).
Can be remote with URI-notation beginning with e.g. http://, https://, file://, ftp://, s3:// and ssh://.
Can be odo-supported database (SQL, MongoDB, Hadoop, Spark) if dependencies are available.
Parser will be selected based on file extension.
cfg : dict or str, optional
Dictionary of kwargs to be provided to the pandas parser (http://pandas.pydata.org/pandas-docs/stable/api.html#input-output)
or str with path to YAML, that will be parsed.
Special keys:
**filters** : str or list of strings, optional. For a file to be processed, it must contain one of the Strings (e.g. ['.csv', '.tsv'])
**default** : kwargs to be used for every file
**custom_date_parser** : strptime-format string (https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior), generates a parser that used as the *date_parser* argument
If filename in keys, use kwargs from that key in addition to or overwriting *default* kwargs.
silent : boolean, optional
If True, doesn't print to stdout.
verbose : boolean, optional
If True, prints parsing arguments for each file processed to stdout.
raise_on_error : boolean, optional
Raise exception or only display warning, if a file cannot be parsed successfully.
return_errors : boolean, optional
If True, read() returns (data, errors) tuple instead of only data, with errors as a list of all files that could not be parsed.
Returns
-------
data : dict
Dictionary of parsed pandas.DataFrames, with file names as keys.
Notes
-----
- Start with basic cfg and tune until the desired parsing result is achieved.
- File extensions are critical to determine the parser, make sure they are *common*.
- Avoid files named 'default' or 'filters'.
- Avoid duplicate file names.
- Subfolders and file names beginning with '.' or '_' are ignored.
- If an https:// URI isn't correctly processed, try http:// instead.
- To connect to a database or s3-bucket, make sure the required dependencies like sqlalchemy, pymongo, pyspark or boto are available in the active environment.
"""
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
if not silent:
print('creating read.yml config file draft ...')
cfg = {'filters': ['.csv'], 'default': {'sep': ',', 'parse_dates': []}}
with open('local/read.yml', 'xt') as f:
yaml.dump(cfg, f)
yml = _read_yaml('local/read.yml')
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
else:
filters = []
cfg = yml
data = {}
errors = []
if not silent:
print('processing', path, '...')
for f in _path_to_objs(path):
if type(f) == str:
fname = os.path.basename(f)
elif type(f) == zipfile.ZipExtFile:
fname = f.name
else:
raise RuntimeError('_path_to_objs() returned unknown type', f)
data, errors = _read_append(data=data, errors=errors, path=f, fname=fname, \
cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if raise_on_error and data == {}:
raise IOError('path is invalid or empty')
if not silent:
print('imported {} DataFrames'.format(len(data)))
if len(data) > 0:
print('total memory usage: {}'.format(mem(data)))
if len(errors) > 0:
print('import errors in files: {}'.format(', '.join(errors)))
if return_errors:
return data, errors
else:
return data
def _read_append(data, errors, path, fname, cfg, raise_on_error, silent, verbose):
key = fname[fname.rfind('/') + 1:]
result = to_df(obj=path, cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if type(result) == dict:
if len(result) == 0:
errors.append(key)
# elif len(result) == 1:
# r = next(iter(result))
# data[r] = result[r]
else:
for r in result:
data['_'.join([key, r])] = result[r]
elif type(result) == type(None):
errors.append(key)
else:
data[key] = result
return data, errors
def preview(path, cfg={}, rows=5, silent=True, verbose=False, raise_on_error=False):
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
yml = {}
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
cfg = yml
if type(cfg) != dict:
cfg = {'default': {'nrows': rows}}
else:
if 'filters' in cfg:
filters = cfg['filters']
if type(filters) == str:
filters = [filters]
del cfg['filters']
if 'default' in cfg:
if type(cfg['default']) == dict:
cfg['default']['nrows'] = rows
else:
cfg['default'] = {'nrows': rows}
else:
cfg['default'] = {'nrows': rows}
if silent:
# if not silent, output will be generated from icy.read()
print('processing', path, '...')
prev, errors = read(path=path, cfg=cfg, silent=silent, verbose=verbose, \
raise_on_error=raise_on_error, return_errors=True)
for key in sorted(prev):
print('File: {}'.format(key))
print()
prev[key].info(verbose=True, memory_usage=True, null_counts=True)
print()
print('{:<20} | first {} VALUES'.format('COLUMN', rows))
print('-'*40)
for col in prev[key].columns:
print('{:<20} | {}'.format(col, str(list(prev[key][col].values)[:rows])))
print('='*40)
print('Successfully parsed first {} rows of {} files:'.format(rows, len(prev)))
print(', '.join(sorted(prev)))
if len(errors) > 0 and silent:
print()
print('Errors parsing files: {}'.format(', '.join(errors)))
print()
print('Try icy.preview(path, cfg, silent=False) for a more verbose output.')
return
def mem(data):
"""Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.).
"""
if type(data) == dict:
num = sum([data[k].memory_usage(index=True).sum() for k in data])
else:
num = data.memory_usage(index=True).sum()
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB')
def _read_yaml(path):
if os.path.isfile(path):
with open(path) as f:
return yaml.safe_load(f)
else:
return None
def merge(data, cfg=None):
""" WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
-----
"""
# go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
# pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
# should be easy to iterate from normalized tables to a fully joined set of dataframes
if type(cfg) == str:
cfg = _read_yaml(cfg)
if cfg == None:
cfg = _read_yaml('local/merge.yml')
if cfg == None:
print('creating merge.yml config file draft ...')
cfg = {}
# find all tables with identical column names
# if no common key-col
# concat along rows, add col (src)
# e.g. chimps
# find all tables with same length
# if no duplicate column names
# concat along columns
# find master table (by length?)
# from smalles to biggest table
# find possible key-cols by uniques == len
# find bigger tables with common column names -> cands
# check for highest overlap-ratio of uniques -> cand (prefer smaller table if equal ratio)
# join table on best cand
# if ratio below treshold put table on unidentified list
for key in data:
cfg[key] = list(data[key].columns)
with open('local/merge.yml', 'xt') as f:
yaml.dump(cfg, f)
cfg = _read_yaml('local/merge.yml')
# if cfg == None:
# if not os.path.exists(default_cfg):
# create default_cfg draft
# else:
# join on default_cfg
# report join_result
# else:
# join on cfg
# report join_result
labels = None
return data, labels
def _find_key_cols(df):
"""Identify columns in a DataFrame that could be a unique key"""
keys = []
for col in df:
if len(df[col].unique()) == len(df[col]):
keys.append(col)
return keys
def _dtparse(s, pattern):
return datetime.strptime(s, pattern)
class DtParser():
def __init__(self, pattern):
self.pattern = pattern
self.vfunc = np.vectorize(_dtparse)
def parse(self, s):
if type(s) == str:
return _dtparse(s, self.pattern)
elif type(s) == list:
return [_dtparse(e, self.pattern) for e in s]
elif type(s) == np.ndarray:
return self.vfunc(s, self.pattern)
def run_examples(examples):
"""Run read() on a number of examples, supress output, generate summary.
Parameters
----------
examples : list of tuples of three str elements
Tuples contain the path and cfg argument to the read function,
as well as the cfg argument to the merge function (*TODO*)
e.g. [(path, read_cfg, merge_cfg), (...)]
Returns
-------
None
Prints all results to stdout.
"""
import inspect
PATH_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath( \
inspect.getfile(inspect.currentframe()))), '../local/test_data')
print('running examples ...')
t0 = datetime.now()
results = [0, 0, 0]
for ex in sorted(examples):
t1 = datetime.now()
src, cfg, _ = examples[ex]
src = os.path.abspath(os.path.join(PATH_TEST_DATA, src))
if not os.path.isfile(src) and not os.path.isdir(src):
print('{} not a file'.format(src))
break
if type(cfg) == str:
cfg = os.path.abspath(os.path.join(PATH_TEST_DATA, cfg))
if not os.path.isfile(cfg):
print('{} not a file'.format(cfg))
break
try:
data = read(src, cfg=cfg, silent=True)
n_keys = len(data.keys())
if n_keys > 0:
print('data {:<15} [SUCCESS] {:.1f}s, {} dfs, {}'.format(
ex, (datetime.now()-t1).total_seconds(), n_keys, mem(data)))
results[0] += 1
else:
print('data {:<15} [NO IMPORT] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[1] += 1
except:
print('data {:<15} [EXCEPTION] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[2] += 1
print()
print('ran {} tests in {:.1f} seconds'.format(len(examples),
(datetime.now()-t0).total_seconds()))
print('{} success / {} no import / {} exception'.format(
str(results[0]), str(results[1]), str(results[2])))
if __name__ == '__main__':
run_examples(examples)
|
jorahn/icy | icy/icy.py | run_examples | python | def run_examples(examples):
import inspect
PATH_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath( \
inspect.getfile(inspect.currentframe()))), '../local/test_data')
print('running examples ...')
t0 = datetime.now()
results = [0, 0, 0]
for ex in sorted(examples):
t1 = datetime.now()
src, cfg, _ = examples[ex]
src = os.path.abspath(os.path.join(PATH_TEST_DATA, src))
if not os.path.isfile(src) and not os.path.isdir(src):
print('{} not a file'.format(src))
break
if type(cfg) == str:
cfg = os.path.abspath(os.path.join(PATH_TEST_DATA, cfg))
if not os.path.isfile(cfg):
print('{} not a file'.format(cfg))
break
try:
data = read(src, cfg=cfg, silent=True)
n_keys = len(data.keys())
if n_keys > 0:
print('data {:<15} [SUCCESS] {:.1f}s, {} dfs, {}'.format(
ex, (datetime.now()-t1).total_seconds(), n_keys, mem(data)))
results[0] += 1
else:
print('data {:<15} [NO IMPORT] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[1] += 1
except:
print('data {:<15} [EXCEPTION] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[2] += 1
print()
print('ran {} tests in {:.1f} seconds'.format(len(examples),
(datetime.now()-t0).total_seconds()))
print('{} success / {} no import / {} exception'.format(
str(results[0]), str(results[1]), str(results[2]))) | Run read() on a number of examples, supress output, generate summary.
Parameters
----------
examples : list of tuples of three str elements
Tuples contain the path and cfg argument to the read function,
as well as the cfg argument to the merge function (*TODO*)
e.g. [(path, read_cfg, merge_cfg), (...)]
Returns
-------
None
Prints all results to stdout. | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/icy.py#L568-L621 | [
"def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False):\n \"\"\"Wraps pandas.IO & odo to create a dictionary of pandas.DataFrames from multiple different sources\n\n Parameters\n ----------\n path : str\n Location of file, folder or zip-file to be parsed. ... | """
icy: Python 3 data wrangling glue code
--------------------------------------
saving time handling multiple different data sources
"""
import os
import sys
import re
import zipfile
import pandas as pd
import numpy as np
import yaml
from odo import odo
from glob import glob
from fnmatch import fnmatch
from datetime import datetime
from copy import deepcopy
from itertools import chain
from collections import namedtuple
from importlib.util import find_spec
examples = {
'artists': ('artists.zip', 'artists_read.yml', {}),
'babynames': ('babynames.zip', 'babynames_read.yml', {}),
'bank': ('bank.zip', 'bank_read.yml', {}),
'caterpillar': ('caterpillar.zip', 'caterpillar_read.yml', {}),
'churn': ('churn.zip', 'churn_read.yml', {}),
'comunio': ('comunio', {}, {}),
'crossdevice': ('crossdevice.zip', {}, {}),
'crunchbase': ('crunchbase', {}, {}),
'egg': ('egg', 'egg_read.yml', {}),
# 'fed': ('fed.zip', {}, {}),
'formats': ('formats', {}, {}),
'lahman': ('lahman.zip', 'lahman_read.yml', {}),
'nyse1': ('nyse_1.zip', 'nyse_1_read.yml', {}),
'nyse2': ('nyse_2.tsv.gz', 'nyse_2_read.yml', {}),
'nyt_title': ('nyt_title.zip', 'nyt_title_read.yml', {}),
'otto': ('otto.zip', {}, {}),
'spam': ('sms_spam.zip', 'sms_spam_read.yml', {}),
'titanic': ('titanic.zip', {}, {}),
'wikipedia': ('wikipedia_langs.zip', 'wikipedia_read.yml', {})
}
def _path_to_objs(path, include=['*', '.*'], exclude=['.*', '_*']):
"""Turn path with opt. globbing into valid list of files respecting
include and exclude patterns.
Parameters
----------
path : str
Path to process. Can be location of a file, folder or glob.
Can be in uri-notation, can be relative or absolute or start with ~.
include : list, optional
Globbing patterns to require in result, defaults to ['*', '.*'].
exclude : list, optional
Globbing patterns to exclude from result, defaults to ['.*', '_*'].
Returns
-------
objs : list
List of valid files
Notes
-----
- Doesn't show hidden files starting with '.' by default. To enable hidden files, make sure '.*' is in `include` and '.*' is not in `exclude`.
- Doesn't show files starting with '_' by default. To enable these files, make sure '_*' is not in `exclude`.
"""
if '://' in path:
# don't modify when path is in uri-notation, except for local files
if path.startswith('file://'):
path = path[7:]
else:
return [path]
path = os.path.abspath(os.path.expanduser(path))
if os.path.isfile(path):
if not path.lower().endswith(('.xlsx', '.xls')) and zipfile.is_zipfile(path):
# zipfile misidentifies xlsx as archive of xml files
with zipfile.ZipFile(path) as myzip:
zipped = []
for z in myzip.namelist():
z_fn = os.path.basename(z)
if z_fn != '' and any([fnmatch(z_fn, i) for i in include]) and \
not any([fnmatch(z_fn, e) for e in exclude]):
zipped.append(z)
return [myzip.open(z) for z in zipped]
else:
return [path]
elif os.path.isdir(path):
cands = [os.path.abspath(os.path.join(path, p)) for p in os.listdir(path)]
dirname = path
else:
cands = []
dirname = os.path.dirname(path)
include = list(chain.from_iterable(glob(os.path.join(dirname, i)) for i in include))
exclude = list(chain.from_iterable(glob(os.path.join(dirname, e)) for e in exclude))
objs = []
if cands == []:
cands = glob(path)
for p in cands:
if os.path.isfile(p) and p in include and not p in exclude:
objs.append(p)
zipped = [zipfile.is_zipfile(o) and not o.lower().endswith(('.xlsx', '.xls')) \
for o in objs]
toappend = []
todelete = []
for ix, o in enumerate(objs):
# if zipfile in objs replace zipfile with its contents
if zipped[ix]:
for new_o in _path_to_objs(o):
toappend.append(new_o)
todelete.append(ix)
shiftindex = 0
for d in todelete:
del objs[d - shiftindex]
shiftindex += 1
for new_o in toappend:
objs.append(new_o)
return objs
def to_df(obj, cfg={}, raise_on_error=True, silent=False, verbose=False):
"""Convert obj to pandas.DataFrame, determine parser from filename.
Falls back to odo, esp. for database uri's.
"""
if type(obj) == str:
name = obj
else:
name = obj.name
name = name[name.rfind('/') + 1:]
if not raise_on_error:
try:
return to_df(obj=obj, cfg=cfg, raise_on_error=True)
except (pd.parser.CParserError, AttributeError, ValueError, TypeError, IOError) as e:
if not silent:
print('WARNING in {}: {} {}'.format(name, e.__class__, e))
return None
except:
if not silent:
print('WARNING in {}: {}'.format(name, sys.exc_info()[0]))
return None
params = {}
if 'default' in cfg:
params = deepcopy(cfg['default'])
if name in cfg:
for e in cfg[name]:
params[e] = deepcopy(cfg[name][e])
if 'custom_date_parser' in params:
params['date_parser'] = DtParser(params['custom_date_parser']).parse
del params['custom_date_parser']
if verbose:
print(name, params)
if name.lower().startswith('s3:'):
if not find_spec('boto'):
raise ImportError('reading from aws-s3 requires the boto package to be installed')
if '.csv' in name.lower():
# name can be .csv.gz or .csv.bz2
return pd.read_csv(obj, **params)
elif '.tsv' in name.lower() or '.txt' in name.lower():
# name can be .tsv.gz or .txt.bz2
return pd.read_table(obj, **params)
elif name.lower().endswith(('.htm', '.html')):
if not find_spec('lxml'):
params['flavor'] = 'bs4'
if not find_spec('bs4') and not find_spec('html5lib'):
raise ImportError('reading html requires the lxml or bs4 + html5lib packages to be installed')
if 'nrows' in params:
del params['nrows']
if type(obj) == zipfile.ZipExtFile:
obj = obj.read()
data = pd.read_html(obj, **params)
data = {str(i): data[i] for i in range(len(data))}
return data
elif name.lower().endswith('.xml'):
if 'nrows' in params:
del params['nrows']
from icy.utils import xml_to_json
with open(obj) as f:
json = xml_to_json(str(f.read()))
return pd.read_json(json, **params)
elif name.lower().endswith('.json'):
if 'nrows' in params:
del params['nrows']
return pd.read_json(obj, **params)
elif name.lower().endswith(('.xls', '.xlsx')):
if not find_spec('xlrd'):
raise ImportError('reading excel files requires the xlrd package to be installed')
if 'nrows' in params:
del params['nrows']
data = {}
xls = pd.ExcelFile(obj)
for key in xls.sheet_names:
data[key] = xls.parse(key, **params)
return data
elif name.lower().endswith(('.h5', '.hdf5')):
if not find_spec('tables'):
raise ImportError('reading hdf5 files requires the pytables package to be installed')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with pd.HDFStore(obj) as store:
data = {}
for key in store.keys():
data[key[1:]] = store[key]
return data
elif name.lower().endswith(('.sqlite', '.sql', '.db')):
import sqlite3
if type(obj) != str:
raise IOError('sqlite-database must be decompressed before import')
if 'nrows' in params:
del params['nrows']
# params['chunksize'] = params.pop('nrows') # returns iterator
with sqlite3.connect(obj) as con:
data = {}
cursor = con.cursor()
cursor.execute('SELECT name FROM sqlite_master WHERE type="table"')
tables = [t[0] for t in cursor.fetchall()]
for t in tables:
sql = 'SELECT * FROM ' + t
data[t] = pd.read_sql_query(sql, con, **params)
return data
else:
try:
data = {name: odo(obj, pd.DataFrame)}
if type(data[name]) == pd.DataFrame:
return data
except NotImplementedError:
pass
raise NotImplementedError('Error creating DataFrame from object', obj)
def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False):
"""Wraps pandas.IO & odo to create a dictionary of pandas.DataFrames from multiple different sources
Parameters
----------
path : str
Location of file, folder or zip-file to be parsed. Can include globbing (e.g. `*.csv`).
Can be remote with URI-notation beginning with e.g. http://, https://, file://, ftp://, s3:// and ssh://.
Can be odo-supported database (SQL, MongoDB, Hadoop, Spark) if dependencies are available.
Parser will be selected based on file extension.
cfg : dict or str, optional
Dictionary of kwargs to be provided to the pandas parser (http://pandas.pydata.org/pandas-docs/stable/api.html#input-output)
or str with path to YAML, that will be parsed.
Special keys:
**filters** : str or list of strings, optional. For a file to be processed, it must contain one of the Strings (e.g. ['.csv', '.tsv'])
**default** : kwargs to be used for every file
**custom_date_parser** : strptime-format string (https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior), generates a parser that used as the *date_parser* argument
If filename in keys, use kwargs from that key in addition to or overwriting *default* kwargs.
silent : boolean, optional
If True, doesn't print to stdout.
verbose : boolean, optional
If True, prints parsing arguments for each file processed to stdout.
raise_on_error : boolean, optional
Raise exception or only display warning, if a file cannot be parsed successfully.
return_errors : boolean, optional
If True, read() returns (data, errors) tuple instead of only data, with errors as a list of all files that could not be parsed.
Returns
-------
data : dict
Dictionary of parsed pandas.DataFrames, with file names as keys.
Notes
-----
- Start with basic cfg and tune until the desired parsing result is achieved.
- File extensions are critical to determine the parser, make sure they are *common*.
- Avoid files named 'default' or 'filters'.
- Avoid duplicate file names.
- Subfolders and file names beginning with '.' or '_' are ignored.
- If an https:// URI isn't correctly processed, try http:// instead.
- To connect to a database or s3-bucket, make sure the required dependencies like sqlalchemy, pymongo, pyspark or boto are available in the active environment.
"""
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
if not silent:
print('creating read.yml config file draft ...')
cfg = {'filters': ['.csv'], 'default': {'sep': ',', 'parse_dates': []}}
with open('local/read.yml', 'xt') as f:
yaml.dump(cfg, f)
yml = _read_yaml('local/read.yml')
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
else:
filters = []
cfg = yml
data = {}
errors = []
if not silent:
print('processing', path, '...')
for f in _path_to_objs(path):
if type(f) == str:
fname = os.path.basename(f)
elif type(f) == zipfile.ZipExtFile:
fname = f.name
else:
raise RuntimeError('_path_to_objs() returned unknown type', f)
data, errors = _read_append(data=data, errors=errors, path=f, fname=fname, \
cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if raise_on_error and data == {}:
raise IOError('path is invalid or empty')
if not silent:
print('imported {} DataFrames'.format(len(data)))
if len(data) > 0:
print('total memory usage: {}'.format(mem(data)))
if len(errors) > 0:
print('import errors in files: {}'.format(', '.join(errors)))
if return_errors:
return data, errors
else:
return data
def _read_append(data, errors, path, fname, cfg, raise_on_error, silent, verbose):
key = fname[fname.rfind('/') + 1:]
result = to_df(obj=path, cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if type(result) == dict:
if len(result) == 0:
errors.append(key)
# elif len(result) == 1:
# r = next(iter(result))
# data[r] = result[r]
else:
for r in result:
data['_'.join([key, r])] = result[r]
elif type(result) == type(None):
errors.append(key)
else:
data[key] = result
return data, errors
def preview(path, cfg={}, rows=5, silent=True, verbose=False, raise_on_error=False):
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
yml = {}
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
cfg = yml
if type(cfg) != dict:
cfg = {'default': {'nrows': rows}}
else:
if 'filters' in cfg:
filters = cfg['filters']
if type(filters) == str:
filters = [filters]
del cfg['filters']
if 'default' in cfg:
if type(cfg['default']) == dict:
cfg['default']['nrows'] = rows
else:
cfg['default'] = {'nrows': rows}
else:
cfg['default'] = {'nrows': rows}
if silent:
# if not silent, output will be generated from icy.read()
print('processing', path, '...')
prev, errors = read(path=path, cfg=cfg, silent=silent, verbose=verbose, \
raise_on_error=raise_on_error, return_errors=True)
for key in sorted(prev):
print('File: {}'.format(key))
print()
prev[key].info(verbose=True, memory_usage=True, null_counts=True)
print()
print('{:<20} | first {} VALUES'.format('COLUMN', rows))
print('-'*40)
for col in prev[key].columns:
print('{:<20} | {}'.format(col, str(list(prev[key][col].values)[:rows])))
print('='*40)
print('Successfully parsed first {} rows of {} files:'.format(rows, len(prev)))
print(', '.join(sorted(prev)))
if len(errors) > 0 and silent:
print()
print('Errors parsing files: {}'.format(', '.join(errors)))
print()
print('Try icy.preview(path, cfg, silent=False) for a more verbose output.')
return
def mem(data):
"""Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.).
"""
if type(data) == dict:
num = sum([data[k].memory_usage(index=True).sum() for k in data])
else:
num = data.memory_usage(index=True).sum()
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB')
def _read_yaml(path):
if os.path.isfile(path):
with open(path) as f:
return yaml.safe_load(f)
else:
return None
def merge(data, cfg=None):
""" WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
-----
"""
# go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
# pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
# should be easy to iterate from normalized tables to a fully joined set of dataframes
if type(cfg) == str:
cfg = _read_yaml(cfg)
if cfg == None:
cfg = _read_yaml('local/merge.yml')
if cfg == None:
print('creating merge.yml config file draft ...')
cfg = {}
# find all tables with identical column names
# if no common key-col
# concat along rows, add col (src)
# e.g. chimps
# find all tables with same length
# if no duplicate column names
# concat along columns
# find master table (by length?)
# from smalles to biggest table
# find possible key-cols by uniques == len
# find bigger tables with common column names -> cands
# check for highest overlap-ratio of uniques -> cand (prefer smaller table if equal ratio)
# join table on best cand
# if ratio below treshold put table on unidentified list
for key in data:
cfg[key] = list(data[key].columns)
with open('local/merge.yml', 'xt') as f:
yaml.dump(cfg, f)
cfg = _read_yaml('local/merge.yml')
# if cfg == None:
# if not os.path.exists(default_cfg):
# create default_cfg draft
# else:
# join on default_cfg
# report join_result
# else:
# join on cfg
# report join_result
labels = None
return data, labels
def _find_key_cols(df):
"""Identify columns in a DataFrame that could be a unique key"""
keys = []
for col in df:
if len(df[col].unique()) == len(df[col]):
keys.append(col)
return keys
def _dtparse(s, pattern):
return datetime.strptime(s, pattern)
class DtParser():
def __init__(self, pattern):
self.pattern = pattern
self.vfunc = np.vectorize(_dtparse)
def parse(self, s):
if type(s) == str:
return _dtparse(s, self.pattern)
elif type(s) == list:
return [_dtparse(e, self.pattern) for e in s]
elif type(s) == np.ndarray:
return self.vfunc(s, self.pattern)
def run_examples(examples):
"""Run read() on a number of examples, supress output, generate summary.
Parameters
----------
examples : list of tuples of three str elements
Tuples contain the path and cfg argument to the read function,
as well as the cfg argument to the merge function (*TODO*)
e.g. [(path, read_cfg, merge_cfg), (...)]
Returns
-------
None
Prints all results to stdout.
"""
import inspect
PATH_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath( \
inspect.getfile(inspect.currentframe()))), '../local/test_data')
print('running examples ...')
t0 = datetime.now()
results = [0, 0, 0]
for ex in sorted(examples):
t1 = datetime.now()
src, cfg, _ = examples[ex]
src = os.path.abspath(os.path.join(PATH_TEST_DATA, src))
if not os.path.isfile(src) and not os.path.isdir(src):
print('{} not a file'.format(src))
break
if type(cfg) == str:
cfg = os.path.abspath(os.path.join(PATH_TEST_DATA, cfg))
if not os.path.isfile(cfg):
print('{} not a file'.format(cfg))
break
try:
data = read(src, cfg=cfg, silent=True)
n_keys = len(data.keys())
if n_keys > 0:
print('data {:<15} [SUCCESS] {:.1f}s, {} dfs, {}'.format(
ex, (datetime.now()-t1).total_seconds(), n_keys, mem(data)))
results[0] += 1
else:
print('data {:<15} [NO IMPORT] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[1] += 1
except:
print('data {:<15} [EXCEPTION] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[2] += 1
print()
print('ran {} tests in {:.1f} seconds'.format(len(examples),
(datetime.now()-t0).total_seconds()))
print('{} success / {} no import / {} exception'.format(
str(results[0]), str(results[1]), str(results[2])))
if __name__ == '__main__':
run_examples(examples)
|
jorahn/icy | icy/ext/xml2json.py | elem_to_internal | python | def elem_to_internal(elem, strip_ns=1, strip=1):
d = {}
elem_tag = elem.tag
if strip_ns:
elem_tag = strip_tag(elem.tag)
else:
for key, value in list(elem.attrib.items()):
d['@' + key] = value
# loop over subelements to merge them
for subelem in elem:
v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
tag = subelem.tag
if strip_ns:
tag = strip_tag(subelem.tag)
value = v[tag]
try:
# add to existing list for this tag
d[tag].append(value)
except AttributeError:
# turn existing entry into a list
d[tag] = [d[tag], value]
except KeyError:
# add a new non-list entry
d[tag] = value
text = elem.text
tail = elem.tail
if strip:
# ignore leading and trailing whitespace
if text:
text = text.strip()
if tail:
tail = tail.strip()
if tail:
d['#tail'] = tail
if d:
# use #text element if other attributes exist
if text:
d["#text"] = text
else:
# text is the value if no attributes
d = text or None
return {elem_tag: d} | Convert an Element into an internal dictionary (not JSON!). | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ext/xml2json.py#L55-L104 | [
"def strip_tag(tag):\n strip_ns_tag = tag\n split_array = tag.split('}')\n if len(split_array) > 1:\n strip_ns_tag = split_array[1]\n tag = strip_ns_tag\n return tag\n",
"def elem_to_internal(elem, strip_ns=1, strip=1):\n \"\"\"Convert an Element into an internal dictionary (not JSON!... | """
https://github.com/hay/xml2json
xml2json.py Convert XML to JSON
Relies on ElementTree for the XML parsing. This is based on
pesterfish.py but uses a different XML->JSON mapping.
The XML->JSON mapping is described at
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
Rewritten to a command line utility by Hay Kranen < github.com/hay > with
contributions from George Hamilton (gmh04) and Dan Brown (jdanbrown)
XML JSON
<e/> "e": null
<e>text</e> "e": "text"
<e name="value" /> "e": { "@name": "value" }
<e name="value">text</e> "e": { "@name": "value", "#text": "text" }
<e> <a>text</a ><b>text</b> </e> "e": { "a": "text", "b": "text" }
<e> <a>text</a> <a>text</a> </e> "e": { "a": ["text", "text"] }
<e> text <a>text</a> </e> "e": { "#text": "text", "a": "text" }
This is very similar to the mapping used for Yahoo Web Services
(http://developer.yahoo.com/common/json.html#xml).
This is a mess in that it is so unpredictable -- it requires lots of testing
(e.g. to see if values are lists or strings or dictionaries). For use
in Python this could be vastly cleaner. Think about whether the internal
form can be more self-consistent while maintaining good external
characteristics for the JSON.
Look at the Yahoo version closely to see how it works. Maybe can adopt
that completely if it makes more sense...
R. White, 2006 November 6
"""
import json
import optparse
import sys
import os
import xml.etree.cElementTree as ET
def strip_tag(tag):
strip_ns_tag = tag
split_array = tag.split('}')
if len(split_array) > 1:
strip_ns_tag = split_array[1]
tag = strip_ns_tag
return tag
def internal_to_elem(pfsh, factory=ET.Element):
"""Convert an internal dictionary (not JSON!) into an Element.
Whatever Element implementation we could import will be
used by default; if you want to use something else, pass the
Element class as the factory parameter.
"""
attribs = {}
text = None
tail = None
sublist = []
tag = list(pfsh.keys())
if len(tag) != 1:
raise ValueError("Illegal structure with multiple tags: %s" % tag)
tag = tag[0]
value = pfsh[tag]
if isinstance(value, dict):
for k, v in list(value.items()):
if k[:1] == "@":
attribs[k[1:]] = v
elif k == "#text":
text = v
elif k == "#tail":
tail = v
elif isinstance(v, list):
for v2 in v:
sublist.append(internal_to_elem({k: v2}, factory=factory))
else:
sublist.append(internal_to_elem({k: v}, factory=factory))
else:
text = value
e = factory(tag, attribs)
for sub in sublist:
e.append(sub)
e.text = text
e.tail = tail
return e
def elem2json(elem, options, strip_ns=1, strip=1):
"""Convert an ElementTree or Element into a JSON string."""
if hasattr(elem, 'getroot'):
elem = elem.getroot()
if options.pretty:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip), sort_keys=True, indent=4, separators=(',', ': '))
else:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip))
def json2elem(json_data, factory=ET.Element):
"""Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
return internal_to_elem(json.loads(json_data), factory)
def xml2json(xmlstring, options, strip_ns=1, strip=1):
"""Convert an XML string into a JSON string."""
elem = ET.fromstring(xmlstring)
return elem2json(elem, options, strip_ns=strip_ns, strip=strip)
def json2xml(json_data, factory=ET.Element):
"""Convert a JSON string into an XML string.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
if not isinstance(json_data, dict):
json_data = json.loads(json_data)
elem = internal_to_elem(json_data, factory)
return ET.tostring(elem)
def main():
p = optparse.OptionParser(
description='Converts XML to JSON or the other way around. Reads from standard input by default, or from file if given.',
prog='xml2json',
usage='%prog -t xml2json -o file.json [file]'
)
p.add_option('--type', '-t', help="'xml2json' or 'json2xml'", default="xml2json")
p.add_option('--out', '-o', help="Write to OUT instead of stdout")
p.add_option(
'--strip_text', action="store_true",
dest="strip_text", help="Strip text for xml2json")
p.add_option(
'--pretty', action="store_true",
dest="pretty", help="Format JSON output so it is easier to read")
p.add_option(
'--strip_namespace', action="store_true",
dest="strip_ns", help="Strip namespace for xml2json")
p.add_option(
'--strip_newlines', action="store_true",
dest="strip_nl", help="Strip newlines for xml2json")
options, arguments = p.parse_args()
inputstream = sys.stdin
if len(arguments) == 1:
try:
inputstream = open(arguments[0])
except:
sys.stderr.write("Problem reading '{0}'\n".format(arguments[0]))
p.print_help()
sys.exit(-1)
input = inputstream.read()
strip = 0
strip_ns = 0
if options.strip_text:
strip = 1
if options.strip_ns:
strip_ns = 1
if options.strip_nl:
input = input.replace('\n', '').replace('\r','')
if (options.type == "xml2json"):
out = xml2json(input, options, strip_ns, strip)
else:
out = json2xml(input)
if (options.out):
file = open(options.out, 'w')
file.write(out)
file.close()
else:
print(out)
if __name__ == "__main__":
main()
|
jorahn/icy | icy/ext/xml2json.py | internal_to_elem | python | def internal_to_elem(pfsh, factory=ET.Element):
attribs = {}
text = None
tail = None
sublist = []
tag = list(pfsh.keys())
if len(tag) != 1:
raise ValueError("Illegal structure with multiple tags: %s" % tag)
tag = tag[0]
value = pfsh[tag]
if isinstance(value, dict):
for k, v in list(value.items()):
if k[:1] == "@":
attribs[k[1:]] = v
elif k == "#text":
text = v
elif k == "#tail":
tail = v
elif isinstance(v, list):
for v2 in v:
sublist.append(internal_to_elem({k: v2}, factory=factory))
else:
sublist.append(internal_to_elem({k: v}, factory=factory))
else:
text = value
e = factory(tag, attribs)
for sub in sublist:
e.append(sub)
e.text = text
e.tail = tail
return e | Convert an internal dictionary (not JSON!) into an Element.
Whatever Element implementation we could import will be
used by default; if you want to use something else, pass the
Element class as the factory parameter. | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ext/xml2json.py#L107-L145 | [
"def internal_to_elem(pfsh, factory=ET.Element):\n\n \"\"\"Convert an internal dictionary (not JSON!) into an Element.\n\n Whatever Element implementation we could import will be\n used by default; if you want to use something else, pass the\n Element class as the factory parameter.\n \"\"\"\n\n a... | """
https://github.com/hay/xml2json
xml2json.py Convert XML to JSON
Relies on ElementTree for the XML parsing. This is based on
pesterfish.py but uses a different XML->JSON mapping.
The XML->JSON mapping is described at
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
Rewritten to a command line utility by Hay Kranen < github.com/hay > with
contributions from George Hamilton (gmh04) and Dan Brown (jdanbrown)
XML JSON
<e/> "e": null
<e>text</e> "e": "text"
<e name="value" /> "e": { "@name": "value" }
<e name="value">text</e> "e": { "@name": "value", "#text": "text" }
<e> <a>text</a ><b>text</b> </e> "e": { "a": "text", "b": "text" }
<e> <a>text</a> <a>text</a> </e> "e": { "a": ["text", "text"] }
<e> text <a>text</a> </e> "e": { "#text": "text", "a": "text" }
This is very similar to the mapping used for Yahoo Web Services
(http://developer.yahoo.com/common/json.html#xml).
This is a mess in that it is so unpredictable -- it requires lots of testing
(e.g. to see if values are lists or strings or dictionaries). For use
in Python this could be vastly cleaner. Think about whether the internal
form can be more self-consistent while maintaining good external
characteristics for the JSON.
Look at the Yahoo version closely to see how it works. Maybe can adopt
that completely if it makes more sense...
R. White, 2006 November 6
"""
import json
import optparse
import sys
import os
import xml.etree.cElementTree as ET
def strip_tag(tag):
strip_ns_tag = tag
split_array = tag.split('}')
if len(split_array) > 1:
strip_ns_tag = split_array[1]
tag = strip_ns_tag
return tag
def elem_to_internal(elem, strip_ns=1, strip=1):
"""Convert an Element into an internal dictionary (not JSON!)."""
d = {}
elem_tag = elem.tag
if strip_ns:
elem_tag = strip_tag(elem.tag)
else:
for key, value in list(elem.attrib.items()):
d['@' + key] = value
# loop over subelements to merge them
for subelem in elem:
v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
tag = subelem.tag
if strip_ns:
tag = strip_tag(subelem.tag)
value = v[tag]
try:
# add to existing list for this tag
d[tag].append(value)
except AttributeError:
# turn existing entry into a list
d[tag] = [d[tag], value]
except KeyError:
# add a new non-list entry
d[tag] = value
text = elem.text
tail = elem.tail
if strip:
# ignore leading and trailing whitespace
if text:
text = text.strip()
if tail:
tail = tail.strip()
if tail:
d['#tail'] = tail
if d:
# use #text element if other attributes exist
if text:
d["#text"] = text
else:
# text is the value if no attributes
d = text or None
return {elem_tag: d}
def elem2json(elem, options, strip_ns=1, strip=1):
"""Convert an ElementTree or Element into a JSON string."""
if hasattr(elem, 'getroot'):
elem = elem.getroot()
if options.pretty:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip), sort_keys=True, indent=4, separators=(',', ': '))
else:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip))
def json2elem(json_data, factory=ET.Element):
"""Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
return internal_to_elem(json.loads(json_data), factory)
def xml2json(xmlstring, options, strip_ns=1, strip=1):
"""Convert an XML string into a JSON string."""
elem = ET.fromstring(xmlstring)
return elem2json(elem, options, strip_ns=strip_ns, strip=strip)
def json2xml(json_data, factory=ET.Element):
"""Convert a JSON string into an XML string.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
if not isinstance(json_data, dict):
json_data = json.loads(json_data)
elem = internal_to_elem(json_data, factory)
return ET.tostring(elem)
def main():
p = optparse.OptionParser(
description='Converts XML to JSON or the other way around. Reads from standard input by default, or from file if given.',
prog='xml2json',
usage='%prog -t xml2json -o file.json [file]'
)
p.add_option('--type', '-t', help="'xml2json' or 'json2xml'", default="xml2json")
p.add_option('--out', '-o', help="Write to OUT instead of stdout")
p.add_option(
'--strip_text', action="store_true",
dest="strip_text", help="Strip text for xml2json")
p.add_option(
'--pretty', action="store_true",
dest="pretty", help="Format JSON output so it is easier to read")
p.add_option(
'--strip_namespace', action="store_true",
dest="strip_ns", help="Strip namespace for xml2json")
p.add_option(
'--strip_newlines', action="store_true",
dest="strip_nl", help="Strip newlines for xml2json")
options, arguments = p.parse_args()
inputstream = sys.stdin
if len(arguments) == 1:
try:
inputstream = open(arguments[0])
except:
sys.stderr.write("Problem reading '{0}'\n".format(arguments[0]))
p.print_help()
sys.exit(-1)
input = inputstream.read()
strip = 0
strip_ns = 0
if options.strip_text:
strip = 1
if options.strip_ns:
strip_ns = 1
if options.strip_nl:
input = input.replace('\n', '').replace('\r','')
if (options.type == "xml2json"):
out = xml2json(input, options, strip_ns, strip)
else:
out = json2xml(input)
if (options.out):
file = open(options.out, 'w')
file.write(out)
file.close()
else:
print(out)
if __name__ == "__main__":
main()
|
jorahn/icy | icy/ext/xml2json.py | elem2json | python | def elem2json(elem, options, strip_ns=1, strip=1):
if hasattr(elem, 'getroot'):
elem = elem.getroot()
if options.pretty:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip), sort_keys=True, indent=4, separators=(',', ': '))
else:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip)) | Convert an ElementTree or Element into a JSON string. | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ext/xml2json.py#L148-L158 | [
"def elem_to_internal(elem, strip_ns=1, strip=1):\n \"\"\"Convert an Element into an internal dictionary (not JSON!).\"\"\"\n\n d = {}\n elem_tag = elem.tag\n if strip_ns:\n elem_tag = strip_tag(elem.tag)\n else:\n for key, value in list(elem.attrib.items()):\n d['@' + key] =... | """
https://github.com/hay/xml2json
xml2json.py Convert XML to JSON
Relies on ElementTree for the XML parsing. This is based on
pesterfish.py but uses a different XML->JSON mapping.
The XML->JSON mapping is described at
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
Rewritten to a command line utility by Hay Kranen < github.com/hay > with
contributions from George Hamilton (gmh04) and Dan Brown (jdanbrown)
XML JSON
<e/> "e": null
<e>text</e> "e": "text"
<e name="value" /> "e": { "@name": "value" }
<e name="value">text</e> "e": { "@name": "value", "#text": "text" }
<e> <a>text</a ><b>text</b> </e> "e": { "a": "text", "b": "text" }
<e> <a>text</a> <a>text</a> </e> "e": { "a": ["text", "text"] }
<e> text <a>text</a> </e> "e": { "#text": "text", "a": "text" }
This is very similar to the mapping used for Yahoo Web Services
(http://developer.yahoo.com/common/json.html#xml).
This is a mess in that it is so unpredictable -- it requires lots of testing
(e.g. to see if values are lists or strings or dictionaries). For use
in Python this could be vastly cleaner. Think about whether the internal
form can be more self-consistent while maintaining good external
characteristics for the JSON.
Look at the Yahoo version closely to see how it works. Maybe can adopt
that completely if it makes more sense...
R. White, 2006 November 6
"""
import json
import optparse
import sys
import os
import xml.etree.cElementTree as ET
def strip_tag(tag):
strip_ns_tag = tag
split_array = tag.split('}')
if len(split_array) > 1:
strip_ns_tag = split_array[1]
tag = strip_ns_tag
return tag
def elem_to_internal(elem, strip_ns=1, strip=1):
"""Convert an Element into an internal dictionary (not JSON!)."""
d = {}
elem_tag = elem.tag
if strip_ns:
elem_tag = strip_tag(elem.tag)
else:
for key, value in list(elem.attrib.items()):
d['@' + key] = value
# loop over subelements to merge them
for subelem in elem:
v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
tag = subelem.tag
if strip_ns:
tag = strip_tag(subelem.tag)
value = v[tag]
try:
# add to existing list for this tag
d[tag].append(value)
except AttributeError:
# turn existing entry into a list
d[tag] = [d[tag], value]
except KeyError:
# add a new non-list entry
d[tag] = value
text = elem.text
tail = elem.tail
if strip:
# ignore leading and trailing whitespace
if text:
text = text.strip()
if tail:
tail = tail.strip()
if tail:
d['#tail'] = tail
if d:
# use #text element if other attributes exist
if text:
d["#text"] = text
else:
# text is the value if no attributes
d = text or None
return {elem_tag: d}
def internal_to_elem(pfsh, factory=ET.Element):
"""Convert an internal dictionary (not JSON!) into an Element.
Whatever Element implementation we could import will be
used by default; if you want to use something else, pass the
Element class as the factory parameter.
"""
attribs = {}
text = None
tail = None
sublist = []
tag = list(pfsh.keys())
if len(tag) != 1:
raise ValueError("Illegal structure with multiple tags: %s" % tag)
tag = tag[0]
value = pfsh[tag]
if isinstance(value, dict):
for k, v in list(value.items()):
if k[:1] == "@":
attribs[k[1:]] = v
elif k == "#text":
text = v
elif k == "#tail":
tail = v
elif isinstance(v, list):
for v2 in v:
sublist.append(internal_to_elem({k: v2}, factory=factory))
else:
sublist.append(internal_to_elem({k: v}, factory=factory))
else:
text = value
e = factory(tag, attribs)
for sub in sublist:
e.append(sub)
e.text = text
e.tail = tail
return e
def json2elem(json_data, factory=ET.Element):
"""Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
return internal_to_elem(json.loads(json_data), factory)
def xml2json(xmlstring, options, strip_ns=1, strip=1):
"""Convert an XML string into a JSON string."""
elem = ET.fromstring(xmlstring)
return elem2json(elem, options, strip_ns=strip_ns, strip=strip)
def json2xml(json_data, factory=ET.Element):
"""Convert a JSON string into an XML string.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
if not isinstance(json_data, dict):
json_data = json.loads(json_data)
elem = internal_to_elem(json_data, factory)
return ET.tostring(elem)
def main():
p = optparse.OptionParser(
description='Converts XML to JSON or the other way around. Reads from standard input by default, or from file if given.',
prog='xml2json',
usage='%prog -t xml2json -o file.json [file]'
)
p.add_option('--type', '-t', help="'xml2json' or 'json2xml'", default="xml2json")
p.add_option('--out', '-o', help="Write to OUT instead of stdout")
p.add_option(
'--strip_text', action="store_true",
dest="strip_text", help="Strip text for xml2json")
p.add_option(
'--pretty', action="store_true",
dest="pretty", help="Format JSON output so it is easier to read")
p.add_option(
'--strip_namespace', action="store_true",
dest="strip_ns", help="Strip namespace for xml2json")
p.add_option(
'--strip_newlines', action="store_true",
dest="strip_nl", help="Strip newlines for xml2json")
options, arguments = p.parse_args()
inputstream = sys.stdin
if len(arguments) == 1:
try:
inputstream = open(arguments[0])
except:
sys.stderr.write("Problem reading '{0}'\n".format(arguments[0]))
p.print_help()
sys.exit(-1)
input = inputstream.read()
strip = 0
strip_ns = 0
if options.strip_text:
strip = 1
if options.strip_ns:
strip_ns = 1
if options.strip_nl:
input = input.replace('\n', '').replace('\r','')
if (options.type == "xml2json"):
out = xml2json(input, options, strip_ns, strip)
else:
out = json2xml(input)
if (options.out):
file = open(options.out, 'w')
file.write(out)
file.close()
else:
print(out)
if __name__ == "__main__":
main()
|
jorahn/icy | icy/ext/xml2json.py | json2elem | python | def json2elem(json_data, factory=ET.Element):
return internal_to_elem(json.loads(json_data), factory) | Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter. | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ext/xml2json.py#L161-L170 | [
"def internal_to_elem(pfsh, factory=ET.Element):\n\n \"\"\"Convert an internal dictionary (not JSON!) into an Element.\n\n Whatever Element implementation we could import will be\n used by default; if you want to use something else, pass the\n Element class as the factory parameter.\n \"\"\"\n\n a... | """
https://github.com/hay/xml2json
xml2json.py Convert XML to JSON
Relies on ElementTree for the XML parsing. This is based on
pesterfish.py but uses a different XML->JSON mapping.
The XML->JSON mapping is described at
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
Rewritten to a command line utility by Hay Kranen < github.com/hay > with
contributions from George Hamilton (gmh04) and Dan Brown (jdanbrown)
XML JSON
<e/> "e": null
<e>text</e> "e": "text"
<e name="value" /> "e": { "@name": "value" }
<e name="value">text</e> "e": { "@name": "value", "#text": "text" }
<e> <a>text</a ><b>text</b> </e> "e": { "a": "text", "b": "text" }
<e> <a>text</a> <a>text</a> </e> "e": { "a": ["text", "text"] }
<e> text <a>text</a> </e> "e": { "#text": "text", "a": "text" }
This is very similar to the mapping used for Yahoo Web Services
(http://developer.yahoo.com/common/json.html#xml).
This is a mess in that it is so unpredictable -- it requires lots of testing
(e.g. to see if values are lists or strings or dictionaries). For use
in Python this could be vastly cleaner. Think about whether the internal
form can be more self-consistent while maintaining good external
characteristics for the JSON.
Look at the Yahoo version closely to see how it works. Maybe can adopt
that completely if it makes more sense...
R. White, 2006 November 6
"""
import json
import optparse
import sys
import os
import xml.etree.cElementTree as ET
def strip_tag(tag):
strip_ns_tag = tag
split_array = tag.split('}')
if len(split_array) > 1:
strip_ns_tag = split_array[1]
tag = strip_ns_tag
return tag
def elem_to_internal(elem, strip_ns=1, strip=1):
"""Convert an Element into an internal dictionary (not JSON!)."""
d = {}
elem_tag = elem.tag
if strip_ns:
elem_tag = strip_tag(elem.tag)
else:
for key, value in list(elem.attrib.items()):
d['@' + key] = value
# loop over subelements to merge them
for subelem in elem:
v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
tag = subelem.tag
if strip_ns:
tag = strip_tag(subelem.tag)
value = v[tag]
try:
# add to existing list for this tag
d[tag].append(value)
except AttributeError:
# turn existing entry into a list
d[tag] = [d[tag], value]
except KeyError:
# add a new non-list entry
d[tag] = value
text = elem.text
tail = elem.tail
if strip:
# ignore leading and trailing whitespace
if text:
text = text.strip()
if tail:
tail = tail.strip()
if tail:
d['#tail'] = tail
if d:
# use #text element if other attributes exist
if text:
d["#text"] = text
else:
# text is the value if no attributes
d = text or None
return {elem_tag: d}
def internal_to_elem(pfsh, factory=ET.Element):
"""Convert an internal dictionary (not JSON!) into an Element.
Whatever Element implementation we could import will be
used by default; if you want to use something else, pass the
Element class as the factory parameter.
"""
attribs = {}
text = None
tail = None
sublist = []
tag = list(pfsh.keys())
if len(tag) != 1:
raise ValueError("Illegal structure with multiple tags: %s" % tag)
tag = tag[0]
value = pfsh[tag]
if isinstance(value, dict):
for k, v in list(value.items()):
if k[:1] == "@":
attribs[k[1:]] = v
elif k == "#text":
text = v
elif k == "#tail":
tail = v
elif isinstance(v, list):
for v2 in v:
sublist.append(internal_to_elem({k: v2}, factory=factory))
else:
sublist.append(internal_to_elem({k: v}, factory=factory))
else:
text = value
e = factory(tag, attribs)
for sub in sublist:
e.append(sub)
e.text = text
e.tail = tail
return e
def elem2json(elem, options, strip_ns=1, strip=1):
"""Convert an ElementTree or Element into a JSON string."""
if hasattr(elem, 'getroot'):
elem = elem.getroot()
if options.pretty:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip), sort_keys=True, indent=4, separators=(',', ': '))
else:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip))
def xml2json(xmlstring, options, strip_ns=1, strip=1):
"""Convert an XML string into a JSON string."""
elem = ET.fromstring(xmlstring)
return elem2json(elem, options, strip_ns=strip_ns, strip=strip)
def json2xml(json_data, factory=ET.Element):
"""Convert a JSON string into an XML string.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
if not isinstance(json_data, dict):
json_data = json.loads(json_data)
elem = internal_to_elem(json_data, factory)
return ET.tostring(elem)
def main():
p = optparse.OptionParser(
description='Converts XML to JSON or the other way around. Reads from standard input by default, or from file if given.',
prog='xml2json',
usage='%prog -t xml2json -o file.json [file]'
)
p.add_option('--type', '-t', help="'xml2json' or 'json2xml'", default="xml2json")
p.add_option('--out', '-o', help="Write to OUT instead of stdout")
p.add_option(
'--strip_text', action="store_true",
dest="strip_text", help="Strip text for xml2json")
p.add_option(
'--pretty', action="store_true",
dest="pretty", help="Format JSON output so it is easier to read")
p.add_option(
'--strip_namespace', action="store_true",
dest="strip_ns", help="Strip namespace for xml2json")
p.add_option(
'--strip_newlines', action="store_true",
dest="strip_nl", help="Strip newlines for xml2json")
options, arguments = p.parse_args()
inputstream = sys.stdin
if len(arguments) == 1:
try:
inputstream = open(arguments[0])
except:
sys.stderr.write("Problem reading '{0}'\n".format(arguments[0]))
p.print_help()
sys.exit(-1)
input = inputstream.read()
strip = 0
strip_ns = 0
if options.strip_text:
strip = 1
if options.strip_ns:
strip_ns = 1
if options.strip_nl:
input = input.replace('\n', '').replace('\r','')
if (options.type == "xml2json"):
out = xml2json(input, options, strip_ns, strip)
else:
out = json2xml(input)
if (options.out):
file = open(options.out, 'w')
file.write(out)
file.close()
else:
print(out)
if __name__ == "__main__":
main()
|
jorahn/icy | icy/ext/xml2json.py | xml2json | python | def xml2json(xmlstring, options, strip_ns=1, strip=1):
elem = ET.fromstring(xmlstring)
return elem2json(elem, options, strip_ns=strip_ns, strip=strip) | Convert an XML string into a JSON string. | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ext/xml2json.py#L173-L178 | [
"def elem2json(elem, options, strip_ns=1, strip=1):\n\n \"\"\"Convert an ElementTree or Element into a JSON string.\"\"\"\n\n if hasattr(elem, 'getroot'):\n elem = elem.getroot()\n\n if options.pretty:\n return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip), sort_keys=True,... | """
https://github.com/hay/xml2json
xml2json.py Convert XML to JSON
Relies on ElementTree for the XML parsing. This is based on
pesterfish.py but uses a different XML->JSON mapping.
The XML->JSON mapping is described at
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
Rewritten to a command line utility by Hay Kranen < github.com/hay > with
contributions from George Hamilton (gmh04) and Dan Brown (jdanbrown)
XML JSON
<e/> "e": null
<e>text</e> "e": "text"
<e name="value" /> "e": { "@name": "value" }
<e name="value">text</e> "e": { "@name": "value", "#text": "text" }
<e> <a>text</a ><b>text</b> </e> "e": { "a": "text", "b": "text" }
<e> <a>text</a> <a>text</a> </e> "e": { "a": ["text", "text"] }
<e> text <a>text</a> </e> "e": { "#text": "text", "a": "text" }
This is very similar to the mapping used for Yahoo Web Services
(http://developer.yahoo.com/common/json.html#xml).
This is a mess in that it is so unpredictable -- it requires lots of testing
(e.g. to see if values are lists or strings or dictionaries). For use
in Python this could be vastly cleaner. Think about whether the internal
form can be more self-consistent while maintaining good external
characteristics for the JSON.
Look at the Yahoo version closely to see how it works. Maybe can adopt
that completely if it makes more sense...
R. White, 2006 November 6
"""
import json
import optparse
import sys
import os
import xml.etree.cElementTree as ET
def strip_tag(tag):
    """Strip a leading ``{namespace}`` qualifier from an element tag, if present."""
    parts = tag.split('}')
    if len(parts) > 1:
        return parts[1]
    return tag
def elem_to_internal(elem, strip_ns=1, strip=1):
    """Convert an Element into an internal dictionary (not JSON!).

    Returns ``{tag: value}`` where value is None, a plain text string, or a
    dict mixing '@'-prefixed attributes, '#text'/'#tail' entries and child
    tags. Repeated child tags are merged into lists.
    """
    d = {}
    elem_tag = elem.tag
    if strip_ns:
        elem_tag = strip_tag(elem.tag)
    else:
        # NOTE(review): attributes are only collected when strip_ns is falsy
        # -- confirm that dropping them in the strip_ns case is intended.
        for key, value in list(elem.attrib.items()):
            d['@' + key] = value
    # loop over subelements to merge them
    for subelem in elem:
        v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
        tag = subelem.tag
        if strip_ns:
            tag = strip_tag(subelem.tag)
        value = v[tag]
        # EAFP merging: a second occurrence of `tag` promotes the entry to a
        # list; the first occurrence triggers KeyError and stores a scalar.
        try:
            # add to existing list for this tag
            d[tag].append(value)
        except AttributeError:
            # turn existing entry into a list
            d[tag] = [d[tag], value]
        except KeyError:
            # add a new non-list entry
            d[tag] = value
    text = elem.text
    tail = elem.tail
    if strip:
        # ignore leading and trailing whitespace
        if text:
            text = text.strip()
        if tail:
            tail = tail.strip()
    if tail:
        d['#tail'] = tail
    if d:
        # use #text element if other attributes exist
        if text:
            d["#text"] = text
    else:
        # text is the value if no attributes
        d = text or None
    return {elem_tag: d}
def internal_to_elem(pfsh, factory=ET.Element):
    """Build an Element from the internal dictionary form (not JSON!).

    The default Element implementation is whatever could be imported at
    module load time; pass a different Element class as ``factory`` to
    override it.
    """
    keys = list(pfsh.keys())
    if len(keys) != 1:
        raise ValueError("Illegal structure with multiple tags: %s" % keys)
    tag = keys[0]
    value = pfsh[tag]

    attribs = {}
    text = None
    tail = None
    children = []
    if not isinstance(value, dict):
        # A scalar value is simply the element's text content.
        text = value
    else:
        for k, v in list(value.items()):
            if k[:1] == "@":
                attribs[k[1:]] = v
            elif k == "#text":
                text = v
            elif k == "#tail":
                tail = v
            elif isinstance(v, list):
                # Repeated tags: one child element per list item.
                children.extend(
                    internal_to_elem({k: item}, factory=factory) for item in v
                )
            else:
                children.append(internal_to_elem({k: v}, factory=factory))

    elem = factory(tag, attribs)
    for child in children:
        elem.append(child)
    elem.text = text
    elem.tail = tail
    return elem
def elem2json(elem, options, strip_ns=1, strip=1):
    """Serialise an ElementTree or Element to a JSON string.

    ``options.pretty`` selects indented, key-sorted output.
    """
    root = elem.getroot() if hasattr(elem, 'getroot') else elem
    internal = elem_to_internal(root, strip_ns=strip_ns, strip=strip)
    if options.pretty:
        return json.dumps(internal, sort_keys=True, indent=4,
                          separators=(',', ': '))
    return json.dumps(internal)
def json2elem(json_data, factory=ET.Element):
    """Parse a JSON string and convert it into an Element.

    Whatever Element implementation we could import will be used by
    default; pass a different Element class as ``factory`` to override.
    """
    internal = json.loads(json_data)
    return internal_to_elem(internal, factory)
def json2xml(json_data, factory=ET.Element):
    """Convert a JSON string (or already-parsed dict) into an XML string.

    Whatever Element implementation we could import will be used by
    default; pass a different Element class as ``factory`` to override.
    """
    parsed = json_data if isinstance(json_data, dict) else json.loads(json_data)
    elem = internal_to_elem(parsed, factory)
    return ET.tostring(elem)
def main():
    """CLI entry point: convert XML to JSON (default) or JSON to XML."""
    p = optparse.OptionParser(
        description='Converts XML to JSON or the other way around. Reads from standard input by default, or from file if given.',
        prog='xml2json',
        usage='%prog -t xml2json -o file.json [file]'
    )
    p.add_option('--type', '-t', help="'xml2json' or 'json2xml'", default="xml2json")
    p.add_option('--out', '-o', help="Write to OUT instead of stdout")
    p.add_option(
        '--strip_text', action="store_true",
        dest="strip_text", help="Strip text for xml2json")
    p.add_option(
        '--pretty', action="store_true",
        dest="pretty", help="Format JSON output so it is easier to read")
    p.add_option(
        '--strip_namespace', action="store_true",
        dest="strip_ns", help="Strip namespace for xml2json")
    p.add_option(
        '--strip_newlines', action="store_true",
        dest="strip_nl", help="Strip newlines for xml2json")
    options, arguments = p.parse_args()
    inputstream = sys.stdin
    if len(arguments) == 1:
        try:
            inputstream = open(arguments[0])
        except OSError:
            # Previously a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; only file-open failures belong here.
            sys.stderr.write("Problem reading '{0}'\n".format(arguments[0]))
            p.print_help()
            sys.exit(-1)
    data = inputstream.read()  # renamed from `input`, which shadowed the builtin
    strip = 1 if options.strip_text else 0
    strip_ns = 1 if options.strip_ns else 0
    if options.strip_nl:
        data = data.replace('\n', '').replace('\r', '')
    if options.type == "xml2json":
        out = xml2json(data, options, strip_ns, strip)
    else:
        out = json2xml(data)
    if options.out:
        # `with` guarantees the output file is closed even if the write fails
        # (the old code leaked the handle on error and shadowed `file`).
        with open(options.out, 'w') as outfile:
            outfile.write(out)
    else:
        print(out)
if __name__ == "__main__":
main()
|
jorahn/icy | icy/ext/xml2json.py | json2xml | python | def json2xml(json_data, factory=ET.Element):
if not isinstance(json_data, dict):
json_data = json.loads(json_data)
elem = internal_to_elem(json_data, factory)
return ET.tostring(elem) | Convert a JSON string into an XML string.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter. | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ext/xml2json.py#L181-L193 | [
"def internal_to_elem(pfsh, factory=ET.Element):\n\n \"\"\"Convert an internal dictionary (not JSON!) into an Element.\n\n Whatever Element implementation we could import will be\n used by default; if you want to use something else, pass the\n Element class as the factory parameter.\n \"\"\"\n\n a... | """
https://github.com/hay/xml2json
xml2json.py Convert XML to JSON
Relies on ElementTree for the XML parsing. This is based on
pesterfish.py but uses a different XML->JSON mapping.
The XML->JSON mapping is described at
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
Rewritten to a command line utility by Hay Kranen < github.com/hay > with
contributions from George Hamilton (gmh04) and Dan Brown (jdanbrown)
XML JSON
<e/> "e": null
<e>text</e> "e": "text"
<e name="value" /> "e": { "@name": "value" }
<e name="value">text</e> "e": { "@name": "value", "#text": "text" }
<e> <a>text</a ><b>text</b> </e> "e": { "a": "text", "b": "text" }
<e> <a>text</a> <a>text</a> </e> "e": { "a": ["text", "text"] }
<e> text <a>text</a> </e> "e": { "#text": "text", "a": "text" }
This is very similar to the mapping used for Yahoo Web Services
(http://developer.yahoo.com/common/json.html#xml).
This is a mess in that it is so unpredictable -- it requires lots of testing
(e.g. to see if values are lists or strings or dictionaries). For use
in Python this could be vastly cleaner. Think about whether the internal
form can be more self-consistent while maintaining good external
characteristics for the JSON.
Look at the Yahoo version closely to see how it works. Maybe can adopt
that completely if it makes more sense...
R. White, 2006 November 6
"""
import json
import optparse
import sys
import os
import xml.etree.cElementTree as ET
def strip_tag(tag):
    """Strip a leading ``{namespace}`` qualifier from an element tag, if present."""
    parts = tag.split('}')
    if len(parts) > 1:
        return parts[1]
    return tag
def elem_to_internal(elem, strip_ns=1, strip=1):
    """Convert an Element into an internal dictionary (not JSON!).

    Returns ``{tag: value}`` where value is None, a plain text string, or a
    dict mixing '@'-prefixed attributes, '#text'/'#tail' entries and child
    tags. Repeated child tags are merged into lists.
    """
    d = {}
    elem_tag = elem.tag
    if strip_ns:
        elem_tag = strip_tag(elem.tag)
    else:
        # NOTE(review): attributes are only collected when strip_ns is falsy
        # -- confirm that dropping them in the strip_ns case is intended.
        for key, value in list(elem.attrib.items()):
            d['@' + key] = value
    # loop over subelements to merge them
    for subelem in elem:
        v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
        tag = subelem.tag
        if strip_ns:
            tag = strip_tag(subelem.tag)
        value = v[tag]
        # EAFP merging: a second occurrence of `tag` promotes the entry to a
        # list; the first occurrence triggers KeyError and stores a scalar.
        try:
            # add to existing list for this tag
            d[tag].append(value)
        except AttributeError:
            # turn existing entry into a list
            d[tag] = [d[tag], value]
        except KeyError:
            # add a new non-list entry
            d[tag] = value
    text = elem.text
    tail = elem.tail
    if strip:
        # ignore leading and trailing whitespace
        if text:
            text = text.strip()
        if tail:
            tail = tail.strip()
    if tail:
        d['#tail'] = tail
    if d:
        # use #text element if other attributes exist
        if text:
            d["#text"] = text
    else:
        # text is the value if no attributes
        d = text or None
    return {elem_tag: d}
def internal_to_elem(pfsh, factory=ET.Element):
    """Build an Element from the internal dictionary form (not JSON!).

    The default Element implementation is whatever could be imported at
    module load time; pass a different Element class as ``factory`` to
    override it.
    """
    keys = list(pfsh.keys())
    if len(keys) != 1:
        raise ValueError("Illegal structure with multiple tags: %s" % keys)
    tag = keys[0]
    value = pfsh[tag]

    attribs = {}
    text = None
    tail = None
    children = []
    if not isinstance(value, dict):
        # A scalar value is simply the element's text content.
        text = value
    else:
        for k, v in list(value.items()):
            if k[:1] == "@":
                attribs[k[1:]] = v
            elif k == "#text":
                text = v
            elif k == "#tail":
                tail = v
            elif isinstance(v, list):
                # Repeated tags: one child element per list item.
                children.extend(
                    internal_to_elem({k: item}, factory=factory) for item in v
                )
            else:
                children.append(internal_to_elem({k: v}, factory=factory))

    elem = factory(tag, attribs)
    for child in children:
        elem.append(child)
    elem.text = text
    elem.tail = tail
    return elem
def elem2json(elem, options, strip_ns=1, strip=1):
    """Serialise an ElementTree or Element to a JSON string.

    ``options.pretty`` selects indented, key-sorted output.
    """
    root = elem.getroot() if hasattr(elem, 'getroot') else elem
    internal = elem_to_internal(root, strip_ns=strip_ns, strip=strip)
    if options.pretty:
        return json.dumps(internal, sort_keys=True, indent=4,
                          separators=(',', ': '))
    return json.dumps(internal)
def json2elem(json_data, factory=ET.Element):
    """Parse a JSON string and convert it into an Element.

    Whatever Element implementation we could import will be used by
    default; pass a different Element class as ``factory`` to override.
    """
    internal = json.loads(json_data)
    return internal_to_elem(internal, factory)
def xml2json(xmlstring, options, strip_ns=1, strip=1):
    """Parse an XML string and return its JSON representation."""
    root = ET.fromstring(xmlstring)
    return elem2json(root, options, strip_ns=strip_ns, strip=strip)
def main():
    """CLI entry point: convert XML to JSON (default) or JSON to XML."""
    p = optparse.OptionParser(
        description='Converts XML to JSON or the other way around. Reads from standard input by default, or from file if given.',
        prog='xml2json',
        usage='%prog -t xml2json -o file.json [file]'
    )
    p.add_option('--type', '-t', help="'xml2json' or 'json2xml'", default="xml2json")
    p.add_option('--out', '-o', help="Write to OUT instead of stdout")
    p.add_option(
        '--strip_text', action="store_true",
        dest="strip_text", help="Strip text for xml2json")
    p.add_option(
        '--pretty', action="store_true",
        dest="pretty", help="Format JSON output so it is easier to read")
    p.add_option(
        '--strip_namespace', action="store_true",
        dest="strip_ns", help="Strip namespace for xml2json")
    p.add_option(
        '--strip_newlines', action="store_true",
        dest="strip_nl", help="Strip newlines for xml2json")
    options, arguments = p.parse_args()
    inputstream = sys.stdin
    if len(arguments) == 1:
        try:
            inputstream = open(arguments[0])
        except OSError:
            # Previously a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; only file-open failures belong here.
            sys.stderr.write("Problem reading '{0}'\n".format(arguments[0]))
            p.print_help()
            sys.exit(-1)
    data = inputstream.read()  # renamed from `input`, which shadowed the builtin
    strip = 1 if options.strip_text else 0
    strip_ns = 1 if options.strip_ns else 0
    if options.strip_nl:
        data = data.replace('\n', '').replace('\r', '')
    if options.type == "xml2json":
        out = xml2json(data, options, strip_ns, strip)
    else:
        out = json2xml(data)
    if options.out:
        # `with` guarantees the output file is closed even if the write fails
        # (the old code leaked the handle on error and shadowed `file`).
        with open(options.out, 'w') as outfile:
            outfile.write(out)
    else:
        print(out)
if __name__ == "__main__":
main()
|
jorahn/icy | icy/utils.py | pdf_extract_text | python | def pdf_extract_text(path, pdfbox_path, pwd='', timeout=120):
if not os.path.isfile(path):
raise IOError('path must be the location of the source pdf-file')
if not os.path.isfile(pdfbox_path):
raise IOError('pdfbox_path must be the location of the pdfbox.jar')
import subprocess
for p in os.environ['PATH'].split(':'):
if os.path.isfile(os.path.join(p, 'java')):
break
else:
print('java is not on the PATH')
return
try:
if pwd == '':
cmd = ['java', '-jar', pdfbox_path, 'ExtractText', path, path+'.txt']
else:
cmd = ['java', '-jar', pdfbox_path, 'ExtractText', '-password', pwd,
path, path+'.txt']
subprocess.check_call(cmd, stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=timeout)
except subprocess.TimeoutExpired as e:
print('Timeout of {:.1f} min expired'.format(timeout/60))
except subprocess.CalledProcessError as e:
print('Text could not successfully be extracted.') | Utility to use PDFBox from pdfbox.apache.org to extract Text from a PDF
Parameters
----------
path : str
Path to source pdf-file
pdfbox_path : str
Path to pdfbox-app-x.y.z.jar
pwd : str, optional
Password for protected pdf files
timeout : int, optional
Seconds to wait for a result before raising an exception (defaults to 120).
Returns
-------
file
Writes the result as the name of the source file and appends '.txt'.
Notes
-----
- Requires pdfbox-app-x.y.z.jar in a recent version (see http://pdfbox.apache.org).
- Requires Java (JDK) 1.5 or newer (see http://www.oracle.com/technetwork/java/javase/downloads/index.html).
- Requires java to be on the PATH. | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/utils.py#L9-L61 | null | import os
def str_remove_accents(s):
    """Return *s* with accented characters reduced to their ASCII base form."""
    import unicodedata
    # NFD splits each accented character into base char + combining mark;
    # the ascii/ignore round-trip then drops the combining marks.
    decomposed = unicodedata.normalize('NFD', s)
    return decomposed.encode('ascii', 'ignore').decode('ascii')
def pdf_extract_text(path, pdfbox_path, pwd='', timeout=120):
    """Utility to use PDFBox from pdfbox.apache.org to extract Text from a PDF

    Parameters
    ----------
    path : str
        Path to source pdf-file
    pdfbox_path : str
        Path to pdfbox-app-x.y.z.jar
    pwd : str, optional
        Password for protected pdf files
    timeout : int, optional
        Seconds to wait for a result before raising an exception (defaults to 120).

    Returns
    -------
    file
        Writes the result as the name of the source file and appends '.txt'.

    Notes
    -----
    - Requires pdfbox-app-x.y.z.jar in a recent version (see http://pdfbox.apache.org).
    - Requires Java (JDK) 1.5 or newer (see http://www.oracle.com/technetwork/java/javase/downloads/index.html).
    - Requires java to be on the PATH.
    """
    if not os.path.isfile(path):
        raise IOError('path must be the location of the source pdf-file')
    if not os.path.isfile(pdfbox_path):
        raise IOError('pdfbox_path must be the location of the pdfbox.jar')
    import shutil
    import subprocess
    # shutil.which honours os.pathsep (and PATHEXT on Windows), unlike the
    # previous hand-rolled scan of os.environ['PATH'].split(':') which was
    # broken on Windows.
    if shutil.which('java') is None:
        print('java is not on the PATH')
        return
    cmd = ['java', '-jar', pdfbox_path, 'ExtractText']
    if pwd != '':
        cmd.extend(['-password', pwd])
    cmd.extend([path, path + '.txt'])
    try:
        subprocess.check_call(cmd, stdin=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=timeout)
    except subprocess.TimeoutExpired:
        print('Timeout of {:.1f} min expired'.format(timeout/60))
    except subprocess.CalledProcessError:
        print('Text could not successfully be extracted.')
def xml_to_json(s):
    """Convert an XML string to pretty-printed JSON via the bundled xml2json module."""
    from icy.ext.xml2json import xml2json
    from collections import namedtuple
    # xml2json expects an options object exposing a `pretty` attribute;
    # fake a minimal one with pretty=True.
    Options = namedtuple('options', ['pretty'])
    xml2json_opts = Options(True)
    return xml2json(s, xml2json_opts)
simonw/datasette | datasette/publish/common.py | fail_if_publish_binary_not_installed | python | def fail_if_publish_binary_not_installed(binary, publish_target, install_link):
if not shutil.which(binary):
click.secho(
"Publishing to {publish_target} requires {binary} to be installed and configured".format(
publish_target=publish_target, binary=binary
),
bg="red",
fg="white",
bold=True,
err=True,
)
click.echo(
"Follow the instructions at {install_link}".format(
install_link=install_link
),
err=True,
)
sys.exit(1) | Exit (with error message) if ``binary` isn't installed | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/publish/common.py#L52-L70 | null | from ..utils import StaticMount
import click
import shutil
import sys
def add_common_publish_arguments_and_options(subcommand):
    """Attach the click arguments/options shared by every
    ``datasette publish`` subcommand to *subcommand* and return it.
    """
    # Decorators are applied in reverse so they appear in declaration order
    # in the generated --help output.
    for decorator in reversed((
        click.argument("files", type=click.Path(exists=True), nargs=-1),
        click.option(
            "-m",
            "--metadata",
            type=click.File(mode="r"),
            help="Path to JSON file containing metadata to publish",
        ),
        click.option("--extra-options", help="Extra options to pass to datasette serve"),
        click.option("--branch", help="Install datasette from a GitHub branch e.g. master"),
        click.option(
            "--template-dir",
            type=click.Path(exists=True, file_okay=False, dir_okay=True),
            help="Path to directory containing custom templates",
        ),
        click.option(
            "--plugins-dir",
            type=click.Path(exists=True, file_okay=False, dir_okay=True),
            help="Path to directory containing custom plugins",
        ),
        click.option(
            "--static",
            type=StaticMount(),
            help="mountpoint:path-to-directory for serving static files",
            multiple=True,
        ),
        click.option(
            "--install",
            help="Additional packages (e.g. plugins) to install",
            multiple=True,
        ),
        click.option("--version-note", help="Additional note to show on /-/versions"),
        click.option("--title", help="Title for metadata"),
        click.option("--license", help="License label for metadata"),
        click.option("--license_url", help="License URL for metadata"),
        click.option("--source", help="Source label for metadata"),
        click.option("--source_url", help="Source URL for metadata"),
        click.option("--about", help="About label for metadata"),
        click.option("--about_url", help="About URL for metadata"),
    )):
        subcommand = decorator(subcommand)
    return subcommand
|
simonw/datasette | datasette/utils.py | path_from_row_pks | python | def path_from_row_pks(row, pks, use_rowid, quote=True):
if use_rowid:
bits = [row['rowid']]
else:
bits = [
row[pk]["value"] if isinstance(row[pk], dict) else row[pk]
for pk in pks
]
if quote:
bits = [urllib.parse.quote_plus(str(bit)) for bit in bits]
else:
bits = [str(bit) for bit in bits]
return ','.join(bits) | Generate an optionally URL-quoted unique identifier
for a row from its primary keys. | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/utils.py#L75-L90 | null | from contextlib import contextmanager
from collections import OrderedDict
import base64
import click
import hashlib
import imp
import json
import os
import pkg_resources
import re
import shlex
import tempfile
import time
import shutil
import urllib
import numbers
try:
import pysqlite3 as sqlite3
except ImportError:
import sqlite3
# From https://www.sqlite.org/lang_keywords.html
reserved_words = set((
'abort action add after all alter analyze and as asc attach autoincrement '
'before begin between by cascade case cast check collate column commit '
'conflict constraint create cross current_date current_time '
'current_timestamp database default deferrable deferred delete desc detach '
'distinct drop each else end escape except exclusive exists explain fail '
'for foreign from full glob group having if ignore immediate in index '
'indexed initially inner insert instead intersect into is isnull join key '
'left like limit match natural no not notnull null of offset on or order '
'outer plan pragma primary query raise recursive references regexp reindex '
'release rename replace restrict right rollback row savepoint select set '
'table temp temporary then to transaction trigger union unique update using '
'vacuum values view virtual when where with without'
).split())
SPATIALITE_DOCKERFILE_EXTRAS = r'''
RUN apt-get update && \
apt-get install -y python3-dev gcc libsqlite3-mod-spatialite && \
rm -rf /var/lib/apt/lists/*
ENV SQLITE_EXTENSIONS /usr/lib/x86_64-linux-gnu/mod_spatialite.so
'''
class InterruptedError(Exception):
pass
class Results:
    """Rows returned by a SQL query, plus cursor metadata and a truncation flag."""

    def __init__(self, rows, truncated, description):
        # `description` follows the DB-API cursor.description shape: a
        # sequence of tuples whose first item is the column name.
        self.rows = rows
        self.truncated = truncated
        self.description = description

    @property
    def columns(self):
        """Column names, in query order."""
        return [col[0] for col in self.description]

    def __iter__(self):
        return iter(self.rows)

    def __len__(self):
        return len(self.rows)
def urlsafe_components(token):
    """Split *token* on commas and URL-decode each component."""
    components = token.split(',')
    return [urllib.parse.unquote_plus(component) for component in components]
def compound_keys_after_sql(pks, start_index=0):
    """Build the SQL WHERE fragment for keyset pagination over a compound
    primary key, using placeholders :p0, :p1, ... starting at *start_index*.
    """
    # Implementation of keyset pagination
    # See https://github.com/simonw/datasette/issues/190
    # For pk1/pk2/pk3 returns:
    #
    # ([pk1] > :p0)
    # or
    # ([pk1] = :p0 and [pk2] > :p1)
    # or
    # ([pk1] = :p0 and [pk2] = :p1 and [pk3] > :p2)
    or_clauses = []
    pks_left = pks[:]
    while pks_left:
        # NOTE(review): this empty-list assignment is dead code -- it is
        # immediately overwritten by the comprehension below.
        and_clauses = []
        last = pks_left[-1]
        rest = pks_left[:-1]
        # All leading key parts must match exactly...
        and_clauses = ['{} = :p{}'.format(
            escape_sqlite(pk), (i + start_index)
        ) for i, pk in enumerate(rest)]
        # ...and the final key part must be strictly greater.
        and_clauses.append('{} > :p{}'.format(
            escape_sqlite(last), (len(rest) + start_index)
        ))
        or_clauses.append('({})'.format(' and '.join(and_clauses)))
        pks_left.pop()
    or_clauses.reverse()
    return '({})'.format('\n or\n'.join(or_clauses))
class CustomJSONEncoder(json.JSONEncoder):
    """JSON encoder that understands sqlite3 rows/cursors and raw bytes."""

    def default(self, obj):
        # sqlite3.Row behaves like a sequence; serialise it as a plain tuple.
        if isinstance(obj, sqlite3.Row):
            return tuple(obj)
        if isinstance(obj, sqlite3.Cursor):
            return list(obj)
        if isinstance(obj, bytes):
            # Does it encode to utf8?
            try:
                return obj.decode('utf8')
            except UnicodeDecodeError:
                # Not valid UTF-8: emit a base64 wrapper clients can detect
                # via the "$base64" marker.
                return {
                    '$base64': True,
                    'encoded': base64.b64encode(obj).decode('latin1'),
                }
        return json.JSONEncoder.default(self, obj)
@contextmanager
def sqlite_timelimit(conn, ms):
    """Context manager that interrupts queries on *conn* after *ms* milliseconds.

    Uses SQLite's progress handler: returning a truthy value from the handler
    aborts the statement currently executing on the connection.
    """
    deadline = time.time() + (ms / 1000)
    # n is the number of SQLite virtual machine instructions that will be
    # executed between each check. It's hard to know what to pick here.
    # After some experimentation, I've decided to go with 1000 by default and
    # 1 for time limits that are less than 50ms
    n = 1000
    if ms < 50:
        n = 1

    def handler():
        if time.time() >= deadline:
            return 1

    conn.set_progress_handler(handler, n)
    try:
        yield
    finally:
        # Always clear the handler -- previously an exception in the wrapped
        # block left the deadline handler installed, silently time-limiting
        # every later query on this connection.
        conn.set_progress_handler(None, n)
class InvalidSql(Exception):
    """Raised when a user-supplied SQL statement is not an allowed SELECT."""
    pass
allowed_sql_res = [
re.compile(r'^select\b'),
re.compile(r'^explain select\b'),
re.compile(r'^explain query plan select\b'),
re.compile(r'^with\b'),
]
disallawed_sql_res = [
(re.compile('pragma'), 'Statement may not contain PRAGMA'),
]
def validate_sql_select(sql):
    """Raise InvalidSql unless *sql* is a permitted read-only SELECT/WITH statement."""
    sql = sql.strip().lower()
    # Must match one of the allowed prefixes (select / explain select / with ...)
    if not any(r.match(sql) for r in allowed_sql_res):
        raise InvalidSql('Statement must be a SELECT')
    # ...and must not contain any forbidden construct anywhere in the text.
    for r, msg in disallawed_sql_res:
        if r.search(sql):
            raise InvalidSql(msg)
def append_querystring(url, querystring):
    """Append *querystring* to *url*, using '&' if the URL already has a query."""
    if "?" in url:
        separator = "&"
    else:
        separator = "?"
    return url + separator + querystring
def path_with_added_args(request, args, path=None):
    """Return *path* (default: the request path) with *args* merged into its query string.

    ``args`` is a dict or a sequence of ``(key, value)`` pairs; a value of
    ``None`` removes that key from the existing query string instead.
    """
    path = path or request.path
    if isinstance(args, dict):
        args = args.items()
    removals = {key for key, value in args if value is None}
    pairs = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(request.query_string)
        if key not in removals
    ]
    pairs.extend((key, value) for key, value in args if value is not None)
    encoded = urllib.parse.urlencode(pairs)
    return path + ('?{}'.format(encoded) if encoded else '')
def path_with_removed_args(request, args, path=None):
    """Return *path* (default: the request path) with matching query args removed.

    ``args`` may be a set of keys (remove by key) or a dict (remove only
    pairs where both key and value match).
    """
    query_string = request.query_string
    if path is None:
        path = request.path
    elif "?" in path:
        # An explicitly supplied path may carry its own query string.
        path, query_string = path.split("?", 1)
    if isinstance(args, set):
        def should_remove(key, value):
            return key in args
    elif isinstance(args, dict):
        # Must match key AND value
        def should_remove(key, value):
            return args.get(key) == value
    kept = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(query_string)
        if not should_remove(key, value)
    ]
    encoded = urllib.parse.urlencode(kept)
    return path + ('?{}'.format(encoded) if encoded else '')
def path_with_replaced_args(request, args, path=None):
    """Return *path* (default: the request path) with the given query args replaced.

    Every key present in ``args`` is dropped from the existing query string,
    then the new pairs (except those with a ``None`` value) are appended.
    """
    path = path or request.path
    if isinstance(args, dict):
        args = args.items()
    replaced_keys = {pair[0] for pair in args}
    pairs = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(request.query_string)
        if key not in replaced_keys
    ]
    pairs.extend(pair for pair in args if pair[1] is not None)
    encoded = urllib.parse.urlencode(pairs)
    return path + ('?{}'.format(encoded) if encoded else '')
_css_re = re.compile(r'''['"\n\\]''')
_boring_keyword_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
def escape_css_string(s):
    """Escape quotes, backslashes and newlines for embedding in a CSS string."""
    def _hex_escape(match):
        return '\\{:X}'.format(ord(match.group()))
    return re.sub(r'''['"\n\\]''', _hex_escape, s)
def escape_sqlite(s):
    """Return *s* bracket-quoted for use as a SQLite identifier, unless it is
    a plain identifier that is not a reserved word."""
    if _boring_keyword_re.match(s) and (s.lower() not in reserved_words):
        return s
    else:
        return '[{}]'.format(s)
def make_dockerfile(files, metadata_file, extra_options, branch, template_dir, plugins_dir, static, install, spatialite, version_note):
    """Render the Dockerfile text used by `datasette publish` for these options."""
    # Each CMD token is pre-wrapped in double quotes because the result is
    # interpolated into a JSON-array-style Dockerfile CMD instruction.
    cmd = ['"datasette"', '"serve"', '"--host"', '"0.0.0.0"']
    cmd.append('"' + '", "'.join(files) + '"')
    cmd.extend(['"--cors"', '"--port"', '"8001"', '"--inspect-file"', '"inspect-data.json"'])
    if metadata_file:
        cmd.extend(['"--metadata"', '"{}"'.format(metadata_file)])
    if template_dir:
        cmd.extend(['"--template-dir"', '"templates/"'])
    if plugins_dir:
        cmd.extend(['"--plugins-dir"', '"plugins/"'])
    if version_note:
        cmd.extend(['"--version-note"', '"{}"'.format(version_note)])
    if static:
        for mount_point, _ in static:
            cmd.extend(['"--static"', '"{}:{}"'.format(mount_point, mount_point)])
    if extra_options:
        for opt in extra_options.split():
            cmd.append('"{}"'.format(opt))
    # `branch` switches the install source from PyPI to a GitHub archive.
    if branch:
        install = ['https://github.com/simonw/datasette/archive/{}.zip'.format(
            branch
        )] + list(install)
    else:
        install = ['datasette'] + list(install)
    return '''
FROM python:3.6
COPY . /app
WORKDIR /app
{spatialite_extras}
RUN pip install -U {install_from}
RUN datasette inspect {files} --inspect-file inspect-data.json
EXPOSE 8001
CMD [{cmd}]'''.format(
        files=' '.join(files),
        cmd=', '.join(cmd),
        install_from=' '.join(install),
        spatialite_extras=SPATIALITE_DOCKERFILE_EXTRAS if spatialite else '',
    ).strip()
@contextmanager
def temporary_docker_directory(
    files,
    name,
    metadata,
    extra_options,
    branch,
    template_dir,
    plugins_dir,
    static,
    install,
    spatialite,
    version_note,
    extra_metadata=None
):
    """Context manager that assembles a temporary build directory (Dockerfile,
    database files, metadata, templates, plugins, static assets) for
    `datasette publish`, yields its path, and cleans up afterwards.

    Note: chdirs into the temporary directory for the duration and restores
    the original working directory on exit.
    """
    extra_metadata = extra_metadata or {}
    tmp = tempfile.TemporaryDirectory()
    # We create a datasette folder in there to get a nicer now deploy name
    datasette_dir = os.path.join(tmp.name, name)
    os.mkdir(datasette_dir)
    saved_cwd = os.getcwd()
    # Resolve file paths before chdir, since `files` may be relative.
    file_paths = [
        os.path.join(saved_cwd, file_path)
        for file_path in files
    ]
    file_names = [os.path.split(f)[-1] for f in files]
    if metadata:
        metadata_content = json.load(metadata)
    else:
        metadata_content = {}
    # extra_metadata entries override/extend the loaded metadata (falsy
    # values are skipped).
    for key, value in extra_metadata.items():
        if value:
            metadata_content[key] = value
    try:
        dockerfile = make_dockerfile(
            file_names,
            metadata_content and 'metadata.json',
            extra_options,
            branch,
            template_dir,
            plugins_dir,
            static,
            install,
            spatialite,
            version_note,
        )
        os.chdir(datasette_dir)
        if metadata_content:
            open('metadata.json', 'w').write(json.dumps(metadata_content, indent=2))
        open('Dockerfile', 'w').write(dockerfile)
        for path, filename in zip(file_paths, file_names):
            link_or_copy(path, os.path.join(datasette_dir, filename))
        if template_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, template_dir),
                os.path.join(datasette_dir, 'templates')
            )
        if plugins_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, plugins_dir),
                os.path.join(datasette_dir, 'plugins')
            )
        for mount_point, path in static:
            link_or_copy_directory(
                os.path.join(saved_cwd, path),
                os.path.join(datasette_dir, mount_point)
            )
        yield datasette_dir
    finally:
        tmp.cleanup()
        os.chdir(saved_cwd)
@contextmanager
def temporary_heroku_directory(
    files,
    name,
    metadata,
    extra_options,
    branch,
    template_dir,
    plugins_dir,
    static,
    install,
    version_note,
    extra_metadata=None
):
    """Context manager that assembles a temporary Heroku app directory
    (Procfile, requirements.txt, runtime.txt, database files, metadata,
    templates, plugins, static assets) for `datasette publish heroku`.

    Note: chdirs into the temporary directory for the duration and restores
    the original working directory on exit.
    """
    # FIXME: lots of duplicated code from above
    extra_metadata = extra_metadata or {}
    tmp = tempfile.TemporaryDirectory()
    saved_cwd = os.getcwd()
    # Resolve file paths before chdir, since `files` may be relative.
    file_paths = [
        os.path.join(saved_cwd, file_path)
        for file_path in files
    ]
    file_names = [os.path.split(f)[-1] for f in files]
    if metadata:
        metadata_content = json.load(metadata)
    else:
        metadata_content = {}
    for key, value in extra_metadata.items():
        if value:
            metadata_content[key] = value
    try:
        os.chdir(tmp.name)
        if metadata_content:
            open('metadata.json', 'w').write(json.dumps(metadata_content, indent=2))
        open('runtime.txt', 'w').write('python-3.6.7')
        # `branch` switches the install source from PyPI to a GitHub archive.
        if branch:
            install = ['https://github.com/simonw/datasette/archive/{branch}.zip'.format(
                branch=branch
            )] + list(install)
        else:
            install = ['datasette'] + list(install)
        open('requirements.txt', 'w').write('\n'.join(install))
        os.mkdir('bin')
        open('bin/post_compile', 'w').write('datasette inspect --inspect-file inspect-data.json')
        # `extras` collects the datasette serve flags interpolated into the Procfile.
        extras = []
        if template_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, template_dir),
                os.path.join(tmp.name, 'templates')
            )
            extras.extend(['--template-dir', 'templates/'])
        if plugins_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, plugins_dir),
                os.path.join(tmp.name, 'plugins')
            )
            extras.extend(['--plugins-dir', 'plugins/'])
        if version_note:
            extras.extend(['--version-note', version_note])
        if metadata_content:
            extras.extend(['--metadata', 'metadata.json'])
        if extra_options:
            extras.extend(extra_options.split())
        for mount_point, path in static:
            link_or_copy_directory(
                os.path.join(saved_cwd, path),
                os.path.join(tmp.name, mount_point)
            )
            extras.extend(['--static', '{}:{}'.format(mount_point, mount_point)])
        quoted_files = " ".join(map(shlex.quote, file_names))
        procfile_cmd = 'web: datasette serve --host 0.0.0.0 {quoted_files} --cors --port $PORT --inspect-file inspect-data.json {extras}'.format(
            quoted_files=quoted_files,
            extras=' '.join(extras),
        )
        open('Procfile', 'w').write(procfile_cmd)
        for path, filename in zip(file_paths, file_names):
            link_or_copy(path, os.path.join(tmp.name, filename))
        yield
    finally:
        tmp.cleanup()
        os.chdir(saved_cwd)
def detect_primary_keys(conn, table):
    """Return the primary key column names for *table*, in key order."""
    info = conn.execute('PRAGMA table_info("{}")'.format(table)).fetchall()
    # The final column of table_info is the 1-based position of the column
    # within the primary key (0 for non-key columns).
    pk_rows = sorted((row for row in info if row[-1]), key=lambda row: row[-1])
    return [str(row[1]) for row in pk_rows]
def get_outbound_foreign_keys(conn, table):
    """List the foreign keys defined on *table* as table/column dicts."""
    rows = conn.execute(
        'PRAGMA foreign_key_list([{}])'.format(table)
    ).fetchall()
    fks = []
    for row in rows:
        if row is None:
            continue
        # PRAGMA columns: id, seq, table, from, to, on_update, on_delete, match
        _, _, other_table, from_column, to_column, _, _, _ = row
        fks.append({
            'other_table': other_table,
            'column': from_column,
            'other_column': to_column
        })
    return fks
def get_all_foreign_keys(conn):
    """Map every table to its foreign key relationships.

    Returns ``{table: {"incoming": [...], "outgoing": [...]}}`` where each
    entry is a dict with other_table / column / other_column keys.
    """
    tables = [r[0] for r in conn.execute('select name from sqlite_master where type="table"')]
    table_to_foreign_keys = {}
    for table in tables:
        table_to_foreign_keys[table] = {
            'incoming': [],
            'outgoing': [],
        }
    for table in tables:
        infos = conn.execute(
            'PRAGMA foreign_key_list([{}])'.format(table)
        ).fetchall()
        for info in infos:
            if info is not None:
                # PRAGMA columns: id, seq, table, from, to, on_update, on_delete, match
                id, seq, table_name, from_, to_, on_update, on_delete, match = info
                if table_name not in table_to_foreign_keys:
                    # Weird edge case where something refers to a table that does
                    # not actually exist
                    continue
                table_to_foreign_keys[table_name]['incoming'].append({
                    'other_table': table,
                    'column': to_,
                    'other_column': from_
                })
                table_to_foreign_keys[table]['outgoing'].append({
                    'other_table': table_name,
                    'column': from_,
                    'other_column': to_
                })
    return table_to_foreign_keys
def detect_spatialite(conn):
    """Return True if the database has a SpatiaLite geometry_columns table."""
    rows = conn.execute(
        'select 1 from sqlite_master where tbl_name = "geometry_columns"'
    ).fetchall()
    return bool(rows)
def detect_fts(conn, table):
    "Detect if table has a corresponding FTS virtual table and return it"
    rows = conn.execute(detect_fts_sql(table)).fetchall()
    if len(rows) == 0:
        # No FTS virtual table is associated with this table.
        return None
    else:
        # Return the name of the first matching FTS table.
        return rows[0][0]
def detect_fts_sql(table):
    """Return SQL that finds FTS virtual tables associated with *table*,
    either via an explicit content= option or by sharing the table name."""
    return r'''
        select name from sqlite_master
        where rootpage = 0
        and (
            sql like '%VIRTUAL TABLE%USING FTS%content="{table}"%'
            or (
                tbl_name = "{table}"
                and sql like '%VIRTUAL TABLE%USING FTS%'
            )
        )
    '''.format(table=table)
def detect_json1(conn=None):
    """Return True if the SQLite connection supports the JSON1 extension."""
    if conn is None:
        conn = sqlite3.connect(":memory:")
    try:
        # Probing with a trivial json() call is the simplest capability check.
        conn.execute("SELECT json('{}')")
    except Exception:
        return False
    return True
def table_columns(conn, table):
    """Return the column names of *table*, in declaration order."""
    return [
        # table_info rows: (cid, name, type, notnull, dflt_value, pk)
        r[1]
        for r in conn.execute(
            "PRAGMA table_info({});".format(escape_sqlite(table))
        ).fetchall()
    ]
filter_column_re = re.compile(r'^_filter_column_\d+$')
def filters_should_redirect(special_args):
    """Convert legacy _filter_column/_filter_op/_filter_value querystring
    arguments (plain and numbered) into redirect parameters.

    Returns a list of (key, value) pairs: ("column__op", value) entries to
    add, plus (original_key, None) entries that remove the legacy keys.
    """
    redirect_params = []
    # Un-numbered variant: _filter_column=foo&_filter_op=exact&_filter_value=...
    column = special_args.get('_filter_column')
    op = special_args.get('_filter_op') or ''
    value = special_args.get('_filter_value') or ''
    if '__' in op:
        # Operators like "gt__5" carry their value after the double underscore
        op, value = op.split('__', 1)
    if column:
        redirect_params.append(('{}__{}'.format(column, op), value))
    redirect_params.extend(
        (key, None)
        for key in ('_filter_column', '_filter_op', '_filter_value')
        if key in special_args
    )
    # Numbered variant: _filter_column_1=name&_filter_op_1=contains&...
    numbered_keys = [k for k in special_args if filter_column_re.match(k)]
    for column_key in numbered_keys:
        number = column_key.split('_')[-1]
        column = special_args[column_key]
        op = special_args.get('_filter_op_{}'.format(number)) or 'exact'
        value = special_args.get('_filter_value_{}'.format(number)) or ''
        if '__' in op:
            op, value = op.split('__', 1)
        if column:
            redirect_params.append(('{}__{}'.format(column, op), value))
        redirect_params.extend([
            ('_filter_column_{}'.format(number), None),
            ('_filter_op_{}'.format(number), None),
            ('_filter_value_{}'.format(number), None),
        ])
    return redirect_params
# Any whitespace character - used to reject URLs containing whitespace
whitespace_re = re.compile(r'\s')
def is_url(value):
    "Must start with http:// or https:// and contain JUST a URL"
    if not isinstance(value, str):
        return False
    if not value.startswith(('http://', 'https://')):
        return False
    # Any whitespace at all is invalid
    return whitespace_re.search(value) is None
# A string that is already safe to use directly as a CSS class name
css_class_re = re.compile(r'^[a-zA-Z]+[_a-zA-Z0-9-]*$')
# Characters that must be stripped before a string can become a CSS class
css_invalid_chars_re = re.compile(r'[^a-zA-Z0-9_\-]')
def to_css_class(s):
    """
    Given a string (e.g. a table name) returns a valid unique CSS class.
    For simple cases, just returns the string again. If the string is not a
    valid CSS class (we disallow - and _ prefixes even though they are valid
    as they may be confused with browser prefixes) we strip invalid characters
    and add a 6 char md5 sum suffix, to make sure two tables with identical
    names after stripping characters don't end up with the same CSS class.
    """
    if css_class_re.match(s):
        return s
    suffix = hashlib.md5(s.encode('utf8')).hexdigest()[:6]
    # Drop leading underscores/hyphens, collapse whitespace to hyphens,
    # then strip anything that is still not CSS-safe
    cleaned = '-'.join(s.lstrip('_').lstrip('-').split())
    cleaned = css_invalid_chars_re.sub('', cleaned)
    return '-'.join(part for part in (cleaned, suffix) if part)
def link_or_copy(src, dst):
    """Hard-link *src* to *dst*, falling back to a plain file copy.

    Intended for use in populating a temp directory. We link if possible,
    but fall back to copying if the temp directory is on a different device
    https://github.com/simonw/datasette/issues/141
    """
    try:
        os.link(src, dst)
    except OSError:
        # e.g. EXDEV when src and dst live on different filesystems
        shutil.copyfile(src, dst)
def link_or_copy_directory(src, dst):
    """Recursively copy *src* to *dst*, hard-linking files when possible
    and falling back to a full copy (e.g. across devices)."""
    try:
        shutil.copytree(src, dst, copy_function=os.link)
    except OSError:
        # NOTE(review): a failed hard-link pass may leave a partial dst tree
        # behind before this fallback runs - confirm dst does not already
        # exist at that point before changing this.
        shutil.copytree(src, dst)
def module_from_path(path, name):
    """Load and execute a Python module from a file path without
    registering it in sys.modules.

    Adapted from http://sayspy.blogspot.com/2011/07/how-to-import-module-from-just-file.html
    """
    # imp.new_module() comes from the deprecated `imp` module, which was
    # removed in Python 3.12; types.ModuleType is the supported equivalent.
    import types
    mod = types.ModuleType(name)
    mod.__file__ = path
    with open(path, 'r') as file:
        code = compile(file.read(), path, 'exec', dont_inherit=True)
    exec(code, mod.__dict__)
    return mod
def get_plugins(pm):
    """Return metadata dicts describing each plugin registered with the
    plugin manager *pm*.

    Each dict has "name", "static_path" and "templates_path" keys (the
    paths are None when the plugin ships no such resource directory), plus
    a "version" key when distribution info is available for the plugin.
    """
    plugins = []
    # Map plugin object -> distribution info, used below for version lookup
    plugin_to_distinfo = dict(pm.list_plugin_distinfo())
    for plugin in pm.get_plugins():
        static_path = None
        templates_path = None
        try:
            if pkg_resources.resource_isdir(plugin.__name__, 'static'):
                static_path = pkg_resources.resource_filename(plugin.__name__, 'static')
            if pkg_resources.resource_isdir(plugin.__name__, 'templates'):
                templates_path = pkg_resources.resource_filename(plugin.__name__, 'templates')
        except (KeyError, ImportError):
            # Caused by --plugins_dir= plugins - KeyError/ImportError thrown in Py3.5
            pass
        plugin_info = {
            'name': plugin.__name__,
            'static_path': static_path,
            'templates_path': templates_path,
        }
        distinfo = plugin_to_distinfo.get(plugin)
        if distinfo:
            plugin_info['version'] = distinfo.version
        plugins.append(plugin_info)
    return plugins
# Output formats that may appear as a /table.format suffix in URLs
FORMATS = ('csv', 'json', 'jsono')
async def resolve_table_and_format(table_and_format, table_exists):
    """Split "table.format" into (table, format).

    A table whose name literally contains the dot wins over a format
    suffix; *table_exists* is an awaitable predicate used to check that.
    Returns (table_and_format, None) when no format suffix is recognised.
    """
    if '.' not in table_and_format:
        return table_and_format, None
    if await table_exists(table_and_format):
        # A table with this exact dotted name exists
        return table_and_format, None
    for fmt in FORMATS:
        suffix = ".{}".format(fmt)
        if table_and_format.endswith(suffix):
            return table_and_format[:-len(suffix)], fmt
    return table_and_format, None
def path_with_format(request, format, extra_qs=None):
    """Return request.path rewritten to request *format*.

    Appends ".format" when the path contains no dot; otherwise falls back
    to a ?_format= querystring argument. The existing querystring (and any
    *extra_qs* arguments, sorted) is preserved.
    """
    qs = extra_qs or {}
    path = request.path
    if "." in request.path:
        # Path already contains a dot - use a query argument instead
        qs["_format"] = format
    else:
        path = "{}.{}".format(path, format)
    if qs:
        encoded = urllib.parse.urlencode(sorted(qs.items()))
        if request.query_string:
            path = "{}?{}&{}".format(path, request.query_string, encoded)
        else:
            path = "{}?{}".format(path, encoded)
    elif request.query_string:
        path = "{}?{}".format(path, request.query_string)
    return path
class CustomRow(OrderedDict):
    """Loose imitation of sqlite3.Row: a dict that also supports
    index-based lookups against a fixed column order."""

    def __init__(self, columns, values=None):
        self.columns = columns
        if values:
            self.update(values)

    def __getitem__(self, key):
        # Integer keys index into the column list; anything else is a
        # normal dict lookup
        if isinstance(key, int):
            key = self.columns[key]
        return super().__getitem__(key)

    def __iter__(self):
        # Yield values in declared column order
        return (self[column] for column in self.columns)
def value_as_boolean(value):
    """Interpret common on/off strings as a boolean (case-insensitive).

    Raises ValueAsBooleanError for anything unrecognised.
    """
    lowered = value.lower()
    if lowered not in ('on', 'off', 'true', 'false', '1', '0'):
        raise ValueAsBooleanError
    return lowered in ('on', 'true', '1')
class ValueAsBooleanError(ValueError):
    "Raised when a string cannot be interpreted as a boolean."
    pass
class WriteLimitExceeded(Exception):
    "Raised by LimitedWriter when the configured byte limit is exceeded."
    pass
class LimitedWriter:
    """File-like wrapper that raises WriteLimitExceeded once more than
    limit_mb megabytes have been written through it."""

    def __init__(self, writer, limit_mb):
        # writer: any object exposing a .write() method
        self.writer = writer
        self.limit_bytes = limit_mb * 1024 * 1024
        self.bytes_count = 0

    def write(self, bytes):
        self.bytes_count += len(bytes)
        # A falsy limit (e.g. 0) disables the check entirely
        if self.limit_bytes and (self.bytes_count > self.limit_bytes):
            raise WriteLimitExceeded("CSV contains more than {} bytes".format(
                self.limit_bytes
            ))
        self.writer.write(bytes)
# Float infinities cannot be represented in JSON, so they get nulled out
_infinities = {float("inf"), float("-inf")}
def remove_infinites(row):
    """Return *row* with float +/-inf values replaced by None.

    The original row object is returned unchanged (same identity) when it
    contains no infinities.
    """
    def is_inf(value):
        return isinstance(value, float) and value in _infinities
    if not any(is_inf(value) for value in row):
        return row
    return [None if is_inf(value) else value for value in row]
class StaticMount(click.ParamType):
    """Click parameter type for --static mountpoint:directory options.

    convert() returns a (mount_point, directory_path) tuple, or calls
    self.fail() for malformed values or missing directories.
    """
    name = "static mount"

    def convert(self, value, param, ctx):
        if ":" not in value:
            self.fail(
                '"{}" should be of format mountpoint:directory'.format(value),
                param, ctx
            )
        # Split on the first colon only, so a directory path that itself
        # contains a colon (e.g. a Windows drive letter) does not raise
        # ValueError from tuple unpacking.
        path, dirpath = value.split(":", 1)
        if not os.path.exists(dirpath) or not os.path.isdir(dirpath):
            self.fail("%s is not a valid directory path" % value, param, ctx)
        return path, dirpath
def format_bytes(bytes):
    """Render a byte count as a human-readable string, e.g. "5.0 MB"."""
    size = float(bytes)
    for unit in ("bytes", "KB", "MB", "GB", "TB"):
        if size < 1024:
            break
        size /= 1024
    # Whole numbers for raw bytes, one decimal place for larger units
    if unit == "bytes":
        return "{} {}".format(int(size), unit)
    return "{:.1f} {}".format(size, unit)
|
simonw/datasette | datasette/utils.py | detect_primary_keys | python | def detect_primary_keys(conn, table):
" Figure out primary keys for a table. "
table_info_rows = [
row
for row in conn.execute(
'PRAGMA table_info("{}")'.format(table)
).fetchall()
if row[-1]
]
table_info_rows.sort(key=lambda row: row[-1])
return [str(r[1]) for r in table_info_rows] | Figure out primary keys for a table. | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/utils.py#L478-L488 | null | from contextlib import contextmanager
from collections import OrderedDict
import base64
import click
import hashlib
import imp
import json
import os
import pkg_resources
import re
import shlex
import tempfile
import time
import shutil
import urllib
import numbers
try:
import pysqlite3 as sqlite3
except ImportError:
import sqlite3
# From https://www.sqlite.org/lang_keywords.html
reserved_words = set((
'abort action add after all alter analyze and as asc attach autoincrement '
'before begin between by cascade case cast check collate column commit '
'conflict constraint create cross current_date current_time '
'current_timestamp database default deferrable deferred delete desc detach '
'distinct drop each else end escape except exclusive exists explain fail '
'for foreign from full glob group having if ignore immediate in index '
'indexed initially inner insert instead intersect into is isnull join key '
'left like limit match natural no not notnull null of offset on or order '
'outer plan pragma primary query raise recursive references regexp reindex '
'release rename replace restrict right rollback row savepoint select set '
'table temp temporary then to transaction trigger union unique update using '
'vacuum values view virtual when where with without'
).split())
SPATIALITE_DOCKERFILE_EXTRAS = r'''
RUN apt-get update && \
apt-get install -y python3-dev gcc libsqlite3-mod-spatialite && \
rm -rf /var/lib/apt/lists/*
ENV SQLITE_EXTENSIONS /usr/lib/x86_64-linux-gnu/mod_spatialite.so
'''
class InterruptedError(Exception):
    # Raised when a SQL query is aborted. NOTE(review): this shadows the
    # builtin InterruptedError within this module - confirm that is
    # intentional before relying on the builtin here.
    pass
class Results:
    """Container for rows returned by a SQL query.

    Carries the raw rows, a flag indicating whether the result set was
    truncated, and the cursor description used to derive column names.
    """

    def __init__(self, rows, truncated, description):
        self.rows = rows
        self.truncated = truncated
        self.description = description

    @property
    def columns(self):
        # First element of each cursor.description tuple is the column name
        return [column[0] for column in self.description]

    def __iter__(self):
        return iter(self.rows)

    def __len__(self):
        return len(self.rows)
def urlsafe_components(token):
    "Splits token on commas and URL decodes each component"
    return [
        urllib.parse.unquote_plus(component)
        for component in token.split(',')
    ]
def path_from_row_pks(row, pks, use_rowid, quote=True):
    """Generate an optionally URL-quoted unique identifier
    for a row from its primary keys."""
    if use_rowid:
        values = [row['rowid']]
    else:
        # Expanded cells may wrap the raw value in a {"value": ...} dict
        values = []
        for pk in pks:
            cell = row[pk]
            values.append(cell["value"] if isinstance(cell, dict) else cell)
    if quote:
        parts = [urllib.parse.quote_plus(str(value)) for value in values]
    else:
        parts = [str(value) for value in values]
    return ','.join(parts)
def compound_keys_after_sql(pks, start_index=0):
    """Return a SQL fragment implementing keyset pagination over the
    compound primary key columns *pks*, using numbered :pN parameters
    starting at *start_index*.

    See https://github.com/simonw/datasette/issues/190
    For pk1/pk2/pk3 returns:

        ([pk1] > :p0)
         or
        ([pk1] = :p0 and [pk2] > :p1)
         or
        ([pk1] = :p0 and [pk2] = :p1 and [pk3] > :p2)
    """
    or_clauses = []
    pks_left = pks[:]
    while pks_left:
        and_clauses = []
        last = pks_left[-1]
        rest = pks_left[:-1]
        # Equality on every column except the last, strict > on the last
        and_clauses = ['{} = :p{}'.format(
            escape_sqlite(pk), (i + start_index)
        ) for i, pk in enumerate(rest)]
        and_clauses.append('{} > :p{}'.format(
            escape_sqlite(last), (len(rest) + start_index)
        ))
        or_clauses.append('({})'.format(' and '.join(and_clauses)))
        pks_left.pop()
    # Clauses were built longest-first; emit them shortest-first
    or_clauses.reverse()
    return '({})'.format('\n or\n'.join(or_clauses))
class CustomJSONEncoder(json.JSONEncoder):
    """JSON encoder that understands sqlite3 rows/cursors and bytes.

    Bytes that decode as UTF-8 become strings; other bytes are wrapped in
    a {"$base64": True, "encoded": ...} marker object.
    """

    def default(self, obj):
        if isinstance(obj, sqlite3.Row):
            return tuple(obj)
        if isinstance(obj, sqlite3.Cursor):
            return list(obj)
        if isinstance(obj, bytes):
            try:
                return obj.decode('utf8')
            except UnicodeDecodeError:
                # Not valid UTF-8: ship it base64-encoded with a marker
                return {
                    '$base64': True,
                    'encoded': base64.b64encode(obj).decode('latin1'),
                }
        return json.JSONEncoder.default(self, obj)
@contextmanager
def sqlite_timelimit(conn, ms):
    """Context manager that aborts SQL executed on *conn* inside the block
    once *ms* milliseconds have elapsed, via a SQLite progress handler."""
    deadline = time.time() + (ms / 1000)
    # n is the number of SQLite virtual machine instructions that will be
    # executed between each check. It's hard to know what to pick here.
    # After some experimentation, I've decided to go with 1000 by default and
    # 1 for time limits that are less than 50ms
    n = 1000
    if ms < 50:
        n = 1
    def handler():
        # Returning a truthy value from a progress handler makes SQLite
        # abort the current query
        if time.time() >= deadline:
            return 1
    conn.set_progress_handler(handler, n)
    yield
    # NOTE(review): if the body raises, the handler is left installed;
    # a try/finally would be more robust - confirm before changing.
    conn.set_progress_handler(None, n)
class InvalidSql(Exception):
    "Raised when a user-supplied SQL statement is not an allowed SELECT."
    pass

# Statements must match one of these prefixes to be allowed at all
allowed_sql_res = [
    re.compile(r'^select\b'),
    re.compile(r'^explain select\b'),
    re.compile(r'^explain query plan select\b'),
    re.compile(r'^with\b'),
]
# (pattern, error message) pairs rejected anywhere in the statement
disallawed_sql_res = [
    (re.compile('pragma'), 'Statement may not contain PRAGMA'),
]

def validate_sql_select(sql):
    """Raise InvalidSql unless *sql* is a permitted read-only SELECT-style
    statement (case-insensitive, leading/trailing whitespace ignored)."""
    sql = sql.strip().lower()
    if not any(r.match(sql) for r in allowed_sql_res):
        raise InvalidSql('Statement must be a SELECT')
    for r, msg in disallawed_sql_res:
        if r.search(sql):
            raise InvalidSql(msg)
def append_querystring(url, querystring):
    "Append *querystring* to *url*, using ? or & as appropriate."
    separator = "&" if "?" in url else "?"
    return "{}{}{}".format(url, separator, querystring)
def path_with_added_args(request, args, path=None):
    """Return *path* (default request.path) with querystring arguments
    added. *args* is a dict or iterable of (key, value) pairs; a value of
    None removes that key from the existing querystring instead."""
    path = path or request.path
    if isinstance(args, dict):
        args = args.items()
    removals = {key for key, value in args if value is None}
    pairs = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(request.query_string)
        if key not in removals
    ]
    pairs.extend(
        (key, value) for key, value in args if value is not None
    )
    encoded = urllib.parse.urlencode(pairs)
    if encoded:
        return '{}?{}'.format(path, encoded)
    return path
def path_with_removed_args(request, args, path=None):
    """Return *path* (default request.path) minus querystring arguments.

    *args* may be a set of keys to drop, or a dict of {key: value} pairs
    where both key and value must match for the pair to be dropped.
    """
    query_string = request.query_string
    if path is None:
        path = request.path
    elif "?" in path:
        # An explicit path may carry its own querystring - use that one
        path, query_string = path.split("?", 1)
    if isinstance(args, set):
        def should_remove(key, value):
            return key in args
    elif isinstance(args, dict):
        # Must match key AND value
        def should_remove(key, value):
            return args.get(key) == value
    kept = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(query_string)
        if not should_remove(key, value)
    ]
    encoded = urllib.parse.urlencode(kept)
    if encoded:
        return '{}?{}'.format(path, encoded)
    return path
def path_with_replaced_args(request, args, path=None):
    """Return *path* (default request.path) with the given querystring
    arguments replaced. Pairs with a value of None are dropped entirely."""
    path = path or request.path
    if isinstance(args, dict):
        args = args.items()
    replaced_keys = {pair[0] for pair in args}
    pairs = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(request.query_string)
        if key not in replaced_keys
    ]
    pairs.extend(pair for pair in args if pair[1] is not None)
    encoded = urllib.parse.urlencode(pairs)
    if encoded:
        return '{}?{}'.format(path, encoded)
    return path
# Characters that must be escaped inside a CSS string: quotes, newline, backslash
_css_re = re.compile(r'''['"\n\\]''')
# An identifier made only of "boring" characters may need no [] quoting in SQL
_boring_keyword_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')

def escape_css_string(s):
    # Replace each special character with its \XX CSS hex escape
    return _css_re.sub(lambda m: '\\{:X}'.format(ord(m.group())), s)
def escape_sqlite(s):
    """Return *s* quoted for safe use as a SQLite identifier.

    Plain alphanumeric names that are not reserved words pass through
    unchanged; everything else is wrapped in [square brackets].
    """
    if _boring_keyword_re.match(s) and (s.lower() not in reserved_words):
        return s
    else:
        return '[{}]'.format(s)
def make_dockerfile(files, metadata_file, extra_options, branch, template_dir, plugins_dir, static, install, spatialite, version_note):
    """Return the text of a Dockerfile that serves *files* with
    `datasette serve` inside a python:3.6 container.

    metadata_file/template_dir/plugins_dir/static/version_note each add
    their corresponding CLI options to the CMD; *branch* installs datasette
    from a GitHub branch archive instead of PyPI; *spatialite* adds the apt
    packages and SQLITE_EXTENSIONS env var needed for SpatiaLite.
    """
    # Each token is pre-quoted because the list is joined into the JSON
    # array form of the Dockerfile CMD instruction.
    cmd = ['"datasette"', '"serve"', '"--host"', '"0.0.0.0"']
    cmd.append('"' + '", "'.join(files) + '"')
    cmd.extend(['"--cors"', '"--port"', '"8001"', '"--inspect-file"', '"inspect-data.json"'])
    if metadata_file:
        cmd.extend(['"--metadata"', '"{}"'.format(metadata_file)])
    if template_dir:
        cmd.extend(['"--template-dir"', '"templates/"'])
    if plugins_dir:
        cmd.extend(['"--plugins-dir"', '"plugins/"'])
    if version_note:
        cmd.extend(['"--version-note"', '"{}"'.format(version_note)])
    if static:
        for mount_point, _ in static:
            cmd.extend(['"--static"', '"{}:{}"'.format(mount_point, mount_point)])
    if extra_options:
        for opt in extra_options.split():
            cmd.append('"{}"'.format(opt))
    if branch:
        install = ['https://github.com/simonw/datasette/archive/{}.zip'.format(
            branch
        )] + list(install)
    else:
        install = ['datasette'] + list(install)
    return '''
FROM python:3.6
COPY . /app
WORKDIR /app
{spatialite_extras}
RUN pip install -U {install_from}
RUN datasette inspect {files} --inspect-file inspect-data.json
EXPOSE 8001
CMD [{cmd}]'''.format(
        files=' '.join(files),
        cmd=', '.join(cmd),
        install_from=' '.join(install),
        spatialite_extras=SPATIALITE_DOCKERFILE_EXTRAS if spatialite else '',
    ).strip()
@contextmanager
def temporary_docker_directory(
    files,
    name,
    metadata,
    extra_options,
    branch,
    template_dir,
    plugins_dir,
    static,
    install,
    spatialite,
    version_note,
    extra_metadata=None
):
    """Yield a temporary directory populated with everything needed to
    build a Docker image for `datasette publish`: Dockerfile, optional
    metadata.json, the database files, and any templates/plugins/static
    assets (hard-linked when possible).

    Changes the working directory into the tree for the duration of the
    block; restores it and removes the tree on exit.
    """
    extra_metadata = extra_metadata or {}
    tmp = tempfile.TemporaryDirectory()
    # We create a datasette folder in there to get a nicer now deploy name
    datasette_dir = os.path.join(tmp.name, name)
    os.mkdir(datasette_dir)
    saved_cwd = os.getcwd()
    file_paths = [
        os.path.join(saved_cwd, file_path)
        for file_path in files
    ]
    file_names = [os.path.split(f)[-1] for f in files]
    if metadata:
        # metadata is an open file-like object containing JSON
        metadata_content = json.load(metadata)
    else:
        metadata_content = {}
    # Truthy extra metadata values override the parsed metadata file
    for key, value in extra_metadata.items():
        if value:
            metadata_content[key] = value
    try:
        dockerfile = make_dockerfile(
            file_names,
            metadata_content and 'metadata.json',
            extra_options,
            branch,
            template_dir,
            plugins_dir,
            static,
            install,
            spatialite,
            version_note,
        )
        os.chdir(datasette_dir)
        if metadata_content:
            open('metadata.json', 'w').write(json.dumps(metadata_content, indent=2))
        open('Dockerfile', 'w').write(dockerfile)
        for path, filename in zip(file_paths, file_names):
            link_or_copy(path, os.path.join(datasette_dir, filename))
        if template_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, template_dir),
                os.path.join(datasette_dir, 'templates')
            )
        if plugins_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, plugins_dir),
                os.path.join(datasette_dir, 'plugins')
            )
        for mount_point, path in static:
            link_or_copy_directory(
                os.path.join(saved_cwd, path),
                os.path.join(datasette_dir, mount_point)
            )
        yield datasette_dir
    finally:
        tmp.cleanup()
        os.chdir(saved_cwd)
@contextmanager
def temporary_heroku_directory(
    files,
    name,
    metadata,
    extra_options,
    branch,
    template_dir,
    plugins_dir,
    static,
    install,
    version_note,
    extra_metadata=None
):
    """Yield a temporary directory laid out for a Heroku deploy:
    Procfile, requirements.txt, runtime.txt, bin/post_compile, the
    database files, and any templates/plugins/static assets.

    Changes the working directory into the tree for the duration of the
    block; restores it and removes the tree on exit.
    """
    # FIXME: lots of duplicated code from above
    extra_metadata = extra_metadata or {}
    tmp = tempfile.TemporaryDirectory()
    saved_cwd = os.getcwd()
    file_paths = [
        os.path.join(saved_cwd, file_path)
        for file_path in files
    ]
    file_names = [os.path.split(f)[-1] for f in files]
    if metadata:
        metadata_content = json.load(metadata)
    else:
        metadata_content = {}
    # Truthy extra metadata values override the parsed metadata file
    for key, value in extra_metadata.items():
        if value:
            metadata_content[key] = value
    try:
        os.chdir(tmp.name)
        if metadata_content:
            open('metadata.json', 'w').write(json.dumps(metadata_content, indent=2))
        open('runtime.txt', 'w').write('python-3.6.7')
        if branch:
            install = ['https://github.com/simonw/datasette/archive/{branch}.zip'.format(
                branch=branch
            )] + list(install)
        else:
            install = ['datasette'] + list(install)
        open('requirements.txt', 'w').write('\n'.join(install))
        os.mkdir('bin')
        # Heroku runs bin/post_compile after the build; use it to generate
        # the inspect-data.json cache
        open('bin/post_compile', 'w').write('datasette inspect --inspect-file inspect-data.json')
        extras = []
        if template_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, template_dir),
                os.path.join(tmp.name, 'templates')
            )
            extras.extend(['--template-dir', 'templates/'])
        if plugins_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, plugins_dir),
                os.path.join(tmp.name, 'plugins')
            )
            extras.extend(['--plugins-dir', 'plugins/'])
        if version_note:
            extras.extend(['--version-note', version_note])
        if metadata_content:
            extras.extend(['--metadata', 'metadata.json'])
        if extra_options:
            extras.extend(extra_options.split())
        for mount_point, path in static:
            link_or_copy_directory(
                os.path.join(saved_cwd, path),
                os.path.join(tmp.name, mount_point)
            )
            extras.extend(['--static', '{}:{}'.format(mount_point, mount_point)])
        quoted_files = " ".join(map(shlex.quote, file_names))
        procfile_cmd = 'web: datasette serve --host 0.0.0.0 {quoted_files} --cors --port $PORT --inspect-file inspect-data.json {extras}'.format(
            quoted_files=quoted_files,
            extras=' '.join(extras),
        )
        open('Procfile', 'w').write(procfile_cmd)
        for path, filename in zip(file_paths, file_names):
            link_or_copy(path, os.path.join(tmp.name, filename))
        yield
    finally:
        tmp.cleanup()
        os.chdir(saved_cwd)
def get_outbound_foreign_keys(conn, table):
    """Return the foreign keys declared on *table* as a list of dicts with
    "other_table", "column" and "other_column" keys."""
    rows = conn.execute(
        'PRAGMA foreign_key_list([{}])'.format(table)
    ).fetchall()
    fks = []
    for row in rows:
        if row is None:
            continue
        id, seq, other_table, from_, to_, on_update, on_delete, match = row
        fks.append({
            'other_table': other_table,
            'column': from_,
            'other_column': to_,
        })
    return fks
def get_all_foreign_keys(conn):
    """Return {table: {"incoming": [...], "outgoing": [...]}} describing
    every foreign key relationship in the connected SQLite database.

    Each relationship dict has "other_table", "column" and "other_column"
    keys, where "column" is the column on the table the entry is listed
    under.
    """
    # Use a single-quoted SQL string literal here: double quotes denote
    # identifiers in SQL, and SQLite only accepts "table" as a string via
    # a compatibility fallback that can be disabled at compile time.
    tables = [
        r[0]
        for r in conn.execute(
            "select name from sqlite_master where type='table'"
        )
    ]
    table_to_foreign_keys = {
        table: {'incoming': [], 'outgoing': []} for table in tables
    }
    for table in tables:
        infos = conn.execute(
            'PRAGMA foreign_key_list([{}])'.format(table)
        ).fetchall()
        for info in infos:
            if info is None:
                continue
            id, seq, table_name, from_, to_, on_update, on_delete, match = info
            if table_name not in table_to_foreign_keys:
                # Weird edge case where something refers to a table that
                # does not actually exist
                continue
            table_to_foreign_keys[table_name]['incoming'].append({
                'other_table': table,
                'column': to_,
                'other_column': from_
            })
            table_to_foreign_keys[table]['outgoing'].append({
                'other_table': table_name,
                'column': from_,
                'other_column': to_
            })
    return table_to_foreign_keys
def detect_spatialite(conn):
    "Return True if the database contains a SpatiaLite geometry_columns table."
    # Single-quote the string literal: double quotes denote identifiers in
    # SQL, and relying on SQLite's string-literal fallback is fragile.
    rows = conn.execute(
        "select 1 from sqlite_master where tbl_name = 'geometry_columns'"
    ).fetchall()
    return len(rows) > 0
def detect_fts(conn, table):
"Detect if table has a corresponding FTS virtual table and return it"
rows = conn.execute(detect_fts_sql(table)).fetchall()
if len(rows) == 0:
return None
else:
return rows[0][0]
def detect_fts_sql(table):
return r'''
select name from sqlite_master
where rootpage = 0
and (
sql like '%VIRTUAL TABLE%USING FTS%content="{table}"%'
or (
tbl_name = "{table}"
and sql like '%VIRTUAL TABLE%USING FTS%'
)
)
'''.format(table=table)
def detect_json1(conn=None):
if conn is None:
conn = sqlite3.connect(":memory:")
try:
conn.execute("SELECT json('{}')")
return True
except Exception:
return False
def table_columns(conn, table):
return [
r[1]
for r in conn.execute(
"PRAGMA table_info({});".format(escape_sqlite(table))
).fetchall()
]
filter_column_re = re.compile(r'^_filter_column_\d+$')
def filters_should_redirect(special_args):
redirect_params = []
# Handle _filter_column=foo&_filter_op=exact&_filter_value=...
filter_column = special_args.get('_filter_column')
filter_op = special_args.get('_filter_op') or ''
filter_value = special_args.get('_filter_value') or ''
if '__' in filter_op:
filter_op, filter_value = filter_op.split('__', 1)
if filter_column:
redirect_params.append(
('{}__{}'.format(filter_column, filter_op), filter_value)
)
for key in ('_filter_column', '_filter_op', '_filter_value'):
if key in special_args:
redirect_params.append((key, None))
# Now handle _filter_column_1=name&_filter_op_1=contains&_filter_value_1=hello
column_keys = [k for k in special_args if filter_column_re.match(k)]
for column_key in column_keys:
number = column_key.split('_')[-1]
column = special_args[column_key]
op = special_args.get('_filter_op_{}'.format(number)) or 'exact'
value = special_args.get('_filter_value_{}'.format(number)) or ''
if '__' in op:
op, value = op.split('__', 1)
if column:
redirect_params.append(('{}__{}'.format(column, op), value))
redirect_params.extend([
('_filter_column_{}'.format(number), None),
('_filter_op_{}'.format(number), None),
('_filter_value_{}'.format(number), None),
])
return redirect_params
whitespace_re = re.compile(r'\s')
def is_url(value):
"Must start with http:// or https:// and contain JUST a URL"
if not isinstance(value, str):
return False
if not value.startswith('http://') and not value.startswith('https://'):
return False
# Any whitespace at all is invalid
if whitespace_re.search(value):
return False
return True
css_class_re = re.compile(r'^[a-zA-Z]+[_a-zA-Z0-9-]*$')
css_invalid_chars_re = re.compile(r'[^a-zA-Z0-9_\-]')
def to_css_class(s):
"""
Given a string (e.g. a table name) returns a valid unique CSS class.
For simple cases, just returns the string again. If the string is not a
valid CSS class (we disallow - and _ prefixes even though they are valid
as they may be confused with browser prefixes) we strip invalid characters
and add a 6 char md5 sum suffix, to make sure two tables with identical
names after stripping characters don't end up with the same CSS class.
"""
if css_class_re.match(s):
return s
md5_suffix = hashlib.md5(s.encode('utf8')).hexdigest()[:6]
# Strip leading _, -
s = s.lstrip('_').lstrip('-')
# Replace any whitespace with hyphens
s = '-'.join(s.split())
# Remove any remaining invalid characters
s = css_invalid_chars_re.sub('', s)
# Attach the md5 suffix
bits = [b for b in (s, md5_suffix) if b]
return '-'.join(bits)
def link_or_copy(src, dst):
# Intended for use in populating a temp directory. We link if possible,
# but fall back to copying if the temp directory is on a different device
# https://github.com/simonw/datasette/issues/141
try:
os.link(src, dst)
except OSError:
shutil.copyfile(src, dst)
def link_or_copy_directory(src, dst):
try:
shutil.copytree(src, dst, copy_function=os.link)
except OSError:
shutil.copytree(src, dst)
def module_from_path(path, name):
    """Load and execute a Python module from a file path without
    registering it in sys.modules.

    Adapted from http://sayspy.blogspot.com/2011/07/how-to-import-module-from-just-file.html
    """
    # imp.new_module() comes from the deprecated `imp` module, which was
    # removed in Python 3.12; types.ModuleType is the supported equivalent.
    import types
    mod = types.ModuleType(name)
    mod.__file__ = path
    with open(path, 'r') as file:
        code = compile(file.read(), path, 'exec', dont_inherit=True)
    exec(code, mod.__dict__)
    return mod
def get_plugins(pm):
plugins = []
plugin_to_distinfo = dict(pm.list_plugin_distinfo())
for plugin in pm.get_plugins():
static_path = None
templates_path = None
try:
if pkg_resources.resource_isdir(plugin.__name__, 'static'):
static_path = pkg_resources.resource_filename(plugin.__name__, 'static')
if pkg_resources.resource_isdir(plugin.__name__, 'templates'):
templates_path = pkg_resources.resource_filename(plugin.__name__, 'templates')
except (KeyError, ImportError):
# Caused by --plugins_dir= plugins - KeyError/ImportError thrown in Py3.5
pass
plugin_info = {
'name': plugin.__name__,
'static_path': static_path,
'templates_path': templates_path,
}
distinfo = plugin_to_distinfo.get(plugin)
if distinfo:
plugin_info['version'] = distinfo.version
plugins.append(plugin_info)
return plugins
FORMATS = ('csv', 'json', 'jsono')
async def resolve_table_and_format(table_and_format, table_exists):
if '.' in table_and_format:
# Check if a table exists with this exact name
it_exists = await table_exists(table_and_format)
if it_exists:
return table_and_format, None
# Check if table ends with a known format
for _format in FORMATS:
if table_and_format.endswith(".{}".format(_format)):
table = table_and_format[:-(len(_format) + 1)]
return table, _format
return table_and_format, None
def path_with_format(request, format, extra_qs=None):
qs = extra_qs or {}
path = request.path
if "." in request.path:
qs["_format"] = format
else:
path = "{}.{}".format(path, format)
if qs:
extra = urllib.parse.urlencode(sorted(qs.items()))
if request.query_string:
path = "{}?{}&{}".format(
path, request.query_string, extra
)
else:
path = "{}?{}".format(path, extra)
elif request.query_string:
path = "{}?{}".format(path, request.query_string)
return path
class CustomRow(OrderedDict):
# Loose imitation of sqlite3.Row which offers
# both index-based AND key-based lookups
def __init__(self, columns, values=None):
self.columns = columns
if values:
self.update(values)
def __getitem__(self, key):
if isinstance(key, int):
return super().__getitem__(self.columns[key])
else:
return super().__getitem__(key)
def __iter__(self):
for column in self.columns:
yield self[column]
def value_as_boolean(value):
if value.lower() not in ('on', 'off', 'true', 'false', '1', '0'):
raise ValueAsBooleanError
return value.lower() in ('on', 'true', '1')
class ValueAsBooleanError(ValueError):
pass
class WriteLimitExceeded(Exception):
pass
class LimitedWriter:
def __init__(self, writer, limit_mb):
self.writer = writer
self.limit_bytes = limit_mb * 1024 * 1024
self.bytes_count = 0
def write(self, bytes):
self.bytes_count += len(bytes)
if self.limit_bytes and (self.bytes_count > self.limit_bytes):
raise WriteLimitExceeded("CSV contains more than {} bytes".format(
self.limit_bytes
))
self.writer.write(bytes)
_infinities = {float("inf"), float("-inf")}
def remove_infinites(row):
if any((c in _infinities) if isinstance(c, float) else 0 for c in row):
return [
None if (isinstance(c, float) and c in _infinities) else c
for c in row
]
return row
class StaticMount(click.ParamType):
    """Click parameter type for --static mountpoint:directory options.

    convert() returns a (mount_point, directory_path) tuple, or calls
    self.fail() for malformed values or missing directories.
    """
    name = "static mount"

    def convert(self, value, param, ctx):
        if ":" not in value:
            self.fail(
                '"{}" should be of format mountpoint:directory'.format(value),
                param, ctx
            )
        # Split on the first colon only, so a directory path that itself
        # contains a colon (e.g. a Windows drive letter) does not raise
        # ValueError from tuple unpacking.
        path, dirpath = value.split(":", 1)
        if not os.path.exists(dirpath) or not os.path.isdir(dirpath):
            self.fail("%s is not a valid directory path" % value, param, ctx)
        return path, dirpath
def format_bytes(bytes):
current = float(bytes)
for unit in ("bytes", "KB", "MB", "GB", "TB"):
if current < 1024:
break
current = current / 1024
if unit == "bytes":
return "{} {}".format(int(current), unit)
else:
return "{:.1f} {}".format(current, unit)
|
simonw/datasette | datasette/utils.py | detect_fts | python | def detect_fts(conn, table):
"Detect if table has a corresponding FTS virtual table and return it"
rows = conn.execute(detect_fts_sql(table)).fetchall()
if len(rows) == 0:
return None
else:
return rows[0][0] | Detect if table has a corresponding FTS virtual table and return it | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/utils.py#L545-L551 | [
"def detect_fts_sql(table):\n return r'''\n select name from sqlite_master\n where rootpage = 0\n and (\n sql like '%VIRTUAL TABLE%USING FTS%content=\"{table}\"%'\n or (\n tbl_name = \"{table}\"\n and sql like '%VIRT... | from contextlib import contextmanager
from collections import OrderedDict
import base64
import click
import hashlib
import imp
import json
import os
import pkg_resources
import re
import shlex
import tempfile
import time
import shutil
import urllib
import numbers
try:
import pysqlite3 as sqlite3
except ImportError:
import sqlite3
# From https://www.sqlite.org/lang_keywords.html
reserved_words = set((
'abort action add after all alter analyze and as asc attach autoincrement '
'before begin between by cascade case cast check collate column commit '
'conflict constraint create cross current_date current_time '
'current_timestamp database default deferrable deferred delete desc detach '
'distinct drop each else end escape except exclusive exists explain fail '
'for foreign from full glob group having if ignore immediate in index '
'indexed initially inner insert instead intersect into is isnull join key '
'left like limit match natural no not notnull null of offset on or order '
'outer plan pragma primary query raise recursive references regexp reindex '
'release rename replace restrict right rollback row savepoint select set '
'table temp temporary then to transaction trigger union unique update using '
'vacuum values view virtual when where with without'
).split())
SPATIALITE_DOCKERFILE_EXTRAS = r'''
RUN apt-get update && \
apt-get install -y python3-dev gcc libsqlite3-mod-spatialite && \
rm -rf /var/lib/apt/lists/*
ENV SQLITE_EXTENSIONS /usr/lib/x86_64-linux-gnu/mod_spatialite.so
'''
class InterruptedError(Exception):
pass
class Results:
def __init__(self, rows, truncated, description):
self.rows = rows
self.truncated = truncated
self.description = description
@property
def columns(self):
return [d[0] for d in self.description]
def __iter__(self):
return iter(self.rows)
def __len__(self):
return len(self.rows)
def urlsafe_components(token):
"Splits token on commas and URL decodes each component"
return [
urllib.parse.unquote_plus(b) for b in token.split(',')
]
def path_from_row_pks(row, pks, use_rowid, quote=True):
""" Generate an optionally URL-quoted unique identifier
for a row from its primary keys."""
if use_rowid:
bits = [row['rowid']]
else:
bits = [
row[pk]["value"] if isinstance(row[pk], dict) else row[pk]
for pk in pks
]
if quote:
bits = [urllib.parse.quote_plus(str(bit)) for bit in bits]
else:
bits = [str(bit) for bit in bits]
return ','.join(bits)
def compound_keys_after_sql(pks, start_index=0):
# Implementation of keyset pagination
# See https://github.com/simonw/datasette/issues/190
# For pk1/pk2/pk3 returns:
#
# ([pk1] > :p0)
# or
# ([pk1] = :p0 and [pk2] > :p1)
# or
# ([pk1] = :p0 and [pk2] = :p1 and [pk3] > :p2)
or_clauses = []
pks_left = pks[:]
while pks_left:
and_clauses = []
last = pks_left[-1]
rest = pks_left[:-1]
and_clauses = ['{} = :p{}'.format(
escape_sqlite(pk), (i + start_index)
) for i, pk in enumerate(rest)]
and_clauses.append('{} > :p{}'.format(
escape_sqlite(last), (len(rest) + start_index)
))
or_clauses.append('({})'.format(' and '.join(and_clauses)))
pks_left.pop()
or_clauses.reverse()
return '({})'.format('\n or\n'.join(or_clauses))
class CustomJSONEncoder(json.JSONEncoder):
    """JSON encoder aware of sqlite3 rows/cursors and raw bytes values."""

    def default(self, obj):
        if isinstance(obj, sqlite3.Row):
            return tuple(obj)
        if isinstance(obj, sqlite3.Cursor):
            return list(obj)
        if isinstance(obj, bytes):
            # UTF-8 text passes through as a string; anything else is
            # wrapped in a tagged base64 envelope.
            try:
                return obj.decode('utf8')
            except UnicodeDecodeError:
                return {
                    '$base64': True,
                    'encoded': base64.b64encode(obj).decode('latin1'),
                }
        return json.JSONEncoder.default(self, obj)
@contextmanager
def sqlite_timelimit(conn, ms):
    """
    Context manager that aborts SQL statements running on conn for longer
    than ms milliseconds, via a SQLite progress handler that returns
    non-zero once the deadline passes.
    """
    deadline = time.time() + (ms / 1000)
    # n is the number of SQLite virtual machine instructions that will be
    # executed between each check. It's hard to know what to pick here.
    # After some experimentation, I've decided to go with 1000 by default and
    # 1 for time limits that are less than 50ms
    n = 1 if ms < 50 else 1000

    def handler():
        if time.time() >= deadline:
            return 1

    conn.set_progress_handler(handler, n)
    try:
        yield
    finally:
        # Fix: previously the handler stayed installed if the with-body
        # raised, leaving the connection subject to a stale deadline.
        conn.set_progress_handler(None, n)
class InvalidSql(Exception):
    """Raised when user-supplied SQL is not an allowed read-only statement."""
    pass


allowed_sql_res = [
    re.compile(r'^select\b'),
    re.compile(r'^explain select\b'),
    re.compile(r'^explain query plan select\b'),
    re.compile(r'^with\b'),
]
disallawed_sql_res = [
    (re.compile('pragma'), 'Statement may not contain PRAGMA'),
]


def validate_sql_select(sql):
    """Raise InvalidSql unless sql is a permitted SELECT-style statement."""
    normalized = sql.strip().lower()
    if not any(pattern.match(normalized) for pattern in allowed_sql_res):
        raise InvalidSql('Statement must be a SELECT')
    for pattern, message in disallawed_sql_res:
        if pattern.search(normalized):
            raise InvalidSql(message)
def append_querystring(url, querystring):
    """Append querystring to url, using "?" or "&" as appropriate."""
    separator = "&" if "?" in url else "?"
    return url + separator + querystring
def path_with_added_args(request, args, path=None):
    """
    Return path (default: request.path) with args merged into the request's
    querystring. Pairs whose value is None are removed instead of added.
    """
    path = path or request.path
    pairs = list(args.items() if isinstance(args, dict) else args)
    removals = {key for key, value in pairs if value is None}
    merged = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(request.query_string)
        if key not in removals
    ]
    merged.extend((key, value) for key, value in pairs if value is not None)
    encoded = urllib.parse.urlencode(merged)
    return '{}?{}'.format(path, encoded) if encoded else path
def path_with_removed_args(request, args, path=None):
    """
    Return path (default: request.path) with matching querystring pairs
    removed.

    args may be a set of keys (remove every pair with that key) or a dict
    (remove a pair only when both key and value match). Fix: any other
    type now raises TypeError — previously ``should_remove`` was simply
    never defined and the failure surfaced as a confusing NameError.

    If an explicit path without a "?" is supplied, the request's own
    querystring is filtered and re-attached to it.
    """
    query_string = request.query_string
    if path is None:
        path = request.path
    elif "?" in path:
        path, query_string = path.split("?", 1)
    if isinstance(args, set):
        def should_remove(key, value):
            return key in args
    elif isinstance(args, dict):
        # Must match key AND value
        def should_remove(key, value):
            return args.get(key) == value
    else:
        raise TypeError('args must be a set or a dict, got {!r}'.format(type(args)))
    kept = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(query_string)
        if not should_remove(key, value)
    ]
    encoded = urllib.parse.urlencode(kept)
    return '{}?{}'.format(path, encoded) if encoded else path
def path_with_replaced_args(request, args, path=None):
    """
    Return path (default: request.path) with the given querystring pairs
    replacing any existing pairs that share their keys. Pairs whose value
    is None are dropped entirely.
    """
    path = path or request.path
    pairs = list(args.items() if isinstance(args, dict) else args)
    replaced_keys = {key for key, _ in pairs}
    result = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(request.query_string)
        if key not in replaced_keys
    ]
    result.extend(pair for pair in pairs if pair[1] is not None)
    encoded = urllib.parse.urlencode(result)
    return '{}?{}'.format(path, encoded) if encoded else path
_css_re = re.compile(r'''['"\n\\]''')
_boring_keyword_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')


def escape_css_string(s):
    """Replace quotes, backslashes and newlines with CSS hex escapes."""
    return _css_re.sub(lambda match: '\\{:X}'.format(ord(match.group())), s)


def escape_sqlite(s):
    """Wrap s in [brackets] unless it is a plain, non-reserved identifier."""
    if _boring_keyword_re.match(s) and s.lower() not in reserved_words:
        return s
    return '[{}]'.format(s)
def make_dockerfile(files, metadata_file, extra_options, branch, template_dir, plugins_dir, static, install, spatialite, version_note):
    """
    Render the Dockerfile text used to package Datasette with the given
    database files.

    Builds the `datasette serve` CMD array from the options, chooses the
    install source (PyPI `datasette` or a GitHub branch zip) and optionally
    appends the SpatiaLite system dependencies.
    """
    cmd = ['"datasette"', '"serve"', '"--host"', '"0.0.0.0"']
    cmd.append('"' + '", "'.join(files) + '"')
    cmd.extend(['"--cors"', '"--port"', '"8001"', '"--inspect-file"', '"inspect-data.json"'])
    if metadata_file:
        cmd.extend(['"--metadata"', '"{}"'.format(metadata_file)])
    if template_dir:
        cmd.extend(['"--template-dir"', '"templates/"'])
    if plugins_dir:
        cmd.extend(['"--plugins-dir"', '"plugins/"'])
    if version_note:
        cmd.extend(['"--version-note"', '"{}"'.format(version_note)])
    if static:
        # Each static asset directory is mounted at a path matching its name
        for mount_point, _ in static:
            cmd.extend(['"--static"', '"{}:{}"'.format(mount_point, mount_point)])
    if extra_options:
        for opt in extra_options.split():
            cmd.append('"{}"'.format(opt))
    if branch:
        install = ['https://github.com/simonw/datasette/archive/{}.zip'.format(
            branch
        )] + list(install)
    else:
        install = ['datasette'] + list(install)
    return '''
FROM python:3.6
COPY . /app
WORKDIR /app
{spatialite_extras}
RUN pip install -U {install_from}
RUN datasette inspect {files} --inspect-file inspect-data.json
EXPOSE 8001
CMD [{cmd}]'''.format(
        files=' '.join(files),
        cmd=', '.join(cmd),
        install_from=' '.join(install),
        spatialite_extras=SPATIALITE_DOCKERFILE_EXTRAS if spatialite else '',
    ).strip()
@contextmanager
def temporary_docker_directory(
    files,
    name,
    metadata,
    extra_options,
    branch,
    template_dir,
    plugins_dir,
    static,
    install,
    spatialite,
    version_note,
    extra_metadata=None
):
    """
    Build a temporary directory containing everything needed to
    `docker build` a Datasette image for the given database files, and
    yield its path. The directory (generated Dockerfile, optional
    metadata.json, linked/copied database files, templates, plugins and
    static assets) is deleted when the context exits.
    """
    extra_metadata = extra_metadata or {}
    tmp = tempfile.TemporaryDirectory()
    # We create a datasette folder in there to get a nicer now deploy name
    datasette_dir = os.path.join(tmp.name, name)
    os.mkdir(datasette_dir)
    saved_cwd = os.getcwd()
    file_paths = [
        os.path.join(saved_cwd, file_path)
        for file_path in files
    ]
    file_names = [os.path.split(f)[-1] for f in files]
    if metadata:
        metadata_content = json.load(metadata)
    else:
        metadata_content = {}
    for key, value in extra_metadata.items():
        if value:
            metadata_content[key] = value
    try:
        dockerfile = make_dockerfile(
            file_names,
            metadata_content and 'metadata.json',
            extra_options,
            branch,
            template_dir,
            plugins_dir,
            static,
            install,
            spatialite,
            version_note,
        )
        os.chdir(datasette_dir)
        if metadata_content:
            open('metadata.json', 'w').write(json.dumps(metadata_content, indent=2))
        open('Dockerfile', 'w').write(dockerfile)
        for path, filename in zip(file_paths, file_names):
            link_or_copy(path, os.path.join(datasette_dir, filename))
        if template_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, template_dir),
                os.path.join(datasette_dir, 'templates')
            )
        if plugins_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, plugins_dir),
                os.path.join(datasette_dir, 'plugins')
            )
        for mount_point, path in static:
            link_or_copy_directory(
                os.path.join(saved_cwd, path),
                os.path.join(datasette_dir, mount_point)
            )
        yield datasette_dir
    finally:
        # Fix: restore the working directory *before* deleting the temporary
        # directory — removing the process's current directory can fail on
        # some platforms, and previously left the process in a deleted cwd.
        os.chdir(saved_cwd)
        tmp.cleanup()
@contextmanager
def temporary_heroku_directory(
    files,
    name,
    metadata,
    extra_options,
    branch,
    template_dir,
    plugins_dir,
    static,
    install,
    version_note,
    extra_metadata=None
):
    """
    Build a temporary directory laid out for a Heroku deploy (Procfile,
    requirements.txt, runtime.txt, bin/post_compile, database files,
    templates/plugins/static) and chdir into it for the duration of the
    context. Everything is deleted on exit.
    """
    # FIXME: lots of duplicated code from temporary_docker_directory above
    extra_metadata = extra_metadata or {}
    tmp = tempfile.TemporaryDirectory()
    saved_cwd = os.getcwd()
    file_paths = [
        os.path.join(saved_cwd, file_path)
        for file_path in files
    ]
    file_names = [os.path.split(f)[-1] for f in files]
    if metadata:
        metadata_content = json.load(metadata)
    else:
        metadata_content = {}
    for key, value in extra_metadata.items():
        if value:
            metadata_content[key] = value
    try:
        os.chdir(tmp.name)
        if metadata_content:
            open('metadata.json', 'w').write(json.dumps(metadata_content, indent=2))
        open('runtime.txt', 'w').write('python-3.6.7')
        if branch:
            install = ['https://github.com/simonw/datasette/archive/{branch}.zip'.format(
                branch=branch
            )] + list(install)
        else:
            install = ['datasette'] + list(install)
        open('requirements.txt', 'w').write('\n'.join(install))
        os.mkdir('bin')
        open('bin/post_compile', 'w').write('datasette inspect --inspect-file inspect-data.json')
        extras = []
        if template_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, template_dir),
                os.path.join(tmp.name, 'templates')
            )
            extras.extend(['--template-dir', 'templates/'])
        if plugins_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, plugins_dir),
                os.path.join(tmp.name, 'plugins')
            )
            extras.extend(['--plugins-dir', 'plugins/'])
        if version_note:
            extras.extend(['--version-note', version_note])
        if metadata_content:
            extras.extend(['--metadata', 'metadata.json'])
        if extra_options:
            extras.extend(extra_options.split())
        for mount_point, path in static:
            link_or_copy_directory(
                os.path.join(saved_cwd, path),
                os.path.join(tmp.name, mount_point)
            )
            extras.extend(['--static', '{}:{}'.format(mount_point, mount_point)])
        quoted_files = " ".join(map(shlex.quote, file_names))
        procfile_cmd = 'web: datasette serve --host 0.0.0.0 {quoted_files} --cors --port $PORT --inspect-file inspect-data.json {extras}'.format(
            quoted_files=quoted_files,
            extras=' '.join(extras),
        )
        open('Procfile', 'w').write(procfile_cmd)
        for path, filename in zip(file_paths, file_names):
            link_or_copy(path, os.path.join(tmp.name, filename))
        yield
    finally:
        # Fix: chdir back out of the temporary directory before deleting it,
        # so cleanup cannot fail by removing the process's current directory.
        os.chdir(saved_cwd)
        tmp.cleanup()
def detect_primary_keys(conn, table):
    """Return the table's primary key column names, in key order."""
    pk_rows = [
        row
        for row in conn.execute(
            'PRAGMA table_info("{}")'.format(table)
        ).fetchall()
        # Final PRAGMA column is the 1-based position within the primary key
        # (0 for non-key columns)
        if row[-1]
    ]
    pk_rows.sort(key=lambda row: row[-1])
    return [str(row[1]) for row in pk_rows]
def get_outbound_foreign_keys(conn, table):
    """List the foreign keys declared on table as dicts of
    other_table / column / other_column."""
    results = []
    for row in conn.execute(
        'PRAGMA foreign_key_list([{}])'.format(table)
    ).fetchall():
        if row is None:
            continue
        _id, _seq, other_table, column, other_column, _, _, _ = row
        results.append({
            'other_table': other_table,
            'column': column,
            'other_column': other_column
        })
    return results
def get_all_foreign_keys(conn):
    """
    Map every table in the database to its foreign key relationships.

    Returns {table: {"incoming": [...], "outgoing": [...]}} where each item
    is {"other_table", "column", "other_column"}: "outgoing" lists keys the
    table declares, "incoming" lists keys on other tables referencing it.
    """
    tables = [r[0] for r in conn.execute('select name from sqlite_master where type="table"')]
    table_to_foreign_keys = {}
    for table in tables:
        table_to_foreign_keys[table] = {
            'incoming': [],
            'outgoing': [],
        }
    for table in tables:
        infos = conn.execute(
            'PRAGMA foreign_key_list([{}])'.format(table)
        ).fetchall()
        for info in infos:
            if info is not None:
                id, seq, table_name, from_, to_, on_update, on_delete, match = info
                if table_name not in table_to_foreign_keys:
                    # Weird edge case where something refers to a table that does
                    # not actually exist
                    continue
                table_to_foreign_keys[table_name]['incoming'].append({
                    'other_table': table,
                    'column': to_,
                    'other_column': from_
                })
                table_to_foreign_keys[table]['outgoing'].append({
                    'other_table': table_name,
                    'column': from_,
                    'other_column': to_
                })
    return table_to_foreign_keys
def detect_spatialite(conn):
    """True if the database contains a geometry_columns table, the marker
    used here to recognize SpatiaLite databases."""
    matches = conn.execute(
        'select 1 from sqlite_master where tbl_name = "geometry_columns"'
    ).fetchall()
    return bool(matches)
def detect_fts_sql(table):
    """SQL that finds the FTS virtual table shadowing `table`, if any.

    Matches a virtual table created with content="table", or an FTS
    virtual table sharing the table's own name. NOTE(review): `table` is
    interpolated directly into the SQL — callers must not pass untrusted
    input here.
    """
    return r'''
        select name from sqlite_master
            where rootpage = 0
            and (
                sql like '%VIRTUAL TABLE%USING FTS%content="{table}"%'
                or (
                    tbl_name = "{table}"
                    and sql like '%VIRTUAL TABLE%USING FTS%'
                )
            )
    '''.format(table=table)
def detect_json1(conn=None):
    """True if the SQLite library behind conn (or a fresh in-memory
    connection) exposes the json1 extension."""
    if conn is None:
        conn = sqlite3.connect(":memory:")
    try:
        conn.execute("SELECT json('{}')")
    except Exception:
        return False
    return True
def table_columns(conn, table):
    """Return the column names of table, in declared order."""
    pragma = "PRAGMA table_info({});".format(escape_sqlite(table))
    return [row[1] for row in conn.execute(pragma).fetchall()]
filter_column_re = re.compile(r'^_filter_column_\d+$')


def filters_should_redirect(special_args):
    """
    Convert legacy ?_filter_column=/?_filter_op=/?_filter_value= arguments
    into the modern ?column__op=value querystring form.

    Returns a list of (key, value) redirect instructions: new column__op
    pairs to add, plus (key, None) pairs marking the old _filter_*
    arguments for removal. An empty list means no redirect is needed.
    """
    redirect_params = []
    # Handle _filter_column=foo&_filter_op=exact&_filter_value=...
    filter_column = special_args.get('_filter_column')
    filter_op = special_args.get('_filter_op') or ''
    filter_value = special_args.get('_filter_value') or ''
    # Operators like "gt__5" carry their value inline after a double underscore
    if '__' in filter_op:
        filter_op, filter_value = filter_op.split('__', 1)
    if filter_column:
        redirect_params.append(
            ('{}__{}'.format(filter_column, filter_op), filter_value)
        )
    for key in ('_filter_column', '_filter_op', '_filter_value'):
        if key in special_args:
            redirect_params.append((key, None))
    # Now handle _filter_column_1=name&_filter_op_1=contains&_filter_value_1=hello
    column_keys = [k for k in special_args if filter_column_re.match(k)]
    for column_key in column_keys:
        number = column_key.split('_')[-1]
        column = special_args[column_key]
        op = special_args.get('_filter_op_{}'.format(number)) or 'exact'
        value = special_args.get('_filter_value_{}'.format(number)) or ''
        if '__' in op:
            op, value = op.split('__', 1)
        if column:
            redirect_params.append(('{}__{}'.format(column, op), value))
        redirect_params.extend([
            ('_filter_column_{}'.format(number), None),
            ('_filter_op_{}'.format(number), None),
            ('_filter_value_{}'.format(number), None),
        ])
    return redirect_params
whitespace_re = re.compile(r'\s')


def is_url(value):
    "Must start with http:// or https:// and contain JUST a URL"
    if not isinstance(value, str):
        return False
    if not value.startswith(('http://', 'https://')):
        return False
    # Whitespace anywhere means this is prose containing a URL, not a URL
    return whitespace_re.search(value) is None
css_class_re = re.compile(r'^[a-zA-Z]+[_a-zA-Z0-9-]*$')
css_invalid_chars_re = re.compile(r'[^a-zA-Z0-9_\-]')


def to_css_class(s):
    """
    Map an arbitrary string (e.g. a table name) to a valid, unique CSS class.

    Strings that are already valid classes pass through unchanged. Anything
    else is sanitized and suffixed with 6 chars of its md5, so two names
    that sanitize identically still get distinct classes. Leading "_"/"-"
    are stripped because they could be mistaken for vendor prefixes.
    """
    if css_class_re.match(s):
        return s
    suffix = hashlib.md5(s.encode('utf8')).hexdigest()[:6]
    cleaned = s.lstrip('_').lstrip('-')
    cleaned = '-'.join(cleaned.split())              # whitespace -> hyphens
    cleaned = css_invalid_chars_re.sub('', cleaned)  # drop anything else
    return '-'.join(bit for bit in (cleaned, suffix) if bit)
def link_or_copy(src, dst):
    """
    Populate dst from src, preferring a hard link and falling back to a
    byte copy when linking fails (e.g. temp dir on a different device).
    https://github.com/simonw/datasette/issues/141
    """
    try:
        os.link(src, dst)
    except OSError:
        shutil.copyfile(src, dst)
def link_or_copy_directory(src, dst):
    """
    Recursively populate dst from src, hard-linking files when possible and
    falling back to a regular recursive copy when linking fails (e.g. the
    destination is on a different device).
    """
    try:
        shutil.copytree(src, dst, copy_function=os.link)
    except OSError:
        # Fix: the link-based copytree may have created a partial tree before
        # failing; remove it so the fallback copytree does not immediately
        # die with FileExistsError on the half-created destination.
        shutil.rmtree(dst, ignore_errors=True)
        shutil.copytree(src, dst)
def module_from_path(path, name):
    """
    Import the Python file at path as a module called name, without
    registering it in sys.modules.

    Fix: ported from the deprecated `imp` module (removed in Python 3.12)
    to importlib machinery; behavior is otherwise unchanged — the file is
    executed into a fresh module whose __file__ points at path.
    """
    import importlib.util
    spec = importlib.util.spec_from_file_location(name, path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod
def get_plugins(pm):
    """
    Describe every plugin registered with the pluggy plugin manager pm.

    Returns a list of dicts with each plugin's module name, its bundled
    static/ and templates/ resource paths (None when absent) and — when the
    plugin was installed as a distribution — its version.
    """
    plugins = []
    plugin_to_distinfo = dict(pm.list_plugin_distinfo())
    for plugin in pm.get_plugins():
        static_path = None
        templates_path = None
        try:
            if pkg_resources.resource_isdir(plugin.__name__, 'static'):
                static_path = pkg_resources.resource_filename(plugin.__name__, 'static')
            if pkg_resources.resource_isdir(plugin.__name__, 'templates'):
                templates_path = pkg_resources.resource_filename(plugin.__name__, 'templates')
        except (KeyError, ImportError):
            # Caused by --plugins_dir= plugins - KeyError/ImportError thrown in Py3.5
            pass
        plugin_info = {
            'name': plugin.__name__,
            'static_path': static_path,
            'templates_path': templates_path,
        }
        distinfo = plugin_to_distinfo.get(plugin)
        if distinfo:
            plugin_info['version'] = distinfo.version
        plugins.append(plugin_info)
    return plugins
FORMATS = ('csv', 'json', 'jsono')


async def resolve_table_and_format(table_and_format, table_exists):
    """
    Split a "table.format" token into (table, format).

    A real table whose name contains a dot wins over format detection, so
    "foo.csv" resolves to ("foo.csv", None) when such a table exists.
    Unknown extensions are treated as part of the table name.
    """
    if '.' in table_and_format:
        if await table_exists(table_and_format):
            return table_and_format, None
        for candidate in FORMATS:
            suffix = '.{}'.format(candidate)
            if table_and_format.endswith(suffix):
                return table_and_format[:-len(suffix)], candidate
    return table_and_format, None
def path_with_format(request, format, extra_qs=None):
    """
    Return request.path re-targeted at the given output format.

    If the path already contains a dot the format travels as ?_format=
    instead of an extension; the original querystring is preserved either
    way. Note: extra_qs, when provided, is mutated (matching the original
    behavior).
    """
    qs = extra_qs or {}
    path = request.path
    if "." in request.path:
        qs["_format"] = format
    else:
        path = "{}.{}".format(path, format)
    if qs:
        encoded = urllib.parse.urlencode(sorted(qs.items()))
        if request.query_string:
            return "{}?{}&{}".format(path, request.query_string, encoded)
        return "{}?{}".format(path, encoded)
    if request.query_string:
        return "{}?{}".format(path, request.query_string)
    return path
class CustomRow(OrderedDict):
    """
    Loose imitation of sqlite3.Row on top of OrderedDict: supports lookups
    both by integer position (via the stored column order) and by column
    name, and iterates values in column order.
    """

    def __init__(self, columns, values=None):
        self.columns = columns
        if values:
            self.update(values)

    def __getitem__(self, key):
        column = self.columns[key] if isinstance(key, int) else key
        return super().__getitem__(column)

    def __iter__(self):
        return (self[column] for column in self.columns)
def value_as_boolean(value):
    """Parse on/off/true/false/1/0 (case-insensitive) into a bool,
    raising ValueAsBooleanError for anything else."""
    lowered = value.lower()
    if lowered not in ('on', 'off', 'true', 'false', '1', '0'):
        raise ValueAsBooleanError
    return lowered in ('on', 'true', '1')


class ValueAsBooleanError(ValueError):
    """Raised when a string cannot be interpreted as a boolean."""
    pass
class WriteLimitExceeded(Exception):
    """Raised when a LimitedWriter is asked to exceed its byte budget."""
    pass


class LimitedWriter:
    """
    Wraps a file-like writer and raises WriteLimitExceeded once more than
    limit_mb megabytes have been written. A limit of 0 disables the check.
    """

    def __init__(self, writer, limit_mb):
        self.writer = writer
        self.limit_bytes = limit_mb * 1024 * 1024
        self.bytes_count = 0

    def write(self, bytes):
        self.bytes_count += len(bytes)
        if self.limit_bytes and self.bytes_count > self.limit_bytes:
            raise WriteLimitExceeded("CSV contains more than {} bytes".format(
                self.limit_bytes
            ))
        self.writer.write(bytes)
_infinities = {float("inf"), float("-inf")}


def remove_infinites(row):
    """Return row with +/-inf floats replaced by None (so it can be JSON
    encoded); rows with no infinities are returned unchanged."""
    def _is_inf(value):
        return isinstance(value, float) and value in _infinities

    if any(_is_inf(value) for value in row):
        return [None if _is_inf(value) else value for value in row]
    return row
class StaticMount(click.ParamType):
    # click option type for --static arguments of the form
    # "mountpoint:directory" - validates that directory exists.
    name = "static mount"

    def convert(self, value, param, ctx):
        """Parse "mount:dir" into a (mount, dir) tuple, failing with a
        click usage error when the format is wrong or dir is not a
        directory.

        NOTE(review): a value containing two colons makes split(":")
        raise ValueError here rather than self.fail() - confirm whether
        split(":", 1) was intended.
        """
        if ":" not in value:
            self.fail(
                '"{}" should be of format mountpoint:directory'.format(value),
                param, ctx
            )
        path, dirpath = value.split(":")
        if not os.path.exists(dirpath) or not os.path.isdir(dirpath):
            self.fail("%s is not a valid directory path" % value, param, ctx)
        return path, dirpath
def format_bytes(bytes):
    """Human-readable file size: "123 bytes", "1.5 KB", up to TB."""
    size = float(bytes)
    unit = "bytes"
    for unit in ("bytes", "KB", "MB", "GB", "TB"):
        if size < 1024:
            break
        size /= 1024
    if unit == "bytes":
        return "{} {}".format(int(size), unit)
    return "{:.1f} {}".format(size, unit)
|
simonw/datasette | datasette/utils.py | is_url | python | def is_url(value):
"Must start with http:// or https:// and contain JUST a URL"
if not isinstance(value, str):
return False
if not value.startswith('http://') and not value.startswith('https://'):
return False
# Any whitespace at all is invalid
if whitespace_re.search(value):
return False
return True | Must start with http:// or https:// and contain JUST a URL | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/utils.py#L627-L636 | null | from contextlib import contextmanager
from collections import OrderedDict
import base64
import click
import hashlib
import imp
import json
import os
import pkg_resources
import re
import shlex
import tempfile
import time
import shutil
import urllib
import numbers
try:
import pysqlite3 as sqlite3
except ImportError:
import sqlite3
# From https://www.sqlite.org/lang_keywords.html
reserved_words = set((
'abort action add after all alter analyze and as asc attach autoincrement '
'before begin between by cascade case cast check collate column commit '
'conflict constraint create cross current_date current_time '
'current_timestamp database default deferrable deferred delete desc detach '
'distinct drop each else end escape except exclusive exists explain fail '
'for foreign from full glob group having if ignore immediate in index '
'indexed initially inner insert instead intersect into is isnull join key '
'left like limit match natural no not notnull null of offset on or order '
'outer plan pragma primary query raise recursive references regexp reindex '
'release rename replace restrict right rollback row savepoint select set '
'table temp temporary then to transaction trigger union unique update using '
'vacuum values view virtual when where with without'
).split())
SPATIALITE_DOCKERFILE_EXTRAS = r'''
RUN apt-get update && \
apt-get install -y python3-dev gcc libsqlite3-mod-spatialite && \
rm -rf /var/lib/apt/lists/*
ENV SQLITE_EXTENSIONS /usr/lib/x86_64-linux-gnu/mod_spatialite.so
'''
class InterruptedError(Exception):
pass
class Results:
def __init__(self, rows, truncated, description):
self.rows = rows
self.truncated = truncated
self.description = description
@property
def columns(self):
return [d[0] for d in self.description]
def __iter__(self):
return iter(self.rows)
def __len__(self):
return len(self.rows)
def urlsafe_components(token):
"Splits token on commas and URL decodes each component"
return [
urllib.parse.unquote_plus(b) for b in token.split(',')
]
def path_from_row_pks(row, pks, use_rowid, quote=True):
""" Generate an optionally URL-quoted unique identifier
for a row from its primary keys."""
if use_rowid:
bits = [row['rowid']]
else:
bits = [
row[pk]["value"] if isinstance(row[pk], dict) else row[pk]
for pk in pks
]
if quote:
bits = [urllib.parse.quote_plus(str(bit)) for bit in bits]
else:
bits = [str(bit) for bit in bits]
return ','.join(bits)
def compound_keys_after_sql(pks, start_index=0):
# Implementation of keyset pagination
# See https://github.com/simonw/datasette/issues/190
# For pk1/pk2/pk3 returns:
#
# ([pk1] > :p0)
# or
# ([pk1] = :p0 and [pk2] > :p1)
# or
# ([pk1] = :p0 and [pk2] = :p1 and [pk3] > :p2)
or_clauses = []
pks_left = pks[:]
while pks_left:
and_clauses = []
last = pks_left[-1]
rest = pks_left[:-1]
and_clauses = ['{} = :p{}'.format(
escape_sqlite(pk), (i + start_index)
) for i, pk in enumerate(rest)]
and_clauses.append('{} > :p{}'.format(
escape_sqlite(last), (len(rest) + start_index)
))
or_clauses.append('({})'.format(' and '.join(and_clauses)))
pks_left.pop()
or_clauses.reverse()
return '({})'.format('\n or\n'.join(or_clauses))
class CustomJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, sqlite3.Row):
return tuple(obj)
if isinstance(obj, sqlite3.Cursor):
return list(obj)
if isinstance(obj, bytes):
# Does it encode to utf8?
try:
return obj.decode('utf8')
except UnicodeDecodeError:
return {
'$base64': True,
'encoded': base64.b64encode(obj).decode('latin1'),
}
return json.JSONEncoder.default(self, obj)
@contextmanager
def sqlite_timelimit(conn, ms):
deadline = time.time() + (ms / 1000)
# n is the number of SQLite virtual machine instructions that will be
# executed between each check. It's hard to know what to pick here.
# After some experimentation, I've decided to go with 1000 by default and
# 1 for time limits that are less than 50ms
n = 1000
if ms < 50:
n = 1
def handler():
if time.time() >= deadline:
return 1
conn.set_progress_handler(handler, n)
yield
conn.set_progress_handler(None, n)
class InvalidSql(Exception):
pass
allowed_sql_res = [
re.compile(r'^select\b'),
re.compile(r'^explain select\b'),
re.compile(r'^explain query plan select\b'),
re.compile(r'^with\b'),
]
disallawed_sql_res = [
(re.compile('pragma'), 'Statement may not contain PRAGMA'),
]
def validate_sql_select(sql):
sql = sql.strip().lower()
if not any(r.match(sql) for r in allowed_sql_res):
raise InvalidSql('Statement must be a SELECT')
for r, msg in disallawed_sql_res:
if r.search(sql):
raise InvalidSql(msg)
def append_querystring(url, querystring):
op = "&" if ("?" in url) else "?"
return "{}{}{}".format(
url, op, querystring
)
def path_with_added_args(request, args, path=None):
path = path or request.path
if isinstance(args, dict):
args = args.items()
args_to_remove = {k for k, v in args if v is None}
current = []
for key, value in urllib.parse.parse_qsl(request.query_string):
if key not in args_to_remove:
current.append((key, value))
current.extend([
(key, value)
for key, value in args
if value is not None
])
query_string = urllib.parse.urlencode(current)
if query_string:
query_string = '?{}'.format(query_string)
return path + query_string
def path_with_removed_args(request, args, path=None):
query_string = request.query_string
if path is None:
path = request.path
else:
if "?" in path:
bits = path.split("?", 1)
path, query_string = bits
# args can be a dict or a set
current = []
if isinstance(args, set):
def should_remove(key, value):
return key in args
elif isinstance(args, dict):
# Must match key AND value
def should_remove(key, value):
return args.get(key) == value
for key, value in urllib.parse.parse_qsl(query_string):
if not should_remove(key, value):
current.append((key, value))
query_string = urllib.parse.urlencode(current)
if query_string:
query_string = '?{}'.format(query_string)
return path + query_string
def path_with_replaced_args(request, args, path=None):
path = path or request.path
if isinstance(args, dict):
args = args.items()
keys_to_replace = {p[0] for p in args}
current = []
for key, value in urllib.parse.parse_qsl(request.query_string):
if key not in keys_to_replace:
current.append((key, value))
current.extend([p for p in args if p[1] is not None])
query_string = urllib.parse.urlencode(current)
if query_string:
query_string = '?{}'.format(query_string)
return path + query_string
_css_re = re.compile(r'''['"\n\\]''')
_boring_keyword_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
def escape_css_string(s):
return _css_re.sub(lambda m: '\\{:X}'.format(ord(m.group())), s)
def escape_sqlite(s):
if _boring_keyword_re.match(s) and (s.lower() not in reserved_words):
return s
else:
return '[{}]'.format(s)
def make_dockerfile(files, metadata_file, extra_options, branch, template_dir, plugins_dir, static, install, spatialite, version_note):
cmd = ['"datasette"', '"serve"', '"--host"', '"0.0.0.0"']
cmd.append('"' + '", "'.join(files) + '"')
cmd.extend(['"--cors"', '"--port"', '"8001"', '"--inspect-file"', '"inspect-data.json"'])
if metadata_file:
cmd.extend(['"--metadata"', '"{}"'.format(metadata_file)])
if template_dir:
cmd.extend(['"--template-dir"', '"templates/"'])
if plugins_dir:
cmd.extend(['"--plugins-dir"', '"plugins/"'])
if version_note:
cmd.extend(['"--version-note"', '"{}"'.format(version_note)])
if static:
for mount_point, _ in static:
cmd.extend(['"--static"', '"{}:{}"'.format(mount_point, mount_point)])
if extra_options:
for opt in extra_options.split():
cmd.append('"{}"'.format(opt))
if branch:
install = ['https://github.com/simonw/datasette/archive/{}.zip'.format(
branch
)] + list(install)
else:
install = ['datasette'] + list(install)
return '''
FROM python:3.6
COPY . /app
WORKDIR /app
{spatialite_extras}
RUN pip install -U {install_from}
RUN datasette inspect {files} --inspect-file inspect-data.json
EXPOSE 8001
CMD [{cmd}]'''.format(
files=' '.join(files),
cmd=', '.join(cmd),
install_from=' '.join(install),
spatialite_extras=SPATIALITE_DOCKERFILE_EXTRAS if spatialite else '',
).strip()
@contextmanager
def temporary_docker_directory(
files,
name,
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
extra_metadata=None
):
extra_metadata = extra_metadata or {}
tmp = tempfile.TemporaryDirectory()
# We create a datasette folder in there to get a nicer now deploy name
datasette_dir = os.path.join(tmp.name, name)
os.mkdir(datasette_dir)
saved_cwd = os.getcwd()
file_paths = [
os.path.join(saved_cwd, file_path)
for file_path in files
]
file_names = [os.path.split(f)[-1] for f in files]
if metadata:
metadata_content = json.load(metadata)
else:
metadata_content = {}
for key, value in extra_metadata.items():
if value:
metadata_content[key] = value
try:
dockerfile = make_dockerfile(
file_names,
metadata_content and 'metadata.json',
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
)
os.chdir(datasette_dir)
if metadata_content:
open('metadata.json', 'w').write(json.dumps(metadata_content, indent=2))
open('Dockerfile', 'w').write(dockerfile)
for path, filename in zip(file_paths, file_names):
link_or_copy(path, os.path.join(datasette_dir, filename))
if template_dir:
link_or_copy_directory(
os.path.join(saved_cwd, template_dir),
os.path.join(datasette_dir, 'templates')
)
if plugins_dir:
link_or_copy_directory(
os.path.join(saved_cwd, plugins_dir),
os.path.join(datasette_dir, 'plugins')
)
for mount_point, path in static:
link_or_copy_directory(
os.path.join(saved_cwd, path),
os.path.join(datasette_dir, mount_point)
)
yield datasette_dir
finally:
tmp.cleanup()
os.chdir(saved_cwd)
@contextmanager
def temporary_heroku_directory(
files,
name,
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
version_note,
extra_metadata=None
):
# FIXME: lots of duplicated code from above
extra_metadata = extra_metadata or {}
tmp = tempfile.TemporaryDirectory()
saved_cwd = os.getcwd()
file_paths = [
os.path.join(saved_cwd, file_path)
for file_path in files
]
file_names = [os.path.split(f)[-1] for f in files]
if metadata:
metadata_content = json.load(metadata)
else:
metadata_content = {}
for key, value in extra_metadata.items():
if value:
metadata_content[key] = value
try:
os.chdir(tmp.name)
if metadata_content:
open('metadata.json', 'w').write(json.dumps(metadata_content, indent=2))
open('runtime.txt', 'w').write('python-3.6.7')
if branch:
install = ['https://github.com/simonw/datasette/archive/{branch}.zip'.format(
branch=branch
)] + list(install)
else:
install = ['datasette'] + list(install)
open('requirements.txt', 'w').write('\n'.join(install))
os.mkdir('bin')
open('bin/post_compile', 'w').write('datasette inspect --inspect-file inspect-data.json')
extras = []
if template_dir:
link_or_copy_directory(
os.path.join(saved_cwd, template_dir),
os.path.join(tmp.name, 'templates')
)
extras.extend(['--template-dir', 'templates/'])
if plugins_dir:
link_or_copy_directory(
os.path.join(saved_cwd, plugins_dir),
os.path.join(tmp.name, 'plugins')
)
extras.extend(['--plugins-dir', 'plugins/'])
if version_note:
extras.extend(['--version-note', version_note])
if metadata_content:
extras.extend(['--metadata', 'metadata.json'])
if extra_options:
extras.extend(extra_options.split())
for mount_point, path in static:
link_or_copy_directory(
os.path.join(saved_cwd, path),
os.path.join(tmp.name, mount_point)
)
extras.extend(['--static', '{}:{}'.format(mount_point, mount_point)])
quoted_files = " ".join(map(shlex.quote, file_names))
procfile_cmd = 'web: datasette serve --host 0.0.0.0 {quoted_files} --cors --port $PORT --inspect-file inspect-data.json {extras}'.format(
quoted_files=quoted_files,
extras=' '.join(extras),
)
open('Procfile', 'w').write(procfile_cmd)
for path, filename in zip(file_paths, file_names):
link_or_copy(path, os.path.join(tmp.name, filename))
yield
finally:
tmp.cleanup()
os.chdir(saved_cwd)
def detect_primary_keys(conn, table):
" Figure out primary keys for a table. "
table_info_rows = [
row
for row in conn.execute(
'PRAGMA table_info("{}")'.format(table)
).fetchall()
if row[-1]
]
table_info_rows.sort(key=lambda row: row[-1])
return [str(r[1]) for r in table_info_rows]
def get_outbound_foreign_keys(conn, table):
infos = conn.execute(
'PRAGMA foreign_key_list([{}])'.format(table)
).fetchall()
fks = []
for info in infos:
if info is not None:
id, seq, table_name, from_, to_, on_update, on_delete, match = info
fks.append({
'other_table': table_name,
'column': from_,
'other_column': to_
})
return fks
def get_all_foreign_keys(conn):
tables = [r[0] for r in conn.execute('select name from sqlite_master where type="table"')]
table_to_foreign_keys = {}
for table in tables:
table_to_foreign_keys[table] = {
'incoming': [],
'outgoing': [],
}
for table in tables:
infos = conn.execute(
'PRAGMA foreign_key_list([{}])'.format(table)
).fetchall()
for info in infos:
if info is not None:
id, seq, table_name, from_, to_, on_update, on_delete, match = info
if table_name not in table_to_foreign_keys:
# Weird edge case where something refers to a table that does
# not actually exist
continue
table_to_foreign_keys[table_name]['incoming'].append({
'other_table': table,
'column': to_,
'other_column': from_
})
table_to_foreign_keys[table]['outgoing'].append({
'other_table': table_name,
'column': from_,
'other_column': to_
})
return table_to_foreign_keys
def detect_spatialite(conn):
rows = conn.execute('select 1 from sqlite_master where tbl_name = "geometry_columns"').fetchall()
return len(rows) > 0
def detect_fts(conn, table):
    "Detect if table has a corresponding FTS virtual table and return it"
    rows = conn.execute(detect_fts_sql(table)).fetchall()
    return rows[0][0] if rows else None
def detect_fts_sql(table):
    # Build SQL that locates an FTS virtual table associated with *table*:
    # either one declared with content="table", or an FTS table that shares
    # the same tbl_name (rootpage = 0 selects virtual-table shadow entries).
    # NOTE(review): *table* is interpolated directly into the SQL string -
    # assumes table names come from a trusted source; verify callers.
    return r'''
select name from sqlite_master
where rootpage = 0
and (
sql like '%VIRTUAL TABLE%USING FTS%content="{table}"%'
or (
tbl_name = "{table}"
and sql like '%VIRTUAL TABLE%USING FTS%'
)
)
'''.format(table=table)
def detect_json1(conn=None):
    """True if the SQLite library in use has the JSON1 extension available."""
    if conn is None:
        conn = sqlite3.connect(":memory:")
    try:
        # json() raises OperationalError when JSON1 is not compiled in
        conn.execute("SELECT json('{}')")
    except Exception:
        return False
    return True
def table_columns(conn, table):
    """Return the column names of *table*, in declared order."""
    rows = conn.execute(
        "PRAGMA table_info({});".format(escape_sqlite(table))
    ).fetchall()
    # Second element of each table_info row is the column name
    return [row[1] for row in rows]
filter_column_re = re.compile(r'^_filter_column_\d+$')


def _split_filter_op(op, value):
    """Split an op like "contains__hello" into ("contains", "hello").

    The filter UI may encode the value into the op with a double
    underscore; when present it overrides the separately supplied value.
    """
    if '__' in op:
        return op.split('__', 1)
    return op, value


def filters_should_redirect(special_args):
    """Convert _filter_* querystring arguments into redirect parameters.

    Returns a list of (key, value) pairs: pairs with a value are filters to
    add to the querystring (e.g. ("name__contains", "hello")); pairs whose
    value is None are the original _filter_* arguments to strip out.
    """
    redirect_params = []
    # Handle _filter_column=foo&_filter_op=exact&_filter_value=...
    filter_column = special_args.get('_filter_column')
    filter_op = special_args.get('_filter_op') or ''
    filter_value = special_args.get('_filter_value') or ''
    filter_op, filter_value = _split_filter_op(filter_op, filter_value)
    if filter_column:
        redirect_params.append(
            ('{}__{}'.format(filter_column, filter_op), filter_value)
        )
    for key in ('_filter_column', '_filter_op', '_filter_value'):
        if key in special_args:
            redirect_params.append((key, None))
    # Now handle _filter_column_1=name&_filter_op_1=contains&_filter_value_1=hello
    column_keys = [k for k in special_args if filter_column_re.match(k)]
    for column_key in column_keys:
        number = column_key.split('_')[-1]
        column = special_args[column_key]
        op = special_args.get('_filter_op_{}'.format(number)) or 'exact'
        value = special_args.get('_filter_value_{}'.format(number)) or ''
        op, value = _split_filter_op(op, value)
        if column:
            redirect_params.append(('{}__{}'.format(column, op), value))
        redirect_params.extend([
            ('_filter_column_{}'.format(number), None),
            ('_filter_op_{}'.format(number), None),
            ('_filter_value_{}'.format(number), None),
        ])
    return redirect_params
whitespace_re = re.compile(r'\s')
css_class_re = re.compile(r'^[a-zA-Z]+[_a-zA-Z0-9-]*$')
css_invalid_chars_re = re.compile(r'[^a-zA-Z0-9_\-]')


def to_css_class(s):
    """
    Given a string (e.g. a table name) returns a valid unique CSS class.
    For simple cases, just returns the string again. If the string is not a
    valid CSS class (we disallow - and _ prefixes even though they are valid
    as they may be confused with browser prefixes) we strip invalid characters
    and add a 6 char md5 sum suffix, to make sure two tables with identical
    names after stripping characters don't end up with the same CSS class.
    """
    if css_class_re.match(s):
        return s
    suffix = hashlib.md5(s.encode('utf8')).hexdigest()[:6]
    # Strip leading _, -; whitespace becomes hyphens; drop anything else invalid
    cleaned = s.lstrip('_').lstrip('-')
    cleaned = '-'.join(cleaned.split())
    cleaned = css_invalid_chars_re.sub('', cleaned)
    return '-'.join(part for part in (cleaned, suffix) if part)
def link_or_copy(src, dst):
    """Hard-link src to dst, falling back to a copy across devices.

    Intended for use in populating a temp directory; the temp directory may
    live on a different device, where os.link raises OSError
    (https://github.com/simonw/datasette/issues/141).
    """
    try:
        os.link(src, dst)
        return
    except OSError:
        pass
    shutil.copyfile(src, dst)
def link_or_copy_directory(src, dst):
    # Recursively "copy" a directory tree by hard-linking each file,
    # falling back to a real recursive copy when linking fails (e.g. the
    # destination is on a different device).
    try:
        shutil.copytree(src, dst, copy_function=os.link)
    except OSError:
        shutil.copytree(src, dst)
def module_from_path(path, name):
    """Load and execute a Python module from an arbitrary file path.

    Returns the executed module object with ``__file__`` set to *path*.
    Replaces the previous ``imp``-based implementation: ``imp`` has been
    deprecated since Python 3.4 and was removed in Python 3.12, so this
    uses ``importlib`` while preserving the same interface.
    """
    import importlib.util
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def get_plugins(pm):
    """Describe each registered plugin: name, static/templates paths, version."""
    plugin_to_distinfo = dict(pm.list_plugin_distinfo())
    plugins = []
    for plugin in pm.get_plugins():
        static_path = None
        templates_path = None
        try:
            if pkg_resources.resource_isdir(plugin.__name__, 'static'):
                static_path = pkg_resources.resource_filename(plugin.__name__, 'static')
            if pkg_resources.resource_isdir(plugin.__name__, 'templates'):
                templates_path = pkg_resources.resource_filename(plugin.__name__, 'templates')
        except (KeyError, ImportError):
            # Plugins loaded via --plugins_dir= have no package resources;
            # KeyError/ImportError thrown in Py3.5
            pass
        info = {
            'name': plugin.__name__,
            'static_path': static_path,
            'templates_path': templates_path,
        }
        distinfo = plugin_to_distinfo.get(plugin)
        if distinfo:
            info['version'] = distinfo.version
        plugins.append(info)
    return plugins
FORMATS = ('csv', 'json', 'jsono')


async def resolve_table_and_format(table_and_format, table_exists):
    """Split "table.format" into (table, format).

    A table whose name literally contains the dot takes precedence (checked
    via the awaitable *table_exists*); otherwise a trailing known format is
    stripped. Returns (name, None) when no format applies.
    """
    if '.' not in table_and_format:
        return table_and_format, None
    if await table_exists(table_and_format):
        # A table exists with this exact dotted name
        return table_and_format, None
    for fmt in FORMATS:
        suffix = ".{}".format(fmt)
        if table_and_format.endswith(suffix):
            return table_and_format[:-len(suffix)], fmt
    return table_and_format, None
def path_with_format(request, format, extra_qs=None):
    """Return the request path rewritten to ask for *format*.

    If the path already contains a "." the format is requested via a
    ?_format= querystring argument instead of as an extension. Any
    *extra_qs* pairs are appended (sorted) after the existing querystring.
    """
    qs = extra_qs or {}
    path = request.path
    if "." in request.path:
        qs["_format"] = format
    else:
        path = "{}.{}".format(path, format)
    if not qs:
        if request.query_string:
            return "{}?{}".format(path, request.query_string)
        return path
    extra = urllib.parse.urlencode(sorted(qs.items()))
    if request.query_string:
        return "{}?{}&{}".format(path, request.query_string, extra)
    return "{}?{}".format(path, extra)
class CustomRow(OrderedDict):
    """Loose imitation of sqlite3.Row offering both index-based AND
    key-based lookups."""

    def __init__(self, columns, values=None):
        self.columns = columns
        if values:
            self.update(values)

    def __getitem__(self, key):
        # Integer keys index into self.columns; anything else is a dict key
        lookup = self.columns[key] if isinstance(key, int) else key
        return super().__getitem__(lookup)

    def __iter__(self):
        return (self[column] for column in self.columns)
def value_as_boolean(value):
    """Parse on/off/true/false/1/0 (case-insensitive) into a bool.

    Raises ValueAsBooleanError for anything else.
    """
    lowered = value.lower()
    if lowered in ('on', 'true', '1'):
        return True
    if lowered in ('off', 'false', '0'):
        return False
    raise ValueAsBooleanError
class ValueAsBooleanError(ValueError):
    # Raised by value_as_boolean() for strings that are not a recognized
    # boolean representation (on/off/true/false/1/0).
    pass


class WriteLimitExceeded(Exception):
    # Raised by LimitedWriter.write() once the configured byte limit is hit.
    pass
class LimitedWriter:
    """Wrap a writer, raising WriteLimitExceeded past limit_mb megabytes.

    A limit_mb of 0 disables the limit entirely.
    """

    def __init__(self, writer, limit_mb):
        self.writer = writer
        self.limit_bytes = limit_mb * 1024 * 1024
        self.bytes_count = 0

    def write(self, bytes):
        self.bytes_count += len(bytes)
        over_limit = self.limit_bytes and self.bytes_count > self.limit_bytes
        if over_limit:
            raise WriteLimitExceeded(
                "CSV contains more than {} bytes".format(self.limit_bytes)
            )
        self.writer.write(bytes)
_infinities = {float("inf"), float("-inf")}


def remove_infinites(row):
    """Replace +/-inf float values in row with None.

    Returns the original row object unchanged when it has no infinities.
    """
    def _is_inf(value):
        return isinstance(value, float) and value in _infinities

    if not any(_is_inf(value) for value in row):
        return row
    return [None if _is_inf(value) else value for value in row]
class StaticMount(click.ParamType):
    # click parameter type for --static mountpoint:directory options;
    # convert() returns a (mount_point, directory_path) tuple.
    name = "static mount"

    def convert(self, value, param, ctx):
        # Expect "mountpoint:directory" and require the directory to exist.
        if ":" not in value:
            self.fail(
                '"{}" should be of format mountpoint:directory'.format(value),
                param, ctx
            )
        # NOTE(review): split(":") raises for values containing more than one
        # colon (e.g. Windows paths like static:C:\www) - verify callers.
        path, dirpath = value.split(":")
        if not os.path.exists(dirpath) or not os.path.isdir(dirpath):
            self.fail("%s is not a valid directory path" % value, param, ctx)
        return path, dirpath
def format_bytes(bytes):
    """Human-readable size: "103 bytes", "2.0 KB", "1.5 GB", ..."""
    size = float(bytes)
    unit = "bytes"
    for unit in ("bytes", "KB", "MB", "GB", "TB"):
        if size < 1024:
            break
        size /= 1024
    # Whole numbers for bytes, one decimal place for everything larger
    if unit == "bytes":
        return "{} {}".format(int(size), unit)
    return "{:.1f} {}".format(size, unit)
|
simonw/datasette | datasette/utils.py | to_css_class | python | def to_css_class(s):
if css_class_re.match(s):
return s
md5_suffix = hashlib.md5(s.encode('utf8')).hexdigest()[:6]
# Strip leading _, -
s = s.lstrip('_').lstrip('-')
# Replace any whitespace with hyphens
s = '-'.join(s.split())
# Remove any remaining invalid characters
s = css_invalid_chars_re.sub('', s)
# Attach the md5 suffix
bits = [b for b in (s, md5_suffix) if b]
return '-'.join(bits) | Given a string (e.g. a table name) returns a valid unique CSS class.
For simple cases, just returns the string again. If the string is not a
valid CSS class (we disallow - and _ prefixes even though they are valid
as they may be confused with browser prefixes) we strip invalid characters
and add a 6 char md5 sum suffix, to make sure two tables with identical
names after stripping characters don't end up with the same CSS class. | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/utils.py#L643-L663 | null | from contextlib import contextmanager
from collections import OrderedDict
import base64
import click
import hashlib
import imp
import json
import os
import pkg_resources
import re
import shlex
import tempfile
import time
import shutil
import urllib
import numbers
try:
import pysqlite3 as sqlite3
except ImportError:
import sqlite3
# From https://www.sqlite.org/lang_keywords.html
reserved_words = set((
'abort action add after all alter analyze and as asc attach autoincrement '
'before begin between by cascade case cast check collate column commit '
'conflict constraint create cross current_date current_time '
'current_timestamp database default deferrable deferred delete desc detach '
'distinct drop each else end escape except exclusive exists explain fail '
'for foreign from full glob group having if ignore immediate in index '
'indexed initially inner insert instead intersect into is isnull join key '
'left like limit match natural no not notnull null of offset on or order '
'outer plan pragma primary query raise recursive references regexp reindex '
'release rename replace restrict right rollback row savepoint select set '
'table temp temporary then to transaction trigger union unique update using '
'vacuum values view virtual when where with without'
).split())
SPATIALITE_DOCKERFILE_EXTRAS = r'''
RUN apt-get update && \
apt-get install -y python3-dev gcc libsqlite3-mod-spatialite && \
rm -rf /var/lib/apt/lists/*
ENV SQLITE_EXTENSIONS /usr/lib/x86_64-linux-gnu/mod_spatialite.so
'''
class InterruptedError(Exception):
    # Raised when a SQL query is cut short (see sqlite_timelimit usage).
    # NOTE(review): this shadows the builtin InterruptedError (an OSError
    # subclass) within this module - consider renaming.
    pass
class Results:
    """Outcome of a SQL query: rows plus truncation flag and cursor description."""

    def __init__(self, rows, truncated, description):
        self.rows = rows
        self.truncated = truncated
        self.description = description

    @property
    def columns(self):
        # First element of each cursor.description tuple is the column name
        return [col[0] for col in self.description]

    def __iter__(self):
        return iter(self.rows)

    def __len__(self):
        return len(self.rows)
def urlsafe_components(token):
    "Splits token on commas and URL decodes each component"
    return list(map(urllib.parse.unquote_plus, token.split(',')))
def path_from_row_pks(row, pks, use_rowid, quote=True):
    """Generate an optionally URL-quoted unique identifier
    for a row from its primary keys."""
    if use_rowid:
        raw_bits = [row['rowid']]
    else:
        raw_bits = []
        for pk in pks:
            cell = row[pk]
            # Expanded cells arrive as {"value": ...} dicts
            raw_bits.append(cell["value"] if isinstance(cell, dict) else cell)
    if quote:
        return ','.join(urllib.parse.quote_plus(str(bit)) for bit in raw_bits)
    return ','.join(str(bit) for bit in raw_bits)
def compound_keys_after_sql(pks, start_index=0):
    # Implementation of keyset pagination
    # See https://github.com/simonw/datasette/issues/190
    # For pk1/pk2/pk3 returns:
    #
    # ([pk1] > :p0)
    # or
    # ([pk1] = :p0 and [pk2] > :p1)
    # or
    # ([pk1] = :p0 and [pk2] = :p1 and [pk3] > :p2)
    #
    # start_index offsets the :pN parameter numbering so the fragment can
    # be combined with other parameterized clauses.
    or_clauses = []
    pks_left = pks[:]
    while pks_left:
        # NOTE(review): this initial assignment is immediately overwritten
        # by the comprehension below - redundant but harmless.
        and_clauses = []
        last = pks_left[-1]
        rest = pks_left[:-1]
        # Equality tests on every key except the last, which uses ">"
        and_clauses = ['{} = :p{}'.format(
            escape_sqlite(pk), (i + start_index)
        ) for i, pk in enumerate(rest)]
        and_clauses.append('{} > :p{}'.format(
            escape_sqlite(last), (len(rest) + start_index)
        ))
        or_clauses.append('({})'.format(' and '.join(and_clauses)))
        pks_left.pop()
    # Clauses were built longest-first; present them shortest-first
    or_clauses.reverse()
    return '({})'.format('\n or\n'.join(or_clauses))
class CustomJSONEncoder(json.JSONEncoder):
    """JSON encoder that understands sqlite3 rows/cursors and bytes values."""

    def default(self, obj):
        if isinstance(obj, sqlite3.Row):
            return tuple(obj)
        if isinstance(obj, sqlite3.Cursor):
            return list(obj)
        if isinstance(obj, bytes):
            # Emit UTF-8 text directly; anything else as tagged base64
            try:
                return obj.decode('utf8')
            except UnicodeDecodeError:
                return {
                    '$base64': True,
                    'encoded': base64.b64encode(obj).decode('latin1'),
                }
        return super().default(obj)
@contextmanager
def sqlite_timelimit(conn, ms):
    """Abort queries on conn that run for longer than ms milliseconds."""
    deadline = time.time() + ms / 1000
    # n is the number of SQLite virtual machine instructions executed
    # between progress-handler checks. It's hard to know what to pick;
    # after some experimentation: 1000 by default, 1 for limits under 50ms.
    n = 1 if ms < 50 else 1000

    def handler():
        # Returning a non-zero value from the handler interrupts the query
        if time.time() >= deadline:
            return 1

    conn.set_progress_handler(handler, n)
    yield
    conn.set_progress_handler(None, n)
class InvalidSql(Exception):
    # Raised when a user-supplied SQL statement is not an allowed SELECT
    pass


allowed_sql_res = [
    re.compile(r'^select\b'),
    re.compile(r'^explain select\b'),
    re.compile(r'^explain query plan select\b'),
    re.compile(r'^with\b'),
]
disallawed_sql_res = [
    (re.compile('pragma'), 'Statement may not contain PRAGMA'),
]


def validate_sql_select(sql):
    """Raise InvalidSql unless sql is a read-only SELECT-style statement."""
    normalized = sql.strip().lower()
    if not any(pattern.match(normalized) for pattern in allowed_sql_res):
        raise InvalidSql('Statement must be a SELECT')
    for pattern, message in disallawed_sql_res:
        if pattern.search(normalized):
            raise InvalidSql(message)
def append_querystring(url, querystring):
    """Append querystring to url using the correct ? or & separator."""
    separator = "&" if "?" in url else "?"
    return url + separator + querystring
def path_with_added_args(request, args, path=None):
    """Return path with args merged into the request's querystring.

    args may be a dict or an iterable of (key, value) pairs; a value of
    None removes that key from the existing querystring instead.
    """
    path = path or request.path
    if isinstance(args, dict):
        args = args.items()
    keys_to_drop = {key for key, value in args if value is None}
    pairs = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(request.query_string)
        if key not in keys_to_drop
    ]
    pairs.extend([(key, value) for key, value in args if value is not None])
    query_string = urllib.parse.urlencode(pairs)
    if query_string:
        return path + '?{}'.format(query_string)
    return path
def path_with_removed_args(request, args, path=None):
    """Return path with the specified args removed from the querystring.

    args may be a set (remove by key) or a dict (remove only exact
    key/value matches). When an explicit path containing "?" is supplied,
    its own querystring is used instead of the request's.
    """
    query_string = request.query_string
    if path is None:
        path = request.path
    elif "?" in path:
        path, query_string = path.split("?", 1)
    if isinstance(args, set):
        def should_remove(key, value):
            return key in args
    elif isinstance(args, dict):
        # Must match key AND value
        def should_remove(key, value):
            return args.get(key) == value
    kept = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(query_string)
        if not should_remove(key, value)
    ]
    new_query_string = urllib.parse.urlencode(kept)
    if new_query_string:
        return path + '?{}'.format(new_query_string)
    return path
def path_with_replaced_args(request, args, path=None):
    """Return path with the given keys replaced in the querystring.

    Existing values for any key mentioned in args are dropped, then the
    (key, value) pairs with a non-None value are appended.
    """
    path = path or request.path
    if isinstance(args, dict):
        args = args.items()
    replaced_keys = {key for key, value in args}
    pairs = [
        (key, value)
        for key, value in urllib.parse.parse_qsl(request.query_string)
        if key not in replaced_keys
    ]
    pairs.extend([pair for pair in args if pair[1] is not None])
    query_string = urllib.parse.urlencode(pairs)
    if query_string:
        return path + '?{}'.format(query_string)
    return path
_css_re = re.compile(r'''['"\n\\]''')
_boring_keyword_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')


def escape_css_string(s):
    """Escape quotes, newlines and backslashes for embedding in CSS."""
    def _escape(match):
        # CSS escape: backslash followed by the uppercase hex code point
        return '\\{:X}'.format(ord(match.group()))
    return _css_re.sub(_escape, s)
def escape_sqlite(s):
    """Wrap s in [brackets] unless it is a plain, non-reserved identifier."""
    is_safe = _boring_keyword_re.match(s) and s.lower() not in reserved_words
    if is_safe:
        return s
    return '[{}]'.format(s)
def make_dockerfile(files, metadata_file, extra_options, branch, template_dir, plugins_dir, static, install, spatialite, version_note):
    # Render a Dockerfile that serves the given database files with
    # `datasette serve`. Builds the CMD as a JSON-style array of
    # pre-quoted strings, then substitutes into the template below.
    cmd = ['"datasette"', '"serve"', '"--host"', '"0.0.0.0"']
    cmd.append('"' + '", "'.join(files) + '"')
    cmd.extend(['"--cors"', '"--port"', '"8001"', '"--inspect-file"', '"inspect-data.json"'])
    if metadata_file:
        cmd.extend(['"--metadata"', '"{}"'.format(metadata_file)])
    if template_dir:
        cmd.extend(['"--template-dir"', '"templates/"'])
    if plugins_dir:
        cmd.extend(['"--plugins-dir"', '"plugins/"'])
    if version_note:
        cmd.extend(['"--version-note"', '"{}"'.format(version_note)])
    if static:
        for mount_point, _ in static:
            cmd.extend(['"--static"', '"{}:{}"'.format(mount_point, mount_point)])
    if extra_options:
        for opt in extra_options.split():
            cmd.append('"{}"'.format(opt))
    # Install Datasette from a git branch archive if requested, otherwise
    # from PyPI, plus any additional packages listed in *install*.
    if branch:
        install = ['https://github.com/simonw/datasette/archive/{}.zip'.format(
            branch
        )] + list(install)
    else:
        install = ['datasette'] + list(install)
    return '''
FROM python:3.6
COPY . /app
WORKDIR /app
{spatialite_extras}
RUN pip install -U {install_from}
RUN datasette inspect {files} --inspect-file inspect-data.json
EXPOSE 8001
CMD [{cmd}]'''.format(
        files=' '.join(files),
        cmd=', '.join(cmd),
        install_from=' '.join(install),
        spatialite_extras=SPATIALITE_DOCKERFILE_EXTRAS if spatialite else '',
    ).strip()
@contextmanager
def temporary_docker_directory(
    files,
    name,
    metadata,
    extra_options,
    branch,
    template_dir,
    plugins_dir,
    static,
    install,
    spatialite,
    version_note,
    extra_metadata=None
):
    """Context manager: assemble a temporary directory ready for
    `docker build`, yielding its path.

    Copies (hard-linking where possible) the database files, optional
    template/plugin/static directories, a generated Dockerfile and
    metadata.json into a fresh TemporaryDirectory. The working directory
    is changed into it for the duration and restored afterwards.
    """
    extra_metadata = extra_metadata or {}
    tmp = tempfile.TemporaryDirectory()
    # We create a datasette folder in there to get a nicer now deploy name
    datasette_dir = os.path.join(tmp.name, name)
    os.mkdir(datasette_dir)
    saved_cwd = os.getcwd()
    file_paths = [
        os.path.join(saved_cwd, file_path)
        for file_path in files
    ]
    file_names = [os.path.split(f)[-1] for f in files]
    if metadata:
        metadata_content = json.load(metadata)
    else:
        metadata_content = {}
    # extra_metadata values override/extend the loaded metadata
    for key, value in extra_metadata.items():
        if value:
            metadata_content[key] = value
    try:
        dockerfile = make_dockerfile(
            file_names,
            metadata_content and 'metadata.json',
            extra_options,
            branch,
            template_dir,
            plugins_dir,
            static,
            install,
            spatialite,
            version_note,
        )
        os.chdir(datasette_dir)
        if metadata_content:
            open('metadata.json', 'w').write(json.dumps(metadata_content, indent=2))
        open('Dockerfile', 'w').write(dockerfile)
        for path, filename in zip(file_paths, file_names):
            link_or_copy(path, os.path.join(datasette_dir, filename))
        if template_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, template_dir),
                os.path.join(datasette_dir, 'templates')
            )
        if plugins_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, plugins_dir),
                os.path.join(datasette_dir, 'plugins')
            )
        for mount_point, path in static:
            link_or_copy_directory(
                os.path.join(saved_cwd, path),
                os.path.join(datasette_dir, mount_point)
            )
        yield datasette_dir
    finally:
        # Always clean up the temp tree and restore the working directory
        tmp.cleanup()
        os.chdir(saved_cwd)
@contextmanager
def temporary_heroku_directory(
    files,
    name,
    metadata,
    extra_options,
    branch,
    template_dir,
    plugins_dir,
    static,
    install,
    version_note,
    extra_metadata=None
):
    """Context manager: assemble a temporary directory ready for a Heroku
    deploy (Procfile, requirements.txt, runtime.txt, bin/post_compile).

    Changes the working directory into the temp directory for the
    duration and restores it afterwards; yields nothing.
    """
    # FIXME: lots of duplicated code from above
    extra_metadata = extra_metadata or {}
    tmp = tempfile.TemporaryDirectory()
    saved_cwd = os.getcwd()
    file_paths = [
        os.path.join(saved_cwd, file_path)
        for file_path in files
    ]
    file_names = [os.path.split(f)[-1] for f in files]
    if metadata:
        metadata_content = json.load(metadata)
    else:
        metadata_content = {}
    # extra_metadata values override/extend the loaded metadata
    for key, value in extra_metadata.items():
        if value:
            metadata_content[key] = value
    try:
        os.chdir(tmp.name)
        if metadata_content:
            open('metadata.json', 'w').write(json.dumps(metadata_content, indent=2))
        open('runtime.txt', 'w').write('python-3.6.7')
        # Install Datasette from a git branch archive if requested,
        # otherwise from PyPI, plus any extra packages in *install*
        if branch:
            install = ['https://github.com/simonw/datasette/archive/{branch}.zip'.format(
                branch=branch
            )] + list(install)
        else:
            install = ['datasette'] + list(install)
        open('requirements.txt', 'w').write('\n'.join(install))
        os.mkdir('bin')
        open('bin/post_compile', 'w').write('datasette inspect --inspect-file inspect-data.json')
        # extras accumulates extra command-line options for the Procfile
        extras = []
        if template_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, template_dir),
                os.path.join(tmp.name, 'templates')
            )
            extras.extend(['--template-dir', 'templates/'])
        if plugins_dir:
            link_or_copy_directory(
                os.path.join(saved_cwd, plugins_dir),
                os.path.join(tmp.name, 'plugins')
            )
            extras.extend(['--plugins-dir', 'plugins/'])
        if version_note:
            extras.extend(['--version-note', version_note])
        if metadata_content:
            extras.extend(['--metadata', 'metadata.json'])
        if extra_options:
            extras.extend(extra_options.split())
        for mount_point, path in static:
            link_or_copy_directory(
                os.path.join(saved_cwd, path),
                os.path.join(tmp.name, mount_point)
            )
            extras.extend(['--static', '{}:{}'.format(mount_point, mount_point)])
        quoted_files = " ".join(map(shlex.quote, file_names))
        procfile_cmd = 'web: datasette serve --host 0.0.0.0 {quoted_files} --cors --port $PORT --inspect-file inspect-data.json {extras}'.format(
            quoted_files=quoted_files,
            extras=' '.join(extras),
        )
        open('Procfile', 'w').write(procfile_cmd)
        for path, filename in zip(file_paths, file_names):
            link_or_copy(path, os.path.join(tmp.name, filename))
        yield
    finally:
        # Always clean up the temp tree and restore the working directory
        tmp.cleanup()
        os.chdir(saved_cwd)
def detect_primary_keys(conn, table):
" Figure out primary keys for a table. "
table_info_rows = [
row
for row in conn.execute(
'PRAGMA table_info("{}")'.format(table)
).fetchall()
if row[-1]
]
table_info_rows.sort(key=lambda row: row[-1])
return [str(r[1]) for r in table_info_rows]
def get_outbound_foreign_keys(conn, table):
infos = conn.execute(
'PRAGMA foreign_key_list([{}])'.format(table)
).fetchall()
fks = []
for info in infos:
if info is not None:
id, seq, table_name, from_, to_, on_update, on_delete, match = info
fks.append({
'other_table': table_name,
'column': from_,
'other_column': to_
})
return fks
def get_all_foreign_keys(conn):
tables = [r[0] for r in conn.execute('select name from sqlite_master where type="table"')]
table_to_foreign_keys = {}
for table in tables:
table_to_foreign_keys[table] = {
'incoming': [],
'outgoing': [],
}
for table in tables:
infos = conn.execute(
'PRAGMA foreign_key_list([{}])'.format(table)
).fetchall()
for info in infos:
if info is not None:
id, seq, table_name, from_, to_, on_update, on_delete, match = info
if table_name not in table_to_foreign_keys:
# Weird edge case where something refers to a table that does
# not actually exist
continue
table_to_foreign_keys[table_name]['incoming'].append({
'other_table': table,
'column': to_,
'other_column': from_
})
table_to_foreign_keys[table]['outgoing'].append({
'other_table': table_name,
'column': from_,
'other_column': to_
})
return table_to_foreign_keys
def detect_spatialite(conn):
rows = conn.execute('select 1 from sqlite_master where tbl_name = "geometry_columns"').fetchall()
return len(rows) > 0
def detect_fts(conn, table):
"Detect if table has a corresponding FTS virtual table and return it"
rows = conn.execute(detect_fts_sql(table)).fetchall()
if len(rows) == 0:
return None
else:
return rows[0][0]
def detect_fts_sql(table):
return r'''
select name from sqlite_master
where rootpage = 0
and (
sql like '%VIRTUAL TABLE%USING FTS%content="{table}"%'
or (
tbl_name = "{table}"
and sql like '%VIRTUAL TABLE%USING FTS%'
)
)
'''.format(table=table)
def detect_json1(conn=None):
if conn is None:
conn = sqlite3.connect(":memory:")
try:
conn.execute("SELECT json('{}')")
return True
except Exception:
return False
def table_columns(conn, table):
return [
r[1]
for r in conn.execute(
"PRAGMA table_info({});".format(escape_sqlite(table))
).fetchall()
]
filter_column_re = re.compile(r'^_filter_column_\d+$')
def filters_should_redirect(special_args):
redirect_params = []
# Handle _filter_column=foo&_filter_op=exact&_filter_value=...
filter_column = special_args.get('_filter_column')
filter_op = special_args.get('_filter_op') or ''
filter_value = special_args.get('_filter_value') or ''
if '__' in filter_op:
filter_op, filter_value = filter_op.split('__', 1)
if filter_column:
redirect_params.append(
('{}__{}'.format(filter_column, filter_op), filter_value)
)
for key in ('_filter_column', '_filter_op', '_filter_value'):
if key in special_args:
redirect_params.append((key, None))
# Now handle _filter_column_1=name&_filter_op_1=contains&_filter_value_1=hello
column_keys = [k for k in special_args if filter_column_re.match(k)]
for column_key in column_keys:
number = column_key.split('_')[-1]
column = special_args[column_key]
op = special_args.get('_filter_op_{}'.format(number)) or 'exact'
value = special_args.get('_filter_value_{}'.format(number)) or ''
if '__' in op:
op, value = op.split('__', 1)
if column:
redirect_params.append(('{}__{}'.format(column, op), value))
redirect_params.extend([
('_filter_column_{}'.format(number), None),
('_filter_op_{}'.format(number), None),
('_filter_value_{}'.format(number), None),
])
return redirect_params
whitespace_re = re.compile(r'\s')


def is_url(value):
    "Must start with http:// or https:// and contain JUST a URL"
    if not isinstance(value, str):
        return False
    if not value.startswith(('http://', 'https://')):
        return False
    # Any whitespace at all is invalid
    return not whitespace_re.search(value)
css_class_re = re.compile(r'^[a-zA-Z]+[_a-zA-Z0-9-]*$')
css_invalid_chars_re = re.compile(r'[^a-zA-Z0-9_\-]')
def link_or_copy(src, dst):
# Intended for use in populating a temp directory. We link if possible,
# but fall back to copying if the temp directory is on a different device
# https://github.com/simonw/datasette/issues/141
try:
os.link(src, dst)
except OSError:
shutil.copyfile(src, dst)
def link_or_copy_directory(src, dst):
try:
shutil.copytree(src, dst, copy_function=os.link)
except OSError:
shutil.copytree(src, dst)
def module_from_path(path, name):
# Adapted from http://sayspy.blogspot.com/2011/07/how-to-import-module-from-just-file.html
mod = imp.new_module(name)
mod.__file__ = path
with open(path, 'r') as file:
code = compile(file.read(), path, 'exec', dont_inherit=True)
exec(code, mod.__dict__)
return mod
def get_plugins(pm):
plugins = []
plugin_to_distinfo = dict(pm.list_plugin_distinfo())
for plugin in pm.get_plugins():
static_path = None
templates_path = None
try:
if pkg_resources.resource_isdir(plugin.__name__, 'static'):
static_path = pkg_resources.resource_filename(plugin.__name__, 'static')
if pkg_resources.resource_isdir(plugin.__name__, 'templates'):
templates_path = pkg_resources.resource_filename(plugin.__name__, 'templates')
except (KeyError, ImportError):
# Caused by --plugins_dir= plugins - KeyError/ImportError thrown in Py3.5
pass
plugin_info = {
'name': plugin.__name__,
'static_path': static_path,
'templates_path': templates_path,
}
distinfo = plugin_to_distinfo.get(plugin)
if distinfo:
plugin_info['version'] = distinfo.version
plugins.append(plugin_info)
return plugins
FORMATS = ('csv', 'json', 'jsono')
async def resolve_table_and_format(table_and_format, table_exists):
if '.' in table_and_format:
# Check if a table exists with this exact name
it_exists = await table_exists(table_and_format)
if it_exists:
return table_and_format, None
# Check if table ends with a known format
for _format in FORMATS:
if table_and_format.endswith(".{}".format(_format)):
table = table_and_format[:-(len(_format) + 1)]
return table, _format
return table_and_format, None
def path_with_format(request, format, extra_qs=None):
qs = extra_qs or {}
path = request.path
if "." in request.path:
qs["_format"] = format
else:
path = "{}.{}".format(path, format)
if qs:
extra = urllib.parse.urlencode(sorted(qs.items()))
if request.query_string:
path = "{}?{}&{}".format(
path, request.query_string, extra
)
else:
path = "{}?{}".format(path, extra)
elif request.query_string:
path = "{}?{}".format(path, request.query_string)
return path
class CustomRow(OrderedDict):
# Loose imitation of sqlite3.Row which offers
# both index-based AND key-based lookups
def __init__(self, columns, values=None):
self.columns = columns
if values:
self.update(values)
def __getitem__(self, key):
if isinstance(key, int):
return super().__getitem__(self.columns[key])
else:
return super().__getitem__(key)
def __iter__(self):
for column in self.columns:
yield self[column]
def value_as_boolean(value):
if value.lower() not in ('on', 'off', 'true', 'false', '1', '0'):
raise ValueAsBooleanError
return value.lower() in ('on', 'true', '1')
class ValueAsBooleanError(ValueError):
pass
class WriteLimitExceeded(Exception):
pass
class LimitedWriter:
def __init__(self, writer, limit_mb):
self.writer = writer
self.limit_bytes = limit_mb * 1024 * 1024
self.bytes_count = 0
def write(self, bytes):
self.bytes_count += len(bytes)
if self.limit_bytes and (self.bytes_count > self.limit_bytes):
raise WriteLimitExceeded("CSV contains more than {} bytes".format(
self.limit_bytes
))
self.writer.write(bytes)
_infinities = {float("inf"), float("-inf")}
def remove_infinites(row):
if any((c in _infinities) if isinstance(c, float) else 0 for c in row):
return [
None if (isinstance(c, float) and c in _infinities) else c
for c in row
]
return row
class StaticMount(click.ParamType):
name = "static mount"
def convert(self, value, param, ctx):
if ":" not in value:
self.fail(
'"{}" should be of format mountpoint:directory'.format(value),
param, ctx
)
path, dirpath = value.split(":")
if not os.path.exists(dirpath) or not os.path.isdir(dirpath):
self.fail("%s is not a valid directory path" % value, param, ctx)
return path, dirpath
def format_bytes(bytes):
current = float(bytes)
for unit in ("bytes", "KB", "MB", "GB", "TB"):
if current < 1024:
break
current = current / 1024
if unit == "bytes":
return "{} {}".format(int(current), unit)
else:
return "{:.1f} {}".format(current, unit)
|
simonw/datasette | datasette/app.py | Datasette.metadata | python | def metadata(self, key=None, database=None, table=None, fallback=True):
assert not (database is None and table is not None), \
"Cannot call metadata() with table= specified but not database="
databases = self._metadata.get("databases") or {}
search_list = []
if database is not None:
search_list.append(databases.get(database) or {})
if table is not None:
table_metadata = (
(databases.get(database) or {}).get("tables") or {}
).get(table) or {}
search_list.insert(0, table_metadata)
search_list.append(self._metadata)
if not fallback:
# No fallback allowed, so just use the first one in the list
search_list = search_list[:1]
if key is not None:
for item in search_list:
if key in item:
return item[key]
return None
else:
# Return the merged list
m = {}
for item in search_list:
m.update(item)
return m | Looks up metadata, cascading backwards from specified level.
Returns None if metadata value is not found. | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/app.py#L239-L269 | null | class Datasette:
def __init__(
self,
files,
immutables=None,
cache_headers=True,
cors=False,
inspect_data=None,
metadata=None,
sqlite_extensions=None,
template_dir=None,
plugins_dir=None,
static_mounts=None,
memory=False,
config=None,
version_note=None,
):
immutables = immutables or []
self.files = tuple(files) + tuple(immutables)
self.immutables = set(immutables)
if not self.files:
self.files = [MEMORY]
elif memory:
self.files = (MEMORY,) + self.files
self.databases = {}
for file in self.files:
path = file
is_memory = False
if file is MEMORY:
path = None
is_memory = True
db = ConnectedDatabase(path, is_mutable=path not in self.immutables, is_memory=is_memory)
if db.name in self.databases:
raise Exception("Multiple files with same stem: {}".format(db.name))
self.databases[db.name] = db
self.cache_headers = cache_headers
self.cors = cors
self._inspect = inspect_data
self._metadata = metadata or {}
self.sqlite_functions = []
self.sqlite_extensions = sqlite_extensions or []
self.template_dir = template_dir
self.plugins_dir = plugins_dir
self.static_mounts = static_mounts or []
self._config = dict(DEFAULT_CONFIG, **(config or {}))
self.version_note = version_note
self.executor = futures.ThreadPoolExecutor(
max_workers=self.config("num_sql_threads")
)
self.max_returned_rows = self.config("max_returned_rows")
self.sql_time_limit_ms = self.config("sql_time_limit_ms")
self.page_size = self.config("default_page_size")
# Execute plugins in constructor, to ensure they are available
# when the rest of `datasette inspect` executes
if self.plugins_dir:
for filename in os.listdir(self.plugins_dir):
filepath = os.path.join(self.plugins_dir, filename)
mod = module_from_path(filepath, name=filename)
try:
pm.register(mod)
except ValueError:
# Plugin already registered
pass
def config(self, key):
return self._config.get(key, None)
def config_dict(self):
# Returns a fully resolved config dictionary, useful for templates
return {
option.name: self.config(option.name)
for option in CONFIG_OPTIONS
}
def plugin_config(
self, plugin_name, database=None, table=None, fallback=True
):
"Return config for plugin, falling back from specified database/table"
plugins = self.metadata(
"plugins", database=database, table=table, fallback=fallback
)
if plugins is None:
return None
return plugins.get(plugin_name)
def app_css_hash(self):
if not hasattr(self, "_app_css_hash"):
self._app_css_hash = hashlib.sha1(
open(
os.path.join(str(app_root), "datasette/static/app.css")
).read().encode(
"utf8"
)
).hexdigest()[
:6
]
return self._app_css_hash
def get_canned_queries(self, database_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
names = queries.keys()
return [
self.get_canned_query(database_name, name) for name in names
]
def get_canned_query(self, database_name, query_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
query = queries.get(query_name)
if query:
if not isinstance(query, dict):
query = {"sql": query}
query["name"] = query_name
return query
async def get_table_definition(self, database_name, table, type_="table"):
table_definition_rows = list(
await self.execute(
database_name,
'select sql from sqlite_master where name = :n and type=:t',
{"n": table, "t": type_},
)
)
if not table_definition_rows:
return None
return table_definition_rows[0][0]
def get_view_definition(self, database_name, view):
return self.get_table_definition(database_name, view, 'view')
def update_with_inherited_metadata(self, metadata):
# Fills in source/license with defaults, if available
metadata.update(
{
"source": metadata.get("source") or self.metadata("source"),
"source_url": metadata.get("source_url")
or self.metadata("source_url"),
"license": metadata.get("license") or self.metadata("license"),
"license_url": metadata.get("license_url")
or self.metadata("license_url"),
"about": metadata.get("about") or self.metadata("about"),
"about_url": metadata.get("about_url")
or self.metadata("about_url"),
}
)
def prepare_connection(self, conn):
conn.row_factory = sqlite3.Row
conn.text_factory = lambda x: str(x, "utf-8", "replace")
for name, num_args, func in self.sqlite_functions:
conn.create_function(name, num_args, func)
if self.sqlite_extensions:
conn.enable_load_extension(True)
for extension in self.sqlite_extensions:
conn.execute("SELECT load_extension('{}')".format(extension))
if self.config("cache_size_kb"):
conn.execute('PRAGMA cache_size=-{}'.format(self.config("cache_size_kb")))
# pylint: disable=no-member
pm.hook.prepare_connection(conn=conn)
async def table_exists(self, database, table):
results = await self.execute(
database,
"select 1 from sqlite_master where type='table' and name=?",
params=(table,)
)
return bool(results.rows)
async def expand_foreign_keys(self, database, table, column, values):
"Returns dict mapping (column, value) -> label"
labeled_fks = {}
foreign_keys = await self.foreign_keys_for_table(database, table)
# Find the foreign_key for this column
try:
fk = [
foreign_key for foreign_key in foreign_keys
if foreign_key["column"] == column
][0]
except IndexError:
return {}
label_column = await self.label_column_for_table(database, fk["other_table"])
if not label_column:
return {
(fk["column"], value): str(value)
for value in values
}
labeled_fks = {}
sql = '''
select {other_column}, {label_column}
from {other_table}
where {other_column} in ({placeholders})
'''.format(
other_column=escape_sqlite(fk["other_column"]),
label_column=escape_sqlite(label_column),
other_table=escape_sqlite(fk["other_table"]),
placeholders=", ".join(["?"] * len(set(values))),
)
try:
results = await self.execute(
database, sql, list(set(values))
)
except InterruptedError:
pass
else:
for id, value in results:
labeled_fks[(fk["column"], id)] = value
return labeled_fks
def absolute_url(self, request, path):
url = urllib.parse.urljoin(request.url, path)
if url.startswith("http://") and self.config("force_https_urls"):
url = "https://" + url[len("http://"):]
return url
def inspect(self):
" Inspect the database and return a dictionary of table metadata "
if self._inspect:
return self._inspect
self._inspect = {}
for filename in self.files:
if filename is MEMORY:
self._inspect[":memory:"] = {
"hash": "000",
"file": ":memory:",
"size": 0,
"views": {},
"tables": {},
}
else:
path = Path(filename)
name = path.stem
if name in self._inspect:
raise Exception("Multiple files with same stem %s" % name)
try:
with sqlite3.connect(
"file:{}?mode=ro".format(path), uri=True
) as conn:
self.prepare_connection(conn)
self._inspect[name] = {
"hash": inspect_hash(path),
"file": str(path),
"size": path.stat().st_size,
"views": inspect_views(conn),
"tables": inspect_tables(conn, (self.metadata("databases") or {}).get(name, {}))
}
except sqlite3.OperationalError as e:
if (e.args[0] == 'no such module: VirtualSpatialIndex'):
raise click.UsageError(
"It looks like you're trying to load a SpatiaLite"
" database without first loading the SpatiaLite module."
"\n\nRead more: https://datasette.readthedocs.io/en/latest/spatialite.html"
)
else:
raise
return self._inspect
def register_custom_units(self):
"Register any custom units defined in the metadata.json with Pint"
for unit in self.metadata("custom_units") or []:
ureg.define(unit)
def versions(self):
conn = sqlite3.connect(":memory:")
self.prepare_connection(conn)
sqlite_version = conn.execute("select sqlite_version()").fetchone()[0]
sqlite_extensions = {}
for extension, testsql, hasversion in (
("json1", "SELECT json('{}')", False),
("spatialite", "SELECT spatialite_version()", True),
):
try:
result = conn.execute(testsql)
if hasversion:
sqlite_extensions[extension] = result.fetchone()[0]
else:
sqlite_extensions[extension] = None
except Exception:
pass
# Figure out supported FTS versions
fts_versions = []
for fts in ("FTS5", "FTS4", "FTS3"):
try:
conn.execute(
"CREATE VIRTUAL TABLE v{fts} USING {fts} (data)".format(fts=fts)
)
fts_versions.append(fts)
except sqlite3.OperationalError:
continue
datasette_version = {"version": __version__}
if self.version_note:
datasette_version["note"] = self.version_note
return {
"python": {
"version": ".".join(map(str, sys.version_info[:3])), "full": sys.version
},
"datasette": datasette_version,
"sqlite": {
"version": sqlite_version,
"fts_versions": fts_versions,
"extensions": sqlite_extensions,
"compile_options": [
r[0] for r in conn.execute("pragma compile_options;").fetchall()
],
},
}
def plugins(self, show_all=False):
ps = list(get_plugins(pm))
if not show_all:
ps = [p for p in ps if p["name"] not in DEFAULT_PLUGINS]
return [
{
"name": p["name"],
"static": p["static_path"] is not None,
"templates": p["templates_path"] is not None,
"version": p.get("version"),
}
for p in ps
]
def table_metadata(self, database, table):
"Fetch table-specific metadata."
return (self.metadata("databases") or {}).get(database, {}).get(
"tables", {}
).get(
table, {}
)
async def table_columns(self, db_name, table):
return await self.execute_against_connection_in_thread(
db_name, lambda conn: table_columns(conn, table)
)
async def foreign_keys_for_table(self, database, table):
return await self.execute_against_connection_in_thread(
database, lambda conn: get_outbound_foreign_keys(conn, table)
)
async def label_column_for_table(self, db_name, table):
explicit_label_column = (
self.table_metadata(
db_name, table
).get("label_column")
)
if explicit_label_column:
return explicit_label_column
# If a table has two columns, one of which is ID, then label_column is the other one
column_names = await self.table_columns(db_name, table)
if (column_names and len(column_names) == 2 and "id" in column_names):
return [c for c in column_names if c != "id"][0]
# Couldn't find a label:
return None
async def execute_against_connection_in_thread(self, db_name, fn):
def in_thread():
conn = getattr(connections, db_name, None)
if not conn:
db = self.databases[db_name]
if db.is_memory:
conn = sqlite3.connect(":memory:")
else:
# mode=ro or immutable=1?
if db.is_mutable:
qs = "mode=ro"
else:
qs = "immutable=1"
conn = sqlite3.connect(
"file:{}?{}".format(db.path, qs),
uri=True,
check_same_thread=False,
)
self.prepare_connection(conn)
setattr(connections, db_name, conn)
return fn(conn)
return await asyncio.get_event_loop().run_in_executor(
self.executor, in_thread
)
async def execute(
self,
db_name,
sql,
params=None,
truncate=False,
custom_time_limit=None,
page_size=None,
):
"""Executes sql against db_name in a thread"""
page_size = page_size or self.page_size
def sql_operation_in_thread(conn):
time_limit_ms = self.sql_time_limit_ms
if custom_time_limit and custom_time_limit < time_limit_ms:
time_limit_ms = custom_time_limit
with sqlite_timelimit(conn, time_limit_ms):
try:
cursor = conn.cursor()
cursor.execute(sql, params or {})
max_returned_rows = self.max_returned_rows
if max_returned_rows == page_size:
max_returned_rows += 1
if max_returned_rows and truncate:
rows = cursor.fetchmany(max_returned_rows + 1)
truncated = len(rows) > max_returned_rows
rows = rows[:max_returned_rows]
else:
rows = cursor.fetchall()
truncated = False
except sqlite3.OperationalError as e:
if e.args == ('interrupted',):
raise InterruptedError(e)
print(
"ERROR: conn={}, sql = {}, params = {}: {}".format(
conn, repr(sql), params, e
)
)
raise
if truncate:
return Results(rows, truncated, cursor.description)
else:
return Results(rows, False, cursor.description)
with trace("sql", (db_name, sql.strip(), params)):
results = await self.execute_against_connection_in_thread(
db_name, sql_operation_in_thread
)
return results
def app(self):
class TracingSanic(Sanic):
async def handle_request(self, request, write_callback, stream_callback):
if request.args.get("_trace"):
request["traces"] = []
with capture_traces(request["traces"]):
res = await super().handle_request(request, write_callback, stream_callback)
else:
res = await super().handle_request(request, write_callback, stream_callback)
return res
app = TracingSanic(__name__)
default_templates = str(app_root / "datasette" / "templates")
template_paths = []
if self.template_dir:
template_paths.append(self.template_dir)
template_paths.extend(
[
plugin["templates_path"]
for plugin in get_plugins(pm)
if plugin["templates_path"]
]
)
template_paths.append(default_templates)
template_loader = ChoiceLoader(
[
FileSystemLoader(template_paths),
# Support {% extends "default:table.html" %}:
PrefixLoader(
{"default": FileSystemLoader(default_templates)}, delimiter=":"
),
]
)
self.jinja_env = Environment(loader=template_loader, autoescape=True)
self.jinja_env.filters["escape_css_string"] = escape_css_string
self.jinja_env.filters["quote_plus"] = lambda u: urllib.parse.quote_plus(u)
self.jinja_env.filters["escape_sqlite"] = escape_sqlite
self.jinja_env.filters["to_css_class"] = to_css_class
# pylint: disable=no-member
pm.hook.prepare_jinja2_environment(env=self.jinja_env)
app.add_route(IndexView.as_view(self), r"/<as_format:(\.jsono?)?$>")
# TODO: /favicon.ico and /-/static/ deserve far-future cache expires
app.add_route(favicon, "/favicon.ico")
app.static("/-/static/", str(app_root / "datasette" / "static"))
for path, dirname in self.static_mounts:
app.static(path, dirname)
# Mount any plugin static/ directories
for plugin in get_plugins(pm):
if plugin["static_path"]:
modpath = "/-/static-plugins/{}/".format(plugin["name"])
app.static(modpath, plugin["static_path"])
app.add_route(
JsonDataView.as_view(self, "inspect.json", self.inspect),
r"/-/inspect<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "metadata.json", lambda: self._metadata),
r"/-/metadata<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "versions.json", self.versions),
r"/-/versions<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "plugins.json", self.plugins),
r"/-/plugins<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "config.json", lambda: self._config),
r"/-/config<as_format:(\.json)?$>",
)
app.add_route(
DatabaseDownload.as_view(self), r"/<db_name:[^/]+?><as_db:(\.db)$>"
)
app.add_route(
DatabaseView.as_view(self), r"/<db_name:[^/]+?><as_format:(\.jsono?|\.csv)?$>"
)
app.add_route(
TableView.as_view(self),
r"/<db_name:[^/]+>/<table_and_format:[^/]+?$>",
)
app.add_route(
RowView.as_view(self),
r"/<db_name:[^/]+>/<table:[^/]+?>/<pk_path:[^/]+?><as_format:(\.jsono?)?$>",
)
self.register_custom_units()
# On 404 with a trailing slash redirect to path without that slash:
# pylint: disable=unused-variable
@app.middleware("response")
def redirect_on_404_with_trailing_slash(request, original_response):
if original_response.status == 404 and request.path.endswith("/"):
path = request.path.rstrip("/")
if request.query_string:
path = "{}?{}".format(path, request.query_string)
return response.redirect(path)
@app.middleware("response")
async def add_traces_to_response(request, response):
if request.get("traces") is None:
return
traces = request["traces"]
if "text/html" in response.content_type and b'</body>' in response.body:
extra = json.dumps(traces, indent=2)
extra_html = "<pre>{}</pre></body>".format(extra).encode("utf8")
response.body = response.body.replace(b"</body>", extra_html)
elif "json" in response.content_type and response.body.startswith(b"{"):
data = json.loads(response.body.decode("utf8"))
if "_traces" not in data:
data["_traces"] = {
"num_traces": len(traces),
"traces": traces,
"duration_sum_ms": sum(t[-1] for t in traces),
}
response.body = json.dumps(data).encode("utf8")
@app.exception(Exception)
def on_exception(request, exception):
title = None
help = None
if isinstance(exception, NotFound):
status = 404
info = {}
message = exception.args[0]
elif isinstance(exception, InvalidUsage):
status = 405
info = {}
message = exception.args[0]
elif isinstance(exception, DatasetteError):
status = exception.status
info = exception.error_dict
message = exception.message
if exception.messagge_is_html:
message = Markup(message)
title = exception.title
else:
status = 500
info = {}
message = str(exception)
traceback.print_exc()
templates = ["500.html"]
if status != 500:
templates = ["{}.html".format(status)] + templates
info.update(
{"ok": False, "error": message, "status": status, "title": title}
)
if request is not None and request.path.split("?")[0].endswith(".json"):
return response.json(info, status=status)
else:
template = self.jinja_env.select_template(templates)
return response.html(template.render(info), status=status)
return app
|
simonw/datasette | datasette/app.py | Datasette.plugin_config | python | def plugin_config(
self, plugin_name, database=None, table=None, fallback=True
):
"Return config for plugin, falling back from specified database/table"
plugins = self.metadata(
"plugins", database=database, table=table, fallback=fallback
)
if plugins is None:
return None
return plugins.get(plugin_name) | Return config for plugin, falling back from specified database/table | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/app.py#L271-L280 | [
"def metadata(self, key=None, database=None, table=None, fallback=True):\n \"\"\"\n Looks up metadata, cascading backwards from specified level.\n Returns None if metadata value is not found.\n \"\"\"\n assert not (database is None and table is not None), \\\n \"Cannot call metadata() with tab... | class Datasette:
def __init__(
self,
files,
immutables=None,
cache_headers=True,
cors=False,
inspect_data=None,
metadata=None,
sqlite_extensions=None,
template_dir=None,
plugins_dir=None,
static_mounts=None,
memory=False,
config=None,
version_note=None,
):
immutables = immutables or []
self.files = tuple(files) + tuple(immutables)
self.immutables = set(immutables)
if not self.files:
self.files = [MEMORY]
elif memory:
self.files = (MEMORY,) + self.files
self.databases = {}
for file in self.files:
path = file
is_memory = False
if file is MEMORY:
path = None
is_memory = True
db = ConnectedDatabase(path, is_mutable=path not in self.immutables, is_memory=is_memory)
if db.name in self.databases:
raise Exception("Multiple files with same stem: {}".format(db.name))
self.databases[db.name] = db
self.cache_headers = cache_headers
self.cors = cors
self._inspect = inspect_data
self._metadata = metadata or {}
self.sqlite_functions = []
self.sqlite_extensions = sqlite_extensions or []
self.template_dir = template_dir
self.plugins_dir = plugins_dir
self.static_mounts = static_mounts or []
self._config = dict(DEFAULT_CONFIG, **(config or {}))
self.version_note = version_note
self.executor = futures.ThreadPoolExecutor(
max_workers=self.config("num_sql_threads")
)
self.max_returned_rows = self.config("max_returned_rows")
self.sql_time_limit_ms = self.config("sql_time_limit_ms")
self.page_size = self.config("default_page_size")
# Execute plugins in constructor, to ensure they are available
# when the rest of `datasette inspect` executes
if self.plugins_dir:
for filename in os.listdir(self.plugins_dir):
filepath = os.path.join(self.plugins_dir, filename)
mod = module_from_path(filepath, name=filename)
try:
pm.register(mod)
except ValueError:
# Plugin already registered
pass
def config(self, key):
return self._config.get(key, None)
def config_dict(self):
# Returns a fully resolved config dictionary, useful for templates
return {
option.name: self.config(option.name)
for option in CONFIG_OPTIONS
}
def metadata(self, key=None, database=None, table=None, fallback=True):
"""
Looks up metadata, cascading backwards from specified level.
Returns None if metadata value is not found.
"""
assert not (database is None and table is not None), \
"Cannot call metadata() with table= specified but not database="
databases = self._metadata.get("databases") or {}
search_list = []
if database is not None:
search_list.append(databases.get(database) or {})
if table is not None:
table_metadata = (
(databases.get(database) or {}).get("tables") or {}
).get(table) or {}
search_list.insert(0, table_metadata)
search_list.append(self._metadata)
if not fallback:
# No fallback allowed, so just use the first one in the list
search_list = search_list[:1]
if key is not None:
for item in search_list:
if key in item:
return item[key]
return None
else:
# Return the merged list
m = {}
for item in search_list:
m.update(item)
return m
def app_css_hash(self):
if not hasattr(self, "_app_css_hash"):
self._app_css_hash = hashlib.sha1(
open(
os.path.join(str(app_root), "datasette/static/app.css")
).read().encode(
"utf8"
)
).hexdigest()[
:6
]
return self._app_css_hash
def get_canned_queries(self, database_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
names = queries.keys()
return [
self.get_canned_query(database_name, name) for name in names
]
def get_canned_query(self, database_name, query_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
query = queries.get(query_name)
if query:
if not isinstance(query, dict):
query = {"sql": query}
query["name"] = query_name
return query
async def get_table_definition(self, database_name, table, type_="table"):
table_definition_rows = list(
await self.execute(
database_name,
'select sql from sqlite_master where name = :n and type=:t',
{"n": table, "t": type_},
)
)
if not table_definition_rows:
return None
return table_definition_rows[0][0]
def get_view_definition(self, database_name, view):
return self.get_table_definition(database_name, view, 'view')
def update_with_inherited_metadata(self, metadata):
# Fills in source/license with defaults, if available
metadata.update(
{
"source": metadata.get("source") or self.metadata("source"),
"source_url": metadata.get("source_url")
or self.metadata("source_url"),
"license": metadata.get("license") or self.metadata("license"),
"license_url": metadata.get("license_url")
or self.metadata("license_url"),
"about": metadata.get("about") or self.metadata("about"),
"about_url": metadata.get("about_url")
or self.metadata("about_url"),
}
)
def prepare_connection(self, conn):
conn.row_factory = sqlite3.Row
conn.text_factory = lambda x: str(x, "utf-8", "replace")
for name, num_args, func in self.sqlite_functions:
conn.create_function(name, num_args, func)
if self.sqlite_extensions:
conn.enable_load_extension(True)
for extension in self.sqlite_extensions:
conn.execute("SELECT load_extension('{}')".format(extension))
if self.config("cache_size_kb"):
conn.execute('PRAGMA cache_size=-{}'.format(self.config("cache_size_kb")))
# pylint: disable=no-member
pm.hook.prepare_connection(conn=conn)
async def table_exists(self, database, table):
results = await self.execute(
database,
"select 1 from sqlite_master where type='table' and name=?",
params=(table,)
)
return bool(results.rows)
async def expand_foreign_keys(self, database, table, column, values):
"Returns dict mapping (column, value) -> label"
labeled_fks = {}
foreign_keys = await self.foreign_keys_for_table(database, table)
# Find the foreign_key for this column
try:
fk = [
foreign_key for foreign_key in foreign_keys
if foreign_key["column"] == column
][0]
except IndexError:
return {}
label_column = await self.label_column_for_table(database, fk["other_table"])
if not label_column:
return {
(fk["column"], value): str(value)
for value in values
}
labeled_fks = {}
sql = '''
select {other_column}, {label_column}
from {other_table}
where {other_column} in ({placeholders})
'''.format(
other_column=escape_sqlite(fk["other_column"]),
label_column=escape_sqlite(label_column),
other_table=escape_sqlite(fk["other_table"]),
placeholders=", ".join(["?"] * len(set(values))),
)
try:
results = await self.execute(
database, sql, list(set(values))
)
except InterruptedError:
pass
else:
for id, value in results:
labeled_fks[(fk["column"], id)] = value
return labeled_fks
def absolute_url(self, request, path):
url = urllib.parse.urljoin(request.url, path)
if url.startswith("http://") and self.config("force_https_urls"):
url = "https://" + url[len("http://"):]
return url
def inspect(self):
" Inspect the database and return a dictionary of table metadata "
if self._inspect:
return self._inspect
self._inspect = {}
for filename in self.files:
if filename is MEMORY:
self._inspect[":memory:"] = {
"hash": "000",
"file": ":memory:",
"size": 0,
"views": {},
"tables": {},
}
else:
path = Path(filename)
name = path.stem
if name in self._inspect:
raise Exception("Multiple files with same stem %s" % name)
try:
with sqlite3.connect(
"file:{}?mode=ro".format(path), uri=True
) as conn:
self.prepare_connection(conn)
self._inspect[name] = {
"hash": inspect_hash(path),
"file": str(path),
"size": path.stat().st_size,
"views": inspect_views(conn),
"tables": inspect_tables(conn, (self.metadata("databases") or {}).get(name, {}))
}
except sqlite3.OperationalError as e:
if (e.args[0] == 'no such module: VirtualSpatialIndex'):
raise click.UsageError(
"It looks like you're trying to load a SpatiaLite"
" database without first loading the SpatiaLite module."
"\n\nRead more: https://datasette.readthedocs.io/en/latest/spatialite.html"
)
else:
raise
return self._inspect
def register_custom_units(self):
"Register any custom units defined in the metadata.json with Pint"
for unit in self.metadata("custom_units") or []:
ureg.define(unit)
def versions(self):
conn = sqlite3.connect(":memory:")
self.prepare_connection(conn)
sqlite_version = conn.execute("select sqlite_version()").fetchone()[0]
sqlite_extensions = {}
for extension, testsql, hasversion in (
("json1", "SELECT json('{}')", False),
("spatialite", "SELECT spatialite_version()", True),
):
try:
result = conn.execute(testsql)
if hasversion:
sqlite_extensions[extension] = result.fetchone()[0]
else:
sqlite_extensions[extension] = None
except Exception:
pass
# Figure out supported FTS versions
fts_versions = []
for fts in ("FTS5", "FTS4", "FTS3"):
try:
conn.execute(
"CREATE VIRTUAL TABLE v{fts} USING {fts} (data)".format(fts=fts)
)
fts_versions.append(fts)
except sqlite3.OperationalError:
continue
datasette_version = {"version": __version__}
if self.version_note:
datasette_version["note"] = self.version_note
return {
"python": {
"version": ".".join(map(str, sys.version_info[:3])), "full": sys.version
},
"datasette": datasette_version,
"sqlite": {
"version": sqlite_version,
"fts_versions": fts_versions,
"extensions": sqlite_extensions,
"compile_options": [
r[0] for r in conn.execute("pragma compile_options;").fetchall()
],
},
}
def plugins(self, show_all=False):
ps = list(get_plugins(pm))
if not show_all:
ps = [p for p in ps if p["name"] not in DEFAULT_PLUGINS]
return [
{
"name": p["name"],
"static": p["static_path"] is not None,
"templates": p["templates_path"] is not None,
"version": p.get("version"),
}
for p in ps
]
def table_metadata(self, database, table):
"Fetch table-specific metadata."
return (self.metadata("databases") or {}).get(database, {}).get(
"tables", {}
).get(
table, {}
)
async def table_columns(self, db_name, table):
return await self.execute_against_connection_in_thread(
db_name, lambda conn: table_columns(conn, table)
)
async def foreign_keys_for_table(self, database, table):
return await self.execute_against_connection_in_thread(
database, lambda conn: get_outbound_foreign_keys(conn, table)
)
async def label_column_for_table(self, db_name, table):
explicit_label_column = (
self.table_metadata(
db_name, table
).get("label_column")
)
if explicit_label_column:
return explicit_label_column
# If a table has two columns, one of which is ID, then label_column is the other one
column_names = await self.table_columns(db_name, table)
if (column_names and len(column_names) == 2 and "id" in column_names):
return [c for c in column_names if c != "id"][0]
# Couldn't find a label:
return None
async def execute_against_connection_in_thread(self, db_name, fn):
def in_thread():
conn = getattr(connections, db_name, None)
if not conn:
db = self.databases[db_name]
if db.is_memory:
conn = sqlite3.connect(":memory:")
else:
# mode=ro or immutable=1?
if db.is_mutable:
qs = "mode=ro"
else:
qs = "immutable=1"
conn = sqlite3.connect(
"file:{}?{}".format(db.path, qs),
uri=True,
check_same_thread=False,
)
self.prepare_connection(conn)
setattr(connections, db_name, conn)
return fn(conn)
return await asyncio.get_event_loop().run_in_executor(
self.executor, in_thread
)
async def execute(
self,
db_name,
sql,
params=None,
truncate=False,
custom_time_limit=None,
page_size=None,
):
"""Executes sql against db_name in a thread"""
page_size = page_size or self.page_size
def sql_operation_in_thread(conn):
time_limit_ms = self.sql_time_limit_ms
if custom_time_limit and custom_time_limit < time_limit_ms:
time_limit_ms = custom_time_limit
with sqlite_timelimit(conn, time_limit_ms):
try:
cursor = conn.cursor()
cursor.execute(sql, params or {})
max_returned_rows = self.max_returned_rows
if max_returned_rows == page_size:
max_returned_rows += 1
if max_returned_rows and truncate:
rows = cursor.fetchmany(max_returned_rows + 1)
truncated = len(rows) > max_returned_rows
rows = rows[:max_returned_rows]
else:
rows = cursor.fetchall()
truncated = False
except sqlite3.OperationalError as e:
if e.args == ('interrupted',):
raise InterruptedError(e)
print(
"ERROR: conn={}, sql = {}, params = {}: {}".format(
conn, repr(sql), params, e
)
)
raise
if truncate:
return Results(rows, truncated, cursor.description)
else:
return Results(rows, False, cursor.description)
with trace("sql", (db_name, sql.strip(), params)):
results = await self.execute_against_connection_in_thread(
db_name, sql_operation_in_thread
)
return results
def app(self):
class TracingSanic(Sanic):
async def handle_request(self, request, write_callback, stream_callback):
if request.args.get("_trace"):
request["traces"] = []
with capture_traces(request["traces"]):
res = await super().handle_request(request, write_callback, stream_callback)
else:
res = await super().handle_request(request, write_callback, stream_callback)
return res
app = TracingSanic(__name__)
default_templates = str(app_root / "datasette" / "templates")
template_paths = []
if self.template_dir:
template_paths.append(self.template_dir)
template_paths.extend(
[
plugin["templates_path"]
for plugin in get_plugins(pm)
if plugin["templates_path"]
]
)
template_paths.append(default_templates)
template_loader = ChoiceLoader(
[
FileSystemLoader(template_paths),
# Support {% extends "default:table.html" %}:
PrefixLoader(
{"default": FileSystemLoader(default_templates)}, delimiter=":"
),
]
)
self.jinja_env = Environment(loader=template_loader, autoescape=True)
self.jinja_env.filters["escape_css_string"] = escape_css_string
self.jinja_env.filters["quote_plus"] = lambda u: urllib.parse.quote_plus(u)
self.jinja_env.filters["escape_sqlite"] = escape_sqlite
self.jinja_env.filters["to_css_class"] = to_css_class
# pylint: disable=no-member
pm.hook.prepare_jinja2_environment(env=self.jinja_env)
app.add_route(IndexView.as_view(self), r"/<as_format:(\.jsono?)?$>")
# TODO: /favicon.ico and /-/static/ deserve far-future cache expires
app.add_route(favicon, "/favicon.ico")
app.static("/-/static/", str(app_root / "datasette" / "static"))
for path, dirname in self.static_mounts:
app.static(path, dirname)
# Mount any plugin static/ directories
for plugin in get_plugins(pm):
if plugin["static_path"]:
modpath = "/-/static-plugins/{}/".format(plugin["name"])
app.static(modpath, plugin["static_path"])
app.add_route(
JsonDataView.as_view(self, "inspect.json", self.inspect),
r"/-/inspect<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "metadata.json", lambda: self._metadata),
r"/-/metadata<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "versions.json", self.versions),
r"/-/versions<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "plugins.json", self.plugins),
r"/-/plugins<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "config.json", lambda: self._config),
r"/-/config<as_format:(\.json)?$>",
)
app.add_route(
DatabaseDownload.as_view(self), r"/<db_name:[^/]+?><as_db:(\.db)$>"
)
app.add_route(
DatabaseView.as_view(self), r"/<db_name:[^/]+?><as_format:(\.jsono?|\.csv)?$>"
)
app.add_route(
TableView.as_view(self),
r"/<db_name:[^/]+>/<table_and_format:[^/]+?$>",
)
app.add_route(
RowView.as_view(self),
r"/<db_name:[^/]+>/<table:[^/]+?>/<pk_path:[^/]+?><as_format:(\.jsono?)?$>",
)
self.register_custom_units()
# On 404 with a trailing slash redirect to path without that slash:
# pylint: disable=unused-variable
@app.middleware("response")
def redirect_on_404_with_trailing_slash(request, original_response):
if original_response.status == 404 and request.path.endswith("/"):
path = request.path.rstrip("/")
if request.query_string:
path = "{}?{}".format(path, request.query_string)
return response.redirect(path)
@app.middleware("response")
async def add_traces_to_response(request, response):
if request.get("traces") is None:
return
traces = request["traces"]
if "text/html" in response.content_type and b'</body>' in response.body:
extra = json.dumps(traces, indent=2)
extra_html = "<pre>{}</pre></body>".format(extra).encode("utf8")
response.body = response.body.replace(b"</body>", extra_html)
elif "json" in response.content_type and response.body.startswith(b"{"):
data = json.loads(response.body.decode("utf8"))
if "_traces" not in data:
data["_traces"] = {
"num_traces": len(traces),
"traces": traces,
"duration_sum_ms": sum(t[-1] for t in traces),
}
response.body = json.dumps(data).encode("utf8")
@app.exception(Exception)
def on_exception(request, exception):
title = None
help = None
if isinstance(exception, NotFound):
status = 404
info = {}
message = exception.args[0]
elif isinstance(exception, InvalidUsage):
status = 405
info = {}
message = exception.args[0]
elif isinstance(exception, DatasetteError):
status = exception.status
info = exception.error_dict
message = exception.message
if exception.messagge_is_html:
message = Markup(message)
title = exception.title
else:
status = 500
info = {}
message = str(exception)
traceback.print_exc()
templates = ["500.html"]
if status != 500:
templates = ["{}.html".format(status)] + templates
info.update(
{"ok": False, "error": message, "status": status, "title": title}
)
if request is not None and request.path.split("?")[0].endswith(".json"):
return response.json(info, status=status)
else:
template = self.jinja_env.select_template(templates)
return response.html(template.render(info), status=status)
return app
|
simonw/datasette | datasette/app.py | Datasette.expand_foreign_keys | python | async def expand_foreign_keys(self, database, table, column, values):
"Returns dict mapping (column, value) -> label"
labeled_fks = {}
foreign_keys = await self.foreign_keys_for_table(database, table)
# Find the foreign_key for this column
try:
fk = [
foreign_key for foreign_key in foreign_keys
if foreign_key["column"] == column
][0]
except IndexError:
return {}
label_column = await self.label_column_for_table(database, fk["other_table"])
if not label_column:
return {
(fk["column"], value): str(value)
for value in values
}
labeled_fks = {}
sql = '''
select {other_column}, {label_column}
from {other_table}
where {other_column} in ({placeholders})
'''.format(
other_column=escape_sqlite(fk["other_column"]),
label_column=escape_sqlite(label_column),
other_table=escape_sqlite(fk["other_table"]),
placeholders=", ".join(["?"] * len(set(values))),
)
try:
results = await self.execute(
database, sql, list(set(values))
)
except InterruptedError:
pass
else:
for id, value in results:
labeled_fks[(fk["column"], id)] = value
return labeled_fks | Returns dict mapping (column, value) -> label | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/app.py#L368-L406 | [
"async def foreign_keys_for_table(self, database, table):\n return await self.execute_against_connection_in_thread(\n database, lambda conn: get_outbound_foreign_keys(conn, table)\n )\n",
"async def label_column_for_table(self, db_name, table):\n explicit_label_column = (\n self.table_metad... | class Datasette:
def __init__(
self,
files,
immutables=None,
cache_headers=True,
cors=False,
inspect_data=None,
metadata=None,
sqlite_extensions=None,
template_dir=None,
plugins_dir=None,
static_mounts=None,
memory=False,
config=None,
version_note=None,
):
immutables = immutables or []
self.files = tuple(files) + tuple(immutables)
self.immutables = set(immutables)
if not self.files:
self.files = [MEMORY]
elif memory:
self.files = (MEMORY,) + self.files
self.databases = {}
for file in self.files:
path = file
is_memory = False
if file is MEMORY:
path = None
is_memory = True
db = ConnectedDatabase(path, is_mutable=path not in self.immutables, is_memory=is_memory)
if db.name in self.databases:
raise Exception("Multiple files with same stem: {}".format(db.name))
self.databases[db.name] = db
self.cache_headers = cache_headers
self.cors = cors
self._inspect = inspect_data
self._metadata = metadata or {}
self.sqlite_functions = []
self.sqlite_extensions = sqlite_extensions or []
self.template_dir = template_dir
self.plugins_dir = plugins_dir
self.static_mounts = static_mounts or []
self._config = dict(DEFAULT_CONFIG, **(config or {}))
self.version_note = version_note
self.executor = futures.ThreadPoolExecutor(
max_workers=self.config("num_sql_threads")
)
self.max_returned_rows = self.config("max_returned_rows")
self.sql_time_limit_ms = self.config("sql_time_limit_ms")
self.page_size = self.config("default_page_size")
# Execute plugins in constructor, to ensure they are available
# when the rest of `datasette inspect` executes
if self.plugins_dir:
for filename in os.listdir(self.plugins_dir):
filepath = os.path.join(self.plugins_dir, filename)
mod = module_from_path(filepath, name=filename)
try:
pm.register(mod)
except ValueError:
# Plugin already registered
pass
def config(self, key):
return self._config.get(key, None)
def config_dict(self):
# Returns a fully resolved config dictionary, useful for templates
return {
option.name: self.config(option.name)
for option in CONFIG_OPTIONS
}
def metadata(self, key=None, database=None, table=None, fallback=True):
"""
Looks up metadata, cascading backwards from specified level.
Returns None if metadata value is not found.
"""
assert not (database is None and table is not None), \
"Cannot call metadata() with table= specified but not database="
databases = self._metadata.get("databases") or {}
search_list = []
if database is not None:
search_list.append(databases.get(database) or {})
if table is not None:
table_metadata = (
(databases.get(database) or {}).get("tables") or {}
).get(table) or {}
search_list.insert(0, table_metadata)
search_list.append(self._metadata)
if not fallback:
# No fallback allowed, so just use the first one in the list
search_list = search_list[:1]
if key is not None:
for item in search_list:
if key in item:
return item[key]
return None
else:
# Return the merged list
m = {}
for item in search_list:
m.update(item)
return m
def plugin_config(
self, plugin_name, database=None, table=None, fallback=True
):
"Return config for plugin, falling back from specified database/table"
plugins = self.metadata(
"plugins", database=database, table=table, fallback=fallback
)
if plugins is None:
return None
return plugins.get(plugin_name)
def app_css_hash(self):
if not hasattr(self, "_app_css_hash"):
self._app_css_hash = hashlib.sha1(
open(
os.path.join(str(app_root), "datasette/static/app.css")
).read().encode(
"utf8"
)
).hexdigest()[
:6
]
return self._app_css_hash
def get_canned_queries(self, database_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
names = queries.keys()
return [
self.get_canned_query(database_name, name) for name in names
]
def get_canned_query(self, database_name, query_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
query = queries.get(query_name)
if query:
if not isinstance(query, dict):
query = {"sql": query}
query["name"] = query_name
return query
async def get_table_definition(self, database_name, table, type_="table"):
table_definition_rows = list(
await self.execute(
database_name,
'select sql from sqlite_master where name = :n and type=:t',
{"n": table, "t": type_},
)
)
if not table_definition_rows:
return None
return table_definition_rows[0][0]
def get_view_definition(self, database_name, view):
return self.get_table_definition(database_name, view, 'view')
def update_with_inherited_metadata(self, metadata):
# Fills in source/license with defaults, if available
metadata.update(
{
"source": metadata.get("source") or self.metadata("source"),
"source_url": metadata.get("source_url")
or self.metadata("source_url"),
"license": metadata.get("license") or self.metadata("license"),
"license_url": metadata.get("license_url")
or self.metadata("license_url"),
"about": metadata.get("about") or self.metadata("about"),
"about_url": metadata.get("about_url")
or self.metadata("about_url"),
}
)
def prepare_connection(self, conn):
conn.row_factory = sqlite3.Row
conn.text_factory = lambda x: str(x, "utf-8", "replace")
for name, num_args, func in self.sqlite_functions:
conn.create_function(name, num_args, func)
if self.sqlite_extensions:
conn.enable_load_extension(True)
for extension in self.sqlite_extensions:
conn.execute("SELECT load_extension('{}')".format(extension))
if self.config("cache_size_kb"):
conn.execute('PRAGMA cache_size=-{}'.format(self.config("cache_size_kb")))
# pylint: disable=no-member
pm.hook.prepare_connection(conn=conn)
async def table_exists(self, database, table):
results = await self.execute(
database,
"select 1 from sqlite_master where type='table' and name=?",
params=(table,)
)
return bool(results.rows)
def absolute_url(self, request, path):
url = urllib.parse.urljoin(request.url, path)
if url.startswith("http://") and self.config("force_https_urls"):
url = "https://" + url[len("http://"):]
return url
def inspect(self):
" Inspect the database and return a dictionary of table metadata "
if self._inspect:
return self._inspect
self._inspect = {}
for filename in self.files:
if filename is MEMORY:
self._inspect[":memory:"] = {
"hash": "000",
"file": ":memory:",
"size": 0,
"views": {},
"tables": {},
}
else:
path = Path(filename)
name = path.stem
if name in self._inspect:
raise Exception("Multiple files with same stem %s" % name)
try:
with sqlite3.connect(
"file:{}?mode=ro".format(path), uri=True
) as conn:
self.prepare_connection(conn)
self._inspect[name] = {
"hash": inspect_hash(path),
"file": str(path),
"size": path.stat().st_size,
"views": inspect_views(conn),
"tables": inspect_tables(conn, (self.metadata("databases") or {}).get(name, {}))
}
except sqlite3.OperationalError as e:
if (e.args[0] == 'no such module: VirtualSpatialIndex'):
raise click.UsageError(
"It looks like you're trying to load a SpatiaLite"
" database without first loading the SpatiaLite module."
"\n\nRead more: https://datasette.readthedocs.io/en/latest/spatialite.html"
)
else:
raise
return self._inspect
def register_custom_units(self):
"Register any custom units defined in the metadata.json with Pint"
for unit in self.metadata("custom_units") or []:
ureg.define(unit)
def versions(self):
conn = sqlite3.connect(":memory:")
self.prepare_connection(conn)
sqlite_version = conn.execute("select sqlite_version()").fetchone()[0]
sqlite_extensions = {}
for extension, testsql, hasversion in (
("json1", "SELECT json('{}')", False),
("spatialite", "SELECT spatialite_version()", True),
):
try:
result = conn.execute(testsql)
if hasversion:
sqlite_extensions[extension] = result.fetchone()[0]
else:
sqlite_extensions[extension] = None
except Exception:
pass
# Figure out supported FTS versions
fts_versions = []
for fts in ("FTS5", "FTS4", "FTS3"):
try:
conn.execute(
"CREATE VIRTUAL TABLE v{fts} USING {fts} (data)".format(fts=fts)
)
fts_versions.append(fts)
except sqlite3.OperationalError:
continue
datasette_version = {"version": __version__}
if self.version_note:
datasette_version["note"] = self.version_note
return {
"python": {
"version": ".".join(map(str, sys.version_info[:3])), "full": sys.version
},
"datasette": datasette_version,
"sqlite": {
"version": sqlite_version,
"fts_versions": fts_versions,
"extensions": sqlite_extensions,
"compile_options": [
r[0] for r in conn.execute("pragma compile_options;").fetchall()
],
},
}
def plugins(self, show_all=False):
ps = list(get_plugins(pm))
if not show_all:
ps = [p for p in ps if p["name"] not in DEFAULT_PLUGINS]
return [
{
"name": p["name"],
"static": p["static_path"] is not None,
"templates": p["templates_path"] is not None,
"version": p.get("version"),
}
for p in ps
]
def table_metadata(self, database, table):
"Fetch table-specific metadata."
return (self.metadata("databases") or {}).get(database, {}).get(
"tables", {}
).get(
table, {}
)
async def table_columns(self, db_name, table):
return await self.execute_against_connection_in_thread(
db_name, lambda conn: table_columns(conn, table)
)
async def foreign_keys_for_table(self, database, table):
return await self.execute_against_connection_in_thread(
database, lambda conn: get_outbound_foreign_keys(conn, table)
)
async def label_column_for_table(self, db_name, table):
explicit_label_column = (
self.table_metadata(
db_name, table
).get("label_column")
)
if explicit_label_column:
return explicit_label_column
# If a table has two columns, one of which is ID, then label_column is the other one
column_names = await self.table_columns(db_name, table)
if (column_names and len(column_names) == 2 and "id" in column_names):
return [c for c in column_names if c != "id"][0]
# Couldn't find a label:
return None
async def execute_against_connection_in_thread(self, db_name, fn):
def in_thread():
conn = getattr(connections, db_name, None)
if not conn:
db = self.databases[db_name]
if db.is_memory:
conn = sqlite3.connect(":memory:")
else:
# mode=ro or immutable=1?
if db.is_mutable:
qs = "mode=ro"
else:
qs = "immutable=1"
conn = sqlite3.connect(
"file:{}?{}".format(db.path, qs),
uri=True,
check_same_thread=False,
)
self.prepare_connection(conn)
setattr(connections, db_name, conn)
return fn(conn)
return await asyncio.get_event_loop().run_in_executor(
self.executor, in_thread
)
async def execute(
self,
db_name,
sql,
params=None,
truncate=False,
custom_time_limit=None,
page_size=None,
):
"""Executes sql against db_name in a thread"""
page_size = page_size or self.page_size
def sql_operation_in_thread(conn):
time_limit_ms = self.sql_time_limit_ms
if custom_time_limit and custom_time_limit < time_limit_ms:
time_limit_ms = custom_time_limit
with sqlite_timelimit(conn, time_limit_ms):
try:
cursor = conn.cursor()
cursor.execute(sql, params or {})
max_returned_rows = self.max_returned_rows
if max_returned_rows == page_size:
max_returned_rows += 1
if max_returned_rows and truncate:
rows = cursor.fetchmany(max_returned_rows + 1)
truncated = len(rows) > max_returned_rows
rows = rows[:max_returned_rows]
else:
rows = cursor.fetchall()
truncated = False
except sqlite3.OperationalError as e:
if e.args == ('interrupted',):
raise InterruptedError(e)
print(
"ERROR: conn={}, sql = {}, params = {}: {}".format(
conn, repr(sql), params, e
)
)
raise
if truncate:
return Results(rows, truncated, cursor.description)
else:
return Results(rows, False, cursor.description)
with trace("sql", (db_name, sql.strip(), params)):
results = await self.execute_against_connection_in_thread(
db_name, sql_operation_in_thread
)
return results
def app(self):
class TracingSanic(Sanic):
async def handle_request(self, request, write_callback, stream_callback):
if request.args.get("_trace"):
request["traces"] = []
with capture_traces(request["traces"]):
res = await super().handle_request(request, write_callback, stream_callback)
else:
res = await super().handle_request(request, write_callback, stream_callback)
return res
app = TracingSanic(__name__)
default_templates = str(app_root / "datasette" / "templates")
template_paths = []
if self.template_dir:
template_paths.append(self.template_dir)
template_paths.extend(
[
plugin["templates_path"]
for plugin in get_plugins(pm)
if plugin["templates_path"]
]
)
template_paths.append(default_templates)
template_loader = ChoiceLoader(
[
FileSystemLoader(template_paths),
# Support {% extends "default:table.html" %}:
PrefixLoader(
{"default": FileSystemLoader(default_templates)}, delimiter=":"
),
]
)
self.jinja_env = Environment(loader=template_loader, autoescape=True)
self.jinja_env.filters["escape_css_string"] = escape_css_string
self.jinja_env.filters["quote_plus"] = lambda u: urllib.parse.quote_plus(u)
self.jinja_env.filters["escape_sqlite"] = escape_sqlite
self.jinja_env.filters["to_css_class"] = to_css_class
# pylint: disable=no-member
pm.hook.prepare_jinja2_environment(env=self.jinja_env)
app.add_route(IndexView.as_view(self), r"/<as_format:(\.jsono?)?$>")
# TODO: /favicon.ico and /-/static/ deserve far-future cache expires
app.add_route(favicon, "/favicon.ico")
app.static("/-/static/", str(app_root / "datasette" / "static"))
for path, dirname in self.static_mounts:
app.static(path, dirname)
# Mount any plugin static/ directories
for plugin in get_plugins(pm):
if plugin["static_path"]:
modpath = "/-/static-plugins/{}/".format(plugin["name"])
app.static(modpath, plugin["static_path"])
app.add_route(
JsonDataView.as_view(self, "inspect.json", self.inspect),
r"/-/inspect<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "metadata.json", lambda: self._metadata),
r"/-/metadata<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "versions.json", self.versions),
r"/-/versions<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "plugins.json", self.plugins),
r"/-/plugins<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "config.json", lambda: self._config),
r"/-/config<as_format:(\.json)?$>",
)
app.add_route(
DatabaseDownload.as_view(self), r"/<db_name:[^/]+?><as_db:(\.db)$>"
)
app.add_route(
DatabaseView.as_view(self), r"/<db_name:[^/]+?><as_format:(\.jsono?|\.csv)?$>"
)
app.add_route(
TableView.as_view(self),
r"/<db_name:[^/]+>/<table_and_format:[^/]+?$>",
)
app.add_route(
RowView.as_view(self),
r"/<db_name:[^/]+>/<table:[^/]+?>/<pk_path:[^/]+?><as_format:(\.jsono?)?$>",
)
self.register_custom_units()
# On 404 with a trailing slash redirect to path without that slash:
# pylint: disable=unused-variable
@app.middleware("response")
def redirect_on_404_with_trailing_slash(request, original_response):
if original_response.status == 404 and request.path.endswith("/"):
path = request.path.rstrip("/")
if request.query_string:
path = "{}?{}".format(path, request.query_string)
return response.redirect(path)
@app.middleware("response")
async def add_traces_to_response(request, response):
if request.get("traces") is None:
return
traces = request["traces"]
if "text/html" in response.content_type and b'</body>' in response.body:
extra = json.dumps(traces, indent=2)
extra_html = "<pre>{}</pre></body>".format(extra).encode("utf8")
response.body = response.body.replace(b"</body>", extra_html)
elif "json" in response.content_type and response.body.startswith(b"{"):
data = json.loads(response.body.decode("utf8"))
if "_traces" not in data:
data["_traces"] = {
"num_traces": len(traces),
"traces": traces,
"duration_sum_ms": sum(t[-1] for t in traces),
}
response.body = json.dumps(data).encode("utf8")
@app.exception(Exception)
def on_exception(request, exception):
title = None
help = None
if isinstance(exception, NotFound):
status = 404
info = {}
message = exception.args[0]
elif isinstance(exception, InvalidUsage):
status = 405
info = {}
message = exception.args[0]
elif isinstance(exception, DatasetteError):
status = exception.status
info = exception.error_dict
message = exception.message
if exception.messagge_is_html:
message = Markup(message)
title = exception.title
else:
status = 500
info = {}
message = str(exception)
traceback.print_exc()
templates = ["500.html"]
if status != 500:
templates = ["{}.html".format(status)] + templates
info.update(
{"ok": False, "error": message, "status": status, "title": title}
)
if request is not None and request.path.split("?")[0].endswith(".json"):
return response.json(info, status=status)
else:
template = self.jinja_env.select_template(templates)
return response.html(template.render(info), status=status)
return app
|
simonw/datasette | datasette/app.py | Datasette.inspect | python | def inspect(self):
" Inspect the database and return a dictionary of table metadata "
if self._inspect:
return self._inspect
self._inspect = {}
for filename in self.files:
if filename is MEMORY:
self._inspect[":memory:"] = {
"hash": "000",
"file": ":memory:",
"size": 0,
"views": {},
"tables": {},
}
else:
path = Path(filename)
name = path.stem
if name in self._inspect:
raise Exception("Multiple files with same stem %s" % name)
try:
with sqlite3.connect(
"file:{}?mode=ro".format(path), uri=True
) as conn:
self.prepare_connection(conn)
self._inspect[name] = {
"hash": inspect_hash(path),
"file": str(path),
"size": path.stat().st_size,
"views": inspect_views(conn),
"tables": inspect_tables(conn, (self.metadata("databases") or {}).get(name, {}))
}
except sqlite3.OperationalError as e:
if (e.args[0] == 'no such module: VirtualSpatialIndex'):
raise click.UsageError(
"It looks like you're trying to load a SpatiaLite"
" database without first loading the SpatiaLite module."
"\n\nRead more: https://datasette.readthedocs.io/en/latest/spatialite.html"
)
else:
raise
return self._inspect | Inspect the database and return a dictionary of table metadata | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/app.py#L414-L455 | [
"def inspect_hash(path):\n \" Calculate the hash of a database, efficiently. \"\n m = hashlib.sha256()\n with path.open(\"rb\") as fp:\n while True:\n data = fp.read(HASH_BLOCK_SIZE)\n if not data:\n break\n m.update(data)\n\n return m.hexdigest()\n... | class Datasette:
def __init__(
self,
files,
immutables=None,
cache_headers=True,
cors=False,
inspect_data=None,
metadata=None,
sqlite_extensions=None,
template_dir=None,
plugins_dir=None,
static_mounts=None,
memory=False,
config=None,
version_note=None,
):
immutables = immutables or []
self.files = tuple(files) + tuple(immutables)
self.immutables = set(immutables)
if not self.files:
self.files = [MEMORY]
elif memory:
self.files = (MEMORY,) + self.files
self.databases = {}
for file in self.files:
path = file
is_memory = False
if file is MEMORY:
path = None
is_memory = True
db = ConnectedDatabase(path, is_mutable=path not in self.immutables, is_memory=is_memory)
if db.name in self.databases:
raise Exception("Multiple files with same stem: {}".format(db.name))
self.databases[db.name] = db
self.cache_headers = cache_headers
self.cors = cors
self._inspect = inspect_data
self._metadata = metadata or {}
self.sqlite_functions = []
self.sqlite_extensions = sqlite_extensions or []
self.template_dir = template_dir
self.plugins_dir = plugins_dir
self.static_mounts = static_mounts or []
self._config = dict(DEFAULT_CONFIG, **(config or {}))
self.version_note = version_note
self.executor = futures.ThreadPoolExecutor(
max_workers=self.config("num_sql_threads")
)
self.max_returned_rows = self.config("max_returned_rows")
self.sql_time_limit_ms = self.config("sql_time_limit_ms")
self.page_size = self.config("default_page_size")
# Execute plugins in constructor, to ensure they are available
# when the rest of `datasette inspect` executes
if self.plugins_dir:
for filename in os.listdir(self.plugins_dir):
filepath = os.path.join(self.plugins_dir, filename)
mod = module_from_path(filepath, name=filename)
try:
pm.register(mod)
except ValueError:
# Plugin already registered
pass
def config(self, key):
return self._config.get(key, None)
def config_dict(self):
# Returns a fully resolved config dictionary, useful for templates
return {
option.name: self.config(option.name)
for option in CONFIG_OPTIONS
}
def metadata(self, key=None, database=None, table=None, fallback=True):
"""
Looks up metadata, cascading backwards from specified level.
Returns None if metadata value is not found.
"""
assert not (database is None and table is not None), \
"Cannot call metadata() with table= specified but not database="
databases = self._metadata.get("databases") or {}
search_list = []
if database is not None:
search_list.append(databases.get(database) or {})
if table is not None:
table_metadata = (
(databases.get(database) or {}).get("tables") or {}
).get(table) or {}
search_list.insert(0, table_metadata)
search_list.append(self._metadata)
if not fallback:
# No fallback allowed, so just use the first one in the list
search_list = search_list[:1]
if key is not None:
for item in search_list:
if key in item:
return item[key]
return None
else:
# Return the merged list
m = {}
for item in search_list:
m.update(item)
return m
def plugin_config(
self, plugin_name, database=None, table=None, fallback=True
):
"Return config for plugin, falling back from specified database/table"
plugins = self.metadata(
"plugins", database=database, table=table, fallback=fallback
)
if plugins is None:
return None
return plugins.get(plugin_name)
def app_css_hash(self):
if not hasattr(self, "_app_css_hash"):
self._app_css_hash = hashlib.sha1(
open(
os.path.join(str(app_root), "datasette/static/app.css")
).read().encode(
"utf8"
)
).hexdigest()[
:6
]
return self._app_css_hash
def get_canned_queries(self, database_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
names = queries.keys()
return [
self.get_canned_query(database_name, name) for name in names
]
def get_canned_query(self, database_name, query_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
query = queries.get(query_name)
if query:
if not isinstance(query, dict):
query = {"sql": query}
query["name"] = query_name
return query
async def get_table_definition(self, database_name, table, type_="table"):
table_definition_rows = list(
await self.execute(
database_name,
'select sql from sqlite_master where name = :n and type=:t',
{"n": table, "t": type_},
)
)
if not table_definition_rows:
return None
return table_definition_rows[0][0]
def get_view_definition(self, database_name, view):
return self.get_table_definition(database_name, view, 'view')
def update_with_inherited_metadata(self, metadata):
# Fills in source/license with defaults, if available
metadata.update(
{
"source": metadata.get("source") or self.metadata("source"),
"source_url": metadata.get("source_url")
or self.metadata("source_url"),
"license": metadata.get("license") or self.metadata("license"),
"license_url": metadata.get("license_url")
or self.metadata("license_url"),
"about": metadata.get("about") or self.metadata("about"),
"about_url": metadata.get("about_url")
or self.metadata("about_url"),
}
)
def prepare_connection(self, conn):
conn.row_factory = sqlite3.Row
conn.text_factory = lambda x: str(x, "utf-8", "replace")
for name, num_args, func in self.sqlite_functions:
conn.create_function(name, num_args, func)
if self.sqlite_extensions:
conn.enable_load_extension(True)
for extension in self.sqlite_extensions:
conn.execute("SELECT load_extension('{}')".format(extension))
if self.config("cache_size_kb"):
conn.execute('PRAGMA cache_size=-{}'.format(self.config("cache_size_kb")))
# pylint: disable=no-member
pm.hook.prepare_connection(conn=conn)
async def table_exists(self, database, table):
results = await self.execute(
database,
"select 1 from sqlite_master where type='table' and name=?",
params=(table,)
)
return bool(results.rows)
async def expand_foreign_keys(self, database, table, column, values):
"Returns dict mapping (column, value) -> label"
labeled_fks = {}
foreign_keys = await self.foreign_keys_for_table(database, table)
# Find the foreign_key for this column
try:
fk = [
foreign_key for foreign_key in foreign_keys
if foreign_key["column"] == column
][0]
except IndexError:
return {}
label_column = await self.label_column_for_table(database, fk["other_table"])
if not label_column:
return {
(fk["column"], value): str(value)
for value in values
}
labeled_fks = {}
sql = '''
select {other_column}, {label_column}
from {other_table}
where {other_column} in ({placeholders})
'''.format(
other_column=escape_sqlite(fk["other_column"]),
label_column=escape_sqlite(label_column),
other_table=escape_sqlite(fk["other_table"]),
placeholders=", ".join(["?"] * len(set(values))),
)
try:
results = await self.execute(
database, sql, list(set(values))
)
except InterruptedError:
pass
else:
for id, value in results:
labeled_fks[(fk["column"], id)] = value
return labeled_fks
def absolute_url(self, request, path):
url = urllib.parse.urljoin(request.url, path)
if url.startswith("http://") and self.config("force_https_urls"):
url = "https://" + url[len("http://"):]
return url
def register_custom_units(self):
"Register any custom units defined in the metadata.json with Pint"
for unit in self.metadata("custom_units") or []:
ureg.define(unit)
def versions(self):
conn = sqlite3.connect(":memory:")
self.prepare_connection(conn)
sqlite_version = conn.execute("select sqlite_version()").fetchone()[0]
sqlite_extensions = {}
for extension, testsql, hasversion in (
("json1", "SELECT json('{}')", False),
("spatialite", "SELECT spatialite_version()", True),
):
try:
result = conn.execute(testsql)
if hasversion:
sqlite_extensions[extension] = result.fetchone()[0]
else:
sqlite_extensions[extension] = None
except Exception:
pass
# Figure out supported FTS versions
fts_versions = []
for fts in ("FTS5", "FTS4", "FTS3"):
try:
conn.execute(
"CREATE VIRTUAL TABLE v{fts} USING {fts} (data)".format(fts=fts)
)
fts_versions.append(fts)
except sqlite3.OperationalError:
continue
datasette_version = {"version": __version__}
if self.version_note:
datasette_version["note"] = self.version_note
return {
"python": {
"version": ".".join(map(str, sys.version_info[:3])), "full": sys.version
},
"datasette": datasette_version,
"sqlite": {
"version": sqlite_version,
"fts_versions": fts_versions,
"extensions": sqlite_extensions,
"compile_options": [
r[0] for r in conn.execute("pragma compile_options;").fetchall()
],
},
}
def plugins(self, show_all=False):
ps = list(get_plugins(pm))
if not show_all:
ps = [p for p in ps if p["name"] not in DEFAULT_PLUGINS]
return [
{
"name": p["name"],
"static": p["static_path"] is not None,
"templates": p["templates_path"] is not None,
"version": p.get("version"),
}
for p in ps
]
def table_metadata(self, database, table):
"Fetch table-specific metadata."
return (self.metadata("databases") or {}).get(database, {}).get(
"tables", {}
).get(
table, {}
)
async def table_columns(self, db_name, table):
return await self.execute_against_connection_in_thread(
db_name, lambda conn: table_columns(conn, table)
)
async def foreign_keys_for_table(self, database, table):
return await self.execute_against_connection_in_thread(
database, lambda conn: get_outbound_foreign_keys(conn, table)
)
async def label_column_for_table(self, db_name, table):
explicit_label_column = (
self.table_metadata(
db_name, table
).get("label_column")
)
if explicit_label_column:
return explicit_label_column
# If a table has two columns, one of which is ID, then label_column is the other one
column_names = await self.table_columns(db_name, table)
if (column_names and len(column_names) == 2 and "id" in column_names):
return [c for c in column_names if c != "id"][0]
# Couldn't find a label:
return None
async def execute_against_connection_in_thread(self, db_name, fn):
def in_thread():
conn = getattr(connections, db_name, None)
if not conn:
db = self.databases[db_name]
if db.is_memory:
conn = sqlite3.connect(":memory:")
else:
# mode=ro or immutable=1?
if db.is_mutable:
qs = "mode=ro"
else:
qs = "immutable=1"
conn = sqlite3.connect(
"file:{}?{}".format(db.path, qs),
uri=True,
check_same_thread=False,
)
self.prepare_connection(conn)
setattr(connections, db_name, conn)
return fn(conn)
return await asyncio.get_event_loop().run_in_executor(
self.executor, in_thread
)
async def execute(
self,
db_name,
sql,
params=None,
truncate=False,
custom_time_limit=None,
page_size=None,
):
"""Executes sql against db_name in a thread"""
page_size = page_size or self.page_size
def sql_operation_in_thread(conn):
time_limit_ms = self.sql_time_limit_ms
if custom_time_limit and custom_time_limit < time_limit_ms:
time_limit_ms = custom_time_limit
with sqlite_timelimit(conn, time_limit_ms):
try:
cursor = conn.cursor()
cursor.execute(sql, params or {})
max_returned_rows = self.max_returned_rows
if max_returned_rows == page_size:
max_returned_rows += 1
if max_returned_rows and truncate:
rows = cursor.fetchmany(max_returned_rows + 1)
truncated = len(rows) > max_returned_rows
rows = rows[:max_returned_rows]
else:
rows = cursor.fetchall()
truncated = False
except sqlite3.OperationalError as e:
if e.args == ('interrupted',):
raise InterruptedError(e)
print(
"ERROR: conn={}, sql = {}, params = {}: {}".format(
conn, repr(sql), params, e
)
)
raise
if truncate:
return Results(rows, truncated, cursor.description)
else:
return Results(rows, False, cursor.description)
with trace("sql", (db_name, sql.strip(), params)):
results = await self.execute_against_connection_in_thread(
db_name, sql_operation_in_thread
)
return results
def app(self):
    """Build and return the configured Sanic application.

    Sets up the Jinja2 environment (custom + plugin + default template
    paths), registers all routes and static mounts, and installs the
    response middleware and the global exception handler.

    Fix over the original: the unused local ``help = None`` in the
    exception handler has been removed (it shadowed the builtin and was
    never read).
    """
    class TracingSanic(Sanic):
        # Wrap request handling so ?_trace=1 captures SQL traces for
        # this request only.
        async def handle_request(self, request, write_callback, stream_callback):
            if request.args.get("_trace"):
                request["traces"] = []
                with capture_traces(request["traces"]):
                    res = await super().handle_request(request, write_callback, stream_callback)
            else:
                res = await super().handle_request(request, write_callback, stream_callback)
            return res
    app = TracingSanic(__name__)
    default_templates = str(app_root / "datasette" / "templates")
    # Template resolution order: user dir, then plugins, then defaults
    template_paths = []
    if self.template_dir:
        template_paths.append(self.template_dir)
    template_paths.extend(
        [
            plugin["templates_path"]
            for plugin in get_plugins(pm)
            if plugin["templates_path"]
        ]
    )
    template_paths.append(default_templates)
    template_loader = ChoiceLoader(
        [
            FileSystemLoader(template_paths),
            # Support {% extends "default:table.html" %}:
            PrefixLoader(
                {"default": FileSystemLoader(default_templates)}, delimiter=":"
            ),
        ]
    )
    self.jinja_env = Environment(loader=template_loader, autoescape=True)
    self.jinja_env.filters["escape_css_string"] = escape_css_string
    self.jinja_env.filters["quote_plus"] = lambda u: urllib.parse.quote_plus(u)
    self.jinja_env.filters["escape_sqlite"] = escape_sqlite
    self.jinja_env.filters["to_css_class"] = to_css_class
    # pylint: disable=no-member
    pm.hook.prepare_jinja2_environment(env=self.jinja_env)
    app.add_route(IndexView.as_view(self), r"/<as_format:(\.jsono?)?$>")
    # TODO: /favicon.ico and /-/static/ deserve far-future cache expires
    app.add_route(favicon, "/favicon.ico")
    app.static("/-/static/", str(app_root / "datasette" / "static"))
    for path, dirname in self.static_mounts:
        app.static(path, dirname)
    # Mount any plugin static/ directories
    for plugin in get_plugins(pm):
        if plugin["static_path"]:
            modpath = "/-/static-plugins/{}/".format(plugin["name"])
            app.static(modpath, plugin["static_path"])
    # Introspection endpoints under /-/
    app.add_route(
        JsonDataView.as_view(self, "inspect.json", self.inspect),
        r"/-/inspect<as_format:(\.json)?$>",
    )
    app.add_route(
        JsonDataView.as_view(self, "metadata.json", lambda: self._metadata),
        r"/-/metadata<as_format:(\.json)?$>",
    )
    app.add_route(
        JsonDataView.as_view(self, "versions.json", self.versions),
        r"/-/versions<as_format:(\.json)?$>",
    )
    app.add_route(
        JsonDataView.as_view(self, "plugins.json", self.plugins),
        r"/-/plugins<as_format:(\.json)?$>",
    )
    app.add_route(
        JsonDataView.as_view(self, "config.json", lambda: self._config),
        r"/-/config<as_format:(\.json)?$>",
    )
    # Data endpoints: database download, database, table and row views
    app.add_route(
        DatabaseDownload.as_view(self), r"/<db_name:[^/]+?><as_db:(\.db)$>"
    )
    app.add_route(
        DatabaseView.as_view(self), r"/<db_name:[^/]+?><as_format:(\.jsono?|\.csv)?$>"
    )
    app.add_route(
        TableView.as_view(self),
        r"/<db_name:[^/]+>/<table_and_format:[^/]+?$>",
    )
    app.add_route(
        RowView.as_view(self),
        r"/<db_name:[^/]+>/<table:[^/]+?>/<pk_path:[^/]+?><as_format:(\.jsono?)?$>",
    )
    self.register_custom_units()
    # On 404 with a trailing slash redirect to path without that slash:
    # pylint: disable=unused-variable
    @app.middleware("response")
    def redirect_on_404_with_trailing_slash(request, original_response):
        if original_response.status == 404 and request.path.endswith("/"):
            path = request.path.rstrip("/")
            if request.query_string:
                path = "{}?{}".format(path, request.query_string)
            return response.redirect(path)
    @app.middleware("response")
    async def add_traces_to_response(request, response):
        # Only active when handle_request captured traces (?_trace=1)
        if request.get("traces") is None:
            return
        traces = request["traces"]
        if "text/html" in response.content_type and b'</body>' in response.body:
            # Append the trace dump just before </body> in HTML responses
            extra = json.dumps(traces, indent=2)
            extra_html = "<pre>{}</pre></body>".format(extra).encode("utf8")
            response.body = response.body.replace(b"</body>", extra_html)
        elif "json" in response.content_type and response.body.startswith(b"{"):
            # Inject a "_traces" key into JSON object responses
            data = json.loads(response.body.decode("utf8"))
            if "_traces" not in data:
                data["_traces"] = {
                    "num_traces": len(traces),
                    "traces": traces,
                    "duration_sum_ms": sum(t[-1] for t in traces),
                }
            response.body = json.dumps(data).encode("utf8")
    @app.exception(Exception)
    def on_exception(request, exception):
        # Map exception types to HTTP status + message; unknown exceptions
        # become 500s with a printed traceback.
        title = None
        if isinstance(exception, NotFound):
            status = 404
            info = {}
            message = exception.args[0]
        elif isinstance(exception, InvalidUsage):
            status = 405
            info = {}
            message = exception.args[0]
        elif isinstance(exception, DatasetteError):
            status = exception.status
            info = exception.error_dict
            message = exception.message
            if exception.messagge_is_html:
                message = Markup(message)
            title = exception.title
        else:
            status = 500
            info = {}
            message = str(exception)
            traceback.print_exc()
        # Prefer a status-specific template, falling back to 500.html
        templates = ["500.html"]
        if status != 500:
            templates = ["{}.html".format(status)] + templates
        info.update(
            {"ok": False, "error": message, "status": status, "title": title}
        )
        if request is not None and request.path.split("?")[0].endswith(".json"):
            return response.json(info, status=status)
        else:
            template = self.jinja_env.select_template(templates)
            return response.html(template.render(info), status=status)
    return app
|
simonw/datasette | datasette/app.py | Datasette.table_metadata | python | def table_metadata(self, database, table):
"Fetch table-specific metadata."
return (self.metadata("databases") or {}).get(database, {}).get(
"tables", {}
).get(
table, {}
) | Fetch table-specific metadata. | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/app.py#L521-L527 | [
"def metadata(self, key=None, database=None, table=None, fallback=True):\n \"\"\"\n Looks up metadata, cascading backwards from specified level.\n Returns None if metadata value is not found.\n \"\"\"\n assert not (database is None and table is not None), \\\n \"Cannot call metadata() with tab... | class Datasette:
def __init__(
    self,
    files,
    immutables=None,
    cache_headers=True,
    cors=False,
    inspect_data=None,
    metadata=None,
    sqlite_extensions=None,
    template_dir=None,
    plugins_dir=None,
    static_mounts=None,
    memory=False,
    config=None,
    version_note=None,
):
    """Create a Datasette application instance.

    :param files: paths of SQLite database files to serve
    :param immutables: paths opened with immutable=1 (never written)
    :param cache_headers: stored flag read by response handlers elsewhere
    :param cors: stored flag read by response handlers elsewhere
    :param inspect_data: pre-computed inspect() output, if available
    :param metadata: parsed metadata dictionary (metadata.json)
    :param sqlite_extensions: extension modules loaded per connection
    :param template_dir: directory of custom templates
    :param plugins_dir: directory of one-off plugin modules to register
    :param static_mounts: (url_path, directory) pairs served statically
    :param memory: also serve an in-memory database
    :param config: overrides merged over DEFAULT_CONFIG
    :param version_note: extra note reported by the versions endpoint
    """
    immutables = immutables or []
    self.files = tuple(files) + tuple(immutables)
    self.immutables = set(immutables)
    if not self.files:
        # Nothing to serve: fall back to a single in-memory database
        self.files = [MEMORY]
    elif memory:
        self.files = (MEMORY,) + self.files
    self.databases = {}
    for file in self.files:
        path = file
        is_memory = False
        if file is MEMORY:
            # MEMORY is a sentinel, not a real path
            path = None
            is_memory = True
        db = ConnectedDatabase(path, is_mutable=path not in self.immutables, is_memory=is_memory)
        if db.name in self.databases:
            # Database names are derived from file stems and must be unique
            raise Exception("Multiple files with same stem: {}".format(db.name))
        self.databases[db.name] = db
    self.cache_headers = cache_headers
    self.cors = cors
    self._inspect = inspect_data
    self._metadata = metadata or {}
    self.sqlite_functions = []
    self.sqlite_extensions = sqlite_extensions or []
    self.template_dir = template_dir
    self.plugins_dir = plugins_dir
    self.static_mounts = static_mounts or []
    self._config = dict(DEFAULT_CONFIG, **(config or {}))
    self.version_note = version_note
    # Blocking SQLite work runs on this shared thread pool
    self.executor = futures.ThreadPoolExecutor(
        max_workers=self.config("num_sql_threads")
    )
    self.max_returned_rows = self.config("max_returned_rows")
    self.sql_time_limit_ms = self.config("sql_time_limit_ms")
    self.page_size = self.config("default_page_size")
    # Execute plugins in constructor, to ensure they are available
    # when the rest of `datasette inspect` executes
    if self.plugins_dir:
        for filename in os.listdir(self.plugins_dir):
            filepath = os.path.join(self.plugins_dir, filename)
            mod = module_from_path(filepath, name=filename)
            try:
                pm.register(mod)
            except ValueError:
                # Plugin already registered
                pass
def config(self, key):
return self._config.get(key, None)
def config_dict(self):
    """Resolve every known configuration option into a plain dict.

    Useful for passing the full configuration to templates.
    """
    resolved = {}
    for option in CONFIG_OPTIONS:
        resolved[option.name] = self.config(option.name)
    return resolved
def metadata(self, key=None, database=None, table=None, fallback=True):
    """
    Looks up metadata, cascading backwards from specified level.
    Returns None if metadata value is not found.

    :param key: metadata key to look up; None returns a merged dict
    :param database: optional database name to scope the lookup
    :param table: optional table name (requires database=)
    :param fallback: if False, only consult the most specific level
    """
    assert not (database is None and table is not None), \
        "Cannot call metadata() with table= specified but not database="
    databases = self._metadata.get("databases") or {}
    # Build the cascade: most specific (table) first, top-level last
    search_list = []
    if database is not None:
        search_list.append(databases.get(database) or {})
    if table is not None:
        table_metadata = (
            (databases.get(database) or {}).get("tables") or {}
        ).get(table) or {}
        search_list.insert(0, table_metadata)
    search_list.append(self._metadata)
    if not fallback:
        # No fallback allowed, so just use the first one in the list
        search_list = search_list[:1]
    if key is not None:
        # First level that defines the key wins
        for item in search_list:
            if key in item:
                return item[key]
        return None
    else:
        # Return the merged list
        # NOTE(review): later (less specific) levels overwrite earlier ones
        # here, the opposite of the keyed lookup above - confirm intended.
        m = {}
        for item in search_list:
            m.update(item)
        return m
def plugin_config(
self, plugin_name, database=None, table=None, fallback=True
):
"Return config for plugin, falling back from specified database/table"
plugins = self.metadata(
"plugins", database=database, table=table, fallback=fallback
)
if plugins is None:
return None
return plugins.get(plugin_name)
def app_css_hash(self):
if not hasattr(self, "_app_css_hash"):
self._app_css_hash = hashlib.sha1(
open(
os.path.join(str(app_root), "datasette/static/app.css")
).read().encode(
"utf8"
)
).hexdigest()[
:6
]
return self._app_css_hash
def get_canned_queries(self, database_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
names = queries.keys()
return [
self.get_canned_query(database_name, name) for name in names
]
def get_canned_query(self, database_name, query_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
query = queries.get(query_name)
if query:
if not isinstance(query, dict):
query = {"sql": query}
query["name"] = query_name
return query
async def get_table_definition(self, database_name, table, type_="table"):
    """Return the CREATE statement for ``table`` from sqlite_master.

    :param type_: sqlite_master object type ("table", "view", ...)
    :return: the SQL string, or None if no such object exists
    """
    table_definition_rows = list(
        await self.execute(
            database_name,
            'select sql from sqlite_master where name = :n and type=:t',
            {"n": table, "t": type_},
        )
    )
    if not table_definition_rows:
        return None
    return table_definition_rows[0][0]
def get_view_definition(self, database_name, view):
    # Returns the coroutine from get_table_definition - callers must await
    # the result even though this def is not declared async.
    return self.get_table_definition(database_name, view, 'view')
def update_with_inherited_metadata(self, metadata):
# Fills in source/license with defaults, if available
metadata.update(
{
"source": metadata.get("source") or self.metadata("source"),
"source_url": metadata.get("source_url")
or self.metadata("source_url"),
"license": metadata.get("license") or self.metadata("license"),
"license_url": metadata.get("license_url")
or self.metadata("license_url"),
"about": metadata.get("about") or self.metadata("about"),
"about_url": metadata.get("about_url")
or self.metadata("about_url"),
}
)
def prepare_connection(self, conn):
    """Configure a fresh SQLite connection: row factory, registered custom
    functions, loadable extensions, cache size and plugin hooks."""
    conn.row_factory = sqlite3.Row
    # Replace undecodable bytes instead of raising UnicodeDecodeError
    conn.text_factory = lambda x: str(x, "utf-8", "replace")
    for name, num_args, func in self.sqlite_functions:
        conn.create_function(name, num_args, func)
    if self.sqlite_extensions:
        conn.enable_load_extension(True)
        for extension in self.sqlite_extensions:
            # NOTE(review): string-built SQL; extension paths come from
            # server configuration and must stay trusted input.
            conn.execute("SELECT load_extension('{}')".format(extension))
    if self.config("cache_size_kb"):
        # Negative PRAGMA cache_size means "size in KiB" to SQLite
        conn.execute('PRAGMA cache_size=-{}'.format(self.config("cache_size_kb")))
    # pylint: disable=no-member
    pm.hook.prepare_connection(conn=conn)
async def table_exists(self, database, table):
    """Return True if ``table`` exists in ``database`` (via sqlite_master)."""
    results = await self.execute(
        database,
        "select 1 from sqlite_master where type='table' and name=?",
        params=(table,)
    )
    return bool(results.rows)
async def expand_foreign_keys(self, database, table, column, values):
    """Returns dict mapping (column, value) -> label.

    Resolves the foreign key on ``column``, then fetches the label column
    of the referenced table for every distinct value. Falls back to
    str(value) when the referenced table has no label column; returns {}
    when ``column`` is not a foreign key or the lookup is interrupted.

    Fixes over the original: a dead duplicate ``labeled_fks = {}``
    initialization is removed, and ``set(values)`` is computed once so the
    SQL placeholders and bound parameters are built from the same list.
    """
    foreign_keys = await self.foreign_keys_for_table(database, table)
    # Find the foreign_key for this column
    try:
        fk = [
            foreign_key for foreign_key in foreign_keys
            if foreign_key["column"] == column
        ][0]
    except IndexError:
        return {}
    label_column = await self.label_column_for_table(database, fk["other_table"])
    if not label_column:
        # No label column available: label each value with its string form
        return {
            (fk["column"], value): str(value)
            for value in values
        }
    # De-duplicate once so placeholder count and parameters always agree
    distinct_values = list(set(values))
    sql = '''
        select {other_column}, {label_column}
        from {other_table}
        where {other_column} in ({placeholders})
    '''.format(
        other_column=escape_sqlite(fk["other_column"]),
        label_column=escape_sqlite(label_column),
        other_table=escape_sqlite(fk["other_table"]),
        placeholders=", ".join(["?"] * len(distinct_values)),
    )
    labeled_fks = {}
    try:
        results = await self.execute(
            database, sql, distinct_values
        )
    except InterruptedError:
        # Query timed out: fall through and return what we have (nothing)
        pass
    else:
        for id, value in results:
            labeled_fks[(fk["column"], id)] = value
    return labeled_fks
def absolute_url(self, request, path):
url = urllib.parse.urljoin(request.url, path)
if url.startswith("http://") and self.config("force_https_urls"):
url = "https://" + url[len("http://"):]
return url
def inspect(self):
    " Inspect the database and return a dictionary of table metadata "
    # Cached after the first call (or pre-populated via inspect_data)
    if self._inspect:
        return self._inspect
    self._inspect = {}
    for filename in self.files:
        if filename is MEMORY:
            # The in-memory database has no file to hash or size
            self._inspect[":memory:"] = {
                "hash": "000",
                "file": ":memory:",
                "size": 0,
                "views": {},
                "tables": {},
            }
        else:
            path = Path(filename)
            name = path.stem
            if name in self._inspect:
                raise Exception("Multiple files with same stem %s" % name)
            try:
                # Open read-only so inspection can never mutate the database
                with sqlite3.connect(
                    "file:{}?mode=ro".format(path), uri=True
                ) as conn:
                    self.prepare_connection(conn)
                    self._inspect[name] = {
                        "hash": inspect_hash(path),
                        "file": str(path),
                        "size": path.stat().st_size,
                        "views": inspect_views(conn),
                        "tables": inspect_tables(conn, (self.metadata("databases") or {}).get(name, {}))
                    }
            except sqlite3.OperationalError as e:
                if (e.args[0] == 'no such module: VirtualSpatialIndex'):
                    # Give SpatiaLite users an actionable error message
                    raise click.UsageError(
                        "It looks like you're trying to load a SpatiaLite"
                        " database without first loading the SpatiaLite module."
                        "\n\nRead more: https://datasette.readthedocs.io/en/latest/spatialite.html"
                    )
                else:
                    raise
    return self._inspect
def register_custom_units(self):
    "Register any custom units defined in the metadata.json with Pint"
    for unit in self.metadata("custom_units") or []:
        # ureg is presumably the module-level Pint unit registry - confirm
        ureg.define(unit)
def versions(self):
    """Report versions of Python, Datasette and SQLite, plus detected
    SQLite extensions, FTS modules and compile options."""
    conn = sqlite3.connect(":memory:")
    self.prepare_connection(conn)
    sqlite_version = conn.execute("select sqlite_version()").fetchone()[0]
    sqlite_extensions = {}
    # Probe each optional extension with a test query; hasversion marks
    # extensions whose query also reports a version string
    for extension, testsql, hasversion in (
        ("json1", "SELECT json('{}')", False),
        ("spatialite", "SELECT spatialite_version()", True),
    ):
        try:
            result = conn.execute(testsql)
            if hasversion:
                sqlite_extensions[extension] = result.fetchone()[0]
            else:
                sqlite_extensions[extension] = None
        except Exception:
            pass
    # Figure out supported FTS versions
    fts_versions = []
    for fts in ("FTS5", "FTS4", "FTS3"):
        try:
            conn.execute(
                "CREATE VIRTUAL TABLE v{fts} USING {fts} (data)".format(fts=fts)
            )
            fts_versions.append(fts)
        except sqlite3.OperationalError:
            # This FTS module is not compiled into this SQLite build
            continue
    datasette_version = {"version": __version__}
    if self.version_note:
        datasette_version["note"] = self.version_note
    return {
        "python": {
            "version": ".".join(map(str, sys.version_info[:3])), "full": sys.version
        },
        "datasette": datasette_version,
        "sqlite": {
            "version": sqlite_version,
            "fts_versions": fts_versions,
            "extensions": sqlite_extensions,
            "compile_options": [
                r[0] for r in conn.execute("pragma compile_options;").fetchall()
            ],
        },
    }
def plugins(self, show_all=False):
    """Describe installed plugins, hiding built-in defaults unless
    ``show_all`` is True."""
    descriptions = []
    for plugin in get_plugins(pm):
        if not show_all and plugin["name"] in DEFAULT_PLUGINS:
            continue
        descriptions.append(
            {
                "name": plugin["name"],
                "static": plugin["static_path"] is not None,
                "templates": plugin["templates_path"] is not None,
                "version": plugin.get("version"),
            }
        )
    return descriptions
async def table_columns(self, db_name, table):
    """Return the column names of ``table`` (via the table_columns helper)."""
    return await self.execute_against_connection_in_thread(
        db_name, lambda conn: table_columns(conn, table)
    )
async def foreign_keys_for_table(self, database, table):
    """Return the outbound foreign key descriptions for ``table``."""
    return await self.execute_against_connection_in_thread(
        database, lambda conn: get_outbound_foreign_keys(conn, table)
    )
async def label_column_for_table(self, db_name, table):
    """Pick the column used to label rows of ``table``.

    Explicit "label_column" metadata wins; otherwise a two-column table
    containing an "id" column is labeled by its other column. Returns
    None when no label column can be determined.
    """
    explicit_label_column = (
        self.table_metadata(
            db_name, table
        ).get("label_column")
    )
    if explicit_label_column:
        return explicit_label_column
    # If a table has two columns, one of which is ID, then label_column is the other one
    column_names = await self.table_columns(db_name, table)
    if (column_names and len(column_names) == 2 and "id" in column_names):
        return [c for c in column_names if c != "id"][0]
    # Couldn't find a label:
    return None
async def execute_against_connection_in_thread(self, db_name, fn):
    """Run ``fn(conn)`` on the executor pool against a cached connection.

    Connections are cached per database on the ``connections`` object
    (presumably a threading.local - confirm), so each executor thread
    reuses its own SQLite connection.
    """
    def in_thread():
        conn = getattr(connections, db_name, None)
        if not conn:
            db = self.databases[db_name]
            if db.is_memory:
                conn = sqlite3.connect(":memory:")
            else:
                # mode=ro or immutable=1?
                if db.is_mutable:
                    qs = "mode=ro"
                else:
                    qs = "immutable=1"
                # check_same_thread=False: the pool may reuse threads
                conn = sqlite3.connect(
                    "file:{}?{}".format(db.path, qs),
                    uri=True,
                    check_same_thread=False,
                )
            self.prepare_connection(conn)
            setattr(connections, db_name, conn)
        return fn(conn)
    return await asyncio.get_event_loop().run_in_executor(
        self.executor, in_thread
    )
async def execute(
self,
db_name,
sql,
params=None,
truncate=False,
custom_time_limit=None,
page_size=None,
):
"""Executes sql against db_name in a thread"""
page_size = page_size or self.page_size
def sql_operation_in_thread(conn):
time_limit_ms = self.sql_time_limit_ms
if custom_time_limit and custom_time_limit < time_limit_ms:
time_limit_ms = custom_time_limit
with sqlite_timelimit(conn, time_limit_ms):
try:
cursor = conn.cursor()
cursor.execute(sql, params or {})
max_returned_rows = self.max_returned_rows
if max_returned_rows == page_size:
max_returned_rows += 1
if max_returned_rows and truncate:
rows = cursor.fetchmany(max_returned_rows + 1)
truncated = len(rows) > max_returned_rows
rows = rows[:max_returned_rows]
else:
rows = cursor.fetchall()
truncated = False
except sqlite3.OperationalError as e:
if e.args == ('interrupted',):
raise InterruptedError(e)
print(
"ERROR: conn={}, sql = {}, params = {}: {}".format(
conn, repr(sql), params, e
)
)
raise
if truncate:
return Results(rows, truncated, cursor.description)
else:
return Results(rows, False, cursor.description)
with trace("sql", (db_name, sql.strip(), params)):
results = await self.execute_against_connection_in_thread(
db_name, sql_operation_in_thread
)
return results
def app(self):
class TracingSanic(Sanic):
async def handle_request(self, request, write_callback, stream_callback):
if request.args.get("_trace"):
request["traces"] = []
with capture_traces(request["traces"]):
res = await super().handle_request(request, write_callback, stream_callback)
else:
res = await super().handle_request(request, write_callback, stream_callback)
return res
app = TracingSanic(__name__)
default_templates = str(app_root / "datasette" / "templates")
template_paths = []
if self.template_dir:
template_paths.append(self.template_dir)
template_paths.extend(
[
plugin["templates_path"]
for plugin in get_plugins(pm)
if plugin["templates_path"]
]
)
template_paths.append(default_templates)
template_loader = ChoiceLoader(
[
FileSystemLoader(template_paths),
# Support {% extends "default:table.html" %}:
PrefixLoader(
{"default": FileSystemLoader(default_templates)}, delimiter=":"
),
]
)
self.jinja_env = Environment(loader=template_loader, autoescape=True)
self.jinja_env.filters["escape_css_string"] = escape_css_string
self.jinja_env.filters["quote_plus"] = lambda u: urllib.parse.quote_plus(u)
self.jinja_env.filters["escape_sqlite"] = escape_sqlite
self.jinja_env.filters["to_css_class"] = to_css_class
# pylint: disable=no-member
pm.hook.prepare_jinja2_environment(env=self.jinja_env)
app.add_route(IndexView.as_view(self), r"/<as_format:(\.jsono?)?$>")
# TODO: /favicon.ico and /-/static/ deserve far-future cache expires
app.add_route(favicon, "/favicon.ico")
app.static("/-/static/", str(app_root / "datasette" / "static"))
for path, dirname in self.static_mounts:
app.static(path, dirname)
# Mount any plugin static/ directories
for plugin in get_plugins(pm):
if plugin["static_path"]:
modpath = "/-/static-plugins/{}/".format(plugin["name"])
app.static(modpath, plugin["static_path"])
app.add_route(
JsonDataView.as_view(self, "inspect.json", self.inspect),
r"/-/inspect<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "metadata.json", lambda: self._metadata),
r"/-/metadata<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "versions.json", self.versions),
r"/-/versions<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "plugins.json", self.plugins),
r"/-/plugins<as_format:(\.json)?$>",
)
app.add_route(
JsonDataView.as_view(self, "config.json", lambda: self._config),
r"/-/config<as_format:(\.json)?$>",
)
app.add_route(
DatabaseDownload.as_view(self), r"/<db_name:[^/]+?><as_db:(\.db)$>"
)
app.add_route(
DatabaseView.as_view(self), r"/<db_name:[^/]+?><as_format:(\.jsono?|\.csv)?$>"
)
app.add_route(
TableView.as_view(self),
r"/<db_name:[^/]+>/<table_and_format:[^/]+?$>",
)
app.add_route(
RowView.as_view(self),
r"/<db_name:[^/]+>/<table:[^/]+?>/<pk_path:[^/]+?><as_format:(\.jsono?)?$>",
)
self.register_custom_units()
# On 404 with a trailing slash redirect to path without that slash:
# pylint: disable=unused-variable
@app.middleware("response")
def redirect_on_404_with_trailing_slash(request, original_response):
if original_response.status == 404 and request.path.endswith("/"):
path = request.path.rstrip("/")
if request.query_string:
path = "{}?{}".format(path, request.query_string)
return response.redirect(path)
@app.middleware("response")
async def add_traces_to_response(request, response):
if request.get("traces") is None:
return
traces = request["traces"]
if "text/html" in response.content_type and b'</body>' in response.body:
extra = json.dumps(traces, indent=2)
extra_html = "<pre>{}</pre></body>".format(extra).encode("utf8")
response.body = response.body.replace(b"</body>", extra_html)
elif "json" in response.content_type and response.body.startswith(b"{"):
data = json.loads(response.body.decode("utf8"))
if "_traces" not in data:
data["_traces"] = {
"num_traces": len(traces),
"traces": traces,
"duration_sum_ms": sum(t[-1] for t in traces),
}
response.body = json.dumps(data).encode("utf8")
@app.exception(Exception)
def on_exception(request, exception):
title = None
help = None
if isinstance(exception, NotFound):
status = 404
info = {}
message = exception.args[0]
elif isinstance(exception, InvalidUsage):
status = 405
info = {}
message = exception.args[0]
elif isinstance(exception, DatasetteError):
status = exception.status
info = exception.error_dict
message = exception.message
if exception.messagge_is_html:
message = Markup(message)
title = exception.title
else:
status = 500
info = {}
message = str(exception)
traceback.print_exc()
templates = ["500.html"]
if status != 500:
templates = ["{}.html".format(status)] + templates
info.update(
{"ok": False, "error": message, "status": status, "title": title}
)
if request is not None and request.path.split("?")[0].endswith(".json"):
return response.json(info, status=status)
else:
template = self.jinja_env.select_template(templates)
return response.html(template.render(info), status=status)
return app
|
simonw/datasette | datasette/app.py | Datasette.execute | python | async def execute(
self,
db_name,
sql,
params=None,
truncate=False,
custom_time_limit=None,
page_size=None,
):
page_size = page_size or self.page_size
def sql_operation_in_thread(conn):
time_limit_ms = self.sql_time_limit_ms
if custom_time_limit and custom_time_limit < time_limit_ms:
time_limit_ms = custom_time_limit
with sqlite_timelimit(conn, time_limit_ms):
try:
cursor = conn.cursor()
cursor.execute(sql, params or {})
max_returned_rows = self.max_returned_rows
if max_returned_rows == page_size:
max_returned_rows += 1
if max_returned_rows and truncate:
rows = cursor.fetchmany(max_returned_rows + 1)
truncated = len(rows) > max_returned_rows
rows = rows[:max_returned_rows]
else:
rows = cursor.fetchall()
truncated = False
except sqlite3.OperationalError as e:
if e.args == ('interrupted',):
raise InterruptedError(e)
print(
"ERROR: conn={}, sql = {}, params = {}: {}".format(
conn, repr(sql), params, e
)
)
raise
if truncate:
return Results(rows, truncated, cursor.description)
else:
return Results(rows, False, cursor.description)
with trace("sql", (db_name, sql.strip(), params)):
results = await self.execute_against_connection_in_thread(
db_name, sql_operation_in_thread
)
return results | Executes sql against db_name in a thread | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/app.py#L580-L631 | [
"async def execute_against_connection_in_thread(self, db_name, fn):\n def in_thread():\n conn = getattr(connections, db_name, None)\n if not conn:\n db = self.databases[db_name]\n if db.is_memory:\n conn = sqlite3.connect(\":memory:\")\n else:\n ... | class Datasette:
def __init__(
self,
files,
immutables=None,
cache_headers=True,
cors=False,
inspect_data=None,
metadata=None,
sqlite_extensions=None,
template_dir=None,
plugins_dir=None,
static_mounts=None,
memory=False,
config=None,
version_note=None,
):
immutables = immutables or []
self.files = tuple(files) + tuple(immutables)
self.immutables = set(immutables)
if not self.files:
self.files = [MEMORY]
elif memory:
self.files = (MEMORY,) + self.files
self.databases = {}
for file in self.files:
path = file
is_memory = False
if file is MEMORY:
path = None
is_memory = True
db = ConnectedDatabase(path, is_mutable=path not in self.immutables, is_memory=is_memory)
if db.name in self.databases:
raise Exception("Multiple files with same stem: {}".format(db.name))
self.databases[db.name] = db
self.cache_headers = cache_headers
self.cors = cors
self._inspect = inspect_data
self._metadata = metadata or {}
self.sqlite_functions = []
self.sqlite_extensions = sqlite_extensions or []
self.template_dir = template_dir
self.plugins_dir = plugins_dir
self.static_mounts = static_mounts or []
self._config = dict(DEFAULT_CONFIG, **(config or {}))
self.version_note = version_note
self.executor = futures.ThreadPoolExecutor(
max_workers=self.config("num_sql_threads")
)
self.max_returned_rows = self.config("max_returned_rows")
self.sql_time_limit_ms = self.config("sql_time_limit_ms")
self.page_size = self.config("default_page_size")
# Execute plugins in constructor, to ensure they are available
# when the rest of `datasette inspect` executes
if self.plugins_dir:
for filename in os.listdir(self.plugins_dir):
filepath = os.path.join(self.plugins_dir, filename)
mod = module_from_path(filepath, name=filename)
try:
pm.register(mod)
except ValueError:
# Plugin already registered
pass
def config(self, key):
return self._config.get(key, None)
def config_dict(self):
# Returns a fully resolved config dictionary, useful for templates
return {
option.name: self.config(option.name)
for option in CONFIG_OPTIONS
}
def metadata(self, key=None, database=None, table=None, fallback=True):
"""
Looks up metadata, cascading backwards from specified level.
Returns None if metadata value is not found.
"""
assert not (database is None and table is not None), \
"Cannot call metadata() with table= specified but not database="
databases = self._metadata.get("databases") or {}
search_list = []
if database is not None:
search_list.append(databases.get(database) or {})
if table is not None:
table_metadata = (
(databases.get(database) or {}).get("tables") or {}
).get(table) or {}
search_list.insert(0, table_metadata)
search_list.append(self._metadata)
if not fallback:
# No fallback allowed, so just use the first one in the list
search_list = search_list[:1]
if key is not None:
for item in search_list:
if key in item:
return item[key]
return None
else:
# Return the merged list
m = {}
for item in search_list:
m.update(item)
return m
def plugin_config(
self, plugin_name, database=None, table=None, fallback=True
):
"Return config for plugin, falling back from specified database/table"
plugins = self.metadata(
"plugins", database=database, table=table, fallback=fallback
)
if plugins is None:
return None
return plugins.get(plugin_name)
def app_css_hash(self):
if not hasattr(self, "_app_css_hash"):
self._app_css_hash = hashlib.sha1(
open(
os.path.join(str(app_root), "datasette/static/app.css")
).read().encode(
"utf8"
)
).hexdigest()[
:6
]
return self._app_css_hash
def get_canned_queries(self, database_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
names = queries.keys()
return [
self.get_canned_query(database_name, name) for name in names
]
def get_canned_query(self, database_name, query_name):
queries = self.metadata(
"queries", database=database_name, fallback=False
) or {}
query = queries.get(query_name)
if query:
if not isinstance(query, dict):
query = {"sql": query}
query["name"] = query_name
return query
async def get_table_definition(self, database_name, table, type_="table"):
table_definition_rows = list(
await self.execute(
database_name,
'select sql from sqlite_master where name = :n and type=:t',
{"n": table, "t": type_},
)
)
if not table_definition_rows:
return None
return table_definition_rows[0][0]
def get_view_definition(self, database_name, view):
return self.get_table_definition(database_name, view, 'view')
def update_with_inherited_metadata(self, metadata):
# Fills in source/license with defaults, if available
metadata.update(
{
"source": metadata.get("source") or self.metadata("source"),
"source_url": metadata.get("source_url")
or self.metadata("source_url"),
"license": metadata.get("license") or self.metadata("license"),
"license_url": metadata.get("license_url")
or self.metadata("license_url"),
"about": metadata.get("about") or self.metadata("about"),
"about_url": metadata.get("about_url")
or self.metadata("about_url"),
}
)
def prepare_connection(self, conn):
conn.row_factory = sqlite3.Row
conn.text_factory = lambda x: str(x, "utf-8", "replace")
for name, num_args, func in self.sqlite_functions:
conn.create_function(name, num_args, func)
if self.sqlite_extensions:
conn.enable_load_extension(True)
for extension in self.sqlite_extensions:
conn.execute("SELECT load_extension('{}')".format(extension))
if self.config("cache_size_kb"):
conn.execute('PRAGMA cache_size=-{}'.format(self.config("cache_size_kb")))
# pylint: disable=no-member
pm.hook.prepare_connection(conn=conn)
async def table_exists(self, database, table):
results = await self.execute(
database,
"select 1 from sqlite_master where type='table' and name=?",
params=(table,)
)
return bool(results.rows)
async def expand_foreign_keys(self, database, table, column, values):
"Returns dict mapping (column, value) -> label"
labeled_fks = {}
foreign_keys = await self.foreign_keys_for_table(database, table)
# Find the foreign_key for this column
try:
fk = [
foreign_key for foreign_key in foreign_keys
if foreign_key["column"] == column
][0]
except IndexError:
return {}
label_column = await self.label_column_for_table(database, fk["other_table"])
if not label_column:
return {
(fk["column"], value): str(value)
for value in values
}
labeled_fks = {}
sql = '''
select {other_column}, {label_column}
from {other_table}
where {other_column} in ({placeholders})
'''.format(
other_column=escape_sqlite(fk["other_column"]),
label_column=escape_sqlite(label_column),
other_table=escape_sqlite(fk["other_table"]),
placeholders=", ".join(["?"] * len(set(values))),
)
try:
results = await self.execute(
database, sql, list(set(values))
)
except InterruptedError:
pass
else:
for id, value in results:
labeled_fks[(fk["column"], id)] = value
return labeled_fks
def absolute_url(self, request, path):
url = urllib.parse.urljoin(request.url, path)
if url.startswith("http://") and self.config("force_https_urls"):
url = "https://" + url[len("http://"):]
return url
def inspect(self):
" Inspect the database and return a dictionary of table metadata "
if self._inspect:
return self._inspect
self._inspect = {}
for filename in self.files:
if filename is MEMORY:
self._inspect[":memory:"] = {
"hash": "000",
"file": ":memory:",
"size": 0,
"views": {},
"tables": {},
}
else:
path = Path(filename)
name = path.stem
if name in self._inspect:
raise Exception("Multiple files with same stem %s" % name)
try:
with sqlite3.connect(
"file:{}?mode=ro".format(path), uri=True
) as conn:
self.prepare_connection(conn)
self._inspect[name] = {
"hash": inspect_hash(path),
"file": str(path),
"size": path.stat().st_size,
"views": inspect_views(conn),
"tables": inspect_tables(conn, (self.metadata("databases") or {}).get(name, {}))
}
except sqlite3.OperationalError as e:
if (e.args[0] == 'no such module: VirtualSpatialIndex'):
raise click.UsageError(
"It looks like you're trying to load a SpatiaLite"
" database without first loading the SpatiaLite module."
"\n\nRead more: https://datasette.readthedocs.io/en/latest/spatialite.html"
)
else:
raise
return self._inspect
def register_custom_units(self):
"Register any custom units defined in the metadata.json with Pint"
for unit in self.metadata("custom_units") or []:
ureg.define(unit)
def versions(self):
    """Return version information for Python, Datasette and SQLite.

    Capability detection is done against a throwaway in-memory connection:
    JSON1/SpatiaLite support is probed by executing a test SQL statement,
    and FTS support by attempting to create a virtual table per FTS version.
    """
    conn = sqlite3.connect(":memory:")
    # prepare_connection() loads any configured SQLite extensions, so the
    # probes below reflect what a real Datasette connection can do.
    self.prepare_connection(conn)
    sqlite_version = conn.execute("select sqlite_version()").fetchone()[0]
    sqlite_extensions = {}
    for extension, testsql, hasversion in (
        ("json1", "SELECT json('{}')", False),
        ("spatialite", "SELECT spatialite_version()", True),
    ):
        try:
            result = conn.execute(testsql)
            if hasversion:
                sqlite_extensions[extension] = result.fetchone()[0]
            else:
                # Extension is present but exposes no version function.
                sqlite_extensions[extension] = None
        except Exception:
            # The probe failing means the extension is unavailable - omit it.
            pass
    # Figure out supported FTS versions by trying to create one virtual
    # table per FTS module; an OperationalError means it is unsupported.
    fts_versions = []
    for fts in ("FTS5", "FTS4", "FTS3"):
        try:
            conn.execute(
                "CREATE VIRTUAL TABLE v{fts} USING {fts} (data)".format(fts=fts)
            )
            fts_versions.append(fts)
        except sqlite3.OperationalError:
            continue
    datasette_version = {"version": __version__}
    if self.version_note:
        datasette_version["note"] = self.version_note
    return {
        "python": {
            "version": ".".join(map(str, sys.version_info[:3])), "full": sys.version
        },
        "datasette": datasette_version,
        "sqlite": {
            "version": sqlite_version,
            "fts_versions": fts_versions,
            "extensions": sqlite_extensions,
            "compile_options": [
                r[0] for r in conn.execute("pragma compile_options;").fetchall()
            ],
        },
    }
def plugins(self, show_all=False):
    """Summarize installed plugins as JSON-serializable dicts.

    Datasette's bundled default plugins are filtered out unless
    show_all is True.
    """
    discovered = list(get_plugins(pm))
    if not show_all:
        discovered = [
            plugin for plugin in discovered
            if plugin["name"] not in DEFAULT_PLUGINS
        ]
    summaries = []
    for plugin in discovered:
        summaries.append({
            "name": plugin["name"],
            "static": plugin["static_path"] is not None,
            "templates": plugin["templates_path"] is not None,
            "version": plugin.get("version"),
        })
    return summaries
def table_metadata(self, database, table):
    """Return the metadata dict for one table; {} at every missing level."""
    databases = self.metadata("databases") or {}
    database_meta = databases.get(database, {})
    tables_meta = database_meta.get("tables", {})
    return tables_meta.get(table, {})
async def table_columns(self, db_name, table):
    """List the column names of *table*, running the query on the DB thread pool."""
    def _read_columns(conn):
        # Delegates to the module-level table_columns() helper.
        return table_columns(conn, table)
    return await self.execute_against_connection_in_thread(db_name, _read_columns)
async def foreign_keys_for_table(self, database, table):
    """Return the outbound foreign keys of *table*, read on the DB thread pool."""
    def _read_foreign_keys(conn):
        return get_outbound_foreign_keys(conn, table)
    return await self.execute_against_connection_in_thread(database, _read_foreign_keys)
async def label_column_for_table(self, db_name, table):
    """Pick the column used to label rows of *table*.

    Preference order: an explicit "label_column" in the table metadata,
    then the non-id column of a two-column (id, other) table.  Returns
    None when no sensible label column exists.
    """
    configured = self.table_metadata(db_name, table).get("label_column")
    if configured:
        return configured
    columns = await self.table_columns(db_name, table)
    if columns and len(columns) == 2 and "id" in columns:
        # Exactly two columns, one of which is "id": label with the other.
        for name in columns:
            if name != "id":
                return name
    # Couldn't find a label:
    return None
async def execute_against_connection_in_thread(self, db_name, fn):
    """Run fn(conn) against the named database on the executor thread pool.

    Connections are cached per-thread on the module-level `connections`
    object (presumably a threading.local - confirm at module top), so each
    worker thread lazily opens and then reuses its own connection.
    """
    def in_thread():
        # Reuse this thread's cached connection if one exists.
        conn = getattr(connections, db_name, None)
        if not conn:
            db = self.databases[db_name]
            if db.is_memory:
                conn = sqlite3.connect(":memory:")
            else:
                # mode=ro or immutable=1?
                # Mutable files are opened read-only; files declared
                # immutable let SQLite skip locking entirely.
                if db.is_mutable:
                    qs = "mode=ro"
                else:
                    qs = "immutable=1"
                # check_same_thread=False: the connection is created here but
                # is only ever used by this executor thread via the cache.
                conn = sqlite3.connect(
                    "file:{}?{}".format(db.path, qs),
                    uri=True,
                    check_same_thread=False,
                )
            self.prepare_connection(conn)
            setattr(connections, db_name, conn)
        return fn(conn)
    # Hop onto the thread pool so blocking SQLite work never stalls the
    # event loop.
    return await asyncio.get_event_loop().run_in_executor(
        self.executor, in_thread
    )
def app(self):
    """Build and return the configured Sanic application.

    Sets up Jinja2 templating (user templates > plugin templates > bundled
    defaults), registers all routes and static mounts, and installs the
    response middleware and the global exception handler.
    """
    class TracingSanic(Sanic):
        # Subclass hook: when ?_trace is present, capture per-request trace
        # records so middleware can attach them to the response.
        async def handle_request(self, request, write_callback, stream_callback):
            if request.args.get("_trace"):
                request["traces"] = []
                with capture_traces(request["traces"]):
                    res = await super().handle_request(request, write_callback, stream_callback)
            else:
                res = await super().handle_request(request, write_callback, stream_callback)
            return res
    app = TracingSanic(__name__)
    default_templates = str(app_root / "datasette" / "templates")
    # Search order: user-supplied template dir, then plugin template dirs,
    # then the templates bundled with Datasette.
    template_paths = []
    if self.template_dir:
        template_paths.append(self.template_dir)
    template_paths.extend(
        [
            plugin["templates_path"]
            for plugin in get_plugins(pm)
            if plugin["templates_path"]
        ]
    )
    template_paths.append(default_templates)
    template_loader = ChoiceLoader(
        [
            FileSystemLoader(template_paths),
            # Support {% extends "default:table.html" %}:
            PrefixLoader(
                {"default": FileSystemLoader(default_templates)}, delimiter=":"
            ),
        ]
    )
    self.jinja_env = Environment(loader=template_loader, autoescape=True)
    # Custom Jinja filters used by the bundled templates.
    self.jinja_env.filters["escape_css_string"] = escape_css_string
    self.jinja_env.filters["quote_plus"] = lambda u: urllib.parse.quote_plus(u)
    self.jinja_env.filters["escape_sqlite"] = escape_sqlite
    self.jinja_env.filters["to_css_class"] = to_css_class
    # pylint: disable=no-member
    # Let plugins customize the Jinja environment too.
    pm.hook.prepare_jinja2_environment(env=self.jinja_env)
    app.add_route(IndexView.as_view(self), r"/<as_format:(\.jsono?)?$>")
    # TODO: /favicon.ico and /-/static/ deserve far-future cache expires
    app.add_route(favicon, "/favicon.ico")
    app.static("/-/static/", str(app_root / "datasette" / "static"))
    for path, dirname in self.static_mounts:
        app.static(path, dirname)
    # Mount any plugin static/ directories
    for plugin in get_plugins(pm):
        if plugin["static_path"]:
            modpath = "/-/static-plugins/{}/".format(plugin["name"])
            app.static(modpath, plugin["static_path"])
    # Introspection endpoints under /-/ (JSON views over internal state).
    app.add_route(
        JsonDataView.as_view(self, "inspect.json", self.inspect),
        r"/-/inspect<as_format:(\.json)?$>",
    )
    app.add_route(
        JsonDataView.as_view(self, "metadata.json", lambda: self._metadata),
        r"/-/metadata<as_format:(\.json)?$>",
    )
    app.add_route(
        JsonDataView.as_view(self, "versions.json", self.versions),
        r"/-/versions<as_format:(\.json)?$>",
    )
    app.add_route(
        JsonDataView.as_view(self, "plugins.json", self.plugins),
        r"/-/plugins<as_format:(\.json)?$>",
    )
    app.add_route(
        JsonDataView.as_view(self, "config.json", lambda: self._config),
        r"/-/config<as_format:(\.json)?$>",
    )
    # Database / table / row routes (registration order matters: more
    # specific patterns are added after the database-level ones).
    app.add_route(
        DatabaseDownload.as_view(self), r"/<db_name:[^/]+?><as_db:(\.db)$>"
    )
    app.add_route(
        DatabaseView.as_view(self), r"/<db_name:[^/]+?><as_format:(\.jsono?|\.csv)?$>"
    )
    app.add_route(
        TableView.as_view(self),
        r"/<db_name:[^/]+>/<table_and_format:[^/]+?$>",
    )
    app.add_route(
        RowView.as_view(self),
        r"/<db_name:[^/]+>/<table:[^/]+?>/<pk_path:[^/]+?><as_format:(\.jsono?)?$>",
    )
    self.register_custom_units()
    # On 404 with a trailing slash redirect to path without that slash:
    # pylint: disable=unused-variable
    @app.middleware("response")
    def redirect_on_404_with_trailing_slash(request, original_response):
        if original_response.status == 404 and request.path.endswith("/"):
            path = request.path.rstrip("/")
            if request.query_string:
                path = "{}?{}".format(path, request.query_string)
            return response.redirect(path)
    @app.middleware("response")
    async def add_traces_to_response(request, response):
        # Only runs when ?_trace was requested (see TracingSanic above).
        if request.get("traces") is None:
            return
        traces = request["traces"]
        if "text/html" in response.content_type and b'</body>' in response.body:
            # HTML: append the traces as a <pre> block just before </body>.
            extra = json.dumps(traces, indent=2)
            extra_html = "<pre>{}</pre></body>".format(extra).encode("utf8")
            response.body = response.body.replace(b"</body>", extra_html)
        elif "json" in response.content_type and response.body.startswith(b"{"):
            # JSON objects: merge a "_traces" key into the payload.
            data = json.loads(response.body.decode("utf8"))
            if "_traces" not in data:
                data["_traces"] = {
                    "num_traces": len(traces),
                    "traces": traces,
                    "duration_sum_ms": sum(t[-1] for t in traces),
                }
            response.body = json.dumps(data).encode("utf8")
    @app.exception(Exception)
    def on_exception(request, exception):
        title = None
        help = None  # NOTE(review): assigned but never used - candidate for removal
        if isinstance(exception, NotFound):
            status = 404
            info = {}
            message = exception.args[0]
        elif isinstance(exception, InvalidUsage):
            status = 405
            info = {}
            message = exception.args[0]
        elif isinstance(exception, DatasetteError):
            status = exception.status
            info = exception.error_dict
            message = exception.message
            # "messagge_is_html" is the attribute's actual (misspelled) name
            # on DatasetteError - do not "fix" it here without fixing the class.
            if exception.messagge_is_html:
                message = Markup(message)
            title = exception.title
        else:
            # Unexpected error: respond 500 and log the traceback.
            status = 500
            info = {}
            message = str(exception)
            traceback.print_exc()
        # Prefer a status-specific template (e.g. 404.html), fall back to 500.html.
        templates = ["500.html"]
        if status != 500:
            templates = ["{}.html".format(status)] + templates
        info.update(
            {"ok": False, "error": message, "status": status, "title": title}
        )
        if request is not None and request.path.split("?")[0].endswith(".json"):
            return response.json(info, status=status)
        else:
            template = self.jinja_env.select_template(templates)
            return response.html(template.render(info), status=status)
    return app
|
simonw/datasette | datasette/views/table.py | RowTableShared.display_columns_and_rows | python | async def display_columns_and_rows(
self,
database,
table,
description,
rows,
link_column=False,
truncate_cells=0,
):
"Returns columns, rows for specified table - including fancy foreign key treatment"
table_metadata = self.ds.table_metadata(database, table)
sortable_columns = await self.sortable_columns_for_table(database, table, True)
columns = [
{"name": r[0], "sortable": r[0] in sortable_columns} for r in description
]
pks = await self.ds.execute_against_connection_in_thread(
database, lambda conn: detect_primary_keys(conn, table)
)
column_to_foreign_key_table = {
fk["column"]: fk["other_table"]
for fk in await self.ds.foreign_keys_for_table(database, table)
}
cell_rows = []
for row in rows:
cells = []
# Unless we are a view, the first column is a link - either to the rowid
# or to the simple or compound primary key
if link_column:
cells.append(
{
"column": pks[0] if len(pks) == 1 else "Link",
"value": jinja2.Markup(
'<a href="/{database}/{table}/{flat_pks_quoted}">{flat_pks}</a>'.format(
database=database,
table=urllib.parse.quote_plus(table),
flat_pks=str(
jinja2.escape(
path_from_row_pks(row, pks, not pks, False)
)
),
flat_pks_quoted=path_from_row_pks(row, pks, not pks),
)
),
}
)
for value, column_dict in zip(row, columns):
column = column_dict["name"]
if link_column and len(pks) == 1 and column == pks[0]:
# If there's a simple primary key, don't repeat the value as it's
# already shown in the link column.
continue
# First let the plugins have a go
# pylint: disable=no-member
plugin_display_value = pm.hook.render_cell(
value=value,
column=column,
table=table,
database=database,
datasette=self.ds,
)
if plugin_display_value is not None:
display_value = plugin_display_value
elif isinstance(value, dict):
# It's an expanded foreign key - display link to other row
label = value["label"]
value = value["value"]
# The table we link to depends on the column
other_table = column_to_foreign_key_table[column]
link_template = (
LINK_WITH_LABEL if (label != value) else LINK_WITH_VALUE
)
display_value = jinja2.Markup(link_template.format(
database=database,
table=urllib.parse.quote_plus(other_table),
link_id=urllib.parse.quote_plus(str(value)),
id=str(jinja2.escape(value)),
label=str(jinja2.escape(label)),
))
elif value in ("", None):
display_value = jinja2.Markup(" ")
elif is_url(str(value).strip()):
display_value = jinja2.Markup(
'<a href="{url}">{url}</a>'.format(
url=jinja2.escape(value.strip())
)
)
elif column in table_metadata.get("units", {}) and value != "":
# Interpret units using pint
value = value * ureg(table_metadata["units"][column])
# Pint uses floating point which sometimes introduces errors in the compact
# representation, which we have to round off to avoid ugliness. In the vast
# majority of cases this rounding will be inconsequential. I hope.
value = round(value.to_compact(), 6)
display_value = jinja2.Markup(
"{:~P}".format(value).replace(" ", " ")
)
else:
display_value = str(value)
if truncate_cells and len(display_value) > truncate_cells:
display_value = display_value[:truncate_cells] + u"\u2026"
cells.append({"column": column, "value": display_value})
cell_rows.append(cells)
if link_column:
# Add the link column header.
# If it's a simple primary key, we have to remove and re-add that column name at
# the beginning of the header row.
if len(pks) == 1:
columns = [col for col in columns if col["name"] != pks[0]]
columns = [
{"name": pks[0] if len(pks) == 1 else "Link", "sortable": len(pks) == 1}
] + columns
return columns, cell_rows | Returns columns, rows for specified table - including fancy foreign key treatment | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/views/table.py#L56-L173 | [
"def is_url(value):\n \"Must start with http:// or https:// and contain JUST a URL\"\n if not isinstance(value, str):\n return False\n if not value.startswith('http://') and not value.startswith('https://'):\n return False\n # Any whitespace at all is invalid\n if whitespace_re.search(v... | class RowTableShared(BaseView):
async def sortable_columns_for_table(self, database, table, use_rowid):
    """Return the set of column names that may be used to sort *table*.

    An explicit "sortable_columns" metadata list wins; otherwise every
    column is sortable.  "rowid" is added when use_rowid is true.
    """
    meta = self.ds.table_metadata(database, table)
    if "sortable_columns" in meta:
        allowed = set(meta["sortable_columns"])
    else:
        allowed = set(await self.ds.table_columns(database, table))
    if use_rowid:
        allowed.add("rowid")
    return allowed
async def expandable_columns(self, database, table):
    """Return [(foreign_key_dict, label_column_or_None), ...] for *table*."""
    pairs = []
    foreign_keys = await self.ds.foreign_keys_for_table(database, table)
    for foreign_key in foreign_keys:
        label = await self.ds.label_column_for_table(
            database, foreign_key["other_table"]
        )
        pairs.append((foreign_key, label))
    return pairs
|
simonw/datasette | datasette/inspect.py | inspect_hash | python | def inspect_hash(path):
" Calculate the hash of a database, efficiently. "
m = hashlib.sha256()
with path.open("rb") as fp:
while True:
data = fp.read(HASH_BLOCK_SIZE)
if not data:
break
m.update(data)
return m.hexdigest() | Calculate the hash of a database, efficiently. | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/inspect.py#L17-L27 | null | import hashlib
from .utils import (
detect_spatialite,
detect_fts,
detect_primary_keys,
escape_sqlite,
get_all_foreign_keys,
table_columns,
sqlite3
)
HASH_BLOCK_SIZE = 1024 * 1024
def inspect_views(conn):
    """Return the names of all SQL views defined in the connected database."""
    cursor = conn.execute('select name from sqlite_master where type = "view"')
    names = []
    for row in cursor:
        names.append(row[0])
    return names
def inspect_tables(conn, database_metadata):
    """List tables with row counts, columns, primary keys and FTS info.

    Returns a dict keyed by table name.  Tables related to FTS virtual
    tables or SpatiaLite internals are flagged "hidden", as are tables the
    per-database metadata marks hidden.
    """
    tables = {}
    table_names = [
        r["name"]
        for r in conn.execute(
            'select * from sqlite_master where type="table"'
        )
    ]
    for table in table_names:
        # Per-table metadata from metadata.json (may set "hidden").
        table_metadata = database_metadata.get("tables", {}).get(
            table, {}
        )
        try:
            count = conn.execute(
                "select count(*) from {}".format(escape_sqlite(table))
            ).fetchone()[0]
        except sqlite3.OperationalError:
            # This can happen when running against a FTS virtual table
            # e.g. "select count(*) from some_fts;"
            count = 0
        column_names = table_columns(conn, table)
        tables[table] = {
            "name": table,
            "columns": column_names,
            "primary_keys": detect_primary_keys(conn, table),
            "count": count,
            "hidden": table_metadata.get("hidden") or False,
            "fts_table": detect_fts(conn, table),
        }
    # Attach foreign key info gathered in one pass over the whole schema.
    foreign_keys = get_all_foreign_keys(conn)
    for table, info in foreign_keys.items():
        tables[table]["foreign_keys"] = info
    # Mark tables 'hidden' if they relate to FTS virtual tables
    # (rootpage = 0 identifies virtual tables; the shadow tables they
    # create share the virtual table's name as a prefix).
    hidden_tables = [
        r["name"]
        for r in conn.execute(
            """
                select name from sqlite_master
                where rootpage = 0
                and sql like '%VIRTUAL TABLE%USING FTS%'
            """
        )
    ]
    if detect_spatialite(conn):
        # Also hide Spatialite internal tables
        hidden_tables += [
            "ElementaryGeometries",
            "SpatialIndex",
            "geometry_columns",
            "spatial_ref_sys",
            "spatialite_history",
            "sql_statements_log",
            "sqlite_sequence",
            "views_geometry_columns",
            "virts_geometry_columns",
        ] + [
            r["name"]
            for r in conn.execute(
                """
                    select name from sqlite_master
                    where name like "idx_%"
                    and type = "table"
                """
            )
        ]
    for t in tables.keys():
        for hidden_table in hidden_tables:
            if t == hidden_table or t.startswith(hidden_table):
                tables[t]["hidden"] = True
                # NOTE(review): this `continue` only advances the inner loop;
                # `break` was presumably intended (behavior is the same, just
                # redundant re-marking) - confirm before changing.
                continue
    return tables
|
simonw/datasette | datasette/inspect.py | inspect_tables | python | def inspect_tables(conn, database_metadata):
" List tables and their row counts, excluding uninteresting tables. "
tables = {}
table_names = [
r["name"]
for r in conn.execute(
'select * from sqlite_master where type="table"'
)
]
for table in table_names:
table_metadata = database_metadata.get("tables", {}).get(
table, {}
)
try:
count = conn.execute(
"select count(*) from {}".format(escape_sqlite(table))
).fetchone()[0]
except sqlite3.OperationalError:
# This can happen when running against a FTS virtual table
# e.g. "select count(*) from some_fts;"
count = 0
column_names = table_columns(conn, table)
tables[table] = {
"name": table,
"columns": column_names,
"primary_keys": detect_primary_keys(conn, table),
"count": count,
"hidden": table_metadata.get("hidden") or False,
"fts_table": detect_fts(conn, table),
}
foreign_keys = get_all_foreign_keys(conn)
for table, info in foreign_keys.items():
tables[table]["foreign_keys"] = info
# Mark tables 'hidden' if they relate to FTS virtual tables
hidden_tables = [
r["name"]
for r in conn.execute(
"""
select name from sqlite_master
where rootpage = 0
and sql like '%VIRTUAL TABLE%USING FTS%'
"""
)
]
if detect_spatialite(conn):
# Also hide Spatialite internal tables
hidden_tables += [
"ElementaryGeometries",
"SpatialIndex",
"geometry_columns",
"spatial_ref_sys",
"spatialite_history",
"sql_statements_log",
"sqlite_sequence",
"views_geometry_columns",
"virts_geometry_columns",
] + [
r["name"]
for r in conn.execute(
"""
select name from sqlite_master
where name like "idx_%"
and type = "table"
"""
)
]
for t in tables.keys():
for hidden_table in hidden_tables:
if t == hidden_table or t.startswith(hidden_table):
tables[t]["hidden"] = True
continue
return tables | List tables and their row counts, excluding uninteresting tables. | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/inspect.py#L35-L115 | [
"def escape_sqlite(s):\n if _boring_keyword_re.match(s) and (s.lower() not in reserved_words):\n return s\n else:\n return '[{}]'.format(s)\n",
"def table_columns(conn, table):\n return [\n r[1]\n for r in conn.execute(\n \"PRAGMA table_info({});\".format(escape_sql... | import hashlib
from .utils import (
detect_spatialite,
detect_fts,
detect_primary_keys,
escape_sqlite,
get_all_foreign_keys,
table_columns,
sqlite3
)
# Files are hashed in 1 MB chunks so large databases never load fully into memory.
HASH_BLOCK_SIZE = 1024 * 1024


def inspect_hash(path):
    """Return the SHA-256 hex digest of the file at *path*, read in chunks."""
    digest = hashlib.sha256()
    with path.open("rb") as handle:
        # iter() with a b"" sentinel stops exactly when read() hits EOF.
        for chunk in iter(lambda: handle.read(HASH_BLOCK_SIZE), b""):
            digest.update(chunk)
    return digest.hexdigest()
def inspect_views(conn):
    """List the names of the views present in the database."""
    rows = conn.execute('select name from sqlite_master where type = "view"')
    return [name for (name,) in rows]
|
simonw/datasette | datasette/filters.py | Filters.lookups | python | def lookups(self):
"Yields (lookup, display, no_argument) pairs"
for filter in self._filters:
yield filter.key, filter.display, filter.no_argument | Yields (lookup, display, no_argument) pairs | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/filters.py#L118-L121 | null | class Filters:
_filters = [
# key, display, sql_template, human_template, format=, numeric=, no_argument=
TemplatedFilter('exact', '=', '"{c}" = :{p}', lambda c, v: '{c} = {v}' if v.isdigit() else '{c} = "{v}"'),
TemplatedFilter('not', '!=', '"{c}" != :{p}', lambda c, v: '{c} != {v}' if v.isdigit() else '{c} != "{v}"'),
TemplatedFilter('contains', 'contains', '"{c}" like :{p}', '{c} contains "{v}"', format='%{}%'),
TemplatedFilter('endswith', 'ends with', '"{c}" like :{p}', '{c} ends with "{v}"', format='%{}'),
TemplatedFilter('startswith', 'starts with', '"{c}" like :{p}', '{c} starts with "{v}"', format='{}%'),
TemplatedFilter('gt', '>', '"{c}" > :{p}', '{c} > {v}', numeric=True),
TemplatedFilter('gte', '\u2265', '"{c}" >= :{p}', '{c} \u2265 {v}', numeric=True),
TemplatedFilter('lt', '<', '"{c}" < :{p}', '{c} < {v}', numeric=True),
TemplatedFilter('lte', '\u2264', '"{c}" <= :{p}', '{c} \u2264 {v}', numeric=True),
TemplatedFilter('like', 'like', '"{c}" like :{p}', '{c} like "{v}"'),
TemplatedFilter('glob', 'glob', '"{c}" glob :{p}', '{c} glob "{v}"'),
InFilter(),
] + ([TemplatedFilter('arraycontains', 'array contains', """rowid in (
select {t}.rowid from {t}, json_each({t}.{c}) j
where j.value = :{p}
)""", '{c} contains "{v}"')
] if detect_json1() else []) + [
TemplatedFilter('date', 'date', 'date({c}) = :{p}', '"{c}" is on date {v}'),
TemplatedFilter('isnull', 'is null', '"{c}" is null', '{c} is null', no_argument=True),
TemplatedFilter('notnull', 'is not null', '"{c}" is not null', '{c} is not null', no_argument=True),
TemplatedFilter('isblank', 'is blank', '("{c}" is null or "{c}" = "")', '{c} is blank', no_argument=True),
TemplatedFilter('notblank', 'is not blank', '("{c}" is not null and "{c}" != "")', '{c} is not blank', no_argument=True),
]
_filters_by_key = {
f.key: f for f in _filters
}
def __init__(self, pairs, units=None, ureg=None):
    """Store the filter (key, value) pairs, optional column units and Pint registry.

    *units* used to default to a shared mutable dict ({}); defaulting to
    None and creating a fresh dict per instance prevents state leaking
    between Filters instances if the dict is ever mutated.
    """
    self.pairs = pairs
    self.units = units if units is not None else {}
    self.ureg = ureg
def human_description_en(self, extra=None):
    """Build an English "where ..." phrase describing the active filters.

    Clauses are comma separated with " and " before the last one, e.g.
    "where a, b and c".  Returns '' when there is nothing to describe.
    """
    clauses = list(extra) if extra else []
    for column, lookup, value in self.selections():
        matched = self._filters_by_key.get(lookup, None)
        if matched:
            clauses.append(matched.human_clause(column, value))
    if not clauses:
        return ''
    if len(clauses) == 1:
        joined = clauses[0]
    else:
        joined = '{} and {}'.format(', '.join(clauses[:-1]), clauses[-1])
    if not joined:
        return ''
    return 'where {}'.format(joined)
def selections(self):
    """Yield (column, lookup, value) triples parsed from the filter pairs.

    A key like "age__gt" splits on its LAST "__" into column "age" and
    lookup "gt"; a bare key implies the "exact" lookup.
    """
    for key, value in self.pairs:
        column, separator, lookup = key.rpartition('__')
        if not separator:
            column, lookup = key, 'exact'
        yield column, lookup, value
def has_selections(self):
    """Report whether any filter (key, value) pairs were supplied."""
    return True if self.pairs else False
def convert_unit(self, column, value):
    """Convert a user-supplied value into *column*'s configured unit.

    Columns without a configured unit pass the value through untouched.
    Otherwise the value is parsed by the Pint registry: a bare number is
    assumed to already be in the column unit, while a quantity such as
    "5 km" is converted and its magnitude returned.
    """
    if column not in self.units:
        return value
    parsed = self.ureg(value)
    if isinstance(parsed, numbers.Number):
        # No unit attached - treat it as already being in the column's unit.
        return parsed
    target = self.ureg(self.units[column])
    return parsed.to(target).magnitude
def build_where_clauses(self, table):
    """Translate the selected filters into SQL fragments plus bind params.

    Returns (sql_bits, params): a list of WHERE fragments and a dict of
    placeholder values keyed "p0", "p1", ... matching the :pN markers the
    filters emit.
    """
    sql_bits = []
    params = {}
    counter = 0
    for column, lookup, value in self.selections():
        matched_filter = self._filters_by_key.get(lookup, None)
        if not matched_filter:
            # Unknown lookup names are silently ignored.
            continue
        converted = self.convert_unit(column, value)
        fragment, bound = matched_filter.where_clause(table, column, converted, counter)
        sql_bits.append(fragment)
        if bound is not None:
            bound_values = bound if isinstance(bound, list) else [bound]
            for single_value in bound_values:
                # NOTE: every value from one clause shares placeholder p<counter>.
                params['p{}'.format(counter)] = single_value
        counter += 1
    return sql_bits, params
|
simonw/datasette | datasette/filters.py | Filters.selections | python | def selections(self):
"Yields (column, lookup, value) tuples"
for key, value in self.pairs:
if '__' in key:
column, lookup = key.rsplit('__', 1)
else:
column = key
lookup = 'exact'
yield column, lookup, value | Yields (column, lookup, value) tuples | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/filters.py#L143-L151 | null | class Filters:
_filters = [
# key, display, sql_template, human_template, format=, numeric=, no_argument=
TemplatedFilter('exact', '=', '"{c}" = :{p}', lambda c, v: '{c} = {v}' if v.isdigit() else '{c} = "{v}"'),
TemplatedFilter('not', '!=', '"{c}" != :{p}', lambda c, v: '{c} != {v}' if v.isdigit() else '{c} != "{v}"'),
TemplatedFilter('contains', 'contains', '"{c}" like :{p}', '{c} contains "{v}"', format='%{}%'),
TemplatedFilter('endswith', 'ends with', '"{c}" like :{p}', '{c} ends with "{v}"', format='%{}'),
TemplatedFilter('startswith', 'starts with', '"{c}" like :{p}', '{c} starts with "{v}"', format='{}%'),
TemplatedFilter('gt', '>', '"{c}" > :{p}', '{c} > {v}', numeric=True),
TemplatedFilter('gte', '\u2265', '"{c}" >= :{p}', '{c} \u2265 {v}', numeric=True),
TemplatedFilter('lt', '<', '"{c}" < :{p}', '{c} < {v}', numeric=True),
TemplatedFilter('lte', '\u2264', '"{c}" <= :{p}', '{c} \u2264 {v}', numeric=True),
TemplatedFilter('like', 'like', '"{c}" like :{p}', '{c} like "{v}"'),
TemplatedFilter('glob', 'glob', '"{c}" glob :{p}', '{c} glob "{v}"'),
InFilter(),
] + ([TemplatedFilter('arraycontains', 'array contains', """rowid in (
select {t}.rowid from {t}, json_each({t}.{c}) j
where j.value = :{p}
)""", '{c} contains "{v}"')
] if detect_json1() else []) + [
TemplatedFilter('date', 'date', 'date({c}) = :{p}', '"{c}" is on date {v}'),
TemplatedFilter('isnull', 'is null', '"{c}" is null', '{c} is null', no_argument=True),
TemplatedFilter('notnull', 'is not null', '"{c}" is not null', '{c} is not null', no_argument=True),
TemplatedFilter('isblank', 'is blank', '("{c}" is null or "{c}" = "")', '{c} is blank', no_argument=True),
TemplatedFilter('notblank', 'is not blank', '("{c}" is not null and "{c}" != "")', '{c} is not blank', no_argument=True),
]
_filters_by_key = {
f.key: f for f in _filters
}
def __init__(self, pairs, units=None, ureg=None):
    """Store the filter (key, value) pairs, optional column units and Pint registry.

    *units* used to default to a shared mutable dict ({}); defaulting to
    None and creating a fresh dict per instance prevents state leaking
    between Filters instances if the dict is ever mutated.
    """
    self.pairs = pairs
    self.units = units if units is not None else {}
    self.ureg = ureg
def lookups(self):
    """Yield (key, display, no_argument) triples for every registered filter."""
    for registered in self._filters:
        yield (registered.key, registered.display, registered.no_argument)
def human_description_en(self, extra=None):
bits = []
if extra:
bits.extend(extra)
for column, lookup, value in self.selections():
filter = self._filters_by_key.get(lookup, None)
if filter:
bits.append(filter.human_clause(column, value))
# Comma separated, with an ' and ' at the end
and_bits = []
commas, tail = bits[:-1], bits[-1:]
if commas:
and_bits.append(', '.join(commas))
if tail:
and_bits.append(tail[0])
s = ' and '.join(and_bits)
if not s:
return ''
return 'where {}'.format(s)
def has_selections(self):
return bool(self.pairs)
def convert_unit(self, column, value):
"If the user has provided a unit in the query, convert it into the column unit, if present."
if column not in self.units:
return value
# Try to interpret the value as a unit
value = self.ureg(value)
if isinstance(value, numbers.Number):
# It's just a bare number, assume it's the column unit
return value
column_unit = self.ureg(self.units[column])
return value.to(column_unit).magnitude
def build_where_clauses(self, table):
sql_bits = []
params = {}
i = 0
for column, lookup, value in self.selections():
filter = self._filters_by_key.get(lookup, None)
if filter:
sql_bit, param = filter.where_clause(table, column, self.convert_unit(column, value), i)
sql_bits.append(sql_bit)
if param is not None:
if not isinstance(param, list):
param = [param]
for individual_param in param:
param_id = 'p{}'.format(i)
params[param_id] = individual_param
i += 1
return sql_bits, params
|
simonw/datasette | datasette/filters.py | Filters.convert_unit | python | def convert_unit(self, column, value):
"If the user has provided a unit in the query, convert it into the column unit, if present."
if column not in self.units:
return value
# Try to interpret the value as a unit
value = self.ureg(value)
if isinstance(value, numbers.Number):
# It's just a bare number, assume it's the column unit
return value
column_unit = self.ureg(self.units[column])
return value.to(column_unit).magnitude | If the user has provided a unit in the query, convert it into the column unit, if present. | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/filters.py#L156-L168 | null | class Filters:
_filters = [
# key, display, sql_template, human_template, format=, numeric=, no_argument=
TemplatedFilter('exact', '=', '"{c}" = :{p}', lambda c, v: '{c} = {v}' if v.isdigit() else '{c} = "{v}"'),
TemplatedFilter('not', '!=', '"{c}" != :{p}', lambda c, v: '{c} != {v}' if v.isdigit() else '{c} != "{v}"'),
TemplatedFilter('contains', 'contains', '"{c}" like :{p}', '{c} contains "{v}"', format='%{}%'),
TemplatedFilter('endswith', 'ends with', '"{c}" like :{p}', '{c} ends with "{v}"', format='%{}'),
TemplatedFilter('startswith', 'starts with', '"{c}" like :{p}', '{c} starts with "{v}"', format='{}%'),
TemplatedFilter('gt', '>', '"{c}" > :{p}', '{c} > {v}', numeric=True),
TemplatedFilter('gte', '\u2265', '"{c}" >= :{p}', '{c} \u2265 {v}', numeric=True),
TemplatedFilter('lt', '<', '"{c}" < :{p}', '{c} < {v}', numeric=True),
TemplatedFilter('lte', '\u2264', '"{c}" <= :{p}', '{c} \u2264 {v}', numeric=True),
TemplatedFilter('like', 'like', '"{c}" like :{p}', '{c} like "{v}"'),
TemplatedFilter('glob', 'glob', '"{c}" glob :{p}', '{c} glob "{v}"'),
InFilter(),
] + ([TemplatedFilter('arraycontains', 'array contains', """rowid in (
select {t}.rowid from {t}, json_each({t}.{c}) j
where j.value = :{p}
)""", '{c} contains "{v}"')
] if detect_json1() else []) + [
TemplatedFilter('date', 'date', 'date({c}) = :{p}', '"{c}" is on date {v}'),
TemplatedFilter('isnull', 'is null', '"{c}" is null', '{c} is null', no_argument=True),
TemplatedFilter('notnull', 'is not null', '"{c}" is not null', '{c} is not null', no_argument=True),
TemplatedFilter('isblank', 'is blank', '("{c}" is null or "{c}" = "")', '{c} is blank', no_argument=True),
TemplatedFilter('notblank', 'is not blank', '("{c}" is not null and "{c}" != "")', '{c} is not blank', no_argument=True),
]
_filters_by_key = {
f.key: f for f in _filters
}
def __init__(self, pairs, units=None, ureg=None):
    """Store the filter (key, value) pairs, optional column units and Pint registry.

    *units* used to default to a shared mutable dict ({}); defaulting to
    None and creating a fresh dict per instance prevents state leaking
    between Filters instances if the dict is ever mutated.
    """
    self.pairs = pairs
    self.units = units if units is not None else {}
    self.ureg = ureg
def lookups(self):
"Yields (lookup, display, no_argument) pairs"
for filter in self._filters:
yield filter.key, filter.display, filter.no_argument
def human_description_en(self, extra=None):
bits = []
if extra:
bits.extend(extra)
for column, lookup, value in self.selections():
filter = self._filters_by_key.get(lookup, None)
if filter:
bits.append(filter.human_clause(column, value))
# Comma separated, with an ' and ' at the end
and_bits = []
commas, tail = bits[:-1], bits[-1:]
if commas:
and_bits.append(', '.join(commas))
if tail:
and_bits.append(tail[0])
s = ' and '.join(and_bits)
if not s:
return ''
return 'where {}'.format(s)
def selections(self):
"Yields (column, lookup, value) tuples"
for key, value in self.pairs:
if '__' in key:
column, lookup = key.rsplit('__', 1)
else:
column = key
lookup = 'exact'
yield column, lookup, value
def has_selections(self):
return bool(self.pairs)
def build_where_clauses(self, table):
sql_bits = []
params = {}
i = 0
for column, lookup, value in self.selections():
filter = self._filters_by_key.get(lookup, None)
if filter:
sql_bit, param = filter.where_clause(table, column, self.convert_unit(column, value), i)
sql_bits.append(sql_bit)
if param is not None:
if not isinstance(param, list):
param = [param]
for individual_param in param:
param_id = 'p{}'.format(i)
params[param_id] = individual_param
i += 1
return sql_bits, params
|
simonw/datasette | datasette/cli.py | skeleton | python | def skeleton(files, metadata, sqlite_extensions):
"Generate a skeleton metadata.json file for specified SQLite databases"
if os.path.exists(metadata):
click.secho(
"File {} already exists, will not over-write".format(metadata),
bg="red",
fg="white",
bold=True,
err=True,
)
sys.exit(1)
app = Datasette(files, sqlite_extensions=sqlite_extensions)
databases = {}
for database_name, info in app.inspect().items():
databases[database_name] = {
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"queries": {},
"tables": {
table_name: {
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"units": {},
}
for table_name in (info.get("tables") or {})
},
}
open(metadata, "w").write(
json.dumps(
{
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"databases": databases,
},
indent=4,
)
)
click.echo("Wrote skeleton to {}".format(metadata)) | Generate a skeleton metadata.json file for specified SQLite databases | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/cli.py#L107-L159 | [
"def inspect(self):\n \" Inspect the database and return a dictionary of table metadata \"\n if self._inspect:\n return self._inspect\n\n self._inspect = {}\n for filename in self.files:\n if filename is MEMORY:\n self._inspect[\":memory:\"] = {\n \"hash\": \"000\... | import click
from click import formatting
from click_default_group import DefaultGroup
import json
import os
import shutil
from subprocess import call
import sys
from .app import Datasette, DEFAULT_CONFIG, CONFIG_OPTIONS, pm
from .utils import (
temporary_docker_directory,
value_as_boolean,
StaticMount,
ValueAsBooleanError,
)
class Config(click.ParamType):
name = "config"
def convert(self, config, param, ctx):
if ":" not in config:
self.fail(
'"{}" should be name:value'.format(config), param, ctx
)
return
name, value = config.split(":")
if name not in DEFAULT_CONFIG:
self.fail(
"{} is not a valid option (--help-config to see all)".format(
name
), param, ctx
)
return
# Type checking
default = DEFAULT_CONFIG[name]
if isinstance(default, bool):
try:
return name, value_as_boolean(value)
except ValueAsBooleanError:
self.fail(
'"{}" should be on/off/true/false/1/0'.format(name), param, ctx
)
return
elif isinstance(default, int):
if not value.isdigit():
self.fail(
'"{}" should be an integer'.format(name), param, ctx
)
return
return name, int(value)
else:
# Should never happen:
self.fail('Invalid option')
@click.group(cls=DefaultGroup, default="serve", default_if_no_args=True)
@click.version_option()
def cli():
"""
Datasette!
"""
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1)
@click.option("--inspect-file", default="inspect-data.json")
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
def inspect(files, inspect_file, sqlite_extensions):
app = Datasette(files, sqlite_extensions=sqlite_extensions)
open(inspect_file, "w").write(json.dumps(app.inspect(), indent=2))
@cli.group()
def publish():
"Publish specified SQLite database files to the internet along with a Datasette-powered interface and API"
pass
# Register publish plugins
pm.hook.publish_subcommand(publish=publish)
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1, required=True)
@click.option(
"-m",
"--metadata",
default="metadata.json",
help="Name of metadata file to generate",
)
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
@cli.command()
@click.option("--all", help="Include built-in default plugins", is_flag=True)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
def plugins(all, plugins_dir):
"List currently available plugins"
app = Datasette([], plugins_dir=plugins_dir)
click.echo(json.dumps(app.plugins(all), indent=4))
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1, required=True)
@click.option(
"-t",
"--tag",
help="Name for the resulting Docker container, can optionally use name:tag format",
)
@click.option(
"-m",
"--metadata",
type=click.File(mode="r"),
help="Path to JSON file containing metadata to publish",
)
@click.option("--extra-options", help="Extra options to pass to datasette serve")
@click.option("--branch", help="Install datasette from a GitHub branch e.g. master")
@click.option(
"--template-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom templates",
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@click.option(
"--static",
type=StaticMount(),
help="mountpoint:path-to-directory for serving static files",
multiple=True,
)
@click.option(
"--install",
help="Additional packages (e.g. plugins) to install",
multiple=True,
)
@click.option(
"--spatialite", is_flag=True, help="Enable SpatialLite extension"
)
@click.option("--version-note", help="Additional note to show on /-/versions")
@click.option("--title", help="Title for metadata")
@click.option("--license", help="License label for metadata")
@click.option("--license_url", help="License URL for metadata")
@click.option("--source", help="Source label for metadata")
@click.option("--source_url", help="Source URL for metadata")
@click.option("--about", help="About label for metadata")
@click.option("--about_url", help="About URL for metadata")
def package(
files,
tag,
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
**extra_metadata
):
"Package specified SQLite files into a new datasette Docker container"
if not shutil.which("docker"):
click.secho(
' The package command requires "docker" to be installed and configured ',
bg="red",
fg="white",
bold=True,
err=True,
)
sys.exit(1)
with temporary_docker_directory(
files,
"datasette",
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
extra_metadata,
):
args = ["docker", "build"]
if tag:
args.append("-t")
args.append(tag)
args.append(".")
call(args)
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1)
@click.option(
"-i",
"--immutable",
type=click.Path(exists=True),
help="Database files to open in immutable mode",
multiple=True,
)
@click.option(
"-h", "--host", default="127.0.0.1", help="host for server, defaults to 127.0.0.1"
)
@click.option("-p", "--port", default=8001, help="port for server, defaults to 8001")
@click.option(
"--debug", is_flag=True, help="Enable debug mode - useful for development"
)
@click.option(
"--reload",
is_flag=True,
help="Automatically reload if database or code change detected - useful for development",
)
@click.option(
"--cors", is_flag=True, help="Enable CORS by serving Access-Control-Allow-Origin: *"
)
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
@click.option(
"--inspect-file", help='Path to JSON file created using "datasette inspect"'
)
@click.option(
"-m",
"--metadata",
type=click.File(mode="r"),
help="Path to JSON file containing license/source metadata",
)
@click.option(
"--template-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom templates",
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@click.option(
"--static",
type=StaticMount(),
help="mountpoint:path-to-directory for serving static files",
multiple=True,
)
@click.option(
"--memory", is_flag=True, help="Make :memory: database available"
)
@click.option(
"--config",
type=Config(),
help="Set config option using configname:value datasette.readthedocs.io/en/latest/config.html",
multiple=True,
)
@click.option("--version-note", help="Additional note to show on /-/versions")
@click.option(
"--help-config",
is_flag=True,
help="Show available config options",
)
def serve(
files,
immutable,
host,
port,
debug,
reload,
cors,
sqlite_extensions,
inspect_file,
metadata,
template_dir,
plugins_dir,
static,
memory,
config,
version_note,
help_config,
):
"""Serve up specified SQLite database files with a web UI"""
if help_config:
formatter = formatting.HelpFormatter()
with formatter.section("Config options"):
formatter.write_dl([
(option.name, '{} (default={})'.format(
option.help, option.default
))
for option in CONFIG_OPTIONS
])
click.echo(formatter.getvalue())
sys.exit(0)
if reload:
import hupper
reloader = hupper.start_reloader("datasette.cli.serve")
reloader.watch_files(files)
if metadata:
reloader.watch_files([metadata.name])
inspect_data = None
if inspect_file:
inspect_data = json.load(open(inspect_file))
metadata_data = None
if metadata:
metadata_data = json.loads(metadata.read())
click.echo("Serve! files={} (immutables={}) on port {}".format(files, immutable, port))
ds = Datasette(
files,
immutables=immutable,
cache_headers=not debug and not reload,
cors=cors,
inspect_data=inspect_data,
metadata=metadata_data,
sqlite_extensions=sqlite_extensions,
template_dir=template_dir,
plugins_dir=plugins_dir,
static_mounts=static,
config=dict(config),
memory=memory,
version_note=version_note,
)
# Force initial hashing/table counting
ds.inspect()
ds.app().run(host=host, port=port, debug=debug)
|
simonw/datasette | datasette/cli.py | plugins | python | def plugins(all, plugins_dir):
"List currently available plugins"
app = Datasette([], plugins_dir=plugins_dir)
click.echo(json.dumps(app.plugins(all), indent=4)) | List currently available plugins | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/cli.py#L169-L172 | [
"def plugins(self, show_all=False):\n ps = list(get_plugins(pm))\n if not show_all:\n ps = [p for p in ps if p[\"name\"] not in DEFAULT_PLUGINS]\n return [\n {\n \"name\": p[\"name\"],\n \"static\": p[\"static_path\"] is not None,\n \"templates\": p[\"template... | import click
from click import formatting
from click_default_group import DefaultGroup
import json
import os
import shutil
from subprocess import call
import sys
from .app import Datasette, DEFAULT_CONFIG, CONFIG_OPTIONS, pm
from .utils import (
temporary_docker_directory,
value_as_boolean,
StaticMount,
ValueAsBooleanError,
)
class Config(click.ParamType):
name = "config"
def convert(self, config, param, ctx):
if ":" not in config:
self.fail(
'"{}" should be name:value'.format(config), param, ctx
)
return
name, value = config.split(":")
if name not in DEFAULT_CONFIG:
self.fail(
"{} is not a valid option (--help-config to see all)".format(
name
), param, ctx
)
return
# Type checking
default = DEFAULT_CONFIG[name]
if isinstance(default, bool):
try:
return name, value_as_boolean(value)
except ValueAsBooleanError:
self.fail(
'"{}" should be on/off/true/false/1/0'.format(name), param, ctx
)
return
elif isinstance(default, int):
if not value.isdigit():
self.fail(
'"{}" should be an integer'.format(name), param, ctx
)
return
return name, int(value)
else:
# Should never happen:
self.fail('Invalid option')
@click.group(cls=DefaultGroup, default="serve", default_if_no_args=True)
@click.version_option()
def cli():
"""
Datasette!
"""
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1)
@click.option("--inspect-file", default="inspect-data.json")
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
def inspect(files, inspect_file, sqlite_extensions):
app = Datasette(files, sqlite_extensions=sqlite_extensions)
open(inspect_file, "w").write(json.dumps(app.inspect(), indent=2))
@cli.group()
def publish():
"Publish specified SQLite database files to the internet along with a Datasette-powered interface and API"
pass
# Register publish plugins
pm.hook.publish_subcommand(publish=publish)
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1, required=True)
@click.option(
"-m",
"--metadata",
default="metadata.json",
help="Name of metadata file to generate",
)
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
def skeleton(files, metadata, sqlite_extensions):
"Generate a skeleton metadata.json file for specified SQLite databases"
if os.path.exists(metadata):
click.secho(
"File {} already exists, will not over-write".format(metadata),
bg="red",
fg="white",
bold=True,
err=True,
)
sys.exit(1)
app = Datasette(files, sqlite_extensions=sqlite_extensions)
databases = {}
for database_name, info in app.inspect().items():
databases[database_name] = {
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"queries": {},
"tables": {
table_name: {
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"units": {},
}
for table_name in (info.get("tables") or {})
},
}
open(metadata, "w").write(
json.dumps(
{
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"databases": databases,
},
indent=4,
)
)
click.echo("Wrote skeleton to {}".format(metadata))
@cli.command()
@click.option("--all", help="Include built-in default plugins", is_flag=True)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1, required=True)
@click.option(
"-t",
"--tag",
help="Name for the resulting Docker container, can optionally use name:tag format",
)
@click.option(
"-m",
"--metadata",
type=click.File(mode="r"),
help="Path to JSON file containing metadata to publish",
)
@click.option("--extra-options", help="Extra options to pass to datasette serve")
@click.option("--branch", help="Install datasette from a GitHub branch e.g. master")
@click.option(
"--template-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom templates",
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@click.option(
"--static",
type=StaticMount(),
help="mountpoint:path-to-directory for serving static files",
multiple=True,
)
@click.option(
"--install",
help="Additional packages (e.g. plugins) to install",
multiple=True,
)
@click.option(
"--spatialite", is_flag=True, help="Enable SpatialLite extension"
)
@click.option("--version-note", help="Additional note to show on /-/versions")
@click.option("--title", help="Title for metadata")
@click.option("--license", help="License label for metadata")
@click.option("--license_url", help="License URL for metadata")
@click.option("--source", help="Source label for metadata")
@click.option("--source_url", help="Source URL for metadata")
@click.option("--about", help="About label for metadata")
@click.option("--about_url", help="About URL for metadata")
def package(
files,
tag,
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
**extra_metadata
):
"Package specified SQLite files into a new datasette Docker container"
if not shutil.which("docker"):
click.secho(
' The package command requires "docker" to be installed and configured ',
bg="red",
fg="white",
bold=True,
err=True,
)
sys.exit(1)
with temporary_docker_directory(
files,
"datasette",
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
extra_metadata,
):
args = ["docker", "build"]
if tag:
args.append("-t")
args.append(tag)
args.append(".")
call(args)
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1)
@click.option(
"-i",
"--immutable",
type=click.Path(exists=True),
help="Database files to open in immutable mode",
multiple=True,
)
@click.option(
"-h", "--host", default="127.0.0.1", help="host for server, defaults to 127.0.0.1"
)
@click.option("-p", "--port", default=8001, help="port for server, defaults to 8001")
@click.option(
"--debug", is_flag=True, help="Enable debug mode - useful for development"
)
@click.option(
"--reload",
is_flag=True,
help="Automatically reload if database or code change detected - useful for development",
)
@click.option(
"--cors", is_flag=True, help="Enable CORS by serving Access-Control-Allow-Origin: *"
)
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
@click.option(
"--inspect-file", help='Path to JSON file created using "datasette inspect"'
)
@click.option(
"-m",
"--metadata",
type=click.File(mode="r"),
help="Path to JSON file containing license/source metadata",
)
@click.option(
"--template-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom templates",
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@click.option(
"--static",
type=StaticMount(),
help="mountpoint:path-to-directory for serving static files",
multiple=True,
)
@click.option(
"--memory", is_flag=True, help="Make :memory: database available"
)
@click.option(
"--config",
type=Config(),
help="Set config option using configname:value datasette.readthedocs.io/en/latest/config.html",
multiple=True,
)
@click.option("--version-note", help="Additional note to show on /-/versions")
@click.option(
"--help-config",
is_flag=True,
help="Show available config options",
)
def serve(
files,
immutable,
host,
port,
debug,
reload,
cors,
sqlite_extensions,
inspect_file,
metadata,
template_dir,
plugins_dir,
static,
memory,
config,
version_note,
help_config,
):
"""Serve up specified SQLite database files with a web UI"""
if help_config:
formatter = formatting.HelpFormatter()
with formatter.section("Config options"):
formatter.write_dl([
(option.name, '{} (default={})'.format(
option.help, option.default
))
for option in CONFIG_OPTIONS
])
click.echo(formatter.getvalue())
sys.exit(0)
if reload:
import hupper
reloader = hupper.start_reloader("datasette.cli.serve")
reloader.watch_files(files)
if metadata:
reloader.watch_files([metadata.name])
inspect_data = None
if inspect_file:
inspect_data = json.load(open(inspect_file))
metadata_data = None
if metadata:
metadata_data = json.loads(metadata.read())
click.echo("Serve! files={} (immutables={}) on port {}".format(files, immutable, port))
ds = Datasette(
files,
immutables=immutable,
cache_headers=not debug and not reload,
cors=cors,
inspect_data=inspect_data,
metadata=metadata_data,
sqlite_extensions=sqlite_extensions,
template_dir=template_dir,
plugins_dir=plugins_dir,
static_mounts=static,
config=dict(config),
memory=memory,
version_note=version_note,
)
# Force initial hashing/table counting
ds.inspect()
ds.app().run(host=host, port=port, debug=debug)
|
simonw/datasette | datasette/cli.py | package | python | def package(
files,
tag,
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
**extra_metadata
):
"Package specified SQLite files into a new datasette Docker container"
if not shutil.which("docker"):
click.secho(
' The package command requires "docker" to be installed and configured ',
bg="red",
fg="white",
bold=True,
err=True,
)
sys.exit(1)
with temporary_docker_directory(
files,
"datasette",
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
extra_metadata,
):
args = ["docker", "build"]
if tag:
args.append("-t")
args.append(tag)
args.append(".")
call(args) | Package specified SQLite files into a new datasette Docker container | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/cli.py#L222-L265 | null | import click
from click import formatting
from click_default_group import DefaultGroup
import json
import os
import shutil
from subprocess import call
import sys
from .app import Datasette, DEFAULT_CONFIG, CONFIG_OPTIONS, pm
from .utils import (
temporary_docker_directory,
value_as_boolean,
StaticMount,
ValueAsBooleanError,
)
class Config(click.ParamType):
name = "config"
def convert(self, config, param, ctx):
if ":" not in config:
self.fail(
'"{}" should be name:value'.format(config), param, ctx
)
return
name, value = config.split(":")
if name not in DEFAULT_CONFIG:
self.fail(
"{} is not a valid option (--help-config to see all)".format(
name
), param, ctx
)
return
# Type checking
default = DEFAULT_CONFIG[name]
if isinstance(default, bool):
try:
return name, value_as_boolean(value)
except ValueAsBooleanError:
self.fail(
'"{}" should be on/off/true/false/1/0'.format(name), param, ctx
)
return
elif isinstance(default, int):
if not value.isdigit():
self.fail(
'"{}" should be an integer'.format(name), param, ctx
)
return
return name, int(value)
else:
# Should never happen:
self.fail('Invalid option')
@click.group(cls=DefaultGroup, default="serve", default_if_no_args=True)
@click.version_option()
def cli():
"""
Datasette!
"""
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1)
@click.option("--inspect-file", default="inspect-data.json")
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
def inspect(files, inspect_file, sqlite_extensions):
app = Datasette(files, sqlite_extensions=sqlite_extensions)
open(inspect_file, "w").write(json.dumps(app.inspect(), indent=2))
@cli.group()
def publish():
"Publish specified SQLite database files to the internet along with a Datasette-powered interface and API"
pass
# Register publish plugins
pm.hook.publish_subcommand(publish=publish)
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1, required=True)
@click.option(
"-m",
"--metadata",
default="metadata.json",
help="Name of metadata file to generate",
)
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
def skeleton(files, metadata, sqlite_extensions):
"Generate a skeleton metadata.json file for specified SQLite databases"
if os.path.exists(metadata):
click.secho(
"File {} already exists, will not over-write".format(metadata),
bg="red",
fg="white",
bold=True,
err=True,
)
sys.exit(1)
app = Datasette(files, sqlite_extensions=sqlite_extensions)
databases = {}
for database_name, info in app.inspect().items():
databases[database_name] = {
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"queries": {},
"tables": {
table_name: {
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"units": {},
}
for table_name in (info.get("tables") or {})
},
}
open(metadata, "w").write(
json.dumps(
{
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"databases": databases,
},
indent=4,
)
)
click.echo("Wrote skeleton to {}".format(metadata))
@cli.command()
@click.option("--all", help="Include built-in default plugins", is_flag=True)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
def plugins(all, plugins_dir):
"List currently available plugins"
app = Datasette([], plugins_dir=plugins_dir)
click.echo(json.dumps(app.plugins(all), indent=4))
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1, required=True)
@click.option(
"-t",
"--tag",
help="Name for the resulting Docker container, can optionally use name:tag format",
)
@click.option(
"-m",
"--metadata",
type=click.File(mode="r"),
help="Path to JSON file containing metadata to publish",
)
@click.option("--extra-options", help="Extra options to pass to datasette serve")
@click.option("--branch", help="Install datasette from a GitHub branch e.g. master")
@click.option(
"--template-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom templates",
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@click.option(
"--static",
type=StaticMount(),
help="mountpoint:path-to-directory for serving static files",
multiple=True,
)
@click.option(
"--install",
help="Additional packages (e.g. plugins) to install",
multiple=True,
)
@click.option(
"--spatialite", is_flag=True, help="Enable SpatialLite extension"
)
@click.option("--version-note", help="Additional note to show on /-/versions")
@click.option("--title", help="Title for metadata")
@click.option("--license", help="License label for metadata")
@click.option("--license_url", help="License URL for metadata")
@click.option("--source", help="Source label for metadata")
@click.option("--source_url", help="Source URL for metadata")
@click.option("--about", help="About label for metadata")
@click.option("--about_url", help="About URL for metadata")
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1)
@click.option(
"-i",
"--immutable",
type=click.Path(exists=True),
help="Database files to open in immutable mode",
multiple=True,
)
@click.option(
"-h", "--host", default="127.0.0.1", help="host for server, defaults to 127.0.0.1"
)
@click.option("-p", "--port", default=8001, help="port for server, defaults to 8001")
@click.option(
"--debug", is_flag=True, help="Enable debug mode - useful for development"
)
@click.option(
"--reload",
is_flag=True,
help="Automatically reload if database or code change detected - useful for development",
)
@click.option(
"--cors", is_flag=True, help="Enable CORS by serving Access-Control-Allow-Origin: *"
)
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
@click.option(
"--inspect-file", help='Path to JSON file created using "datasette inspect"'
)
@click.option(
"-m",
"--metadata",
type=click.File(mode="r"),
help="Path to JSON file containing license/source metadata",
)
@click.option(
"--template-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom templates",
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@click.option(
"--static",
type=StaticMount(),
help="mountpoint:path-to-directory for serving static files",
multiple=True,
)
@click.option(
"--memory", is_flag=True, help="Make :memory: database available"
)
@click.option(
"--config",
type=Config(),
help="Set config option using configname:value datasette.readthedocs.io/en/latest/config.html",
multiple=True,
)
@click.option("--version-note", help="Additional note to show on /-/versions")
@click.option(
"--help-config",
is_flag=True,
help="Show available config options",
)
def serve(
files,
immutable,
host,
port,
debug,
reload,
cors,
sqlite_extensions,
inspect_file,
metadata,
template_dir,
plugins_dir,
static,
memory,
config,
version_note,
help_config,
):
"""Serve up specified SQLite database files with a web UI"""
if help_config:
formatter = formatting.HelpFormatter()
with formatter.section("Config options"):
formatter.write_dl([
(option.name, '{} (default={})'.format(
option.help, option.default
))
for option in CONFIG_OPTIONS
])
click.echo(formatter.getvalue())
sys.exit(0)
if reload:
import hupper
reloader = hupper.start_reloader("datasette.cli.serve")
reloader.watch_files(files)
if metadata:
reloader.watch_files([metadata.name])
inspect_data = None
if inspect_file:
inspect_data = json.load(open(inspect_file))
metadata_data = None
if metadata:
metadata_data = json.loads(metadata.read())
click.echo("Serve! files={} (immutables={}) on port {}".format(files, immutable, port))
ds = Datasette(
files,
immutables=immutable,
cache_headers=not debug and not reload,
cors=cors,
inspect_data=inspect_data,
metadata=metadata_data,
sqlite_extensions=sqlite_extensions,
template_dir=template_dir,
plugins_dir=plugins_dir,
static_mounts=static,
config=dict(config),
memory=memory,
version_note=version_note,
)
# Force initial hashing/table counting
ds.inspect()
ds.app().run(host=host, port=port, debug=debug)
|
simonw/datasette | datasette/cli.py | serve | python | def serve(
files,
immutable,
host,
port,
debug,
reload,
cors,
sqlite_extensions,
inspect_file,
metadata,
template_dir,
plugins_dir,
static,
memory,
config,
version_note,
help_config,
):
if help_config:
formatter = formatting.HelpFormatter()
with formatter.section("Config options"):
formatter.write_dl([
(option.name, '{} (default={})'.format(
option.help, option.default
))
for option in CONFIG_OPTIONS
])
click.echo(formatter.getvalue())
sys.exit(0)
if reload:
import hupper
reloader = hupper.start_reloader("datasette.cli.serve")
reloader.watch_files(files)
if metadata:
reloader.watch_files([metadata.name])
inspect_data = None
if inspect_file:
inspect_data = json.load(open(inspect_file))
metadata_data = None
if metadata:
metadata_data = json.loads(metadata.read())
click.echo("Serve! files={} (immutables={}) on port {}".format(files, immutable, port))
ds = Datasette(
files,
immutables=immutable,
cache_headers=not debug and not reload,
cors=cors,
inspect_data=inspect_data,
metadata=metadata_data,
sqlite_extensions=sqlite_extensions,
template_dir=template_dir,
plugins_dir=plugins_dir,
static_mounts=static,
config=dict(config),
memory=memory,
version_note=version_note,
)
# Force initial hashing/table counting
ds.inspect()
ds.app().run(host=host, port=port, debug=debug) | Serve up specified SQLite database files with a web UI | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/cli.py#L340-L405 | [
"def inspect(self):\n \" Inspect the database and return a dictionary of table metadata \"\n if self._inspect:\n return self._inspect\n\n self._inspect = {}\n for filename in self.files:\n if filename is MEMORY:\n self._inspect[\":memory:\"] = {\n \"hash\": \"000\... | import click
from click import formatting
from click_default_group import DefaultGroup
import json
import os
import shutil
from subprocess import call
import sys
from .app import Datasette, DEFAULT_CONFIG, CONFIG_OPTIONS, pm
from .utils import (
temporary_docker_directory,
value_as_boolean,
StaticMount,
ValueAsBooleanError,
)
class Config(click.ParamType):
name = "config"
def convert(self, config, param, ctx):
if ":" not in config:
self.fail(
'"{}" should be name:value'.format(config), param, ctx
)
return
name, value = config.split(":")
if name not in DEFAULT_CONFIG:
self.fail(
"{} is not a valid option (--help-config to see all)".format(
name
), param, ctx
)
return
# Type checking
default = DEFAULT_CONFIG[name]
if isinstance(default, bool):
try:
return name, value_as_boolean(value)
except ValueAsBooleanError:
self.fail(
'"{}" should be on/off/true/false/1/0'.format(name), param, ctx
)
return
elif isinstance(default, int):
if not value.isdigit():
self.fail(
'"{}" should be an integer'.format(name), param, ctx
)
return
return name, int(value)
else:
# Should never happen:
self.fail('Invalid option')
@click.group(cls=DefaultGroup, default="serve", default_if_no_args=True)
@click.version_option()
def cli():
"""
Datasette!
"""
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1)
@click.option("--inspect-file", default="inspect-data.json")
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
def inspect(files, inspect_file, sqlite_extensions):
app = Datasette(files, sqlite_extensions=sqlite_extensions)
open(inspect_file, "w").write(json.dumps(app.inspect(), indent=2))
@cli.group()
def publish():
"Publish specified SQLite database files to the internet along with a Datasette-powered interface and API"
pass
# Register publish plugins
pm.hook.publish_subcommand(publish=publish)
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1, required=True)
@click.option(
"-m",
"--metadata",
default="metadata.json",
help="Name of metadata file to generate",
)
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
def skeleton(files, metadata, sqlite_extensions):
"Generate a skeleton metadata.json file for specified SQLite databases"
if os.path.exists(metadata):
click.secho(
"File {} already exists, will not over-write".format(metadata),
bg="red",
fg="white",
bold=True,
err=True,
)
sys.exit(1)
app = Datasette(files, sqlite_extensions=sqlite_extensions)
databases = {}
for database_name, info in app.inspect().items():
databases[database_name] = {
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"queries": {},
"tables": {
table_name: {
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"units": {},
}
for table_name in (info.get("tables") or {})
},
}
open(metadata, "w").write(
json.dumps(
{
"title": None,
"description": None,
"description_html": None,
"license": None,
"license_url": None,
"source": None,
"source_url": None,
"databases": databases,
},
indent=4,
)
)
click.echo("Wrote skeleton to {}".format(metadata))
@cli.command()
@click.option("--all", help="Include built-in default plugins", is_flag=True)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
def plugins(all, plugins_dir):
"List currently available plugins"
app = Datasette([], plugins_dir=plugins_dir)
click.echo(json.dumps(app.plugins(all), indent=4))
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1, required=True)
@click.option(
"-t",
"--tag",
help="Name for the resulting Docker container, can optionally use name:tag format",
)
@click.option(
"-m",
"--metadata",
type=click.File(mode="r"),
help="Path to JSON file containing metadata to publish",
)
@click.option("--extra-options", help="Extra options to pass to datasette serve")
@click.option("--branch", help="Install datasette from a GitHub branch e.g. master")
@click.option(
"--template-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom templates",
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@click.option(
"--static",
type=StaticMount(),
help="mountpoint:path-to-directory for serving static files",
multiple=True,
)
@click.option(
"--install",
help="Additional packages (e.g. plugins) to install",
multiple=True,
)
@click.option(
"--spatialite", is_flag=True, help="Enable SpatialLite extension"
)
@click.option("--version-note", help="Additional note to show on /-/versions")
@click.option("--title", help="Title for metadata")
@click.option("--license", help="License label for metadata")
@click.option("--license_url", help="License URL for metadata")
@click.option("--source", help="Source label for metadata")
@click.option("--source_url", help="Source URL for metadata")
@click.option("--about", help="About label for metadata")
@click.option("--about_url", help="About URL for metadata")
def package(
files,
tag,
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
**extra_metadata
):
"Package specified SQLite files into a new datasette Docker container"
if not shutil.which("docker"):
click.secho(
' The package command requires "docker" to be installed and configured ',
bg="red",
fg="white",
bold=True,
err=True,
)
sys.exit(1)
with temporary_docker_directory(
files,
"datasette",
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
extra_metadata,
):
args = ["docker", "build"]
if tag:
args.append("-t")
args.append(tag)
args.append(".")
call(args)
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1)
@click.option(
"-i",
"--immutable",
type=click.Path(exists=True),
help="Database files to open in immutable mode",
multiple=True,
)
@click.option(
"-h", "--host", default="127.0.0.1", help="host for server, defaults to 127.0.0.1"
)
@click.option("-p", "--port", default=8001, help="port for server, defaults to 8001")
@click.option(
"--debug", is_flag=True, help="Enable debug mode - useful for development"
)
@click.option(
"--reload",
is_flag=True,
help="Automatically reload if database or code change detected - useful for development",
)
@click.option(
"--cors", is_flag=True, help="Enable CORS by serving Access-Control-Allow-Origin: *"
)
@click.option(
"sqlite_extensions",
"--load-extension",
envvar="SQLITE_EXTENSIONS",
multiple=True,
type=click.Path(exists=True, resolve_path=True),
help="Path to a SQLite extension to load",
)
@click.option(
"--inspect-file", help='Path to JSON file created using "datasette inspect"'
)
@click.option(
"-m",
"--metadata",
type=click.File(mode="r"),
help="Path to JSON file containing license/source metadata",
)
@click.option(
"--template-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom templates",
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@click.option(
"--static",
type=StaticMount(),
help="mountpoint:path-to-directory for serving static files",
multiple=True,
)
@click.option(
"--memory", is_flag=True, help="Make :memory: database available"
)
@click.option(
"--config",
type=Config(),
help="Set config option using configname:value datasette.readthedocs.io/en/latest/config.html",
multiple=True,
)
@click.option("--version-note", help="Additional note to show on /-/versions")
@click.option(
"--help-config",
is_flag=True,
help="Show available config options",
)
|
joerick/pyinstrument | pyinstrument/__main__.py | file_supports_color | python | def file_supports_color(file_obj):
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or
'ANSICON' in os.environ)
is_a_tty = file_is_a_tty(file_obj)
return (supported_platform and is_a_tty) | Returns True if the running system's terminal supports color.
Borrowed from Django
https://github.com/django/django/blob/master/django/core/management/color.py | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/__main__.py#L198-L211 | [
"def file_is_a_tty(file_obj):\n return hasattr(file_obj, 'isatty') and file_obj.isatty()\n"
] | import sys, os, codecs, runpy, tempfile, glob, time, fnmatch, optparse
import pyinstrument
from pyinstrument import Profiler, renderers
from pyinstrument.session import ProfilerSession
from pyinstrument.util import object_with_import_path
from pyinstrument.vendor.six import exec_, PY2
def main():
usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
version_string = 'pyinstrument {v}, on Python {pyv[0]}.{pyv[1]}.{pyv[2]}'.format(
v=pyinstrument.__version__,
pyv=sys.version_info,
)
parser = optparse.OptionParser(usage=usage, version=version_string)
parser.allow_interspersed_args = False
def dash_m_callback(option, opt, value, parser):
parser.values.module_name = value
# everything after the -m argument should be passed to that module
parser.values.module_args = parser.rargs + parser.largs
parser.rargs[:] = []
parser.largs[:] = []
parser.add_option('', '--load-prev',
dest='load_prev', action='store', metavar='ID',
help="Instead of running a script, load a previous report")
parser.add_option('-m', '',
dest='module_name', action='callback', callback=dash_m_callback,
type="str",
help="run library module as a script, like 'python -m module'")
parser.add_option('-o', '--outfile',
dest="outfile", action='store',
help="save to <outfile>", default=None)
parser.add_option('-r', '--renderer',
dest='renderer', action='store', type='string',
help=("how the report should be rendered. One of: 'text', 'html', 'json', or python "
"import path to a renderer class"),
default='text')
parser.add_option('', '--html',
dest="output_html", action='store_true',
help=optparse.SUPPRESS_HELP, default=False) # deprecated shortcut for --renderer=html
parser.add_option('-t', '--timeline',
dest='timeline', action='store_true',
help="render as a timeline - preserve ordering and don't condense repeated calls")
parser.add_option('', '--hide',
dest='hide_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to hide. Defaults to "
"'*{sep}lib{sep}*'.").format(sep=os.sep),
default='*{sep}lib{sep}*'.format(sep=os.sep))
parser.add_option('', '--hide-regex',
dest='hide_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to hide. Useful if --hide doesn't give "
"enough control."))
parser.add_option('', '--show',
dest='show_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to "
"show, regardless of --hide or --hide-regex. For example, use "
"--show '*/<library>/*' to show frames within a library that "
"would otherwise be hidden."))
parser.add_option('', '--show-regex',
dest='show_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to always show. "
"Useful if --show doesn't give enough control."))
parser.add_option('', '--show-all',
dest='show_all', action='store_true',
help="show everything", default=False)
parser.add_option('', '--unicode',
dest='unicode', action='store_true',
help='(text renderer only) force unicode text output')
parser.add_option('', '--no-unicode',
dest='unicode', action='store_false',
help='(text renderer only) force ascii text output')
parser.add_option('', '--color',
dest='color', action='store_true',
help='(text renderer only) force ansi color text output')
parser.add_option('', '--no-color',
dest='color', action='store_false',
help='(text renderer only) force no color text output')
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
options, args = parser.parse_args()
if args == [] and options.module_name is None and options.load_prev is None:
parser.print_help()
sys.exit(2)
if not options.hide_regex:
options.hide_regex = fnmatch.translate(options.hide_fnmatch)
if not options.show_regex and options.show_fnmatch:
options.show_regex = fnmatch.translate(options.show_fnmatch)
if options.show_all:
options.show_regex = r'.*'
if options.load_prev:
session = load_report(options.load_prev)
else:
if options.module_name is not None:
sys.argv[:] = [options.module_name] + options.module_args
code = "run_module(modname, run_name='__main__')"
globs = {
'run_module': runpy.run_module,
'modname': options.module_name
}
else:
sys.argv[:] = args
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
profiler = Profiler()
profiler.start()
try:
exec_(code, globs, None)
except (SystemExit, KeyboardInterrupt):
pass
profiler.stop()
session = profiler.last_session
if options.output_html:
options.renderer = 'html'
output_to_temp_file = (options.renderer == 'html'
and not options.outfile
and file_is_a_tty(sys.stdout))
if options.outfile:
f = codecs.open(options.outfile, 'w', 'utf-8')
should_close_f_after_writing = True
elif not output_to_temp_file:
if PY2:
f = codecs.getwriter('utf-8')(sys.stdout)
else:
f = sys.stdout
should_close_f_after_writing = False
renderer_kwargs = {'processor_options': {
'hide_regex': options.hide_regex,
'show_regex': options.show_regex,
}}
if options.timeline is not None:
renderer_kwargs['timeline'] = options.timeline
if options.renderer == 'text':
unicode_override = options.unicode != None
color_override = options.color != None
unicode = options.unicode if unicode_override else file_supports_unicode(f)
color = options.color if color_override else file_supports_color(f)
renderer_kwargs.update({'unicode': unicode, 'color': color})
renderer_class = get_renderer_class(options.renderer)
renderer = renderer_class(**renderer_kwargs)
# remove this frame from the trace
renderer.processors.append(remove_first_pyinstrument_frame_processor)
if output_to_temp_file:
output_filename = renderer.open_in_browser(session)
print('stdout is a terminal, so saved profile output to %s' % output_filename)
else:
f.write(renderer.render(session))
if should_close_f_after_writing:
f.close()
if options.renderer == 'text':
_, report_identifier = save_report(session)
print('To view this report with different options, run:')
print(' pyinstrument --load-prev %s [options]' % report_identifier)
print('')
def file_supports_unicode(file_obj):
encoding = getattr(file_obj, 'encoding', None)
if not encoding:
return False
codec_info = codecs.lookup(encoding)
return ('utf' in codec_info.name)
def file_is_a_tty(file_obj):
return hasattr(file_obj, 'isatty') and file_obj.isatty()
def get_renderer_class(renderer):
if renderer == 'text':
return renderers.ConsoleRenderer
elif renderer == 'html':
return renderers.HTMLRenderer
elif renderer == 'json':
return renderers.JSONRenderer
else:
return object_with_import_path(renderer)
def report_dir():
report_dir = os.path.join(tempfile.gettempdir(), 'pyinstrument')
if not os.path.exists(report_dir):
os.mkdir(report_dir)
return report_dir
def load_report(identifier=None):
'''
Returns the session referred to by identifier
'''
path = os.path.join(
report_dir(),
identifier + '.pyireport'
)
return ProfilerSession.load(path)
def save_report(session):
'''
Saves the session to a temp file, and returns that path.
Also prunes the number of reports to 10 so there aren't loads building up.
'''
# prune this folder to contain the last 10 sessions
previous_reports = glob.glob(os.path.join(report_dir(), '*.pyireport'))
previous_reports.sort(reverse=True)
while len(previous_reports) > 10:
report_file = previous_reports.pop()
os.remove(report_file)
identifier = time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime(session.start_time))
path = os.path.join(
report_dir(),
identifier + '.pyireport'
)
session.save(path)
return path, identifier
# pylint: disable=W0613
def remove_first_pyinstrument_frame_processor(frame, options):
'''
The first frame when using the command line is always the __main__ function. I want to remove
that from the output.
'''
if frame is None:
return None
if 'pyinstrument' in frame.file_path and len(frame.children) == 1:
frame = frame.children[0]
frame.remove_from_parent()
return frame
return frame
if __name__ == '__main__':
main()
|
joerick/pyinstrument | pyinstrument/__main__.py | load_report | python | def load_report(identifier=None):
'''
Returns the session referred to by identifier
'''
path = os.path.join(
report_dir(),
identifier + '.pyireport'
)
return ProfilerSession.load(path) | Returns the session referred to by identifier | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/__main__.py#L245-L253 | [
"def report_dir():\n report_dir = os.path.join(tempfile.gettempdir(), 'pyinstrument')\n if not os.path.exists(report_dir):\n os.mkdir(report_dir)\n return report_dir\n",
"def load(filename):\n with io.open(filename, 'rb' if PY2 else 'r') as f:\n return ProfilerSession.from_json(json.load... | import sys, os, codecs, runpy, tempfile, glob, time, fnmatch, optparse
import pyinstrument
from pyinstrument import Profiler, renderers
from pyinstrument.session import ProfilerSession
from pyinstrument.util import object_with_import_path
from pyinstrument.vendor.six import exec_, PY2
def main():
usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
version_string = 'pyinstrument {v}, on Python {pyv[0]}.{pyv[1]}.{pyv[2]}'.format(
v=pyinstrument.__version__,
pyv=sys.version_info,
)
parser = optparse.OptionParser(usage=usage, version=version_string)
parser.allow_interspersed_args = False
def dash_m_callback(option, opt, value, parser):
parser.values.module_name = value
# everything after the -m argument should be passed to that module
parser.values.module_args = parser.rargs + parser.largs
parser.rargs[:] = []
parser.largs[:] = []
parser.add_option('', '--load-prev',
dest='load_prev', action='store', metavar='ID',
help="Instead of running a script, load a previous report")
parser.add_option('-m', '',
dest='module_name', action='callback', callback=dash_m_callback,
type="str",
help="run library module as a script, like 'python -m module'")
parser.add_option('-o', '--outfile',
dest="outfile", action='store',
help="save to <outfile>", default=None)
parser.add_option('-r', '--renderer',
dest='renderer', action='store', type='string',
help=("how the report should be rendered. One of: 'text', 'html', 'json', or python "
"import path to a renderer class"),
default='text')
parser.add_option('', '--html',
dest="output_html", action='store_true',
help=optparse.SUPPRESS_HELP, default=False) # deprecated shortcut for --renderer=html
parser.add_option('-t', '--timeline',
dest='timeline', action='store_true',
help="render as a timeline - preserve ordering and don't condense repeated calls")
parser.add_option('', '--hide',
dest='hide_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to hide. Defaults to "
"'*{sep}lib{sep}*'.").format(sep=os.sep),
default='*{sep}lib{sep}*'.format(sep=os.sep))
parser.add_option('', '--hide-regex',
dest='hide_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to hide. Useful if --hide doesn't give "
"enough control."))
parser.add_option('', '--show',
dest='show_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to "
"show, regardless of --hide or --hide-regex. For example, use "
"--show '*/<library>/*' to show frames within a library that "
"would otherwise be hidden."))
parser.add_option('', '--show-regex',
dest='show_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to always show. "
"Useful if --show doesn't give enough control."))
parser.add_option('', '--show-all',
dest='show_all', action='store_true',
help="show everything", default=False)
parser.add_option('', '--unicode',
dest='unicode', action='store_true',
help='(text renderer only) force unicode text output')
parser.add_option('', '--no-unicode',
dest='unicode', action='store_false',
help='(text renderer only) force ascii text output')
parser.add_option('', '--color',
dest='color', action='store_true',
help='(text renderer only) force ansi color text output')
parser.add_option('', '--no-color',
dest='color', action='store_false',
help='(text renderer only) force no color text output')
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
options, args = parser.parse_args()
if args == [] and options.module_name is None and options.load_prev is None:
parser.print_help()
sys.exit(2)
if not options.hide_regex:
options.hide_regex = fnmatch.translate(options.hide_fnmatch)
if not options.show_regex and options.show_fnmatch:
options.show_regex = fnmatch.translate(options.show_fnmatch)
if options.show_all:
options.show_regex = r'.*'
if options.load_prev:
session = load_report(options.load_prev)
else:
if options.module_name is not None:
sys.argv[:] = [options.module_name] + options.module_args
code = "run_module(modname, run_name='__main__')"
globs = {
'run_module': runpy.run_module,
'modname': options.module_name
}
else:
sys.argv[:] = args
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
profiler = Profiler()
profiler.start()
try:
exec_(code, globs, None)
except (SystemExit, KeyboardInterrupt):
pass
profiler.stop()
session = profiler.last_session
if options.output_html:
options.renderer = 'html'
output_to_temp_file = (options.renderer == 'html'
and not options.outfile
and file_is_a_tty(sys.stdout))
if options.outfile:
f = codecs.open(options.outfile, 'w', 'utf-8')
should_close_f_after_writing = True
elif not output_to_temp_file:
if PY2:
f = codecs.getwriter('utf-8')(sys.stdout)
else:
f = sys.stdout
should_close_f_after_writing = False
renderer_kwargs = {'processor_options': {
'hide_regex': options.hide_regex,
'show_regex': options.show_regex,
}}
if options.timeline is not None:
renderer_kwargs['timeline'] = options.timeline
if options.renderer == 'text':
unicode_override = options.unicode != None
color_override = options.color != None
unicode = options.unicode if unicode_override else file_supports_unicode(f)
color = options.color if color_override else file_supports_color(f)
renderer_kwargs.update({'unicode': unicode, 'color': color})
renderer_class = get_renderer_class(options.renderer)
renderer = renderer_class(**renderer_kwargs)
# remove this frame from the trace
renderer.processors.append(remove_first_pyinstrument_frame_processor)
if output_to_temp_file:
output_filename = renderer.open_in_browser(session)
print('stdout is a terminal, so saved profile output to %s' % output_filename)
else:
f.write(renderer.render(session))
if should_close_f_after_writing:
f.close()
if options.renderer == 'text':
_, report_identifier = save_report(session)
print('To view this report with different options, run:')
print(' pyinstrument --load-prev %s [options]' % report_identifier)
print('')
def file_supports_color(file_obj):
"""
Returns True if the running system's terminal supports color.
Borrowed from Django
https://github.com/django/django/blob/master/django/core/management/color.py
"""
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or
'ANSICON' in os.environ)
is_a_tty = file_is_a_tty(file_obj)
return (supported_platform and is_a_tty)
def file_supports_unicode(file_obj):
encoding = getattr(file_obj, 'encoding', None)
if not encoding:
return False
codec_info = codecs.lookup(encoding)
return ('utf' in codec_info.name)
def file_is_a_tty(file_obj):
return hasattr(file_obj, 'isatty') and file_obj.isatty()
def get_renderer_class(renderer):
if renderer == 'text':
return renderers.ConsoleRenderer
elif renderer == 'html':
return renderers.HTMLRenderer
elif renderer == 'json':
return renderers.JSONRenderer
else:
return object_with_import_path(renderer)
def report_dir():
report_dir = os.path.join(tempfile.gettempdir(), 'pyinstrument')
if not os.path.exists(report_dir):
os.mkdir(report_dir)
return report_dir
def save_report(session):
'''
Saves the session to a temp file, and returns that path.
Also prunes the number of reports to 10 so there aren't loads building up.
'''
# prune this folder to contain the last 10 sessions
previous_reports = glob.glob(os.path.join(report_dir(), '*.pyireport'))
previous_reports.sort(reverse=True)
while len(previous_reports) > 10:
report_file = previous_reports.pop()
os.remove(report_file)
identifier = time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime(session.start_time))
path = os.path.join(
report_dir(),
identifier + '.pyireport'
)
session.save(path)
return path, identifier
# pylint: disable=W0613
def remove_first_pyinstrument_frame_processor(frame, options):
'''
The first frame when using the command line is always the __main__ function. I want to remove
that from the output.
'''
if frame is None:
return None
if 'pyinstrument' in frame.file_path and len(frame.children) == 1:
frame = frame.children[0]
frame.remove_from_parent()
return frame
return frame
if __name__ == '__main__':
main()
|
joerick/pyinstrument | pyinstrument/__main__.py | save_report | python | def save_report(session):
'''
Saves the session to a temp file, and returns that path.
Also prunes the number of reports to 10 so there aren't loads building up.
'''
# prune this folder to contain the last 10 sessions
previous_reports = glob.glob(os.path.join(report_dir(), '*.pyireport'))
previous_reports.sort(reverse=True)
while len(previous_reports) > 10:
report_file = previous_reports.pop()
os.remove(report_file)
identifier = time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime(session.start_time))
path = os.path.join(
report_dir(),
identifier + '.pyireport'
)
session.save(path)
return path, identifier | Saves the session to a temp file, and returns that path.
Also prunes the number of reports to 10 so there aren't loads building up. | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/__main__.py#L255-L274 | [
"def report_dir():\n report_dir = os.path.join(tempfile.gettempdir(), 'pyinstrument')\n if not os.path.exists(report_dir):\n os.mkdir(report_dir)\n return report_dir\n"
] | import sys, os, codecs, runpy, tempfile, glob, time, fnmatch, optparse
import pyinstrument
from pyinstrument import Profiler, renderers
from pyinstrument.session import ProfilerSession
from pyinstrument.util import object_with_import_path
from pyinstrument.vendor.six import exec_, PY2
def main():
usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
version_string = 'pyinstrument {v}, on Python {pyv[0]}.{pyv[1]}.{pyv[2]}'.format(
v=pyinstrument.__version__,
pyv=sys.version_info,
)
parser = optparse.OptionParser(usage=usage, version=version_string)
parser.allow_interspersed_args = False
def dash_m_callback(option, opt, value, parser):
parser.values.module_name = value
# everything after the -m argument should be passed to that module
parser.values.module_args = parser.rargs + parser.largs
parser.rargs[:] = []
parser.largs[:] = []
parser.add_option('', '--load-prev',
dest='load_prev', action='store', metavar='ID',
help="Instead of running a script, load a previous report")
parser.add_option('-m', '',
dest='module_name', action='callback', callback=dash_m_callback,
type="str",
help="run library module as a script, like 'python -m module'")
parser.add_option('-o', '--outfile',
dest="outfile", action='store',
help="save to <outfile>", default=None)
parser.add_option('-r', '--renderer',
dest='renderer', action='store', type='string',
help=("how the report should be rendered. One of: 'text', 'html', 'json', or python "
"import path to a renderer class"),
default='text')
parser.add_option('', '--html',
dest="output_html", action='store_true',
help=optparse.SUPPRESS_HELP, default=False) # deprecated shortcut for --renderer=html
parser.add_option('-t', '--timeline',
dest='timeline', action='store_true',
help="render as a timeline - preserve ordering and don't condense repeated calls")
parser.add_option('', '--hide',
dest='hide_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to hide. Defaults to "
"'*{sep}lib{sep}*'.").format(sep=os.sep),
default='*{sep}lib{sep}*'.format(sep=os.sep))
parser.add_option('', '--hide-regex',
dest='hide_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to hide. Useful if --hide doesn't give "
"enough control."))
parser.add_option('', '--show',
dest='show_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to "
"show, regardless of --hide or --hide-regex. For example, use "
"--show '*/<library>/*' to show frames within a library that "
"would otherwise be hidden."))
parser.add_option('', '--show-regex',
dest='show_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to always show. "
"Useful if --show doesn't give enough control."))
parser.add_option('', '--show-all',
dest='show_all', action='store_true',
help="show everything", default=False)
parser.add_option('', '--unicode',
dest='unicode', action='store_true',
help='(text renderer only) force unicode text output')
parser.add_option('', '--no-unicode',
dest='unicode', action='store_false',
help='(text renderer only) force ascii text output')
parser.add_option('', '--color',
dest='color', action='store_true',
help='(text renderer only) force ansi color text output')
parser.add_option('', '--no-color',
dest='color', action='store_false',
help='(text renderer only) force no color text output')
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
options, args = parser.parse_args()
if args == [] and options.module_name is None and options.load_prev is None:
parser.print_help()
sys.exit(2)
if not options.hide_regex:
options.hide_regex = fnmatch.translate(options.hide_fnmatch)
if not options.show_regex and options.show_fnmatch:
options.show_regex = fnmatch.translate(options.show_fnmatch)
if options.show_all:
options.show_regex = r'.*'
if options.load_prev:
session = load_report(options.load_prev)
else:
if options.module_name is not None:
sys.argv[:] = [options.module_name] + options.module_args
code = "run_module(modname, run_name='__main__')"
globs = {
'run_module': runpy.run_module,
'modname': options.module_name
}
else:
sys.argv[:] = args
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
profiler = Profiler()
profiler.start()
try:
exec_(code, globs, None)
except (SystemExit, KeyboardInterrupt):
pass
profiler.stop()
session = profiler.last_session
if options.output_html:
options.renderer = 'html'
output_to_temp_file = (options.renderer == 'html'
and not options.outfile
and file_is_a_tty(sys.stdout))
if options.outfile:
f = codecs.open(options.outfile, 'w', 'utf-8')
should_close_f_after_writing = True
elif not output_to_temp_file:
if PY2:
f = codecs.getwriter('utf-8')(sys.stdout)
else:
f = sys.stdout
should_close_f_after_writing = False
renderer_kwargs = {'processor_options': {
'hide_regex': options.hide_regex,
'show_regex': options.show_regex,
}}
if options.timeline is not None:
renderer_kwargs['timeline'] = options.timeline
if options.renderer == 'text':
unicode_override = options.unicode != None
color_override = options.color != None
unicode = options.unicode if unicode_override else file_supports_unicode(f)
color = options.color if color_override else file_supports_color(f)
renderer_kwargs.update({'unicode': unicode, 'color': color})
renderer_class = get_renderer_class(options.renderer)
renderer = renderer_class(**renderer_kwargs)
# remove this frame from the trace
renderer.processors.append(remove_first_pyinstrument_frame_processor)
if output_to_temp_file:
output_filename = renderer.open_in_browser(session)
print('stdout is a terminal, so saved profile output to %s' % output_filename)
else:
f.write(renderer.render(session))
if should_close_f_after_writing:
f.close()
if options.renderer == 'text':
_, report_identifier = save_report(session)
print('To view this report with different options, run:')
print(' pyinstrument --load-prev %s [options]' % report_identifier)
print('')
def file_supports_color(file_obj):
"""
Returns True if the running system's terminal supports color.
Borrowed from Django
https://github.com/django/django/blob/master/django/core/management/color.py
"""
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or
'ANSICON' in os.environ)
is_a_tty = file_is_a_tty(file_obj)
return (supported_platform and is_a_tty)
def file_supports_unicode(file_obj):
encoding = getattr(file_obj, 'encoding', None)
if not encoding:
return False
codec_info = codecs.lookup(encoding)
return ('utf' in codec_info.name)
def file_is_a_tty(file_obj):
return hasattr(file_obj, 'isatty') and file_obj.isatty()
def get_renderer_class(renderer):
if renderer == 'text':
return renderers.ConsoleRenderer
elif renderer == 'html':
return renderers.HTMLRenderer
elif renderer == 'json':
return renderers.JSONRenderer
else:
return object_with_import_path(renderer)
def report_dir():
report_dir = os.path.join(tempfile.gettempdir(), 'pyinstrument')
if not os.path.exists(report_dir):
os.mkdir(report_dir)
return report_dir
def load_report(identifier=None):
'''
Returns the session referred to by identifier
'''
path = os.path.join(
report_dir(),
identifier + '.pyireport'
)
return ProfilerSession.load(path)
# pylint: disable=W0613
def remove_first_pyinstrument_frame_processor(frame, options):
'''
The first frame when using the command line is always the __main__ function. I want to remove
that from the output.
'''
if frame is None:
return None
if 'pyinstrument' in frame.file_path and len(frame.children) == 1:
frame = frame.children[0]
frame.remove_from_parent()
return frame
return frame
if __name__ == '__main__':
main()
|
joerick/pyinstrument | pyinstrument/__main__.py | remove_first_pyinstrument_frame_processor | python | def remove_first_pyinstrument_frame_processor(frame, options):
'''
The first frame when using the command line is always the __main__ function. I want to remove
that from the output.
'''
if frame is None:
return None
if 'pyinstrument' in frame.file_path and len(frame.children) == 1:
frame = frame.children[0]
frame.remove_from_parent()
return frame
return frame | The first frame when using the command line is always the __main__ function. I want to remove
that from the output. | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/__main__.py#L277-L290 | null | import sys, os, codecs, runpy, tempfile, glob, time, fnmatch, optparse
import pyinstrument
from pyinstrument import Profiler, renderers
from pyinstrument.session import ProfilerSession
from pyinstrument.util import object_with_import_path
from pyinstrument.vendor.six import exec_, PY2
def main():
usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
version_string = 'pyinstrument {v}, on Python {pyv[0]}.{pyv[1]}.{pyv[2]}'.format(
v=pyinstrument.__version__,
pyv=sys.version_info,
)
parser = optparse.OptionParser(usage=usage, version=version_string)
parser.allow_interspersed_args = False
def dash_m_callback(option, opt, value, parser):
parser.values.module_name = value
# everything after the -m argument should be passed to that module
parser.values.module_args = parser.rargs + parser.largs
parser.rargs[:] = []
parser.largs[:] = []
parser.add_option('', '--load-prev',
dest='load_prev', action='store', metavar='ID',
help="Instead of running a script, load a previous report")
parser.add_option('-m', '',
dest='module_name', action='callback', callback=dash_m_callback,
type="str",
help="run library module as a script, like 'python -m module'")
parser.add_option('-o', '--outfile',
dest="outfile", action='store',
help="save to <outfile>", default=None)
parser.add_option('-r', '--renderer',
dest='renderer', action='store', type='string',
help=("how the report should be rendered. One of: 'text', 'html', 'json', or python "
"import path to a renderer class"),
default='text')
parser.add_option('', '--html',
dest="output_html", action='store_true',
help=optparse.SUPPRESS_HELP, default=False) # deprecated shortcut for --renderer=html
parser.add_option('-t', '--timeline',
dest='timeline', action='store_true',
help="render as a timeline - preserve ordering and don't condense repeated calls")
parser.add_option('', '--hide',
dest='hide_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to hide. Defaults to "
"'*{sep}lib{sep}*'.").format(sep=os.sep),
default='*{sep}lib{sep}*'.format(sep=os.sep))
parser.add_option('', '--hide-regex',
dest='hide_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to hide. Useful if --hide doesn't give "
"enough control."))
parser.add_option('', '--show',
dest='show_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to "
"show, regardless of --hide or --hide-regex. For example, use "
"--show '*/<library>/*' to show frames within a library that "
"would otherwise be hidden."))
parser.add_option('', '--show-regex',
dest='show_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to always show. "
"Useful if --show doesn't give enough control."))
parser.add_option('', '--show-all',
dest='show_all', action='store_true',
help="show everything", default=False)
parser.add_option('', '--unicode',
dest='unicode', action='store_true',
help='(text renderer only) force unicode text output')
parser.add_option('', '--no-unicode',
dest='unicode', action='store_false',
help='(text renderer only) force ascii text output')
parser.add_option('', '--color',
dest='color', action='store_true',
help='(text renderer only) force ansi color text output')
parser.add_option('', '--no-color',
dest='color', action='store_false',
help='(text renderer only) force no color text output')
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
options, args = parser.parse_args()
if args == [] and options.module_name is None and options.load_prev is None:
parser.print_help()
sys.exit(2)
if not options.hide_regex:
options.hide_regex = fnmatch.translate(options.hide_fnmatch)
if not options.show_regex and options.show_fnmatch:
options.show_regex = fnmatch.translate(options.show_fnmatch)
if options.show_all:
options.show_regex = r'.*'
if options.load_prev:
session = load_report(options.load_prev)
else:
if options.module_name is not None:
sys.argv[:] = [options.module_name] + options.module_args
code = "run_module(modname, run_name='__main__')"
globs = {
'run_module': runpy.run_module,
'modname': options.module_name
}
else:
sys.argv[:] = args
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
profiler = Profiler()
profiler.start()
try:
exec_(code, globs, None)
except (SystemExit, KeyboardInterrupt):
pass
profiler.stop()
session = profiler.last_session
if options.output_html:
options.renderer = 'html'
output_to_temp_file = (options.renderer == 'html'
and not options.outfile
and file_is_a_tty(sys.stdout))
if options.outfile:
f = codecs.open(options.outfile, 'w', 'utf-8')
should_close_f_after_writing = True
elif not output_to_temp_file:
if PY2:
f = codecs.getwriter('utf-8')(sys.stdout)
else:
f = sys.stdout
should_close_f_after_writing = False
renderer_kwargs = {'processor_options': {
'hide_regex': options.hide_regex,
'show_regex': options.show_regex,
}}
if options.timeline is not None:
renderer_kwargs['timeline'] = options.timeline
if options.renderer == 'text':
unicode_override = options.unicode != None
color_override = options.color != None
unicode = options.unicode if unicode_override else file_supports_unicode(f)
color = options.color if color_override else file_supports_color(f)
renderer_kwargs.update({'unicode': unicode, 'color': color})
renderer_class = get_renderer_class(options.renderer)
renderer = renderer_class(**renderer_kwargs)
# remove this frame from the trace
renderer.processors.append(remove_first_pyinstrument_frame_processor)
if output_to_temp_file:
output_filename = renderer.open_in_browser(session)
print('stdout is a terminal, so saved profile output to %s' % output_filename)
else:
f.write(renderer.render(session))
if should_close_f_after_writing:
f.close()
if options.renderer == 'text':
_, report_identifier = save_report(session)
print('To view this report with different options, run:')
print(' pyinstrument --load-prev %s [options]' % report_identifier)
print('')
def file_supports_color(file_obj):
"""
Returns True if the running system's terminal supports color.
Borrowed from Django
https://github.com/django/django/blob/master/django/core/management/color.py
"""
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or
'ANSICON' in os.environ)
is_a_tty = file_is_a_tty(file_obj)
return (supported_platform and is_a_tty)
def file_supports_unicode(file_obj):
encoding = getattr(file_obj, 'encoding', None)
if not encoding:
return False
codec_info = codecs.lookup(encoding)
return ('utf' in codec_info.name)
def file_is_a_tty(file_obj):
return hasattr(file_obj, 'isatty') and file_obj.isatty()
def get_renderer_class(renderer):
if renderer == 'text':
return renderers.ConsoleRenderer
elif renderer == 'html':
return renderers.HTMLRenderer
elif renderer == 'json':
return renderers.JSONRenderer
else:
return object_with_import_path(renderer)
def report_dir():
report_dir = os.path.join(tempfile.gettempdir(), 'pyinstrument')
if not os.path.exists(report_dir):
os.mkdir(report_dir)
return report_dir
def load_report(identifier=None):
'''
Returns the session referred to by identifier
'''
path = os.path.join(
report_dir(),
identifier + '.pyireport'
)
return ProfilerSession.load(path)
def save_report(session):
'''
Saves the session to a temp file, and returns that path.
Also prunes the number of reports to 10 so there aren't loads building up.
'''
# prune this folder to contain the last 10 sessions
previous_reports = glob.glob(os.path.join(report_dir(), '*.pyireport'))
previous_reports.sort(reverse=True)
while len(previous_reports) > 10:
report_file = previous_reports.pop()
os.remove(report_file)
identifier = time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime(session.start_time))
path = os.path.join(
report_dir(),
identifier + '.pyireport'
)
session.save(path)
return path, identifier
# pylint: disable=W0613
if __name__ == '__main__':
main()
|
joerick/pyinstrument | pyinstrument/session.py | ProfilerSession.root_frame | python | def root_frame(self, trim_stem=True):
'''
Parses the internal frame records and returns a tree of Frame objects
'''
root_frame = None
frame_stack = []
for frame_tuple in self.frame_records:
identifier_stack = frame_tuple[0]
time = frame_tuple[1]
# now we must create a stack of frame objects and assign this time to the leaf
for stack_depth, frame_identifier in enumerate(identifier_stack):
if stack_depth < len(frame_stack):
if frame_identifier != frame_stack[stack_depth].identifier:
# trim any frames after and including this one
del frame_stack[stack_depth:]
if stack_depth >= len(frame_stack):
frame = Frame(frame_identifier)
frame_stack.append(frame)
if stack_depth == 0:
# There should only be one root frame, as far as I know
assert root_frame is None, ASSERTION_MESSAGE
root_frame = frame
else:
parent = frame_stack[stack_depth-1]
parent.add_child(frame)
# trim any extra frames
del frame_stack[stack_depth+1:] # pylint: disable=W0631
# assign the time to the final frame
frame_stack[-1].add_child(SelfTimeFrame(self_time=time))
if root_frame is None:
return None
if trim_stem:
root_frame = self._trim_stem(root_frame)
return root_frame | Parses the internal frame records and returns a tree of Frame objects | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/session.py#L52-L95 | [
"def _trim_stem(self, frame):\n # trim the start of the tree before any branches.\n # we also don't want to trim beyond the call to profiler.start()\n\n start_stack = deque(self.start_call_stack)\n if start_stack.popleft() != frame.identifier:\n # the frame doesn't match where the profiler was st... | class ProfilerSession(object):
def __init__(self, frame_records, start_time, duration, sample_count, start_call_stack,
program, cpu_time=None):
self.frame_records = frame_records
self.start_time = start_time
self.duration = duration
self.sample_count = sample_count
self.start_call_stack = start_call_stack
self.program = program
self.cpu_time = cpu_time
@staticmethod
def load(filename):
with io.open(filename, 'rb' if PY2 else 'r') as f:
return ProfilerSession.from_json(json.load(f))
def save(self, filename):
with io.open(filename, 'wb' if PY2 else 'w') as f:
json.dump(self.to_json(), f)
def to_json(self):
return {
'frame_records': self.frame_records,
'start_time': self.start_time,
'duration': self.duration,
'sample_count': self.sample_count,
'start_call_stack': self.start_call_stack,
'program': self.program,
'cpu_time': self.cpu_time,
}
@staticmethod
def from_json(json_dict):
return ProfilerSession(
frame_records=json_dict['frame_records'],
start_time=json_dict['start_time'],
duration=json_dict['duration'],
sample_count=json_dict['sample_count'],
start_call_stack=json_dict['start_call_stack'],
program=json_dict['program'],
cpu_time=json_dict['cpu_time'],
)
def root_frame(self, trim_stem=True):
'''
Parses the internal frame records and returns a tree of Frame objects
'''
root_frame = None
frame_stack = []
for frame_tuple in self.frame_records:
identifier_stack = frame_tuple[0]
time = frame_tuple[1]
# now we must create a stack of frame objects and assign this time to the leaf
for stack_depth, frame_identifier in enumerate(identifier_stack):
if stack_depth < len(frame_stack):
if frame_identifier != frame_stack[stack_depth].identifier:
# trim any frames after and including this one
del frame_stack[stack_depth:]
if stack_depth >= len(frame_stack):
frame = Frame(frame_identifier)
frame_stack.append(frame)
if stack_depth == 0:
# There should only be one root frame, as far as I know
assert root_frame is None, ASSERTION_MESSAGE
root_frame = frame
else:
parent = frame_stack[stack_depth-1]
parent.add_child(frame)
# trim any extra frames
del frame_stack[stack_depth+1:] # pylint: disable=W0631
# assign the time to the final frame
frame_stack[-1].add_child(SelfTimeFrame(self_time=time))
if root_frame is None:
return None
if trim_stem:
root_frame = self._trim_stem(root_frame)
return root_frame
def _trim_stem(self, frame):
# trim the start of the tree before any branches.
# we also don't want to trim beyond the call to profiler.start()
start_stack = deque(self.start_call_stack)
if start_stack.popleft() != frame.identifier:
# the frame doesn't match where the profiler was started. Don't trim.
return frame
while frame.self_time == 0 and len(frame.children) == 1:
# check child matches the start_call_stack, otherwise stop descending
if len(start_stack) == 0 or frame.children[0].identifier != start_stack.popleft():
break
frame = frame.children[0]
frame.remove_from_parent()
return frame
|
joerick/pyinstrument | pyinstrument/frame.py | BaseFrame.remove_from_parent | python | def remove_from_parent(self):
'''
Removes this frame from its parent, and nulls the parent link
'''
if self.parent:
self.parent._children.remove(self)
self.parent._invalidate_time_caches()
self.parent = None | Removes this frame from its parent, and nulls the parent link | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/frame.py#L11-L18 | null | class BaseFrame(object):
def __init__(self, parent=None, self_time=0):
self.parent = parent
self._self_time = self_time
self.group = None
# pylint: disable=W0212
@property
def proportion_of_parent(self):
if self.parent:
try:
return self.time() / self.parent.time()
except ZeroDivisionError:
return float('nan')
else:
return 1.0
@property
def total_self_time(self):
'''
The total amount of self time in this frame (including self time recorded by SelfTimeFrame
children)
'''
self_time = self.self_time
for child in self.children:
if isinstance(child, SelfTimeFrame):
self_time += child.self_time
return self_time
@property
def self_time(self):
return self._self_time
@self_time.setter
def self_time(self, self_time):
self._self_time = self_time
self._invalidate_time_caches()
# invalidates the cache for the time() function.
# called whenever self_time or _children is modified.
def _invalidate_time_caches(self):
pass
# stylistically I'd rather this was a property, but using @property appears to use twice
# as many stack frames, so I'm forced into using a function since this method is recursive
# down the call tree.
def time(self): raise NotImplementedError()
@property
def function(self): raise NotImplementedError()
@property
def file_path(self): raise NotImplementedError()
@property
def line_no(self): raise NotImplementedError()
@property
def file_path_short(self): raise NotImplementedError()
@property
def is_application_code(self): raise NotImplementedError()
@property
def code_position_short(self): raise NotImplementedError()
@property
def children(self): raise NotImplementedError()
|
joerick/pyinstrument | pyinstrument/frame.py | BaseFrame.total_self_time | python | def total_self_time(self):
'''
The total amount of self time in this frame (including self time recorded by SelfTimeFrame
children)
'''
self_time = self.self_time
for child in self.children:
if isinstance(child, SelfTimeFrame):
self_time += child.self_time
return self_time | The total amount of self time in this frame (including self time recorded by SelfTimeFrame
children) | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/frame.py#L31-L40 | null | class BaseFrame(object):
def __init__(self, parent=None, self_time=0):
self.parent = parent
self._self_time = self_time
self.group = None
# pylint: disable=W0212
def remove_from_parent(self):
'''
Removes this frame from its parent, and nulls the parent link
'''
if self.parent:
self.parent._children.remove(self)
self.parent._invalidate_time_caches()
self.parent = None
@property
def proportion_of_parent(self):
if self.parent:
try:
return self.time() / self.parent.time()
except ZeroDivisionError:
return float('nan')
else:
return 1.0
@property
@property
def self_time(self):
return self._self_time
@self_time.setter
def self_time(self, self_time):
self._self_time = self_time
self._invalidate_time_caches()
# invalidates the cache for the time() function.
# called whenever self_time or _children is modified.
def _invalidate_time_caches(self):
pass
# stylistically I'd rather this was a property, but using @property appears to use twice
# as many stack frames, so I'm forced into using a function since this method is recursive
# down the call tree.
def time(self): raise NotImplementedError()
@property
def function(self): raise NotImplementedError()
@property
def file_path(self): raise NotImplementedError()
@property
def line_no(self): raise NotImplementedError()
@property
def file_path_short(self): raise NotImplementedError()
@property
def is_application_code(self): raise NotImplementedError()
@property
def code_position_short(self): raise NotImplementedError()
@property
def children(self): raise NotImplementedError()
|
joerick/pyinstrument | pyinstrument/frame.py | Frame.add_child | python | def add_child(self, frame, after=None):
'''
Adds a child frame, updating the parent link.
Optionally, insert the frame in a specific position by passing the frame to insert
this one after.
'''
frame.remove_from_parent()
frame.parent = self
if after is None:
self._children.append(frame)
else:
index = self._children.index(after) + 1
self._children.insert(index, frame)
self._invalidate_time_caches() | Adds a child frame, updating the parent link.
Optionally, insert the frame in a specific position by passing the frame to insert
this one after. | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/frame.py#L99-L113 | [
"def _invalidate_time_caches(self):\n self._time = None\n # null all the parent's caches also.\n frame = self\n while frame.parent is not None:\n frame = frame.parent\n frame._time = None\n"
] | class Frame(BaseFrame):
"""
Object that represents a stack frame in the parsed tree
"""
def __init__(self, identifier='', parent=None, children=None, self_time=0):
super(Frame, self).__init__(parent=parent, self_time=self_time)
self.identifier = identifier
self._children = []
self._time = None
if children:
for child in children:
self.add_child(child)
def add_children(self, frames, after=None):
'''
Convenience method to add multiple frames at once.
'''
if after is not None:
# if there's an 'after' parameter, add the frames in reverse so the order is
# preserved.
for frame in reversed(frames):
self.add_child(frame, after=after)
else:
for frame in frames:
self.add_child(frame)
@property
def children(self):
# Return an immutable copy (this property should only be mutated using methods)
# Also, returning a copy avoid problems when mutating while iterating, which happens a lot
# in processors!
return tuple(self._children)
@property
def function(self):
if self.identifier:
return self.identifier.split('\x00')[0]
@property
def file_path(self):
if self.identifier:
return self.identifier.split('\x00')[1]
@property
def line_no(self):
if self.identifier:
return int(self.identifier.split('\x00')[2])
@property
def file_path_short(self):
""" Return the path resolved against the closest entry in sys.path """
if not hasattr(self, '_file_path_short'):
if self.file_path:
result = None
for path in sys.path:
# On Windows, if self.file_path and path are on different drives, relpath
# will result in exception, because it cannot compute a relpath in this case.
# The root cause is that on Windows, there is no root dir like '/' on Linux.
try:
candidate = os.path.relpath(self.file_path, path)
except ValueError:
continue
if not result or (len(candidate.split(os.sep)) < len(result.split(os.sep))):
result = candidate
self._file_path_short = result
else:
self._file_path_short = None
return self._file_path_short
@property
def is_application_code(self):
if self.identifier:
return (('%slib%s' % (os.sep, os.sep)) not in self.file_path
and '<frozen importlib._bootstrap' not in self.file_path)
@property
def code_position_short(self):
if self.identifier:
return '%s:%i' % (self.file_path_short, self.line_no)
def time(self):
if self._time is None:
# can't use a sum(<generator>) expression here sadly, because this method
# recurses down the call tree, and the generator uses an extra stack frame,
# meaning we hit the stack limit when the profiled code is 500 frames deep.
self._time = self.self_time
for child in self.children:
self._time += child.time()
return self._time
# pylint: disable=W0212
def _invalidate_time_caches(self):
self._time = None
# null all the parent's caches also.
frame = self
while frame.parent is not None:
frame = frame.parent
frame._time = None
def __repr__(self):
return 'Frame(identifier=%s, time=%f, len(children)=%d), group=%r' % (
self.identifier, self.time(), len(self.children), self.group
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.