repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
planetlabs/es_fluent
|
es_fluent/builder.py
|
QueryBuilder.sort
|
python
|
def sort(self, field, direction="asc"):
    """
    Adds sort criteria.

    :param field: Name of the field to sort on.
    :param direction: Either ``"asc"`` or ``"desc"`` (defaults to
        ``"asc"``).
    :raises ValueError: If ``field`` is not a string or ``direction`` is
        not a recognized sort direction.
    :return: :class:`~es_fluent.builder.QueryBuilder`
    """
    # `str` replaces the Python 2-only `basestring` check.
    if not isinstance(field, str):
        raise ValueError("Field should be a string")
    if direction not in ("asc", "desc"):
        raise ValueError("Sort direction should be `asc` or `desc`")
    self.sorts.append({field: direction})
    # Return self for fluent chaining, consistent with the other builder
    # methods (and_filter, add_filter, add_field, disable_source, ...).
    return self
|
Adds sort criteria.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/builder.py#L144-L153
| null |
class QueryBuilder(object):
def __init__(self):
self.root_filter = Dict()
self.script_fields = ScriptFields()
self.fields = Fields()
self.sorts = []
self.source = True
self._size = None
@property
def size(self):
"""
Sets current size limit of the ES response, which limits the number of
documents returned. By default this is unset and the number of
documents returned is up to ES.
:return:
The current size limit.
"""
return self._size
@size.setter
def size(self, size):
"""
Sets the size of the ES response.
:param size: The number of documents to limit the response to.
"""
self._size = size
def and_filter(self, filter_or_string, *args, **kwargs):
"""
Convenience method to delegate to the root_filter to generate an
:class:`~es_fluent.filters.core.And` clause.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.root_filter.and_filter(filter_or_string, *args, **kwargs)
return self
def or_filter(self, filter_or_string, *args, **kwargs):
"""
Convenience method to delegate to the root_filter to generate an `or`
clause.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.root_filter.or_filter(filter_or_string, *args, **kwargs)
return self
def add_filter(self, filter_or_string, *args, **kwargs):
"""
Adds a filter to the query builder's filters.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.root_filter.add_filter(filter_or_string, *args, **kwargs)
return self
def add_field(self, field_instance):
"""
Adds a field to the query builder. The default behavior is
to return all fields. Explicitly adding a single field will
result in only that source field being returned.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.fields.add_field(field_instance)
return self
def add_script_field(self, field_instance):
"""
Add a script field to the query. The `field_instance` should be
an instance of `es_fluent.script_fields`.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.script_fields.add_field(field_instance)
return self
def find_filter(self, filter_cls):
"""
Finds an existing filter using a filter class `filter_cls`. If not
found, None is returned.
This method is useful in cases where one wants to modify and extend
an existing clause, a common example might be an
:class:`~es_fluent.filters.core.And` filter. The method only looks in the
query's top-level filter and does not recurse.
:param: ``filter_cls``
The :class:`~es_fluent.filters.Filter` class
to find.
"""
return self.root_filter.find_filter(filter_cls)
def to_query(self):
result = {}
if not self.root_filter.is_empty():
result['filter'] = self.root_filter.to_query()
if not self.script_fields.is_empty():
result['script_fields'] = self.script_fields.to_query()
if not self.fields.to_query():
result['fields'] = self.fields.to_query()
# We don't bother with representing sort as an object.
if len(self.sorts):
result['sort'] = self.sorts
if self._size is not None:
result['size'] = self._size
result['_source'] = self.source
return result
def disable_source(self):
"""
Don't include ``_source`` document in results.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.source = False
return self
def enable_source(self):
"""
Include ``_source`` document in results.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.source = True
def remove_sort(self, field_name):
"""
Clears sorting criteria affecting ``field_name``.
"""
self.sorts = [dict(field=value) for field, value in self.sorts if field
is not field_name]
def sort_reset(self):
"""
Resets sorting criteria.
"""
self.sorts = []
|
planetlabs/es_fluent
|
es_fluent/builder.py
|
QueryBuilder.remove_sort
|
python
|
def remove_sort(self, field_name):
    """
    Clears sorting criteria affecting ``field_name``.

    Each entry in ``self.sorts`` is a single-key ``{field: direction}``
    dict (appended by ``sort()``), so an entry is dropped when its key
    matches ``field_name``.

    :param field_name: The field whose sort criteria should be removed.
    """
    # Bug fix: the original unpacked each dict as a (field, value) pair
    # (which raises at runtime), rebuilt kept entries under the literal
    # key "field", and compared strings with `is not` instead of `!=`.
    self.sorts = [entry for entry in self.sorts if field_name not in entry]
|
Clears sorting criteria affecting ``field_name``.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/builder.py#L155-L160
| null |
class QueryBuilder(object):
def __init__(self):
self.root_filter = Dict()
self.script_fields = ScriptFields()
self.fields = Fields()
self.sorts = []
self.source = True
self._size = None
@property
def size(self):
"""
Sets current size limit of the ES response, which limits the number of
documents returned. By default this is unset and the number of
documents returned is up to ES.
:return:
The current size limit.
"""
return self._size
@size.setter
def size(self, size):
"""
Sets the size of the ES response.
:param size: The number of documents to limit the response to.
"""
self._size = size
def and_filter(self, filter_or_string, *args, **kwargs):
"""
Convenience method to delegate to the root_filter to generate an
:class:`~es_fluent.filters.core.And` clause.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.root_filter.and_filter(filter_or_string, *args, **kwargs)
return self
def or_filter(self, filter_or_string, *args, **kwargs):
"""
Convenience method to delegate to the root_filter to generate an `or`
clause.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.root_filter.or_filter(filter_or_string, *args, **kwargs)
return self
def add_filter(self, filter_or_string, *args, **kwargs):
"""
Adds a filter to the query builder's filters.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.root_filter.add_filter(filter_or_string, *args, **kwargs)
return self
def add_field(self, field_instance):
"""
Adds a field to the query builder. The default behavior is
to return all fields. Explicitly adding a single field will
result in only that source field being returned.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.fields.add_field(field_instance)
return self
def add_script_field(self, field_instance):
"""
Add a script field to the query. The `field_instance` should be
an instance of `es_fluent.script_fields`.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.script_fields.add_field(field_instance)
return self
def find_filter(self, filter_cls):
"""
Finds an existing filter using a filter class `filter_cls`. If not
found, None is returned.
This method is useful in cases where one wants to modify and extend
an existing clause, a common example might be an
:class:`~es_fluent.filters.core.And` filter. The method only looks in the
query's top-level filter and does not recurse.
:param: ``filter_cls``
The :class:`~es_fluent.filters.Filter` class
to find.
"""
return self.root_filter.find_filter(filter_cls)
def to_query(self):
result = {}
if not self.root_filter.is_empty():
result['filter'] = self.root_filter.to_query()
if not self.script_fields.is_empty():
result['script_fields'] = self.script_fields.to_query()
if not self.fields.to_query():
result['fields'] = self.fields.to_query()
# We don't bother with representing sort as an object.
if len(self.sorts):
result['sort'] = self.sorts
if self._size is not None:
result['size'] = self._size
result['_source'] = self.source
return result
def disable_source(self):
"""
Don't include ``_source`` document in results.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.source = False
return self
def enable_source(self):
"""
Include ``_source`` document in results.
:return: :class:`~es_fluent.builder.QueryBuilder`
"""
self.source = True
def sort(self, field, direction="asc"):
"""
Adds sort criteria.
"""
if not isinstance(field, basestring):
raise ValueError("Field should be a string")
if direction not in ["asc", "desc"]:
raise ValueError("Sort direction should be `asc` or `desc`")
self.sorts.append({field: direction})
def sort_reset(self):
"""
Resets sorting criteria.
"""
self.sorts = []
|
planetlabs/es_fluent
|
es_fluent/fields/__init__.py
|
Fields.add_field
|
python
|
def add_field(self, field_instance_or_string):
    """
    Appends a field, can be a :class:`~es_fluent.fields.Field` or string.

    :param field_instance_or_string: A
        :class:`~es_fluent.fields.Field` instance, or a field name that
        will be wrapped in one.
    :raises ValueError: If the argument is neither a string nor a
        :class:`~es_fluent.fields.Field`.
    :return: :class:`~es_fluent.fields.Fields`
    """
    # `str` replaces the Python 2-only `basestring` check.
    if isinstance(field_instance_or_string, str):
        field_instance = Field(field_instance_or_string)
    elif isinstance(field_instance_or_string, Field):
        # Bug fix: the original assigned in the wrong direction here,
        # leaving `field_instance` undefined (NameError on append).
        field_instance = field_instance_or_string
    else:
        raise ValueError('Expected a string or Field instance')
    self.fields.append(field_instance)
    return self
|
Appends a field, can be a :class:`~es_fluent.fields.Field` or string.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/fields/__init__.py#L14-L27
| null |
class Fields(object):
"""
Represents a collection of fields to be requested at query time.
"""
def __init__(self):
self.fields = []
def to_query(self):
"""
Serializes this into a json-serializable dictionary suitable for use
with the elasticsearch api.
"""
return [field.name for field in self.fields]
def is_empty(self):
"""
Returns ``False`` if no fields have been added, ``True`` otherwise.
"""
return len(self.fields) == 0
|
planetlabs/es_fluent
|
es_fluent/script_fields/__init__.py
|
ScriptFields.add_field
|
python
|
def add_field(self, field_instance):
    """
    Appends a field.

    :param field_instance: A ``BaseScriptField`` instance.
    :raises ValueError: If ``field_instance`` is not a
        ``BaseScriptField``.
    :return: :class:`~es_fluent.script_fields.ScriptFields`
    """
    # Guard clause replaces the original's no-op `x = x` branch; the
    # error message now describes what this method actually accepts (the
    # old "basetring or Field" text was copied from Fields.add_field).
    if not isinstance(field_instance, BaseScriptField):
        raise ValueError('Expected a BaseScriptField instance')
    self.fields.append(field_instance)
    return self
|
Appends a field.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/script_fields/__init__.py#L8-L19
| null |
class ScriptFields(object):
"""
Represents a collection of requested script fields.
"""
def __init__(self):
self.fields = []
def is_empty(self):
"""
Returns True if there are no script fields, False otherwise.
"""
return len(self.fields) == 0
def to_query(self):
"""
Returns a json-serializable representation.
"""
query = {}
for field_instance in self.fields:
query.update(field_instance.to_query())
return query
|
planetlabs/es_fluent
|
es_fluent/script_fields/__init__.py
|
ScriptFields.to_query
|
python
|
def to_query(self):
    """
    Returns a json-serializable representation.

    Merges the per-field dictionaries produced by each script field's
    ``to_query()`` into a single mapping keyed by field name.
    """
    merged = {}
    for script_field in self.fields:
        for key, value in script_field.to_query().items():
            merged[key] = value
    return merged
|
Returns a json-serializable representation.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/script_fields/__init__.py#L27-L36
| null |
class ScriptFields(object):
"""
Represents a collection of requested script fields.
"""
def __init__(self):
self.fields = []
def add_field(self, field_instance):
"""
Appends a field.
"""
if isinstance(field_instance, BaseScriptField):
field_instance = field_instance
else:
raise ValueError('Expected a basetring or Field instance')
self.fields.append(field_instance)
return self
def is_empty(self):
"""
Returns True if there are no script fields, False otherwise.
"""
return len(self.fields) == 0
|
planetlabs/es_fluent
|
es_fluent/script_fields/__init__.py
|
ScriptField.to_query
|
python
|
def to_query(self):
    """
    Returns a json-serializable representation.

    Maps this field's name to its script definition (language, script
    body, and params), as expected by the elasticsearch
    ``script_fields`` request body.
    """
    definition = {
        'lang': self.lang,
        'script': self.script,
        'params': self.script_params,
    }
    return {self.name: definition}
|
Returns a json-serializable representation.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/script_fields/__init__.py#L63-L73
| null |
class ScriptField(BaseScriptField):
"""
Represents a script field.
"""
def __init__(self, name, script, lang='groovy', **kwargs):
"""
:param string name: The resulting name of the field.
:param dict script: The script for the script field.
:param string lang: The language of the script.
:param dict kwargs: Additional keyword arguments become the params for
the script.
"""
self.name = name
self.script = script
self.lang = lang
self.script_params = kwargs
|
planetlabs/es_fluent
|
es_fluent/filters/__init__.py
|
register_filter
|
python
|
def register_filter(filter_cls):
    """
    Adds the ``filter_cls`` to our registry.

    Classes whose ``name`` is ``None`` (e.g. abstract bases) are
    skipped; registering the same name twice is a programming error.
    """
    name = filter_cls.name
    if name is None:
        return
    if name in FILTER_REGISTRY:
        raise RuntimeError(
            "Filter class already registered: {}".format(name))
    FILTER_REGISTRY[name] = filter_cls
|
Adds the ``filter_cls`` to our registry.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/filters/__init__.py#L22-L32
| null |
# A dictionary of filter classes, keyed by their name attributes.
FILTER_REGISTRY = {}
class FilterRegistry(type):
"""
Metaclass used to automatically register new filter classes in our filter
registry. Enables shorthand filter notation.
>>> from es_fluent.builder import QueryBuilder
>>> query_builder = QueryBuilder()
>>> query_builder.add_filter('missing', 'boop').to_query()['filter']
{'missing': {'field': 'field_name'}}
"""
def __new__(cls, clsname, bases, attrs):
newclass = super(FilterRegistry, cls).__new__(
cls, clsname, bases, attrs)
register_filter(newclass)
return newclass
def build_filter(filter_or_string, *args, **kwargs):
"""
Overloaded filter construction. If ``filter_or_string`` is a string
we look up it's corresponding class in the filter registry and return it.
Otherwise, assume ``filter_or_string`` is an instance of a filter.
:return: :class:`~es_fluent.filters.Filter`
"""
if isinstance(filter_or_string, basestring):
# Names that start with `~` indicate a negated filter.
if filter_or_string.startswith('~'):
filter_name = filter_or_string[1:]
return ~FILTER_REGISTRY[filter_name](*args, **kwargs)
else:
filter_name = filter_or_string
return FILTER_REGISTRY[filter_name](*args, **kwargs)
else:
return filter_or_string
class Filter(object):
"""
The base filter. Subclasses of this Filter will automatically register
themselves on import.
"""
#: The shorthand name of the filter.
name = None
#: Auto-register any Filter subclass with our registry.
__metaclass__ = FilterRegistry
def __invert__(self):
"""
Returns this filter wrapped in a :class:`es_fluent.filters.Not` filter.
:
"""
not_filter = Not()
not_filter.add_filter(self)
return not_filter
def to_query(self):
"""
Serializes this ``Filter`` and any descendants into a json-serializable
dictionary suitable for use with the elasticsearch api.
"""
raise NotImplementedError()
from .core import (
Age,
And,
Custom,
Dict,
Exists,
Generic,
Missing,
Not,
Or,
Range,
RegExp,
Script,
ScriptID,
Term,
Terminal,
Terms,
)
from .geometry import (
GeoJSON,
IndexedShape,
)
|
planetlabs/es_fluent
|
es_fluent/filters/__init__.py
|
build_filter
|
python
|
def build_filter(filter_or_string, *args, **kwargs):
    """
    Overloaded filter construction. If ``filter_or_string`` is a string,
    its corresponding class is looked up in the filter registry and
    instantiated with ``*args``/``**kwargs``. Otherwise
    ``filter_or_string`` is assumed to already be a filter instance and
    is returned as-is.

    :return: :class:`~es_fluent.filters.Filter`
    """
    if not isinstance(filter_or_string, basestring):
        # Already a filter instance; hand it back untouched.
        return filter_or_string
    # A leading `~` requests a negated filter.
    negated = filter_or_string.startswith('~')
    name = filter_or_string[1:] if negated else filter_or_string
    built = FILTER_REGISTRY[name](*args, **kwargs)
    return ~built if negated else built
|
Overloaded filter construction. If ``filter_or_string`` is a string
we look up it's corresponding class in the filter registry and return it.
Otherwise, assume ``filter_or_string`` is an instance of a filter.
:return: :class:`~es_fluent.filters.Filter`
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/filters/__init__.py#L35-L52
| null |
# A dictionary of filter classes, keyed by their name attributes.
FILTER_REGISTRY = {}
class FilterRegistry(type):
"""
Metaclass used to automatically register new filter classes in our filter
registry. Enables shorthand filter notation.
>>> from es_fluent.builder import QueryBuilder
>>> query_builder = QueryBuilder()
>>> query_builder.add_filter('missing', 'boop').to_query()['filter']
{'missing': {'field': 'field_name'}}
"""
def __new__(cls, clsname, bases, attrs):
newclass = super(FilterRegistry, cls).__new__(
cls, clsname, bases, attrs)
register_filter(newclass)
return newclass
def register_filter(filter_cls):
"""
Adds the ``filter_cls`` to our registry.
"""
if filter_cls.name is None:
return
elif filter_cls.name in FILTER_REGISTRY:
raise RuntimeError(
"Filter class already registered: {}".format(filter_cls.name))
else:
FILTER_REGISTRY[filter_cls.name] = filter_cls
class Filter(object):
"""
The base filter. Subclasses of this Filter will automatically register
themselves on import.
"""
#: The shorthand name of the filter.
name = None
#: Auto-register any Filter subclass with our registry.
__metaclass__ = FilterRegistry
def __invert__(self):
"""
Returns this filter wrapped in a :class:`es_fluent.filters.Not` filter.
:
"""
not_filter = Not()
not_filter.add_filter(self)
return not_filter
def to_query(self):
"""
Serializes this ``Filter`` and any descendants into a json-serializable
dictionary suitable for use with the elasticsearch api.
"""
raise NotImplementedError()
from .core import (
Age,
And,
Custom,
Dict,
Exists,
Generic,
Missing,
Not,
Or,
Range,
RegExp,
Script,
ScriptID,
Term,
Terminal,
Terms,
)
from .geometry import (
GeoJSON,
IndexedShape,
)
|
eyeseast/python-tablefu
|
table_fu/__init__.py
|
TableFu.sort
|
python
|
def sort(self, column_name=None, reverse=False):
    """
    Sort rows in this table, preserving a record of how that
    sorting is done in TableFu.options['sorted_by']

    :param column_name: Column to sort on; when omitted, falls back to
        the column recorded in ``options['sorted_by']``.
    :param reverse: Sort descending when ``True``.
    :raises ValueError: If the resolved name isn't a column.
    """
    # `in` and `list(...)` replace the Python 2-only `has_key()` and
    # `.keys()[0]`; they behave identically on Python 2 as well.
    if not column_name and 'sorted_by' in self.options:
        column_name = list(self.options['sorted_by'])[0]
    if column_name not in self.default_columns:
        raise ValueError("%s isn't a column in this table" % column_name)
    index = self.default_columns.index(column_name)
    self.table.sort(key=lambda row: row[index], reverse=reverse)
    # Record how the table is currently sorted so it can be re-applied.
    self.options['sorted_by'] = {column_name: {'reverse': reverse}}
|
Sort rows in this table, preserving a record of how that
sorting is done in TableFu.options['sorted_by']
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/__init__.py#L135-L146
| null |
class TableFu(object):
"""
A table, to be manipulated like a spreadsheet.
TableFu reads in an open CSV file, parsing it
into a table property, Row and Datum objects.
Usage:
# test.csv
Author,Best Book,Number of Pages,Style
Samuel Beckett,Malone Muert,120,Modernism
James Joyce,Ulysses,644,Modernism
Nicholson Baker,Mezannine,150,Minimalism
Vladimir Sorokin,The Queue,263,Satire
>>> spreadsheet = TableFu(open('../tests/test.csv'))
>>> len(spreadsheet.rows)
4
>>> spreadsheet.columns
['Author', 'Best Book', 'Number of Pages', 'Style']
>>> spreadsheet.columns = ['Style', 'Author']
>>> spreadsheet.columns
['Style', 'Author']
"""
def __init__(self, table, **options):
"""
Takes a table argument and optional keyword arguments.
The 'table' argument should be a two-dimensional array,
either a list or tuple, or an open file that can be
parsed by Python's csv module (using csv.reader)
"""
if hasattr(table, 'next'): # for file-like objects
csv_options = {}
if 'dialect' in options:
csv_options['dialect'] = options.pop('dialect')
reader = csv.reader(table, **csv_options)
self.table = [row for row in reader]
else:
self.table = table
self.default_columns = self.table.pop(0)
self._columns = options.get('columns', [])
self.deleted_rows = []
self.faceted_on = None
self.totals = {}
self.formatting = options.get('formatting', {})
self.style = options.get('style', {})
self.options = options
if options.has_key('sorted_by'):
col = options['sorted_by'].keys()[0]
self.sort(column_name=col,
reverse=options['sorted_by'][col].get('reverse', False))
def __getitem__(self, row_num):
"""
Return one row in the table
"""
return Row(self.table[row_num], row_num, self)
def __iter__(self):
return iter(self.rows)
def __len__(self):
return len(list(self.table))
def add_rows(self, *rows):
for row in rows:
self.table.append(row)
def count(self):
return len(list(self))
@property
def rows(self):
return (Row(row, i, self) for i, row in enumerate(self.table))
@property
def headers(self):
if self._columns:
col_set = self._columns
else:
col_set = self.default_columns
return [Header(col, i, self) for i, col in enumerate(col_set)]
def _get_columns(self):
if self._columns:
return self._columns
return self.default_columns
def _set_columns(self, columns):
self._columns = self.options['columns'] = list(columns)
columns = property(_get_columns, _set_columns)
def delete_row(self, row_num):
self.deleted_rows.append(self.table.rows.pop(row_num))
def transform(self, column_name, func):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
if not callable(func):
raise TypeError("%s isn't callable" % func)
index = self.default_columns.index(column_name)
for row in self.table:
val = row[index]
val = func(val)
row[index] = val
def values(self, column_name, unique=False):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
result = [row[index] for row in self.table]
if unique:
return set(result)
return result
def total(self, column_name):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
try:
values = (float(v) for v in self.values(column_name))
except ValueError:
raise ValueError('Column %s contains non-numeric values' % column_name)
return sum(values)
def filter(self, func=None, **query):
"""
Tables can be filtered in one of two ways:
- Simple keyword arguments return rows where values match *exactly*
- Pass in a function and return rows where that function evaluates to True
In either case, a new TableFu instance is returned
"""
if callable(func):
result = filter(func, self)
result.insert(0, self.default_columns)
return TableFu(result, **self.options)
else:
result = self
for column, value in query.items():
result = result.filter(lambda r: r[column] == value)
return result
def facet_by(self, column):
"""
Faceting creates new TableFu instances with rows matching
each possible value.
"""
faceted_spreadsheets = {}
for row in self.rows:
if row[column]:
col = row[column].value
if faceted_spreadsheets.has_key(col):
faceted_spreadsheets[col].append(row.cells)
else:
faceted_spreadsheets[col] = []
faceted_spreadsheets[col].append(row.cells)
# create a new TableFu instance for each facet
tables = []
for k, v in faceted_spreadsheets.items():
v.insert(0, self.default_columns)
table = TableFu(v)
table.faceted_on = k
table.formatting = self.formatting
table.options = self.options
tables.append(table)
tables.sort(key=lambda t: t.faceted_on)
return tables
def transpose(self):
table = copy(self.table)
table.insert(0, self.default_columns)
result = [
[row[i] for row in table]
for i in xrange(len(table[0]))
]
options = self.options.copy()
options.pop('columns', None)
return TableFu(result, **self.options)
def map(self, func, *columns):
"""
Map a function to rows, or to given columns
"""
if not columns:
return map(func, self.rows)
else:
values = (self.values(column) for column in columns)
result = [map(func, v) for v in values]
if len(columns) == 1:
return result[0]
else:
return result
# export methods
def html(self):
table = '<table>\n%s\n%s\n</table>'
thead = '<thead>\n<tr>%s</tr>\n</thead>' % ''.join(['<th>%s</th>' % col for col in self.columns])
tbody = '<tbody>\n%s\n</tbody>' % '\n'.join([row.as_tr() for row in self.rows])
return table % (thead, tbody)
def csv(self, **kwargs):
"""
Export this table as a CSV
"""
out = StringIO()
writer = csv.DictWriter(out, self.columns, **kwargs)
writer.writerow(dict(zip(self.columns, self.columns)))
writer.writerows(dict(row.items()) for row in self.rows)
return out
def dict(self):
return (dict(row.items()) for row in self.rows)
def json(self, **kwargs):
if not has_json:
raise ValueError("Couldn't find a JSON library")
return json.dumps(list(self.dict()), **kwargs)
# static methods for loading data
@staticmethod
def from_file(fn, **options):
"""
Creates a new TableFu instance from a file or path
"""
if hasattr(fn, 'read'):
return TableFu(fn, **options)
with open(fn) as f:
return TableFu(f, **options)
@staticmethod
def from_url(url, **options):
"""
Downloads the contents of a given URL and loads it
into a new TableFu instance
"""
resp = urllib2.urlopen(url)
return TableFu(resp, **options)
|
eyeseast/python-tablefu
|
table_fu/__init__.py
|
TableFu.filter
|
python
|
def filter(self, func=None, **query):
    """
    Tables can be filtered in one of two ways:
    - Simple keyword arguments return rows where values match *exactly*
    - Pass in a function and return rows where that function evaluates to True
    In either case, a new TableFu instance is returned
    """
    if callable(func):
        # NOTE: the bare name `filter` below resolves to the builtin,
        # not this method (the method name only lives on the class).
        # Materialize with list() so insert() works on Python 3 too,
        # where builtin filter() returns a lazy iterator, not a list.
        result = list(filter(func, self))
        result.insert(0, self.default_columns)
        return TableFu(result, **self.options)
    else:
        result = self
        for column, value in query.items():
            # Bind column/value as defaults so each lambda captures the
            # current pair (defensive against late-binding closures).
            result = result.filter(
                lambda r, c=column, v=value: r[c] == v)
        return result
|
Tables can be filtered in one of two ways:
- Simple keyword arguments return rows where values match *exactly*
- Pass in a function and return rows where that function evaluates to True
In either case, a new TableFu instance is returned
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/__init__.py#L181-L197
|
[
"def filter(self, func=None, **query):\n \"\"\"\n Tables can be filtered in one of two ways:\n - Simple keyword arguments return rows where values match *exactly*\n - Pass in a function and return rows where that function evaluates to True\n\n In either case, a new TableFu instance is returned\n \"\"\"\n if callable(func):\n result = filter(func, self)\n result.insert(0, self.default_columns)\n return TableFu(result, **self.options)\n else:\n result = self\n for column, value in query.items():\n result = result.filter(lambda r: r[column] == value)\n return result\n"
] |
class TableFu(object):
"""
A table, to be manipulated like a spreadsheet.
TableFu reads in an open CSV file, parsing it
into a table property, Row and Datum objects.
Usage:
# test.csv
Author,Best Book,Number of Pages,Style
Samuel Beckett,Malone Muert,120,Modernism
James Joyce,Ulysses,644,Modernism
Nicholson Baker,Mezannine,150,Minimalism
Vladimir Sorokin,The Queue,263,Satire
>>> spreadsheet = TableFu(open('../tests/test.csv'))
>>> len(spreadsheet.rows)
4
>>> spreadsheet.columns
['Author', 'Best Book', 'Number of Pages', 'Style']
>>> spreadsheet.columns = ['Style', 'Author']
>>> spreadsheet.columns
['Style', 'Author']
"""
def __init__(self, table, **options):
"""
Takes a table argument and optional keyword arguments.
The 'table' argument should be a two-dimensional array,
either a list or tuple, or an open file that can be
parsed by Python's csv module (using csv.reader)
"""
if hasattr(table, 'next'): # for file-like objects
csv_options = {}
if 'dialect' in options:
csv_options['dialect'] = options.pop('dialect')
reader = csv.reader(table, **csv_options)
self.table = [row for row in reader]
else:
self.table = table
self.default_columns = self.table.pop(0)
self._columns = options.get('columns', [])
self.deleted_rows = []
self.faceted_on = None
self.totals = {}
self.formatting = options.get('formatting', {})
self.style = options.get('style', {})
self.options = options
if options.has_key('sorted_by'):
col = options['sorted_by'].keys()[0]
self.sort(column_name=col,
reverse=options['sorted_by'][col].get('reverse', False))
def __getitem__(self, row_num):
"""
Return one row in the table
"""
return Row(self.table[row_num], row_num, self)
def __iter__(self):
return iter(self.rows)
def __len__(self):
return len(list(self.table))
def add_rows(self, *rows):
for row in rows:
self.table.append(row)
def count(self):
return len(list(self))
@property
def rows(self):
return (Row(row, i, self) for i, row in enumerate(self.table))
@property
def headers(self):
if self._columns:
col_set = self._columns
else:
col_set = self.default_columns
return [Header(col, i, self) for i, col in enumerate(col_set)]
def _get_columns(self):
if self._columns:
return self._columns
return self.default_columns
def _set_columns(self, columns):
self._columns = self.options['columns'] = list(columns)
columns = property(_get_columns, _set_columns)
def delete_row(self, row_num):
self.deleted_rows.append(self.table.rows.pop(row_num))
def sort(self, column_name=None, reverse=False):
"""
Sort rows in this table, preserving a record of how that
sorting is done in TableFu.options['sorted_by']
"""
if not column_name and self.options.has_key('sorted_by'):
column_name = self.options['sorted_by'].keys()[0]
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
self.table.sort(key = lambda row: row[index], reverse=reverse)
self.options['sorted_by'] = {column_name: {'reverse': reverse}}
def transform(self, column_name, func):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
if not callable(func):
raise TypeError("%s isn't callable" % func)
index = self.default_columns.index(column_name)
for row in self.table:
val = row[index]
val = func(val)
row[index] = val
def values(self, column_name, unique=False):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
result = [row[index] for row in self.table]
if unique:
return set(result)
return result
def total(self, column_name):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
try:
values = (float(v) for v in self.values(column_name))
except ValueError:
raise ValueError('Column %s contains non-numeric values' % column_name)
return sum(values)
def filter(self, func=None, **query):
"""
Tables can be filtered in one of two ways:
- Simple keyword arguments return rows where values match *exactly*
- Pass in a function and return rows where that function evaluates to True
In either case, a new TableFu instance is returned
"""
if callable(func):
result = filter(func, self)
result.insert(0, self.default_columns)
return TableFu(result, **self.options)
else:
result = self
for column, value in query.items():
result = result.filter(lambda r: r[column] == value)
return result
def facet_by(self, column):
"""
Faceting creates new TableFu instances with rows matching
each possible value.
"""
faceted_spreadsheets = {}
for row in self.rows:
if row[column]:
col = row[column].value
if faceted_spreadsheets.has_key(col):
faceted_spreadsheets[col].append(row.cells)
else:
faceted_spreadsheets[col] = []
faceted_spreadsheets[col].append(row.cells)
# create a new TableFu instance for each facet
tables = []
for k, v in faceted_spreadsheets.items():
v.insert(0, self.default_columns)
table = TableFu(v)
table.faceted_on = k
table.formatting = self.formatting
table.options = self.options
tables.append(table)
tables.sort(key=lambda t: t.faceted_on)
return tables
def transpose(self):
table = copy(self.table)
table.insert(0, self.default_columns)
result = [
[row[i] for row in table]
for i in xrange(len(table[0]))
]
options = self.options.copy()
options.pop('columns', None)
return TableFu(result, **self.options)
def map(self, func, *columns):
"""
Map a function to rows, or to given columns
"""
if not columns:
return map(func, self.rows)
else:
values = (self.values(column) for column in columns)
result = [map(func, v) for v in values]
if len(columns) == 1:
return result[0]
else:
return result
# export methods
def html(self):
table = '<table>\n%s\n%s\n</table>'
thead = '<thead>\n<tr>%s</tr>\n</thead>' % ''.join(['<th>%s</th>' % col for col in self.columns])
tbody = '<tbody>\n%s\n</tbody>' % '\n'.join([row.as_tr() for row in self.rows])
return table % (thead, tbody)
def csv(self, **kwargs):
"""
Export this table as a CSV
"""
out = StringIO()
writer = csv.DictWriter(out, self.columns, **kwargs)
writer.writerow(dict(zip(self.columns, self.columns)))
writer.writerows(dict(row.items()) for row in self.rows)
return out
def dict(self):
return (dict(row.items()) for row in self.rows)
def json(self, **kwargs):
if not has_json:
raise ValueError("Couldn't find a JSON library")
return json.dumps(list(self.dict()), **kwargs)
# static methods for loading data
@staticmethod
def from_file(fn, **options):
"""
Creates a new TableFu instance from a file or path
"""
if hasattr(fn, 'read'):
return TableFu(fn, **options)
with open(fn) as f:
return TableFu(f, **options)
@staticmethod
def from_url(url, **options):
"""
Downloads the contents of a given URL and loads it
into a new TableFu instance
"""
resp = urllib2.urlopen(url)
return TableFu(resp, **options)
|
eyeseast/python-tablefu
|
table_fu/__init__.py
|
TableFu.facet_by
|
python
|
def facet_by(self, column):
faceted_spreadsheets = {}
for row in self.rows:
if row[column]:
col = row[column].value
if faceted_spreadsheets.has_key(col):
faceted_spreadsheets[col].append(row.cells)
else:
faceted_spreadsheets[col] = []
faceted_spreadsheets[col].append(row.cells)
# create a new TableFu instance for each facet
tables = []
for k, v in faceted_spreadsheets.items():
v.insert(0, self.default_columns)
table = TableFu(v)
table.faceted_on = k
table.formatting = self.formatting
table.options = self.options
tables.append(table)
tables.sort(key=lambda t: t.faceted_on)
return tables
|
Faceting creates new TableFu instances with rows matching
each possible value.
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/__init__.py#L199-L225
| null |
class TableFu(object):
"""
A table, to be manipulated like a spreadsheet.
TableFu reads in an open CSV file, parsing it
into a table property, Row and Datum objects.
Usage:
# test.csv
Author,Best Book,Number of Pages,Style
Samuel Beckett,Malone Muert,120,Modernism
James Joyce,Ulysses,644,Modernism
Nicholson Baker,Mezannine,150,Minimalism
Vladimir Sorokin,The Queue,263,Satire
>>> spreadsheet = TableFu(open('../tests/test.csv'))
>>> len(spreadsheet.rows)
4
>>> spreadsheet.columns
['Author', 'Best Book', 'Number of Pages', 'Style']
>>> spreadsheet.columns = ['Style', 'Author']
>>> spreadsheet.columms
['Style', 'Author']
"""
def __init__(self, table, **options):
"""
Takes a table argument and optional keyword arguments.
The 'table' argument should be a two-dimensional array,
either a list or tuple, or an open file that can be
parsed by Python's csv module (using csv.reader)
"""
if hasattr(table, 'next'): # for file-like objects
csv_options = {}
if 'dialect' in options:
csv_options['dialect'] = options.pop('dialect')
reader = csv.reader(table, **csv_options)
self.table = [row for row in reader]
else:
self.table = table
self.default_columns = self.table.pop(0)
self._columns = options.get('columns', [])
self.deleted_rows = []
self.faceted_on = None
self.totals = {}
self.formatting = options.get('formatting', {})
self.style = options.get('style', {})
self.options = options
if options.has_key('sorted_by'):
col = options['sorted_by'].keys()[0]
self.sort(column_name=col,
reverse=options['sorted_by'][col].get('reverse', False))
def __getitem__(self, row_num):
"""
Return one row in the table
"""
return Row(self.table[row_num], row_num, self)
def __iter__(self):
return iter(self.rows)
def __len__(self):
return len(list(self.table))
def add_rows(self, *rows):
for row in rows:
self.table.append(row)
def count(self):
return len(list(self))
@property
def rows(self):
return (Row(row, i, self) for i, row in enumerate(self.table))
@property
def headers(self):
if self._columns:
col_set = self._columns
else:
col_set = self.default_columns
return [Header(col, i, self) for i, col in enumerate(col_set)]
def _get_columns(self):
if self._columns:
return self._columns
return self.default_columns
def _set_columns(self, columns):
self._columns = self.options['columns'] = list(columns)
columns = property(_get_columns, _set_columns)
def delete_row(self, row_num):
self.deleted_rows.append(self.table.rows.pop(row_num))
def sort(self, column_name=None, reverse=False):
"""
Sort rows in this table, preserving a record of how that
sorting is done in TableFu.options['sorted_by']
"""
if not column_name and self.options.has_key('sorted_by'):
column_name = self.options['sorted_by'].keys()[0]
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
self.table.sort(key = lambda row: row[index], reverse=reverse)
self.options['sorted_by'] = {column_name: {'reverse': reverse}}
def transform(self, column_name, func):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
if not callable(func):
raise TypeError("%s isn't callable" % func)
index = self.default_columns.index(column_name)
for row in self.table:
val = row[index]
val = func(val)
row[index] = val
def values(self, column_name, unique=False):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
result = [row[index] for row in self.table]
if unique:
return set(result)
return result
def total(self, column_name):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
try:
values = (float(v) for v in self.values(column_name))
except ValueError:
raise ValueError('Column %s contains non-numeric values' % column_name)
return sum(values)
def filter(self, func=None, **query):
"""
Tables can be filtered in one of two ways:
- Simple keyword arguments return rows where values match *exactly*
- Pass in a function and return rows where that function evaluates to True
In either case, a new TableFu instance is returned
"""
if callable(func):
result = filter(func, self)
result.insert(0, self.default_columns)
return TableFu(result, **self.options)
else:
result = self
for column, value in query.items():
result = result.filter(lambda r: r[column] == value)
return result
def transpose(self):
table = copy(self.table)
table.insert(0, self.default_columns)
result = [
[row[i] for row in table]
for i in xrange(len(table[0]))
]
options = self.options.copy()
options.pop('columns', None)
return TableFu(result, **self.options)
def map(self, func, *columns):
"""
Map a function to rows, or to given columns
"""
if not columns:
return map(func, self.rows)
else:
values = (self.values(column) for column in columns)
result = [map(func, v) for v in values]
if len(columns) == 1:
return result[0]
else:
return result
# export methods
def html(self):
table = '<table>\n%s\n%s\n</table>'
thead = '<thead>\n<tr>%s</tr>\n</thead>' % ''.join(['<th>%s</th>' % col for col in self.columns])
tbody = '<tbody>\n%s\n</tbody>' % '\n'.join([row.as_tr() for row in self.rows])
return table % (thead, tbody)
def csv(self, **kwargs):
"""
Export this table as a CSV
"""
out = StringIO()
writer = csv.DictWriter(out, self.columns, **kwargs)
writer.writerow(dict(zip(self.columns, self.columns)))
writer.writerows(dict(row.items()) for row in self.rows)
return out
def dict(self):
return (dict(row.items()) for row in self.rows)
def json(self, **kwargs):
if not has_json:
raise ValueError("Couldn't find a JSON library")
return json.dumps(list(self.dict()), **kwargs)
# static methods for loading data
@staticmethod
def from_file(fn, **options):
"""
Creates a new TableFu instance from a file or path
"""
if hasattr(fn, 'read'):
return TableFu(fn, **options)
with open(fn) as f:
return TableFu(f, **options)
@staticmethod
def from_url(url, **options):
"""
Downloads the contents of a given URL and loads it
into a new TableFu instance
"""
resp = urllib2.urlopen(url)
return TableFu(resp, **options)
|
eyeseast/python-tablefu
|
table_fu/__init__.py
|
TableFu.map
|
python
|
def map(self, func, *columns):
if not columns:
return map(func, self.rows)
else:
values = (self.values(column) for column in columns)
result = [map(func, v) for v in values]
if len(columns) == 1:
return result[0]
else:
return result
|
Map a function to rows, or to given columns
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/__init__.py#L239-L251
| null |
class TableFu(object):
"""
A table, to be manipulated like a spreadsheet.
TableFu reads in an open CSV file, parsing it
into a table property, Row and Datum objects.
Usage:
# test.csv
Author,Best Book,Number of Pages,Style
Samuel Beckett,Malone Muert,120,Modernism
James Joyce,Ulysses,644,Modernism
Nicholson Baker,Mezannine,150,Minimalism
Vladimir Sorokin,The Queue,263,Satire
>>> spreadsheet = TableFu(open('../tests/test.csv'))
>>> len(spreadsheet.rows)
4
>>> spreadsheet.columns
['Author', 'Best Book', 'Number of Pages', 'Style']
>>> spreadsheet.columns = ['Style', 'Author']
>>> spreadsheet.columms
['Style', 'Author']
"""
def __init__(self, table, **options):
"""
Takes a table argument and optional keyword arguments.
The 'table' argument should be a two-dimensional array,
either a list or tuple, or an open file that can be
parsed by Python's csv module (using csv.reader)
"""
if hasattr(table, 'next'): # for file-like objects
csv_options = {}
if 'dialect' in options:
csv_options['dialect'] = options.pop('dialect')
reader = csv.reader(table, **csv_options)
self.table = [row for row in reader]
else:
self.table = table
self.default_columns = self.table.pop(0)
self._columns = options.get('columns', [])
self.deleted_rows = []
self.faceted_on = None
self.totals = {}
self.formatting = options.get('formatting', {})
self.style = options.get('style', {})
self.options = options
if options.has_key('sorted_by'):
col = options['sorted_by'].keys()[0]
self.sort(column_name=col,
reverse=options['sorted_by'][col].get('reverse', False))
def __getitem__(self, row_num):
"""
Return one row in the table
"""
return Row(self.table[row_num], row_num, self)
def __iter__(self):
return iter(self.rows)
def __len__(self):
return len(list(self.table))
def add_rows(self, *rows):
for row in rows:
self.table.append(row)
def count(self):
return len(list(self))
@property
def rows(self):
return (Row(row, i, self) for i, row in enumerate(self.table))
@property
def headers(self):
if self._columns:
col_set = self._columns
else:
col_set = self.default_columns
return [Header(col, i, self) for i, col in enumerate(col_set)]
def _get_columns(self):
if self._columns:
return self._columns
return self.default_columns
def _set_columns(self, columns):
self._columns = self.options['columns'] = list(columns)
columns = property(_get_columns, _set_columns)
def delete_row(self, row_num):
self.deleted_rows.append(self.table.rows.pop(row_num))
def sort(self, column_name=None, reverse=False):
"""
Sort rows in this table, preserving a record of how that
sorting is done in TableFu.options['sorted_by']
"""
if not column_name and self.options.has_key('sorted_by'):
column_name = self.options['sorted_by'].keys()[0]
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
self.table.sort(key = lambda row: row[index], reverse=reverse)
self.options['sorted_by'] = {column_name: {'reverse': reverse}}
def transform(self, column_name, func):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
if not callable(func):
raise TypeError("%s isn't callable" % func)
index = self.default_columns.index(column_name)
for row in self.table:
val = row[index]
val = func(val)
row[index] = val
def values(self, column_name, unique=False):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
result = [row[index] for row in self.table]
if unique:
return set(result)
return result
def total(self, column_name):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
try:
values = (float(v) for v in self.values(column_name))
except ValueError:
raise ValueError('Column %s contains non-numeric values' % column_name)
return sum(values)
def filter(self, func=None, **query):
"""
Tables can be filtered in one of two ways:
- Simple keyword arguments return rows where values match *exactly*
- Pass in a function and return rows where that function evaluates to True
In either case, a new TableFu instance is returned
"""
if callable(func):
result = filter(func, self)
result.insert(0, self.default_columns)
return TableFu(result, **self.options)
else:
result = self
for column, value in query.items():
result = result.filter(lambda r: r[column] == value)
return result
def facet_by(self, column):
"""
Faceting creates new TableFu instances with rows matching
each possible value.
"""
faceted_spreadsheets = {}
for row in self.rows:
if row[column]:
col = row[column].value
if faceted_spreadsheets.has_key(col):
faceted_spreadsheets[col].append(row.cells)
else:
faceted_spreadsheets[col] = []
faceted_spreadsheets[col].append(row.cells)
# create a new TableFu instance for each facet
tables = []
for k, v in faceted_spreadsheets.items():
v.insert(0, self.default_columns)
table = TableFu(v)
table.faceted_on = k
table.formatting = self.formatting
table.options = self.options
tables.append(table)
tables.sort(key=lambda t: t.faceted_on)
return tables
def transpose(self):
table = copy(self.table)
table.insert(0, self.default_columns)
result = [
[row[i] for row in table]
for i in xrange(len(table[0]))
]
options = self.options.copy()
options.pop('columns', None)
return TableFu(result, **self.options)
# export methods
def html(self):
table = '<table>\n%s\n%s\n</table>'
thead = '<thead>\n<tr>%s</tr>\n</thead>' % ''.join(['<th>%s</th>' % col for col in self.columns])
tbody = '<tbody>\n%s\n</tbody>' % '\n'.join([row.as_tr() for row in self.rows])
return table % (thead, tbody)
def csv(self, **kwargs):
"""
Export this table as a CSV
"""
out = StringIO()
writer = csv.DictWriter(out, self.columns, **kwargs)
writer.writerow(dict(zip(self.columns, self.columns)))
writer.writerows(dict(row.items()) for row in self.rows)
return out
def dict(self):
return (dict(row.items()) for row in self.rows)
def json(self, **kwargs):
if not has_json:
raise ValueError("Couldn't find a JSON library")
return json.dumps(list(self.dict()), **kwargs)
# static methods for loading data
@staticmethod
def from_file(fn, **options):
"""
Creates a new TableFu instance from a file or path
"""
if hasattr(fn, 'read'):
return TableFu(fn, **options)
with open(fn) as f:
return TableFu(f, **options)
@staticmethod
def from_url(url, **options):
"""
Downloads the contents of a given URL and loads it
into a new TableFu instance
"""
resp = urllib2.urlopen(url)
return TableFu(resp, **options)
|
eyeseast/python-tablefu
|
table_fu/__init__.py
|
TableFu.csv
|
python
|
def csv(self, **kwargs):
out = StringIO()
writer = csv.DictWriter(out, self.columns, **kwargs)
writer.writerow(dict(zip(self.columns, self.columns)))
writer.writerows(dict(row.items()) for row in self.rows)
return out
|
Export this table as a CSV
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/__init__.py#L260-L269
| null |
class TableFu(object):
"""
A table, to be manipulated like a spreadsheet.
TableFu reads in an open CSV file, parsing it
into a table property, Row and Datum objects.
Usage:
# test.csv
Author,Best Book,Number of Pages,Style
Samuel Beckett,Malone Muert,120,Modernism
James Joyce,Ulysses,644,Modernism
Nicholson Baker,Mezannine,150,Minimalism
Vladimir Sorokin,The Queue,263,Satire
>>> spreadsheet = TableFu(open('../tests/test.csv'))
>>> len(spreadsheet.rows)
4
>>> spreadsheet.columns
['Author', 'Best Book', 'Number of Pages', 'Style']
>>> spreadsheet.columns = ['Style', 'Author']
>>> spreadsheet.columms
['Style', 'Author']
"""
def __init__(self, table, **options):
"""
Takes a table argument and optional keyword arguments.
The 'table' argument should be a two-dimensional array,
either a list or tuple, or an open file that can be
parsed by Python's csv module (using csv.reader)
"""
if hasattr(table, 'next'): # for file-like objects
csv_options = {}
if 'dialect' in options:
csv_options['dialect'] = options.pop('dialect')
reader = csv.reader(table, **csv_options)
self.table = [row for row in reader]
else:
self.table = table
self.default_columns = self.table.pop(0)
self._columns = options.get('columns', [])
self.deleted_rows = []
self.faceted_on = None
self.totals = {}
self.formatting = options.get('formatting', {})
self.style = options.get('style', {})
self.options = options
if options.has_key('sorted_by'):
col = options['sorted_by'].keys()[0]
self.sort(column_name=col,
reverse=options['sorted_by'][col].get('reverse', False))
def __getitem__(self, row_num):
"""
Return one row in the table
"""
return Row(self.table[row_num], row_num, self)
def __iter__(self):
return iter(self.rows)
def __len__(self):
return len(list(self.table))
def add_rows(self, *rows):
for row in rows:
self.table.append(row)
def count(self):
return len(list(self))
@property
def rows(self):
return (Row(row, i, self) for i, row in enumerate(self.table))
@property
def headers(self):
if self._columns:
col_set = self._columns
else:
col_set = self.default_columns
return [Header(col, i, self) for i, col in enumerate(col_set)]
def _get_columns(self):
if self._columns:
return self._columns
return self.default_columns
def _set_columns(self, columns):
self._columns = self.options['columns'] = list(columns)
columns = property(_get_columns, _set_columns)
def delete_row(self, row_num):
self.deleted_rows.append(self.table.rows.pop(row_num))
def sort(self, column_name=None, reverse=False):
"""
Sort rows in this table, preserving a record of how that
sorting is done in TableFu.options['sorted_by']
"""
if not column_name and self.options.has_key('sorted_by'):
column_name = self.options['sorted_by'].keys()[0]
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
self.table.sort(key = lambda row: row[index], reverse=reverse)
self.options['sorted_by'] = {column_name: {'reverse': reverse}}
def transform(self, column_name, func):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
if not callable(func):
raise TypeError("%s isn't callable" % func)
index = self.default_columns.index(column_name)
for row in self.table:
val = row[index]
val = func(val)
row[index] = val
def values(self, column_name, unique=False):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
result = [row[index] for row in self.table]
if unique:
return set(result)
return result
def total(self, column_name):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
try:
values = (float(v) for v in self.values(column_name))
except ValueError:
raise ValueError('Column %s contains non-numeric values' % column_name)
return sum(values)
def filter(self, func=None, **query):
"""
Tables can be filtered in one of two ways:
- Simple keyword arguments return rows where values match *exactly*
- Pass in a function and return rows where that function evaluates to True
In either case, a new TableFu instance is returned
"""
if callable(func):
result = filter(func, self)
result.insert(0, self.default_columns)
return TableFu(result, **self.options)
else:
result = self
for column, value in query.items():
result = result.filter(lambda r: r[column] == value)
return result
def facet_by(self, column):
"""
Faceting creates new TableFu instances with rows matching
each possible value.
"""
faceted_spreadsheets = {}
for row in self.rows:
if row[column]:
col = row[column].value
if faceted_spreadsheets.has_key(col):
faceted_spreadsheets[col].append(row.cells)
else:
faceted_spreadsheets[col] = []
faceted_spreadsheets[col].append(row.cells)
# create a new TableFu instance for each facet
tables = []
for k, v in faceted_spreadsheets.items():
v.insert(0, self.default_columns)
table = TableFu(v)
table.faceted_on = k
table.formatting = self.formatting
table.options = self.options
tables.append(table)
tables.sort(key=lambda t: t.faceted_on)
return tables
def transpose(self):
table = copy(self.table)
table.insert(0, self.default_columns)
result = [
[row[i] for row in table]
for i in xrange(len(table[0]))
]
options = self.options.copy()
options.pop('columns', None)
return TableFu(result, **self.options)
def map(self, func, *columns):
"""
Map a function to rows, or to given columns
"""
if not columns:
return map(func, self.rows)
else:
values = (self.values(column) for column in columns)
result = [map(func, v) for v in values]
if len(columns) == 1:
return result[0]
else:
return result
# export methods
def html(self):
table = '<table>\n%s\n%s\n</table>'
thead = '<thead>\n<tr>%s</tr>\n</thead>' % ''.join(['<th>%s</th>' % col for col in self.columns])
tbody = '<tbody>\n%s\n</tbody>' % '\n'.join([row.as_tr() for row in self.rows])
return table % (thead, tbody)
def csv(self, **kwargs):
"""
Export this table as a CSV
"""
out = StringIO()
writer = csv.DictWriter(out, self.columns, **kwargs)
writer.writerow(dict(zip(self.columns, self.columns)))
writer.writerows(dict(row.items()) for row in self.rows)
return out
def dict(self):
return (dict(row.items()) for row in self.rows)
def json(self, **kwargs):
if not has_json:
raise ValueError("Couldn't find a JSON library")
return json.dumps(list(self.dict()), **kwargs)
# static methods for loading data
@staticmethod
def from_file(fn, **options):
"""
Creates a new TableFu instance from a file or path
"""
if hasattr(fn, 'read'):
return TableFu(fn, **options)
with open(fn) as f:
return TableFu(f, **options)
@staticmethod
def from_url(url, **options):
"""
Downloads the contents of a given URL and loads it
into a new TableFu instance
"""
resp = urllib2.urlopen(url)
return TableFu(resp, **options)
|
eyeseast/python-tablefu
|
table_fu/__init__.py
|
TableFu.from_file
|
python
|
def from_file(fn, **options):
if hasattr(fn, 'read'):
return TableFu(fn, **options)
with open(fn) as f:
return TableFu(f, **options)
|
Creates a new TableFu instance from a file or path
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/__init__.py#L281-L288
| null |
class TableFu(object):
"""
A table, to be manipulated like a spreadsheet.
TableFu reads in an open CSV file, parsing it
into a table property, Row and Datum objects.
Usage:
# test.csv
Author,Best Book,Number of Pages,Style
Samuel Beckett,Malone Muert,120,Modernism
James Joyce,Ulysses,644,Modernism
Nicholson Baker,Mezannine,150,Minimalism
Vladimir Sorokin,The Queue,263,Satire
>>> spreadsheet = TableFu(open('../tests/test.csv'))
>>> len(spreadsheet.rows)
4
>>> spreadsheet.columns
['Author', 'Best Book', 'Number of Pages', 'Style']
>>> spreadsheet.columns = ['Style', 'Author']
>>> spreadsheet.columms
['Style', 'Author']
"""
def __init__(self, table, **options):
"""
Takes a table argument and optional keyword arguments.
The 'table' argument should be a two-dimensional array,
either a list or tuple, or an open file that can be
parsed by Python's csv module (using csv.reader)
"""
if hasattr(table, 'next'): # for file-like objects
csv_options = {}
if 'dialect' in options:
csv_options['dialect'] = options.pop('dialect')
reader = csv.reader(table, **csv_options)
self.table = [row for row in reader]
else:
self.table = table
self.default_columns = self.table.pop(0)
self._columns = options.get('columns', [])
self.deleted_rows = []
self.faceted_on = None
self.totals = {}
self.formatting = options.get('formatting', {})
self.style = options.get('style', {})
self.options = options
if options.has_key('sorted_by'):
col = options['sorted_by'].keys()[0]
self.sort(column_name=col,
reverse=options['sorted_by'][col].get('reverse', False))
def __getitem__(self, row_num):
"""
Return one row in the table
"""
return Row(self.table[row_num], row_num, self)
def __iter__(self):
return iter(self.rows)
def __len__(self):
return len(list(self.table))
def add_rows(self, *rows):
for row in rows:
self.table.append(row)
def count(self):
return len(list(self))
@property
def rows(self):
return (Row(row, i, self) for i, row in enumerate(self.table))
@property
def headers(self):
if self._columns:
col_set = self._columns
else:
col_set = self.default_columns
return [Header(col, i, self) for i, col in enumerate(col_set)]
def _get_columns(self):
if self._columns:
return self._columns
return self.default_columns
def _set_columns(self, columns):
self._columns = self.options['columns'] = list(columns)
columns = property(_get_columns, _set_columns)
def delete_row(self, row_num):
self.deleted_rows.append(self.table.rows.pop(row_num))
def sort(self, column_name=None, reverse=False):
"""
Sort rows in this table, preserving a record of how that
sorting is done in TableFu.options['sorted_by']
"""
if not column_name and self.options.has_key('sorted_by'):
column_name = self.options['sorted_by'].keys()[0]
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
self.table.sort(key = lambda row: row[index], reverse=reverse)
self.options['sorted_by'] = {column_name: {'reverse': reverse}}
def transform(self, column_name, func):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
if not callable(func):
raise TypeError("%s isn't callable" % func)
index = self.default_columns.index(column_name)
for row in self.table:
val = row[index]
val = func(val)
row[index] = val
def values(self, column_name, unique=False):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
index = self.default_columns.index(column_name)
result = [row[index] for row in self.table]
if unique:
return set(result)
return result
def total(self, column_name):
if column_name not in self.default_columns:
raise ValueError("%s isn't a column in this table" % column_name)
try:
values = (float(v) for v in self.values(column_name))
except ValueError:
raise ValueError('Column %s contains non-numeric values' % column_name)
return sum(values)
def filter(self, func=None, **query):
"""
Tables can be filtered in one of two ways:
- Simple keyword arguments return rows where values match *exactly*
- Pass in a function and return rows where that function evaluates to True
In either case, a new TableFu instance is returned
"""
if callable(func):
result = filter(func, self)
result.insert(0, self.default_columns)
return TableFu(result, **self.options)
else:
result = self
for column, value in query.items():
result = result.filter(lambda r: r[column] == value)
return result
def facet_by(self, column):
"""
Faceting creates new TableFu instances with rows matching
each possible value.
"""
faceted_spreadsheets = {}
for row in self.rows:
if row[column]:
col = row[column].value
if faceted_spreadsheets.has_key(col):
faceted_spreadsheets[col].append(row.cells)
else:
faceted_spreadsheets[col] = []
faceted_spreadsheets[col].append(row.cells)
# create a new TableFu instance for each facet
tables = []
for k, v in faceted_spreadsheets.items():
v.insert(0, self.default_columns)
table = TableFu(v)
table.faceted_on = k
table.formatting = self.formatting
table.options = self.options
tables.append(table)
tables.sort(key=lambda t: t.faceted_on)
return tables
def transpose(self):
table = copy(self.table)
table.insert(0, self.default_columns)
result = [
[row[i] for row in table]
for i in xrange(len(table[0]))
]
options = self.options.copy()
options.pop('columns', None)
return TableFu(result, **self.options)
def map(self, func, *columns):
"""
Map a function to rows, or to given columns
"""
if not columns:
return map(func, self.rows)
else:
values = (self.values(column) for column in columns)
result = [map(func, v) for v in values]
if len(columns) == 1:
return result[0]
else:
return result
# export methods
def html(self):
table = '<table>\n%s\n%s\n</table>'
thead = '<thead>\n<tr>%s</tr>\n</thead>' % ''.join(['<th>%s</th>' % col for col in self.columns])
tbody = '<tbody>\n%s\n</tbody>' % '\n'.join([row.as_tr() for row in self.rows])
return table % (thead, tbody)
def csv(self, **kwargs):
"""
Export this table as a CSV
"""
out = StringIO()
writer = csv.DictWriter(out, self.columns, **kwargs)
writer.writerow(dict(zip(self.columns, self.columns)))
writer.writerows(dict(row.items()) for row in self.rows)
return out
def dict(self):
return (dict(row.items()) for row in self.rows)
def json(self, **kwargs):
if not has_json:
raise ValueError("Couldn't find a JSON library")
return json.dumps(list(self.dict()), **kwargs)
# static methods for loading data
@staticmethod
@staticmethod
def from_url(url, **options):
"""
Downloads the contents of a given URL and loads it
into a new TableFu instance
"""
resp = urllib2.urlopen(url)
return TableFu(resp, **options)
|
eyeseast/python-tablefu
|
table_fu/__init__.py
|
TableFu.from_url
|
python
|
def from_url(url, **options):
resp = urllib2.urlopen(url)
return TableFu(resp, **options)
|
Downloads the contents of a given URL and loads it
into a new TableFu instance
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/__init__.py#L291-L297
| null |
class TableFu(object):
"""
A table, to be manipulated like a spreadsheet.
TableFu reads in an open CSV file, parsing it
into a table property, Row and Datum objects.
Usage:
# test.csv
Author,Best Book,Number of Pages,Style
Samuel Beckett,Malone Muert,120,Modernism
James Joyce,Ulysses,644,Modernism
Nicholson Baker,Mezannine,150,Minimalism
Vladimir Sorokin,The Queue,263,Satire
>>> spreadsheet = TableFu(open('../tests/test.csv'))
>>> len(spreadsheet.rows)
4
>>> spreadsheet.columns
['Author', 'Best Book', 'Number of Pages', 'Style']
>>> spreadsheet.columns = ['Style', 'Author']
>>> spreadsheet.columms
['Style', 'Author']
"""
def __init__(self, table, **options):
"""
Takes a table argument and optional keyword arguments.
The 'table' argument should be a two-dimensional array,
either a list or tuple, or an open file that can be
parsed by Python's csv module (using csv.reader)
"""
if hasattr(table, 'next'): # for file-like objects
csv_options = {}
if 'dialect' in options:
csv_options['dialect'] = options.pop('dialect')
reader = csv.reader(table, **csv_options)
self.table = [row for row in reader]
else:
self.table = table
self.default_columns = self.table.pop(0)
self._columns = options.get('columns', [])
self.deleted_rows = []
self.faceted_on = None
self.totals = {}
self.formatting = options.get('formatting', {})
self.style = options.get('style', {})
self.options = options
if options.has_key('sorted_by'):
col = options['sorted_by'].keys()[0]
self.sort(column_name=col,
reverse=options['sorted_by'][col].get('reverse', False))
def __getitem__(self, row_num):
"""
Return one row in the table
"""
return Row(self.table[row_num], row_num, self)
def __iter__(self):
return iter(self.rows)
def __len__(self):
return len(list(self.table))
def add_rows(self, *rows):
for row in rows:
self.table.append(row)
def count(self):
return len(list(self))
@property
def rows(self):
return (Row(row, i, self) for i, row in enumerate(self.table))
@property
def headers(self):
if self._columns:
col_set = self._columns
else:
col_set = self.default_columns
return [Header(col, i, self) for i, col in enumerate(col_set)]
def _get_columns(self):
if self._columns:
return self._columns
return self.default_columns
def _set_columns(self, columns):
self._columns = self.options['columns'] = list(columns)
columns = property(_get_columns, _set_columns)
def delete_row(self, row_num):
self.deleted_rows.append(self.table.rows.pop(row_num))
def sort(self, column_name=None, reverse=False):
    """
    Sort rows in this table in place, preserving a record of how the
    sort was done in TableFu.options['sorted_by'].

    If *column_name* is omitted, re-applies the column recorded in
    options['sorted_by']. Raises ValueError for unknown columns.
    """
    # `in` replaces dict.has_key() and list(d)[0] replaces d.keys()[0],
    # both of which break on Python 3.
    if not column_name and 'sorted_by' in self.options:
        column_name = list(self.options['sorted_by'])[0]
    if column_name not in self.default_columns:
        raise ValueError("%s isn't a column in this table" % column_name)
    index = self.default_columns.index(column_name)
    self.table.sort(key=lambda row: row[index], reverse=reverse)
    self.options['sorted_by'] = {column_name: {'reverse': reverse}}
def transform(self, column_name, func):
    """
    Apply the callable *func* to every value in *column_name*,
    rewriting the table in place.
    """
    if column_name not in self.default_columns:
        raise ValueError("%s isn't a column in this table" % column_name)
    if not callable(func):
        raise TypeError("%s isn't callable" % func)
    index = self.default_columns.index(column_name)
    for row in self.table:
        row[index] = func(row[index])
def values(self, column_name, unique=False):
    """
    Return the raw values of *column_name* as a list, or as a set
    when *unique* is true.
    """
    if column_name not in self.default_columns:
        raise ValueError("%s isn't a column in this table" % column_name)
    index = self.default_columns.index(column_name)
    collected = [row[index] for row in self.table]
    return set(collected) if unique else collected
def total(self, column_name):
    """
    Sum *column_name* after converting each value to float.

    Raises ValueError if the column is unknown or contains
    non-numeric values.
    """
    if column_name not in self.default_columns:
        raise ValueError("%s isn't a column in this table" % column_name)
    # Bug fix: the old code wrapped a *generator* in try/except, so
    # float()'s ValueError was raised lazily inside sum(), outside the
    # handler. A list comprehension converts eagerly, inside it.
    try:
        numbers = [float(v) for v in self.values(column_name)]
    except ValueError:
        raise ValueError('Column %s contains non-numeric values' % column_name)
    return sum(numbers)
def filter(self, func=None, **query):
    """
    Return a new TableFu containing a subset of rows.

    Tables can be filtered in one of two ways:
    - Simple keyword arguments return rows where values match *exactly*
    - Pass in a callable and keep rows where it evaluates to True
    """
    if callable(func):
        # A list comprehension instead of the filter() builtin: on
        # Python 3 filter() returns an iterator, so insert() crashed.
        kept = [row for row in self if func(row)]
        kept.insert(0, self.default_columns)
        return TableFu(kept, **self.options)
    else:
        result = self
        for column, value in query.items():
            # Evaluated eagerly each iteration, so the late-binding
            # closure over (column, value) is safe here.
            result = result.filter(lambda r: r[column] == value)
        return result
def facet_by(self, column):
    """
    Split the table into one new TableFu per distinct value of
    *column*; rows with a falsy value in that column are dropped.
    Returns the facets sorted by their faceted_on value.
    """
    faceted_spreadsheets = {}
    for row in self.rows:
        if row[column]:
            # setdefault replaces the has_key()/else branch pair
            # (has_key only exists on Python 2).
            key = row[column].value
            faceted_spreadsheets.setdefault(key, []).append(row.cells)
    # create a new TableFu instance for each facet
    tables = []
    for k, v in faceted_spreadsheets.items():
        v.insert(0, self.default_columns)
        table = TableFu(v)
        table.faceted_on = k
        table.formatting = self.formatting
        table.options = self.options
        tables.append(table)
    tables.sort(key=lambda t: t.faceted_on)
    return tables
def transpose(self):
    """
    Return a new TableFu whose rows are this table's columns.

    The 'columns' option is dropped: the original column names become
    the first column of the transposed table and no longer describe
    its layout.
    """
    table = copy(self.table)
    table.insert(0, self.default_columns)
    result = [
        [row[i] for row in table]
        for i in xrange(len(table[0]))
    ]
    options = self.options.copy()
    options.pop('columns', None)
    # Bug fix: pass the pruned copy. The old code popped 'columns'
    # from the copy but then passed the unmodified self.options.
    return TableFu(result, **options)
def map(self, func, *columns):
    """
    Apply *func* across whole rows, or across the values of the named
    *columns*.

    With no columns, returns map(func, rows). With columns, returns a
    list of mapped value lists — unwrapped when only one column is
    given.
    """
    if not columns:
        return map(func, self.rows)
    mapped = [map(func, self.values(column)) for column in columns]
    return mapped[0] if len(columns) == 1 else mapped
# export methods
def html(self):
    """Render the table as a bare HTML <table> string."""
    header_cells = ''.join('<th>%s</th>' % col for col in self.columns)
    thead = '<thead>\n<tr>%s</tr>\n</thead>' % header_cells
    body_rows = '\n'.join(row.as_tr() for row in self.rows)
    tbody = '<tbody>\n%s\n</tbody>' % body_rows
    return '<table>\n%s\n%s\n</table>' % (thead, tbody)
def csv(self, **kwargs):
    """
    Serialize the table to CSV.

    Returns the StringIO buffer that was written; call .getvalue()
    on it for the text. Extra keyword args go to csv.DictWriter.
    """
    output = StringIO()
    writer = csv.DictWriter(output, self.columns, **kwargs)
    writer.writerow(dict(zip(self.columns, self.columns)))
    for row in self.rows:
        writer.writerow(dict(row.items()))
    return output
def dict(self):
    """Yield each row as a column-name -> value dictionary."""
    for row in self.rows:
        yield {key: value for key, value in row.items()}
def json(self, **kwargs):
    """
    Serialize the table as a JSON array of row dictionaries.

    Raises ValueError when no JSON library was importable at module
    load time (the module-level ``has_json`` flag). Keyword args are
    forwarded to json.dumps.
    """
    if not has_json:
        raise ValueError("Couldn't find a JSON library")
    return json.dumps(list(self.dict()), **kwargs)
# static methods for loading data
@staticmethod
def from_file(fn, **options):
    """
    Build a TableFu from an already-open file-like object (anything
    with a ``read`` attribute) or from a filesystem path.
    """
    if hasattr(fn, 'read'):
        return TableFu(fn, **options)
    handle = open(fn)
    try:
        # __init__ consumes the file immediately, so it is safe to
        # close the handle as soon as construction finishes.
        return TableFu(handle, **options)
    finally:
        handle.close()
@staticmethod
|
eyeseast/python-tablefu
|
table_fu/__init__.py
|
Row.get
|
python
|
def get(self, column_name, default=None):
if column_name in self.table.default_columns:
index = self.table.default_columns.index(column_name)
return Datum(self.cells[index], self.row_num, column_name, self.table)
return default
|
Return the Datum for column_name, or default.
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/__init__.py#L326-L333
| null |
class Row(object):
    """
    A row in a table.

    Rows act like dictionaries, but look more like lists: indexing by
    column name returns a Datum for that cell (based on the table's
    default columns), while iteration walks the cell *values*.

    NOTE(review): this excerpt does not show Row.get, which
    __getitem__ relies on; it is defined elsewhere in the module.
    """
    def __init__(self, cells, row_num, table):
        # Parent TableFu, this row's index, and a private copy of cells.
        self.table = table
        self.row_num = row_num
        self.cells = list(cells)
    def __eq__(self, other):
        """Rows are equal when they are the same type with equal cells."""
        if not type(other) == type(self):
            return False
        return self.cells == other.cells
    def __len__(self):
        """Number of cells in this row."""
        return len(self.cells)
    def update(self, d):
        """Update multiple cell values in place, dict.update-style."""
        for k, v in d.items():
            self[k] = v
    def keys(self):
        """Column names currently exposed by the parent table."""
        return self.table.columns
    def values(self):
        """Raw cell values, one per exposed column."""
        return [d.value for d in self.data]
    def items(self):
        """(column name, value) pairs for the exposed columns."""
        return zip(self.keys(), self.values())
    def __getitem__(self, column_name):
        """Get the Datum for a cell, or raise KeyError if the column doesn't exist."""
        datum = self.get(column_name)
        if datum is None:
            raise KeyError("%s isn't a column in this table" % column_name)
        else:
            return datum
    def __setitem__(self, column_name, value):
        """
        Set the raw value for a given cell; KeyError for unknown columns.
        """
        if not column_name in self.table.default_columns:
            raise KeyError("%s isn't a column in this table" % column_name)
        index = self.table.default_columns.index(column_name)
        self.cells[index] = value
    def __iter__(self):
        """
        Iterate over values, *not keys*. Keys are accessible
        as Row.table.columns or Row.keys().
        """
        return iter(self.values())
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.__str__())
    def __str__(self):
        return ', '.join(str(self[column]) for column in self.table.columns)
    def as_tr(self):
        """Render this row as an HTML <tr>, one <td> per exposed column."""
        cells = ''.join(d.as_td() for d in self.data)
        return '<tr id="row%s" class="row %s">%s</tr>' % (self.row_num, odd_even(self.row_num), cells)
    @property
    def data(self):
        # Datum objects for each exposed column, in column order.
        return [self[col] for col in self.table.columns]
|
eyeseast/python-tablefu
|
table_fu/formatting.py
|
_saferound
|
python
|
def _saferound(value, decimal_places):
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f
|
Rounds a float value off to the desired precision
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L10-L19
| null |
"""
Utilities to format values into more meaningful strings.
Inspired by James Bennett's template_utils and Django's
template filters.
"""
import re
import statestyle
def ap_state(value, failure_string=None):
    """
    Converts a state's name, postal abbreviation or FIPS to A.P. style.

    Example usage:
        >> ap_state("California")
        'Calif.'

    On any lookup failure, returns *failure_string* when provided,
    otherwise echoes the value back unchanged.
    """
    try:
        return statestyle.get(value).ap
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt and SystemExit.
    except Exception:
        if failure_string:
            return failure_string
        else:
            return value
def capfirst(value, failure_string='N/A'):
    """
    Lower-case *value* and capitalize its first character.

    Returns *failure_string* when the value has no .lower() method or
    is empty. Cribbed from django's default filter set.
    """
    try:
        value = value.lower()
        return value[0].upper() + value[1:]
    # Narrowed from a bare `except:` so Ctrl-C still interrupts;
    # AttributeError (non-strings) and IndexError ('') are both caught.
    except Exception:
        return failure_string
def dollars(value):
    """Return *value* as a dollar figure with thousands separators."""
    return u'$' + intcomma(value)
def dollar_signs(value, failure_string='N/A'):
    """
    Return *value* dollar-sign characters, illustrating a price range
    the way Yelp does.

    Returns *failure_string* when the value can't be coerced to int.
    """
    try:
        count = int(value)
    # Bug fix: int(None) and int([]) raise TypeError, which the old
    # code let escape despite the documented failure_string fallback.
    except (TypeError, ValueError):
        return failure_string
    # String repetition replaces the manual accumulation loop; a
    # non-positive count yields '' either way.
    return '$' * count
def image(value, width='', height=''):
    """
    Return an HTML <img> tag for the URL *value*.

    Optional *width*/*height* become inline CSS declarations.
    """
    # Bug fix: the old code concatenated "width:..height:.." with no
    # separator, producing invalid CSS when both were supplied.
    declarations = []
    if width:
        declarations.append("width:%s" % width)
    if height:
        declarations.append("height:%s" % height)
    data_dict = dict(src=value, style=";".join(declarations))
    return '<img src="%(src)s" style="%(style)s">' % data_dict
def link(title, url):
    """Return an HTML anchor tag pointing at *url*, labelled *title*."""
    return u'<a href="%s" title="%s">%s</a>' % (url, title, title)
def intcomma(value):
    """
    Add thousands separators to a number: 3000 -> '3,000',
    45000 -> '45,000'. (Ported from django.contrib.humanize.)
    """
    # Iterative form of the original tail recursion: keep inserting a
    # comma before the last ungrouped block of three digits until the
    # string stops changing.
    text = str(value)
    while True:
        grouped = re.sub(r"^(-?\d+)(\d{3})", r"\g<1>,\g<2>", text)
        if grouped == text:
            return grouped
        text = grouped
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
    """
    Format a float as a percentage string, e.g. 0.5 -> '50.0%'.

    *decimal_places* controls precision; pass multiply=False when the
    value is already scaled to 0-100. Non-numeric values yield
    *failure_string*.
    """
    try:
        value = float(value)
    # Bug fix: float(None) raises TypeError, which escaped the old
    # ValueError-only handler despite the documented fallback.
    except (TypeError, ValueError):
        return failure_string
    if multiply:
        value = value * 100
    return _saferound(value, decimal_places) + '%'
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
    """
    Format a float as a signed percentage-change string ('+5.0%').

    *decimal_places* controls precision; multiply=False skips the x100
    scaling. Non-numeric values yield *failure_string*.
    """
    try:
        f = float(value)
        if multiply:
            f = f * 100
    # Bug fix: TypeError (e.g. float(None)) previously escaped the
    # ValueError-only handler.
    except (TypeError, ValueError):
        return failure_string
    s = _saferound(f, decimal_places)
    if f > 0:
        return '+' + s + '%'
    else:
        return s + '%'
def ratio(value, decimal_places=0, failure_string='N/A'):
    """
    Format a float as an 'X:1' ratio string.

    *decimal_places* controls precision; non-numeric values yield
    *failure_string*.
    """
    try:
        f = float(value)
    # Bug fix: TypeError (e.g. float(None)) previously escaped the
    # ValueError-only handler.
    except (TypeError, ValueError):
        return failure_string
    return _saferound(f, decimal_places) + ':1'
def stateface(value):
    """
    Converts a state's name, postal abbreviation or FIPS to ProPublica's stateface
    font code.

    Example usage:
        >> stateface("California")
        'E'

    Documentation: http://propublica.github.com/stateface/
    Unrecognized values are returned unchanged.
    """
    try:
        return statestyle.get(value).stateface
    # NOTE(review): bare except deliberately treats *any* lookup
    # failure as "unknown state" — consider narrowing to Exception.
    except:
        return value
def state_postal(value):
    """
    Converts a state's name or FIPS code to its postal abbreviation.

    Unrecognized values are returned unchanged.
    """
    try:
        return statestyle.get(value).postal
    # NOTE(review): bare except deliberately treats *any* lookup
    # failure as "unknown state" — consider narrowing to Exception.
    except:
        return value
def title(value, failure_string='N/A'):
    """
    Convert a string to title case, Django-style: letters after an
    apostrophe or a digit stay lowercase. Empty or non-string input
    yields *failure_string*.
    """
    try:
        lowered = value.lower()
        # Undo str.title()'s capitalization after apostrophes ...
        titled = re.sub("([a-z])'([A-Z])",
                        lambda m: m.group(0).lower(), lowered.title())
        # ... and after digits.
        result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), titled)
        return result or failure_string
    except:
        return failure_string
# Registry of the built-in formatters, keyed by the name a caller
# passes to Formatter.__call__ (and hence to TableFu's formatting
# options). Formatter.__init__ registers each of these on startup.
DEFAULT_FORMATTERS = {
    'ap_state': ap_state,
    'capfirst': capfirst,
    'dollars': dollars,
    'dollar_signs': dollar_signs,
    'intcomma': intcomma,
    'image': image,
    'link': link,
    'percentage': percentage,
    'percent_change': percent_change,
    'ratio': ratio,
    'stateface': stateface,
    'state_postal': state_postal,
    'title': title,
}
class Formatter(object):
    """
    A formatter is a function (or any callable, really)
    that takes a value and returns a nicer-looking value,
    most likely a string.

    Formatter stores and calls those functions, keeping
    the namespace uncluttered.

    Formatting functions should take a value as the first
    argument--usually the value of the Datum on which the
    function is called--followed by any number of positional
    arguments.

    In the context of TableFu, those arguments may refer to
    other columns in the same row.

    >>> formatter = Formatter()
    >>> formatter(1200, 'intcomma')
    '1,200'
    >>> formatter(1200, 'dollars')
    '$1,200'
    """
    def __init__(self):
        # Name -> callable registry, pre-populated with the built-ins.
        self._filters = {}
        for name, func in DEFAULT_FORMATTERS.items():
            self.register(name, func)
    def __call__(self, value, func, *args, **kwargs):
        """Apply *func* (a callable or a registered name) to *value*."""
        if not callable(func):
            # A string name: raises KeyError when nothing is registered.
            func = self._filters[func]
        return func(value, *args, **kwargs)
    def register(self, name=None, func=None):
        """
        Register *func* under *name*; either may be omitted. Passing
        just a callable (as `name` or `func`) uses its __name__.
        """
        if not func and not name:
            return
        if callable(name) and not func:
            func = name
            name = func.__name__
        elif func and not name:
            name = func.__name__
        self._filters[name] = func
    def unregister(self, name=None, func=None):
        """Remove a formatter by name or callable; unknown names are ignored."""
        if not func and not name:
            return
        if not name:
            name = func.__name__
        if name not in self._filters:
            return
        del self._filters[name]
# Unless you need to subclass or keep formatting functions
# isolated, you can just import this instance.
format = Formatter()
|
eyeseast/python-tablefu
|
table_fu/formatting.py
|
ap_state
|
python
|
def ap_state(value, failure_string=None):
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value
|
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L22-L38
| null |
"""
Utilities to format values into more meaningful strings.
Inspired by James Bennett's template_utils and Django's
template filters.
"""
import re
import statestyle
def _saferound(value, decimal_places):
    """
    Round *value* off to *decimal_places*, returning '' when the value
    cannot be converted to a float.
    """
    try:
        f = float(value)
    # Bug fix: float(None) raises TypeError; treat it as non-numeric
    # too, matching the documented contract.
    except (TypeError, ValueError):
        return ''
    format = '%%.%df' % decimal_places
    return format % f
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value
def capfirst(value, failure_string='N/A'):
"""
Capitalizes the first character of the value.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Cribbs from django's default filter set
"""
try:
value = value.lower()
return value[0].upper() + value[1:]
except:
return failure_string
def dollars(value):
return u'$%s'% intcomma(value)
def dollar_signs(value, failure_string='N/A'):
"""
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
"""
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string
def image(value, width='', height=''):
"""
Accepts a URL and returns an HTML image tag ready to be displayed.
Optionally, you can set the height and width with keyword arguments.
"""
style = ""
if width:
style += "width:%s" % width
if height:
style += "height:%s" % height
data_dict = dict(src=value, style=style)
return '<img src="%(src)s" style="%(style)s">' % data_dict
def link(title, url):
return u'<a href="%(url)s" title="%(title)s">%(title)s</a>' % {
'url': url,
'title': title
}
def intcomma(value):
"""
Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage value.
Number of decimal places set by the `decimal_places` kwarg. Default is one.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
"""
try:
value = float(value)
except ValueError:
return failure_string
if multiply:
value = value * 100
return _saferound(value, decimal_places) + '%'
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage change value.
Number of decimal places set by the `precision` kwarg. Default is one.
Non-floats are assumed to be zero division errors and are presented as
'N/A' in the output.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
"""
try:
f = float(value)
if multiply:
f = f * 100
except ValueError:
return failure_string
s = _saferound(f, decimal_places)
if f > 0:
return '+' + s + '%'
else:
return s + '%'
def ratio(value, decimal_places=0, failure_string='N/A'):
"""
Converts a floating point value a X:1 ratio.
Number of decimal places set by the `precision` kwarg. Default is one.
"""
try:
f = float(value)
except ValueError:
return failure_string
return _saferound(f, decimal_places) + ':1'
def stateface(value):
"""
Converts a state's name, postal abbreviation or FIPS to ProPublica's stateface
font code.
Example usage:
>> stateface("California")
'E'
Documentation: http://propublica.github.com/stateface/
"""
try:
return statestyle.get(value).stateface
except:
return value
def state_postal(value):
"""
Converts a state's name, or FIPS to its postal abbreviation
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).postal
except:
return value
def title(value, failure_string='N/A'):
"""
Converts a string into titlecase.
Lifted from Django.
"""
try:
value = value.lower()
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
if not result:
return failure_string
return result
except:
return failure_string
DEFAULT_FORMATTERS = {
'ap_state': ap_state,
'capfirst': capfirst,
'dollars': dollars,
'dollar_signs': dollar_signs,
'intcomma': intcomma,
'image': image,
'link': link,
'percentage': percentage,
'percent_change': percent_change,
'ratio': ratio,
'stateface': stateface,
'state_postal': state_postal,
'title': title,
}
class Formatter(object):
"""
A formatter is a function (or any callable, really)
that takes a value and returns a nicer-looking value,
most likely a sting.
Formatter stores and calls those functions, keeping
the namespace uncluttered.
Formatting functions should take a value as the first
argument--usually the value of the Datum on which the
function is called--followed by any number of positional
arguments.
In the context of TableFu, those arguments may refer to
other columns in the same row.
>>> formatter = Formatter()
>>> formatter(1200, 'intcomma')
'1,200'
>>> formatter(1200, 'dollars')
'$1,200'
"""
def __init__(self):
self._filters = {}
for name, func in DEFAULT_FORMATTERS.items():
self.register(name, func)
def __call__(self, value, func, *args, **kwargs):
if not callable(func):
func = self._filters[func]
return func(value, *args, **kwargs)
def register(self, name=None, func=None):
if not func and not name:
return
if callable(name) and not func:
func = name
name = func.__name__
elif func and not name:
name = func.__name__
self._filters[name] = func
def unregister(self, name=None, func=None):
if not func and not name:
return
if not name:
name = func.__name__
if name not in self._filters:
return
del self._filters[name]
# Unless you need to subclass or keep formatting functions
# isolated, you can just import this instance.
format = Formatter()
|
eyeseast/python-tablefu
|
table_fu/formatting.py
|
capfirst
|
python
|
def capfirst(value, failure_string='N/A'):
try:
value = value.lower()
return value[0].upper() + value[1:]
except:
return failure_string
|
Capitalizes the first character of the value.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Cribbs from django's default filter set
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L41-L54
| null |
"""
Utilities to format values into more meaningful strings.
Inspired by James Bennett's template_utils and Django's
template filters.
"""
import re
import statestyle
def _saferound(value, decimal_places):
"""
Rounds a float value off to the desired precision
"""
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value
def capfirst(value, failure_string='N/A'):
"""
Capitalizes the first character of the value.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Cribbs from django's default filter set
"""
try:
value = value.lower()
return value[0].upper() + value[1:]
except:
return failure_string
def dollars(value):
return u'$%s'% intcomma(value)
def dollar_signs(value, failure_string='N/A'):
"""
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
"""
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string
def image(value, width='', height=''):
"""
Accepts a URL and returns an HTML image tag ready to be displayed.
Optionally, you can set the height and width with keyword arguments.
"""
style = ""
if width:
style += "width:%s" % width
if height:
style += "height:%s" % height
data_dict = dict(src=value, style=style)
return '<img src="%(src)s" style="%(style)s">' % data_dict
def link(title, url):
return u'<a href="%(url)s" title="%(title)s">%(title)s</a>' % {
'url': url,
'title': title
}
def intcomma(value):
"""
Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage value.
Number of decimal places set by the `decimal_places` kwarg. Default is one.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
"""
try:
value = float(value)
except ValueError:
return failure_string
if multiply:
value = value * 100
return _saferound(value, decimal_places) + '%'
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage change value.
Number of decimal places set by the `precision` kwarg. Default is one.
Non-floats are assumed to be zero division errors and are presented as
'N/A' in the output.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
"""
try:
f = float(value)
if multiply:
f = f * 100
except ValueError:
return failure_string
s = _saferound(f, decimal_places)
if f > 0:
return '+' + s + '%'
else:
return s + '%'
def ratio(value, decimal_places=0, failure_string='N/A'):
"""
Converts a floating point value a X:1 ratio.
Number of decimal places set by the `precision` kwarg. Default is one.
"""
try:
f = float(value)
except ValueError:
return failure_string
return _saferound(f, decimal_places) + ':1'
def stateface(value):
"""
Converts a state's name, postal abbreviation or FIPS to ProPublica's stateface
font code.
Example usage:
>> stateface("California")
'E'
Documentation: http://propublica.github.com/stateface/
"""
try:
return statestyle.get(value).stateface
except:
return value
def state_postal(value):
"""
Converts a state's name, or FIPS to its postal abbreviation
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).postal
except:
return value
def title(value, failure_string='N/A'):
"""
Converts a string into titlecase.
Lifted from Django.
"""
try:
value = value.lower()
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
if not result:
return failure_string
return result
except:
return failure_string
DEFAULT_FORMATTERS = {
'ap_state': ap_state,
'capfirst': capfirst,
'dollars': dollars,
'dollar_signs': dollar_signs,
'intcomma': intcomma,
'image': image,
'link': link,
'percentage': percentage,
'percent_change': percent_change,
'ratio': ratio,
'stateface': stateface,
'state_postal': state_postal,
'title': title,
}
class Formatter(object):
"""
A formatter is a function (or any callable, really)
that takes a value and returns a nicer-looking value,
most likely a sting.
Formatter stores and calls those functions, keeping
the namespace uncluttered.
Formatting functions should take a value as the first
argument--usually the value of the Datum on which the
function is called--followed by any number of positional
arguments.
In the context of TableFu, those arguments may refer to
other columns in the same row.
>>> formatter = Formatter()
>>> formatter(1200, 'intcomma')
'1,200'
>>> formatter(1200, 'dollars')
'$1,200'
"""
def __init__(self):
self._filters = {}
for name, func in DEFAULT_FORMATTERS.items():
self.register(name, func)
def __call__(self, value, func, *args, **kwargs):
if not callable(func):
func = self._filters[func]
return func(value, *args, **kwargs)
def register(self, name=None, func=None):
if not func and not name:
return
if callable(name) and not func:
func = name
name = func.__name__
elif func and not name:
name = func.__name__
self._filters[name] = func
def unregister(self, name=None, func=None):
if not func and not name:
return
if not name:
name = func.__name__
if name not in self._filters:
return
del self._filters[name]
# Unless you need to subclass or keep formatting functions
# isolated, you can just import this instance.
format = Formatter()
|
eyeseast/python-tablefu
|
table_fu/formatting.py
|
dollar_signs
|
python
|
def dollar_signs(value, failure_string='N/A'):
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string
|
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L61-L77
| null |
"""
Utilities to format values into more meaningful strings.
Inspired by James Bennett's template_utils and Django's
template filters.
"""
import re
import statestyle
def _saferound(value, decimal_places):
"""
Rounds a float value off to the desired precision
"""
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value
def capfirst(value, failure_string='N/A'):
"""
Capitalizes the first character of the value.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Cribbs from django's default filter set
"""
try:
value = value.lower()
return value[0].upper() + value[1:]
except:
return failure_string
def dollars(value):
return u'$%s'% intcomma(value)
def dollar_signs(value, failure_string='N/A'):
"""
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
"""
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string
def image(value, width='', height=''):
"""
Accepts a URL and returns an HTML image tag ready to be displayed.
Optionally, you can set the height and width with keyword arguments.
"""
style = ""
if width:
style += "width:%s" % width
if height:
style += "height:%s" % height
data_dict = dict(src=value, style=style)
return '<img src="%(src)s" style="%(style)s">' % data_dict
def link(title, url):
return u'<a href="%(url)s" title="%(title)s">%(title)s</a>' % {
'url': url,
'title': title
}
def intcomma(value):
"""
Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage value.
Number of decimal places set by the `decimal_places` kwarg. Default is one.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
"""
try:
value = float(value)
except ValueError:
return failure_string
if multiply:
value = value * 100
return _saferound(value, decimal_places) + '%'
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage change value.
Number of decimal places set by the `precision` kwarg. Default is one.
Non-floats are assumed to be zero division errors and are presented as
'N/A' in the output.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
"""
try:
f = float(value)
if multiply:
f = f * 100
except ValueError:
return failure_string
s = _saferound(f, decimal_places)
if f > 0:
return '+' + s + '%'
else:
return s + '%'
def ratio(value, decimal_places=0, failure_string='N/A'):
"""
Converts a floating point value a X:1 ratio.
Number of decimal places set by the `precision` kwarg. Default is one.
"""
try:
f = float(value)
except ValueError:
return failure_string
return _saferound(f, decimal_places) + ':1'
def stateface(value):
"""
Converts a state's name, postal abbreviation or FIPS to ProPublica's stateface
font code.
Example usage:
>> stateface("California")
'E'
Documentation: http://propublica.github.com/stateface/
"""
try:
return statestyle.get(value).stateface
except:
return value
def state_postal(value):
"""
Converts a state's name, or FIPS to its postal abbreviation
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).postal
except:
return value
def title(value, failure_string='N/A'):
"""
Converts a string into titlecase.
Lifted from Django.
"""
try:
value = value.lower()
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
if not result:
return failure_string
return result
except:
return failure_string
DEFAULT_FORMATTERS = {
'ap_state': ap_state,
'capfirst': capfirst,
'dollars': dollars,
'dollar_signs': dollar_signs,
'intcomma': intcomma,
'image': image,
'link': link,
'percentage': percentage,
'percent_change': percent_change,
'ratio': ratio,
'stateface': stateface,
'state_postal': state_postal,
'title': title,
}
class Formatter(object):
"""
A formatter is a function (or any callable, really)
that takes a value and returns a nicer-looking value,
most likely a sting.
Formatter stores and calls those functions, keeping
the namespace uncluttered.
Formatting functions should take a value as the first
argument--usually the value of the Datum on which the
function is called--followed by any number of positional
arguments.
In the context of TableFu, those arguments may refer to
other columns in the same row.
>>> formatter = Formatter()
>>> formatter(1200, 'intcomma')
'1,200'
>>> formatter(1200, 'dollars')
'$1,200'
"""
def __init__(self):
self._filters = {}
for name, func in DEFAULT_FORMATTERS.items():
self.register(name, func)
def __call__(self, value, func, *args, **kwargs):
if not callable(func):
func = self._filters[func]
return func(value, *args, **kwargs)
def register(self, name=None, func=None):
if not func and not name:
return
if callable(name) and not func:
func = name
name = func.__name__
elif func and not name:
name = func.__name__
self._filters[name] = func
def unregister(self, name=None, func=None):
if not func and not name:
return
if not name:
name = func.__name__
if name not in self._filters:
return
del self._filters[name]
# Unless you need to subclass or keep formatting functions
# isolated, you can just import this instance.
format = Formatter()
|
eyeseast/python-tablefu
|
table_fu/formatting.py
|
image
|
python
|
def image(value, width='', height=''):
style = ""
if width:
style += "width:%s" % width
if height:
style += "height:%s" % height
data_dict = dict(src=value, style=style)
return '<img src="%(src)s" style="%(style)s">' % data_dict
|
Accepts a URL and returns an HTML image tag ready to be displayed.
Optionally, you can set the height and width with keyword arguments.
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L80-L92
| null |
"""
Utilities to format values into more meaningful strings.
Inspired by James Bennett's template_utils and Django's
template filters.
"""
import re
import statestyle
def _saferound(value, decimal_places):
"""
Rounds a float value off to the desired precision
"""
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value
def capfirst(value, failure_string='N/A'):
"""
Capitalizes the first character of the value.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Cribbs from django's default filter set
"""
try:
value = value.lower()
return value[0].upper() + value[1:]
except:
return failure_string
def dollars(value):
return u'$%s'% intcomma(value)
def dollar_signs(value, failure_string='N/A'):
"""
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
"""
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string
def image(value, width='', height=''):
"""
Accepts a URL and returns an HTML image tag ready to be displayed.
Optionally, you can set the height and width with keyword arguments.
"""
style = ""
if width:
style += "width:%s" % width
if height:
style += "height:%s" % height
data_dict = dict(src=value, style=style)
return '<img src="%(src)s" style="%(style)s">' % data_dict
def link(title, url):
return u'<a href="%(url)s" title="%(title)s">%(title)s</a>' % {
'url': url,
'title': title
}
def intcomma(value):
"""
Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage value.
Number of decimal places set by the `decimal_places` kwarg. Default is one.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
"""
try:
value = float(value)
except ValueError:
return failure_string
if multiply:
value = value * 100
return _saferound(value, decimal_places) + '%'
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage change value.
Number of decimal places set by the `precision` kwarg. Default is one.
Non-floats are assumed to be zero division errors and are presented as
'N/A' in the output.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
"""
try:
f = float(value)
if multiply:
f = f * 100
except ValueError:
return failure_string
s = _saferound(f, decimal_places)
if f > 0:
return '+' + s + '%'
else:
return s + '%'
def ratio(value, decimal_places=0, failure_string='N/A'):
"""
Converts a floating point value a X:1 ratio.
Number of decimal places set by the `precision` kwarg. Default is one.
"""
try:
f = float(value)
except ValueError:
return failure_string
return _saferound(f, decimal_places) + ':1'
def stateface(value):
"""
Converts a state's name, postal abbreviation or FIPS to ProPublica's stateface
font code.
Example usage:
>> stateface("California")
'E'
Documentation: http://propublica.github.com/stateface/
"""
try:
return statestyle.get(value).stateface
except:
return value
def state_postal(value):
"""
Converts a state's name, or FIPS to its postal abbreviation
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).postal
except:
return value
def title(value, failure_string='N/A'):
"""
Converts a string into titlecase.
Lifted from Django.
"""
try:
value = value.lower()
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
if not result:
return failure_string
return result
except:
return failure_string
DEFAULT_FORMATTERS = {
'ap_state': ap_state,
'capfirst': capfirst,
'dollars': dollars,
'dollar_signs': dollar_signs,
'intcomma': intcomma,
'image': image,
'link': link,
'percentage': percentage,
'percent_change': percent_change,
'ratio': ratio,
'stateface': stateface,
'state_postal': state_postal,
'title': title,
}
class Formatter(object):
"""
A formatter is a function (or any callable, really)
that takes a value and returns a nicer-looking value,
most likely a sting.
Formatter stores and calls those functions, keeping
the namespace uncluttered.
Formatting functions should take a value as the first
argument--usually the value of the Datum on which the
function is called--followed by any number of positional
arguments.
In the context of TableFu, those arguments may refer to
other columns in the same row.
>>> formatter = Formatter()
>>> formatter(1200, 'intcomma')
'1,200'
>>> formatter(1200, 'dollars')
'$1,200'
"""
def __init__(self):
self._filters = {}
for name, func in DEFAULT_FORMATTERS.items():
self.register(name, func)
def __call__(self, value, func, *args, **kwargs):
if not callable(func):
func = self._filters[func]
return func(value, *args, **kwargs)
def register(self, name=None, func=None):
if not func and not name:
return
if callable(name) and not func:
func = name
name = func.__name__
elif func and not name:
name = func.__name__
self._filters[name] = func
def unregister(self, name=None, func=None):
if not func and not name:
return
if not name:
name = func.__name__
if name not in self._filters:
return
del self._filters[name]
# Unless you need to subclass or keep formatting functions
# isolated, you can just import this instance.
format = Formatter()
|
eyeseast/python-tablefu
|
table_fu/formatting.py
|
intcomma
|
python
|
def intcomma(value):
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
|
Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L102-L114
|
[
"def intcomma(value):\n \"\"\"\n Borrowed from django.contrib.humanize\n\n Converts an integer to a string containing commas every three digits.\n For example, 3000 becomes '3,000' and 45000 becomes '45,000'.\n \"\"\"\n orig = str(value)\n new = re.sub(\"^(-?\\d+)(\\d{3})\", '\\g<1>,\\g<2>', orig)\n if orig == new:\n return new\n else:\n return intcomma(new)\n"
] |
"""
Utilities to format values into more meaningful strings.
Inspired by James Bennett's template_utils and Django's
template filters.
"""
import re
import statestyle
def _saferound(value, decimal_places):
"""
Rounds a float value off to the desired precision
"""
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value
def capfirst(value, failure_string='N/A'):
"""
Capitalizes the first character of the value.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Cribbs from django's default filter set
"""
try:
value = value.lower()
return value[0].upper() + value[1:]
except:
return failure_string
def dollars(value):
return u'$%s'% intcomma(value)
def dollar_signs(value, failure_string='N/A'):
"""
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
"""
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string
def image(value, width='', height=''):
"""
Accepts a URL and returns an HTML image tag ready to be displayed.
Optionally, you can set the height and width with keyword arguments.
"""
style = ""
if width:
style += "width:%s" % width
if height:
style += "height:%s" % height
data_dict = dict(src=value, style=style)
return '<img src="%(src)s" style="%(style)s">' % data_dict
def link(title, url):
return u'<a href="%(url)s" title="%(title)s">%(title)s</a>' % {
'url': url,
'title': title
}
def intcomma(value):
"""
Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage value.
Number of decimal places set by the `decimal_places` kwarg. Default is one.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
"""
try:
value = float(value)
except ValueError:
return failure_string
if multiply:
value = value * 100
return _saferound(value, decimal_places) + '%'
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage change value.
Number of decimal places set by the `precision` kwarg. Default is one.
Non-floats are assumed to be zero division errors and are presented as
'N/A' in the output.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
"""
try:
f = float(value)
if multiply:
f = f * 100
except ValueError:
return failure_string
s = _saferound(f, decimal_places)
if f > 0:
return '+' + s + '%'
else:
return s + '%'
def ratio(value, decimal_places=0, failure_string='N/A'):
"""
Converts a floating point value a X:1 ratio.
Number of decimal places set by the `precision` kwarg. Default is one.
"""
try:
f = float(value)
except ValueError:
return failure_string
return _saferound(f, decimal_places) + ':1'
def stateface(value):
"""
Converts a state's name, postal abbreviation or FIPS to ProPublica's stateface
font code.
Example usage:
>> stateface("California")
'E'
Documentation: http://propublica.github.com/stateface/
"""
try:
return statestyle.get(value).stateface
except:
return value
def state_postal(value):
"""
Converts a state's name, or FIPS to its postal abbreviation
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).postal
except:
return value
def title(value, failure_string='N/A'):
"""
Converts a string into titlecase.
Lifted from Django.
"""
try:
value = value.lower()
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
if not result:
return failure_string
return result
except:
return failure_string
DEFAULT_FORMATTERS = {
'ap_state': ap_state,
'capfirst': capfirst,
'dollars': dollars,
'dollar_signs': dollar_signs,
'intcomma': intcomma,
'image': image,
'link': link,
'percentage': percentage,
'percent_change': percent_change,
'ratio': ratio,
'stateface': stateface,
'state_postal': state_postal,
'title': title,
}
class Formatter(object):
"""
A formatter is a function (or any callable, really)
that takes a value and returns a nicer-looking value,
most likely a sting.
Formatter stores and calls those functions, keeping
the namespace uncluttered.
Formatting functions should take a value as the first
argument--usually the value of the Datum on which the
function is called--followed by any number of positional
arguments.
In the context of TableFu, those arguments may refer to
other columns in the same row.
>>> formatter = Formatter()
>>> formatter(1200, 'intcomma')
'1,200'
>>> formatter(1200, 'dollars')
'$1,200'
"""
def __init__(self):
self._filters = {}
for name, func in DEFAULT_FORMATTERS.items():
self.register(name, func)
def __call__(self, value, func, *args, **kwargs):
if not callable(func):
func = self._filters[func]
return func(value, *args, **kwargs)
def register(self, name=None, func=None):
if not func and not name:
return
if callable(name) and not func:
func = name
name = func.__name__
elif func and not name:
name = func.__name__
self._filters[name] = func
def unregister(self, name=None, func=None):
if not func and not name:
return
if not name:
name = func.__name__
if name not in self._filters:
return
del self._filters[name]
# Unless you need to subclass or keep formatting functions
# isolated, you can just import this instance.
format = Formatter()
|
eyeseast/python-tablefu
|
table_fu/formatting.py
|
percentage
|
python
|
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
try:
value = float(value)
except ValueError:
return failure_string
if multiply:
value = value * 100
return _saferound(value, decimal_places) + '%'
|
Converts a floating point value into a percentage value.
Number of decimal places set by the `decimal_places` kwarg. Default is one.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L117-L135
|
[
"def _saferound(value, decimal_places):\n \"\"\"\n Rounds a float value off to the desired precision\n \"\"\"\n try:\n f = float(value)\n except ValueError:\n return ''\n format = '%%.%df' % decimal_places\n return format % f\n"
] |
"""
Utilities to format values into more meaningful strings.
Inspired by James Bennett's template_utils and Django's
template filters.
"""
import re
import statestyle
def _saferound(value, decimal_places):
"""
Rounds a float value off to the desired precision
"""
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value
def capfirst(value, failure_string='N/A'):
"""
Capitalizes the first character of the value.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Cribbs from django's default filter set
"""
try:
value = value.lower()
return value[0].upper() + value[1:]
except:
return failure_string
def dollars(value):
return u'$%s'% intcomma(value)
def dollar_signs(value, failure_string='N/A'):
"""
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
"""
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string
def image(value, width='', height=''):
"""
Accepts a URL and returns an HTML image tag ready to be displayed.
Optionally, you can set the height and width with keyword arguments.
"""
style = ""
if width:
style += "width:%s" % width
if height:
style += "height:%s" % height
data_dict = dict(src=value, style=style)
return '<img src="%(src)s" style="%(style)s">' % data_dict
def link(title, url):
return u'<a href="%(url)s" title="%(title)s">%(title)s</a>' % {
'url': url,
'title': title
}
def intcomma(value):
"""
Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage value.
Number of decimal places set by the `decimal_places` kwarg. Default is one.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
"""
try:
value = float(value)
except ValueError:
return failure_string
if multiply:
value = value * 100
return _saferound(value, decimal_places) + '%'
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage change value.
Number of decimal places set by the `precision` kwarg. Default is one.
Non-floats are assumed to be zero division errors and are presented as
'N/A' in the output.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
"""
try:
f = float(value)
if multiply:
f = f * 100
except ValueError:
return failure_string
s = _saferound(f, decimal_places)
if f > 0:
return '+' + s + '%'
else:
return s + '%'
def ratio(value, decimal_places=0, failure_string='N/A'):
"""
Converts a floating point value a X:1 ratio.
Number of decimal places set by the `precision` kwarg. Default is one.
"""
try:
f = float(value)
except ValueError:
return failure_string
return _saferound(f, decimal_places) + ':1'
def stateface(value):
"""
Converts a state's name, postal abbreviation or FIPS to ProPublica's stateface
font code.
Example usage:
>> stateface("California")
'E'
Documentation: http://propublica.github.com/stateface/
"""
try:
return statestyle.get(value).stateface
except:
return value
def state_postal(value):
"""
Converts a state's name, or FIPS to its postal abbreviation
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).postal
except:
return value
def title(value, failure_string='N/A'):
"""
Converts a string into titlecase.
Lifted from Django.
"""
try:
value = value.lower()
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
if not result:
return failure_string
return result
except:
return failure_string
DEFAULT_FORMATTERS = {
'ap_state': ap_state,
'capfirst': capfirst,
'dollars': dollars,
'dollar_signs': dollar_signs,
'intcomma': intcomma,
'image': image,
'link': link,
'percentage': percentage,
'percent_change': percent_change,
'ratio': ratio,
'stateface': stateface,
'state_postal': state_postal,
'title': title,
}
class Formatter(object):
"""
A formatter is a function (or any callable, really)
that takes a value and returns a nicer-looking value,
most likely a sting.
Formatter stores and calls those functions, keeping
the namespace uncluttered.
Formatting functions should take a value as the first
argument--usually the value of the Datum on which the
function is called--followed by any number of positional
arguments.
In the context of TableFu, those arguments may refer to
other columns in the same row.
>>> formatter = Formatter()
>>> formatter(1200, 'intcomma')
'1,200'
>>> formatter(1200, 'dollars')
'$1,200'
"""
def __init__(self):
self._filters = {}
for name, func in DEFAULT_FORMATTERS.items():
self.register(name, func)
def __call__(self, value, func, *args, **kwargs):
if not callable(func):
func = self._filters[func]
return func(value, *args, **kwargs)
def register(self, name=None, func=None):
if not func and not name:
return
if callable(name) and not func:
func = name
name = func.__name__
elif func and not name:
name = func.__name__
self._filters[name] = func
def unregister(self, name=None, func=None):
if not func and not name:
return
if not name:
name = func.__name__
if name not in self._filters:
return
del self._filters[name]
# Unless you need to subclass or keep formatting functions
# isolated, you can just import this instance.
format = Formatter()
|
eyeseast/python-tablefu
|
table_fu/formatting.py
|
percent_change
|
python
|
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
try:
f = float(value)
if multiply:
f = f * 100
except ValueError:
return failure_string
s = _saferound(f, decimal_places)
if f > 0:
return '+' + s + '%'
else:
return s + '%'
|
Converts a floating point value into a percentage change value.
Number of decimal places set by the `precision` kwarg. Default is one.
Non-floats are assumed to be zero division errors and are presented as
'N/A' in the output.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L138-L160
|
[
"def _saferound(value, decimal_places):\n \"\"\"\n Rounds a float value off to the desired precision\n \"\"\"\n try:\n f = float(value)\n except ValueError:\n return ''\n format = '%%.%df' % decimal_places\n return format % f\n"
] |
"""
Utilities to format values into more meaningful strings.
Inspired by James Bennett's template_utils and Django's
template filters.
"""
import re
import statestyle
def _saferound(value, decimal_places):
"""
Rounds a float value off to the desired precision
"""
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value
def capfirst(value, failure_string='N/A'):
"""
Capitalizes the first character of the value.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Cribbs from django's default filter set
"""
try:
value = value.lower()
return value[0].upper() + value[1:]
except:
return failure_string
def dollars(value):
return u'$%s'% intcomma(value)
def dollar_signs(value, failure_string='N/A'):
"""
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
"""
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string
def image(value, width='', height=''):
"""
Accepts a URL and returns an HTML image tag ready to be displayed.
Optionally, you can set the height and width with keyword arguments.
"""
style = ""
if width:
style += "width:%s" % width
if height:
style += "height:%s" % height
data_dict = dict(src=value, style=style)
return '<img src="%(src)s" style="%(style)s">' % data_dict
def link(title, url):
return u'<a href="%(url)s" title="%(title)s">%(title)s</a>' % {
'url': url,
'title': title
}
def intcomma(value):
"""
Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage value.
Number of decimal places set by the `decimal_places` kwarg. Default is one.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
"""
try:
value = float(value)
except ValueError:
return failure_string
if multiply:
value = value * 100
return _saferound(value, decimal_places) + '%'
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage change value.
Number of decimal places set by the `precision` kwarg. Default is one.
Non-floats are assumed to be zero division errors and are presented as
'N/A' in the output.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
"""
try:
f = float(value)
if multiply:
f = f * 100
except ValueError:
return failure_string
s = _saferound(f, decimal_places)
if f > 0:
return '+' + s + '%'
else:
return s + '%'
def ratio(value, decimal_places=0, failure_string='N/A'):
"""
Converts a floating point value a X:1 ratio.
Number of decimal places set by the `precision` kwarg. Default is one.
"""
try:
f = float(value)
except ValueError:
return failure_string
return _saferound(f, decimal_places) + ':1'
def stateface(value):
"""
Converts a state's name, postal abbreviation or FIPS to ProPublica's stateface
font code.
Example usage:
>> stateface("California")
'E'
Documentation: http://propublica.github.com/stateface/
"""
try:
return statestyle.get(value).stateface
except:
return value
def state_postal(value):
"""
Converts a state's name, or FIPS to its postal abbreviation
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).postal
except:
return value
def title(value, failure_string='N/A'):
"""
Converts a string into titlecase.
Lifted from Django.
"""
try:
value = value.lower()
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
if not result:
return failure_string
return result
except:
return failure_string
DEFAULT_FORMATTERS = {
'ap_state': ap_state,
'capfirst': capfirst,
'dollars': dollars,
'dollar_signs': dollar_signs,
'intcomma': intcomma,
'image': image,
'link': link,
'percentage': percentage,
'percent_change': percent_change,
'ratio': ratio,
'stateface': stateface,
'state_postal': state_postal,
'title': title,
}
class Formatter(object):
"""
A formatter is a function (or any callable, really)
that takes a value and returns a nicer-looking value,
most likely a sting.
Formatter stores and calls those functions, keeping
the namespace uncluttered.
Formatting functions should take a value as the first
argument--usually the value of the Datum on which the
function is called--followed by any number of positional
arguments.
In the context of TableFu, those arguments may refer to
other columns in the same row.
>>> formatter = Formatter()
>>> formatter(1200, 'intcomma')
'1,200'
>>> formatter(1200, 'dollars')
'$1,200'
"""
def __init__(self):
self._filters = {}
for name, func in DEFAULT_FORMATTERS.items():
self.register(name, func)
def __call__(self, value, func, *args, **kwargs):
if not callable(func):
func = self._filters[func]
return func(value, *args, **kwargs)
def register(self, name=None, func=None):
if not func and not name:
return
if callable(name) and not func:
func = name
name = func.__name__
elif func and not name:
name = func.__name__
self._filters[name] = func
def unregister(self, name=None, func=None):
if not func and not name:
return
if not name:
name = func.__name__
if name not in self._filters:
return
del self._filters[name]
# Unless you need to subclass or keep formatting functions
# isolated, you can just import this instance.
format = Formatter()
|
eyeseast/python-tablefu
|
table_fu/formatting.py
|
ratio
|
python
|
def ratio(value, decimal_places=0, failure_string='N/A'):
try:
f = float(value)
except ValueError:
return failure_string
return _saferound(f, decimal_places) + ':1'
|
Converts a floating point value a X:1 ratio.
Number of decimal places set by the `precision` kwarg. Default is one.
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L163-L173
|
[
"def _saferound(value, decimal_places):\n \"\"\"\n Rounds a float value off to the desired precision\n \"\"\"\n try:\n f = float(value)\n except ValueError:\n return ''\n format = '%%.%df' % decimal_places\n return format % f\n"
] |
"""
Utilities to format values into more meaningful strings.
Inspired by James Bennett's template_utils and Django's
template filters.
"""
import re
import statestyle
def _saferound(value, decimal_places):
"""
Rounds a float value off to the desired precision
"""
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value
def capfirst(value, failure_string='N/A'):
"""
Capitalizes the first character of the value.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Cribbs from django's default filter set
"""
try:
value = value.lower()
return value[0].upper() + value[1:]
except:
return failure_string
def dollars(value):
return u'$%s'% intcomma(value)
def dollar_signs(value, failure_string='N/A'):
"""
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
"""
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string
def image(value, width='', height=''):
    """
    Accepts a URL and returns an HTML image tag ready to be displayed.

    Optionally, you can set the height and width with keyword arguments.

    Fix: when both width and height were given, the original concatenated
    the two declarations with no separator ("width:10pxheight:20px"),
    producing invalid CSS; declarations are now joined with ';'.
    """
    declarations = []
    if width:
        declarations.append("width:%s" % width)
    if height:
        declarations.append("height:%s" % height)
    style = ";".join(declarations)
    return '<img src="%(src)s" style="%(style)s">' % {'src': value, 'style': style}
def link(title, url):
    """Return an HTML anchor tag for *url*, titled and labelled *title*."""
    return u'<a href="{0}" title="{1}">{1}</a>'.format(url, title)
def intcomma(value):
    """
    Borrowed from django.contrib.humanize.

    Converts an integer to a string containing commas every three digits.
    For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
    """
    orig = str(value)
    # Raw strings: '\d' and '\g' are invalid escapes in plain literals
    # and raise SyntaxWarning/SyntaxError on modern Python.
    new = re.sub(r"^(-?\d+)(\d{3})", r'\g<1>,\g<2>', orig)
    if orig == new:
        return new
    # Recurse until no further group of three digits can be split off.
    return intcomma(new)
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
    """
    Converts a number into a percentage string, e.g. 0.5 -> '50.0%'.

    Number of decimal places set by the `decimal_places` kwarg. Default is one.

    By default the number is multiplied by 100. You can prevent it from doing
    that by setting the `multiply` keyword argument to False.

    If the submitted value isn't numeric, returns the `failure_string`
    keyword argument.
    """
    try:
        value = float(value)
    # float(None) raises TypeError, which the original failed to catch.
    except (TypeError, ValueError):
        return failure_string
    if multiply:
        value = value * 100
    return _saferound(value, decimal_places) + '%'
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
    """
    Converts a number into a signed percentage-change string.

    Number of decimal places set by the `decimal_places` kwarg. Default is one.

    Non-numeric input (assumed to come from zero-division errors upstream)
    is presented as `failure_string` in the output.

    By default the number is multiplied by 100. You can prevent it from doing
    that by setting the `multiply` keyword argument to False. Positive
    values gain a leading '+'.
    """
    try:
        f = float(value)
        if multiply:
            f = f * 100
    # float(None) raises TypeError, which the original failed to catch.
    except (TypeError, ValueError):
        return failure_string
    s = _saferound(f, decimal_places)
    if f > 0:
        return '+' + s + '%'
    return s + '%'
def ratio(value, decimal_places=0, failure_string='N/A'):
    """
    Converts a number into an 'X:1' ratio string.

    Number of decimal places set by the `decimal_places` kwarg; the
    default is zero (the original docstring incorrectly said one).

    Non-numeric input returns the `failure_string` keyword argument.
    """
    try:
        f = float(value)
    # float(None) raises TypeError, which the original failed to catch.
    except (TypeError, ValueError):
        return failure_string
    return _saferound(f, decimal_places) + ':1'
def stateface(value):
    """
    Converts a state's name, postal abbreviation or FIPS to ProPublica's
    stateface font code.

    Example usage:
        >> stateface("California")
        'E'

    Documentation: http://propublica.github.com/stateface/
    """
    try:
        return statestyle.get(value).stateface
    # Narrowed from a bare `except:`; lookup failures fall back to the
    # original value.
    except Exception:
        return value
def state_postal(value):
    """
    Converts a state's name or FIPS to its postal abbreviation.

    Example usage:
        >> state_postal("California")
        'CA'
    """
    try:
        return statestyle.get(value).postal
    # Narrowed from a bare `except:`; lookup failures fall back to the
    # original value.
    except Exception:
        return value
def title(value, failure_string='N/A'):
    """
    Converts a string into titlecase. Lifted from Django.

    Keeps apostrophe contractions sane ("don't" -> "Don't") and leaves
    letters that follow a digit lower-cased. Non-strings and empty
    results return the `failure_string` keyword argument.
    """
    try:
        value = value.lower()
        # Raw regex strings avoid invalid-escape warnings for \d.
        t = re.sub(r"([a-z])'([A-Z])", lambda m: m.group(0).lower(),
                   value.title())
        result = re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t)
        if not result:
            return failure_string
        return result
    # Narrowed from a bare `except:`: non-strings lack .lower()/.title()
    # or fail the str-pattern regex.
    except (AttributeError, TypeError):
        return failure_string
# Registry mapping template-facing names to the formatter functions
# defined above; Formatter.__init__ registers each of these by default.
DEFAULT_FORMATTERS = {
'ap_state': ap_state,
'capfirst': capfirst,
'dollars': dollars,
'dollar_signs': dollar_signs,
'intcomma': intcomma,
'image': image,
'link': link,
'percentage': percentage,
'percent_change': percent_change,
'ratio': ratio,
'stateface': stateface,
'state_postal': state_postal,
'title': title,
}
class Formatter(object):
    """
    Stores formatting callables and applies them to values by name,
    keeping the namespace uncluttered.

    A formatting function takes a value as its first argument -- usually
    the value of the Datum on which the function is called -- followed by
    any number of positional arguments. In the context of TableFu, those
    arguments may refer to other columns in the same row.

    >>> formatter = Formatter()
    >>> formatter(1200, 'intcomma')
    '1,200'
    >>> formatter(1200, 'dollars')
    '$1,200'
    """

    def __init__(self):
        # Seed the registry with the built-in formatters.
        self._filters = {}
        for name, func in DEFAULT_FORMATTERS.items():
            self.register(name, func)

    def __call__(self, value, func, *args, **kwargs):
        # Accept either a callable directly or the registered name of one.
        if not callable(func):
            func = self._filters[func]
        return func(value, *args, **kwargs)

    def register(self, name=None, func=None):
        # Supports register(func), register(name, func) and
        # register(func=func); a bare call is a no-op.
        if not func and not name:
            return
        if callable(name) and not func:
            func, name = name, name.__name__
        elif func and not name:
            name = func.__name__
        self._filters[name] = func

    def unregister(self, name=None, func=None):
        # Remove a formatter by name or by its function; unknown names
        # are ignored.
        if not func and not name:
            return
        if not name:
            name = func.__name__
        if name in self._filters:
            del self._filters[name]
# Unless you need to subclass or keep formatting functions
# isolated, you can just import this instance.
# NOTE(review): this module-level name shadows the builtin format() for
# anyone star-importing this module.
format = Formatter()
|
eyeseast/python-tablefu
|
table_fu/formatting.py
|
title
|
python
|
def title(value, failure_string='N/A'):
try:
value = value.lower()
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
if not result:
return failure_string
return result
except:
return failure_string
|
Converts a string into titlecase.
Lifted from Django.
|
train
|
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L210-L224
| null |
"""
Utilities to format values into more meaningful strings.
Inspired by James Bennett's template_utils and Django's
template filters.
"""
import re
import statestyle
def _saferound(value, decimal_places):
"""
Rounds a float value off to the desired precision
"""
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value
def capfirst(value, failure_string='N/A'):
"""
Capitalizes the first character of the value.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Cribbs from django's default filter set
"""
try:
value = value.lower()
return value[0].upper() + value[1:]
except:
return failure_string
def dollars(value):
return u'$%s'% intcomma(value)
def dollar_signs(value, failure_string='N/A'):
"""
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
"""
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string
def image(value, width='', height=''):
"""
Accepts a URL and returns an HTML image tag ready to be displayed.
Optionally, you can set the height and width with keyword arguments.
"""
style = ""
if width:
style += "width:%s" % width
if height:
style += "height:%s" % height
data_dict = dict(src=value, style=style)
return '<img src="%(src)s" style="%(style)s">' % data_dict
def link(title, url):
return u'<a href="%(url)s" title="%(title)s">%(title)s</a>' % {
'url': url,
'title': title
}
def intcomma(value):
"""
Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
def percentage(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage value.
Number of decimal places set by the `decimal_places` kwarg. Default is one.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
"""
try:
value = float(value)
except ValueError:
return failure_string
if multiply:
value = value * 100
return _saferound(value, decimal_places) + '%'
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
"""
Converts a floating point value into a percentage change value.
Number of decimal places set by the `precision` kwarg. Default is one.
Non-floats are assumed to be zero division errors and are presented as
'N/A' in the output.
By default the number is multiplied by 100. You can prevent it from doing
that by setting the `multiply` keyword argument to False.
"""
try:
f = float(value)
if multiply:
f = f * 100
except ValueError:
return failure_string
s = _saferound(f, decimal_places)
if f > 0:
return '+' + s + '%'
else:
return s + '%'
def ratio(value, decimal_places=0, failure_string='N/A'):
"""
Converts a floating point value a X:1 ratio.
Number of decimal places set by the `precision` kwarg. Default is one.
"""
try:
f = float(value)
except ValueError:
return failure_string
return _saferound(f, decimal_places) + ':1'
def stateface(value):
"""
Converts a state's name, postal abbreviation or FIPS to ProPublica's stateface
font code.
Example usage:
>> stateface("California")
'E'
Documentation: http://propublica.github.com/stateface/
"""
try:
return statestyle.get(value).stateface
except:
return value
def state_postal(value):
"""
Converts a state's name, or FIPS to its postal abbreviation
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).postal
except:
return value
def title(value, failure_string='N/A'):
"""
Converts a string into titlecase.
Lifted from Django.
"""
try:
value = value.lower()
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
if not result:
return failure_string
return result
except:
return failure_string
DEFAULT_FORMATTERS = {
'ap_state': ap_state,
'capfirst': capfirst,
'dollars': dollars,
'dollar_signs': dollar_signs,
'intcomma': intcomma,
'image': image,
'link': link,
'percentage': percentage,
'percent_change': percent_change,
'ratio': ratio,
'stateface': stateface,
'state_postal': state_postal,
'title': title,
}
class Formatter(object):
"""
A formatter is a function (or any callable, really)
that takes a value and returns a nicer-looking value,
most likely a sting.
Formatter stores and calls those functions, keeping
the namespace uncluttered.
Formatting functions should take a value as the first
argument--usually the value of the Datum on which the
function is called--followed by any number of positional
arguments.
In the context of TableFu, those arguments may refer to
other columns in the same row.
>>> formatter = Formatter()
>>> formatter(1200, 'intcomma')
'1,200'
>>> formatter(1200, 'dollars')
'$1,200'
"""
def __init__(self):
self._filters = {}
for name, func in DEFAULT_FORMATTERS.items():
self.register(name, func)
def __call__(self, value, func, *args, **kwargs):
if not callable(func):
func = self._filters[func]
return func(value, *args, **kwargs)
def register(self, name=None, func=None):
if not func and not name:
return
if callable(name) and not func:
func = name
name = func.__name__
elif func and not name:
name = func.__name__
self._filters[name] = func
def unregister(self, name=None, func=None):
if not func and not name:
return
if not name:
name = func.__name__
if name not in self._filters:
return
del self._filters[name]
# Unless you need to subclass or keep formatting functions
# isolated, you can just import this instance.
format = Formatter()
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
read
|
python
|
def read(mfile, sfile):
assert os.path.isfile(mfile), "%s multiplicon file does not exist"
assert os.path.isfile(sfile), "%s segments file does not exist"
return IadhoreData(mfile, sfile)
|
Returns an IadhoreData object, constructed from the passed
i-ADHoRe multiplicon and segments output.
- mfile (str), location of multiplicons.txt
- sfile (str), location of segments.txt
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L366-L375
| null |
#!/usr/bin/env python
""" Provides a data structure for handling i-ADHoRe output data, based on
a tree structure for identifying interesting multiplicons, and an SQLite
backend for holding the multiplicon data.
(c) The James Hutton Institute 2013
Author: Leighton Pritchard
Contact:
leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD6 9LH,
Scotland,
UK
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# python package version
# should match r"^__version__ = '(?P<version>[^']+)'$" for setup.py
__version__ = '0.1.1'
# standard library
import collections
import csv
import os
import sqlite3
# NetworkX for graph/tree handling
import networkx as nx
class IadhoreData(object):
    """ Implements an interface to i-ADHoRe output data, using NetworkX to
    hold a tree representation of multiplicon relationships, and SQLite3
    to hold the output data tables.
    """

    def __init__(self, multiplicon_file=None, segment_file=None,
                 db_filename=":memory:"):
        """ Initialise data object
        Arguments:
        o multiplicon_file - location of iADHoRe multiplicon.txt
        o segment_file - location of iADHoRe segment.txt file
        o db_filename - location to write SQLite3 database (defaults to
          in-memory)
        """
        self._dbconn = None
        # None means "not yet computed"; built lazily by
        # is_redundant_multiplicon on first use.
        self._redundant_multiplicon_cache = None
        self._multiplicon_file = multiplicon_file
        self._segment_file = segment_file
        self._db_file = db_filename
        self._multiplicon_graph = nx.DiGraph()
        # Set up the database, then load multiplicon and segment data
        # into the tree / SQL database.
        self._dbsetup()
        self._parse_multiplicons()
        self._parse_segments()

    def _dbsetup(self):
        """ Create/open local SQLite database. """
        self._dbconn = sqlite3.connect(self._db_file)
        sql = '''CREATE TABLE multiplicons
                 (id, genome_x, list_x, parent, genome_y, list_y, level,
                 number_of_anchorpoints, profile_length, begin_x, end_x,
                 begin_y, end_y, is_redundant)'''
        self._dbconn.execute(sql)
        # 'order' appears to be reserved in SQL, hence 'ord'.
        sql = '''CREATE TABLE segments
                 (id, multiplicon, genome, list, first, last, ord)'''
        self._dbconn.execute(sql)
        self._dbconn.commit()

    def _parse_multiplicons(self):
        """ Read the multiplicon output file, and parse into a (i) tree using
        NetworkX, and (ii) an SQLite database.
        """
        # 'rU' mode was removed in Python 3.11; universal newlines are
        # the default in text mode. The context manager also closes the
        # handle, which the original leaked.
        with open(self._multiplicon_file, 'r') as infile:
            reader = csv.reader(infile, delimiter='\t')
            for row in reader:
                if reader.line_num == 1:  # skip header
                    continue
                sql = '''INSERT INTO multiplicons
                         (id, genome_x, list_x, parent, genome_y, list_y,
                         level, number_of_anchorpoints, profile_length,
                         begin_x, end_x, begin_y, end_y, is_redundant)
                         VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
                self._dbconn.execute(sql, row)
                # Add the multiplicon to the relationship tree; a
                # non-empty parent field becomes a parent->child edge.
                m_id = int(row[0])
                self._multiplicon_graph.add_node(m_id)
                if len(row[3]):
                    self._multiplicon_graph.add_edge(int(row[3]), m_id)
        self._dbconn.commit()

    def _parse_segments(self):
        """ Read the segment output file and parse into an SQLite database. """
        with open(self._segment_file, 'r') as infile:
            reader = csv.reader(infile, delimiter='\t')
            for row in reader:
                if reader.line_num == 1:  # skip header
                    continue
                sql = '''INSERT INTO segments
                         (id, multiplicon, genome, list, first, last, ord)
                         VALUES (?,?,?,?,?,?,?)'''
                self._dbconn.execute(sql, row)
        self._dbconn.commit()

    def get_multiplicon_leaves(self, redundant=False):
        """ Return a generator of the IDs of multiplicons found at leaves
        of the tree (i.e. from which no further multiplicons were derived).
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            if len(self._multiplicon_graph.out_edges(node)):
                continue
            if redundant or not self.is_redundant_multiplicon(node):
                yield node

    def get_multiplicon_seeds(self, redundant=False):
        """ Return a generator of the IDs of multiplicons that are initial
        seeding 'pairs' in level 2 multiplicons.
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            if len(self._multiplicon_graph.in_edges(node)):
                continue
            if redundant or not self.is_redundant_multiplicon(node):
                yield node

    def get_multiplicon_intermediates(self, redundant=False):
        """ Return a generator of the IDs of multiplicons that are neither
        seeding 'pairs' in level 2 multiplicons, nor leaves.
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            if not (len(self._multiplicon_graph.in_edges(node)) and
                    len(self._multiplicon_graph.out_edges(node))):
                continue
            if redundant or not self.is_redundant_multiplicon(node):
                yield node

    def get_multiplicon_properties(self, value):
        """ Return a dictionary describing multiplicon data:
        id, parent, level, number_of_anchorpoints, profile_length,
        is_redundant and the contributing genome segments.
        """
        sql = '''SELECT id, parent, level, number_of_anchorpoints,
                 profile_length, is_redundant
                 FROM multiplicons WHERE id=:id'''
        cur = self._dbconn.cursor()
        cur.execute(sql, {'id': str(value)})
        result = cur.fetchone()
        cur.close()
        return {'id': int(result[0]),
                # An empty parent field means a seed (no parent).
                'parent': int(result[1]) if len(result[1]) else None,
                'level': int(result[2]),
                'number_of_anchorpoints': int(result[3]),
                'profile_length': int(result[4]),
                'is_redundant': result[5] == '-1',
                'segments': self.get_multiplicon_segments(value)}

    def get_multiplicon_segments(self, value):
        """ Return a dictionary describing the genome segments that
        contribute to the named multiplicon, keyed by genome, with
        (start feature, end feature) tuples.
        """
        sql = '''SELECT genome, first, last FROM segments
                 WHERE multiplicon=:mp'''
        cur = self._dbconn.cursor()
        cur.execute(sql, {'mp': str(value)})
        result = cur.fetchall()
        cur.close()
        segdict = collections.defaultdict(tuple)
        for genome, start, end in result:
            segdict[genome] = (start, end)
        return segdict

    def get_multiplicons_at_level(self, level, redundant=False):
        """ Return a list of IDs of multiplicons at the requested level. """
        sql = '''SELECT id FROM multiplicons
                 WHERE level=:level'''
        cur = self._dbconn.cursor()
        cur.execute(sql, {'level': str(level)})
        result = [int(r[0]) for r in cur.fetchall()]
        cur.close()
        if redundant:
            return result
        return [r for r in result if not self.is_redundant_multiplicon(r)]

    def is_redundant_multiplicon(self, value):
        """ Returns True if the passed multiplicon ID is redundant, False
        otherwise.
        - value, (int) multiplicon ID
        """
        # BUG FIX: the original guarded the lazy build with
        # hasattr(self, '_redundant_multiplicon_cache'), but __init__
        # always sets that attribute (to None), so the cache was never
        # built and the membership test raised TypeError.
        if self._redundant_multiplicon_cache is None:
            sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
            cur = self._dbconn.cursor()
            # The query has no placeholder, so no parameters are bound
            # (the original passed an unused {'id': ...} dict).
            cur.execute(sql)
            result = [int(r[0]) for r in cur.fetchall()]
            cur.close()
            self._redundant_multiplicon_cache = set(result)
        return value in self._redundant_multiplicon_cache

    def write(self, mfile="multiplicons.txt", sfile="segments.txt",
              clobber=False):
        """ Writes multiplicon and segment files to the named locations.
        - mfile, (str) location for multiplicons file
        - sfile, (str) location for segments file
        - clobber, (Boolean) True if we overwrite target files
        """
        if not clobber:
            if os.path.isfile(mfile):
                raise IOError("Multiplicon file %s already exists." % mfile)
            if os.path.isfile(sfile):
                raise IOError("Segments file %s already exists." % sfile)
        self._write_multiplicons(mfile)
        self._write_segments(sfile)

    def _write_multiplicons(self, filename):
        """ Write multiplicons to file.
        - filename, (str) location of output file
        """
        mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
                           'list_y', 'level', 'number_of_anchorpoints',
                           'profile_length', 'begin_x', 'end_x', 'begin_y',
                           'end_y', 'is_redundant'])
        with open(filename, 'w') as fhandle:
            fhandle.write(mhead + '\n')
            for mrow in self.multiplicons:
                fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')

    def _write_segments(self, filename):
        """ Write segments to file.
        - filename, (str) location of output file
        """
        shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
                           'last', 'order'])
        with open(filename, 'w') as fhandle:
            fhandle.write(shead + '\n')
            for mrow in self.segments:
                fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')

    @property
    def multiplicon_file(self):
        """ Location of the i-ADHoRe multiplicon data output file. """
        return self._multiplicon_file

    @multiplicon_file.setter
    def multiplicon_file(self, value):
        """ Setter for _multiplicon_file attribute. """
        assert os.path.isfile(value), "%s is not a valid file" % value
        self._multiplicon_file = value

    @property
    def segment_file(self):
        """ Location of the i-ADHoRe segment data output file. """
        return self._segment_file

    @segment_file.setter
    def segment_file(self, value):
        """ Setter for _segment_file attribute. """
        assert os.path.isfile(value), "%s is not a valid file" % value
        self._segment_file = value

    @property
    def db_file(self):
        """ Location of the SQLite database. """
        return self._db_file

    @db_file.setter
    def db_file(self, value):
        """ Setter for _db_file attribute. """
        assert not os.path.isfile(value), "%s already exists" % value
        self._db_file = value

    @property
    def multiplicon_graph(self):
        """ Digraph representation of relationships between multiplicons. """
        return self._multiplicon_graph

    @property
    def multiplicons(self):
        """ Multiplicon table from SQLite database. """
        cur = self._dbconn.cursor()
        cur.execute('SELECT * FROM multiplicons')
        data = [r for r in cur.fetchall()]
        cur.close()
        return data

    @property
    def segments(self):
        """ Segments table from SQLite database. """
        cur = self._dbconn.cursor()
        cur.execute('SELECT * FROM segments')
        data = [r for r in cur.fetchall()]
        cur.close()
        return data
# Function to return an IadhoreData object, given multiplicons
# and segments files
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData._dbsetup
|
python
|
def _dbsetup(self):
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
|
Create/open local SQLite database
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L82-L96
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData._parse_multiplicons
|
python
|
def _parse_multiplicons(self):
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
|
Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L98-L120
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData._parse_segments
|
python
|
def _parse_segments(self):
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
|
Read the segment output file and parse into an SQLite database.
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L122-L134
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.get_multiplicon_leaves
|
python
|
def get_multiplicon_leaves(self, redundant=False):
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
|
Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L136-L153
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.get_multiplicon_seeds
|
python
|
def get_multiplicon_seeds(self, redundant=False):
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
|
Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L155-L172
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
    """ Return a dictionary describing one multiplicon: id, parent,
    level, number_of_anchorpoints, profile_length, is_redundant and
    the contributing genome segments.
    """
    cur = self._dbconn.cursor()
    cur.execute('''SELECT id, parent, level, number_of_anchorpoints,
                   profile_length, is_redundant
                   FROM multiplicons WHERE id=:id''', {'id': str(value)})
    mp_id, parent, level, anchors, plen, redundant = cur.fetchone()
    cur.close()
    return {
        'id': int(mp_id),
        # An empty parent field means this multiplicon has no parent.
        'parent': int(parent) if len(parent) else None,
        'level': int(level),
        'number_of_anchorpoints': int(anchors),
        'profile_length': int(plen),
        # Redundant multiplicons are flagged '-1' in the source table.
        'is_redundant': redundant == '-1',
        'segments': self.get_multiplicon_segments(value),
    }
def get_multiplicon_segments(self, value):
    """ Return a mapping of genome -> (first feature, last feature)
    for the genome segments contributing to the named multiplicon.
    """
    cur = self._dbconn.cursor()
    cur.execute('''SELECT genome, first, last FROM segments
                   WHERE multiplicon=:mp''', {'mp': str(value)})
    rows = cur.fetchall()
    cur.close()
    # defaultdict(tuple) so a missing genome reads as an empty tuple.
    segments = collections.defaultdict(tuple)
    for genome, first, last in rows:
        segments[genome] = (first, last)
    return segments
def get_multiplicons_at_level(self, level, redundant=False):
    """ Return a list of IDs of multiplicons at the requested level.

    Arguments:
    o redundant - if True, redundant multiplicons are included
    """
    cur = self._dbconn.cursor()
    cur.execute('''SELECT id FROM multiplicons
                   WHERE level=:level''', {'level': str(level)})
    found = [int(row[0]) for row in cur.fetchall()]
    cur.close()
    if redundant:
        return found
    return [m_id for m_id in found
            if not self.is_redundant_multiplicon(m_id)]
def is_redundant_multiplicon(self, value):
    """ Return True if the passed multiplicon ID is redundant, False
    otherwise.
    - value, (int) multiplicon ID

    The set of redundant IDs is read from the database once and cached
    on the instance for subsequent calls.
    """
    # BUG FIX: __init__ pre-sets _redundant_multiplicon_cache to None,
    # so the original hasattr() guard never triggered a cache build and
    # membership was then tested against None (TypeError). Build the
    # cache whenever it is absent or still None.
    if getattr(self, '_redundant_multiplicon_cache', None) is None:
        # Single-quoted '-1' is unambiguously a string literal in
        # SQLite (double quotes prefer a column name of that spelling).
        sql = "SELECT id FROM multiplicons WHERE is_redundant='-1'"
        cur = self._dbconn.cursor()
        # The statement has no placeholders, so no parameters are
        # passed (the original supplied an unused {'id': ...} dict).
        cur.execute(sql)
        self._redundant_multiplicon_cache = set(
            int(row[0]) for row in cur.fetchall())
        cur.close()
    return value in self._redundant_multiplicon_cache
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
          clobber=False):
    """ Write multiplicon and segment files to the named locations.
    - mfile, (str) location for multiplicons file
    - sfile, (str) location for segments file
    - clobber, (Boolean) True if we overwrite target files
    """
    if not clobber:
        # Refuse to overwrite existing output unless told otherwise.
        if os.path.isfile(mfile):
            raise IOError("Multiplicon file %s already exists." % mfile)
        if os.path.isfile(sfile):
            raise IOError("Segments file %s already exists." % sfile)
    self._write_multiplicons(mfile)
    self._write_segments(sfile)
def _write_multiplicons(self, filename):
    """ Write the multiplicons table as tab-separated text.
    - filename, (str) location of output file
    """
    header = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
                        'list_y', 'level', 'number_of_anchorpoints',
                        'profile_length', 'begin_x', 'end_x', 'begin_y',
                        'end_y', 'is_redundant'])
    with open(filename, 'w') as outfh:
        outfh.write(header + '\n')
        outfh.writelines('\t'.join(str(field) for field in row) + '\n'
                         for row in self.multiplicons)
def _write_segments(self, filename):
    """ Write the segments table as tab-separated text.
    - filename, (str) location of output file
    """
    header = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
                        'last', 'order'])
    with open(filename, 'w') as outfh:
        outfh.write(header + '\n')
        outfh.writelines('\t'.join(str(field) for field in row) + '\n'
                         for row in self.segments)
@property
def multiplicon_file(self):
    """ Location of the i-ADHoRe multiplicon data output file."""
    return self._multiplicon_file

@multiplicon_file.setter
def multiplicon_file(self, value):
    """ Setter for _multiplicon_file attribute """
    # Guard: the multiplicon file must already exist on disk.
    # NOTE(review): assert is stripped under `python -O`; consider
    # raising ValueError instead.
    assert os.path.isfile(value), "%s is not a valid file" % value
    self._multiplicon_file = value

@property
def segment_file(self):
    """ Location of the i-ADHoRe segment data output file."""
    return self._segment_file

@segment_file.setter
def segment_file(self, value):
    """ Setter for _segment_file attribute """
    # Guard: the segment file must already exist on disk.
    assert os.path.isfile(value), "%s is not a valid file" % value
    self._segment_file = value

@property
def db_file(self):
    """ Location of the SQLite database."""
    return self._db_file

@db_file.setter
def db_file(self, value):
    """ Setter for _db_file attribute """
    # Guard: refuse to point at an existing database file.
    assert not os.path.isfile(value), "%s already exists" % value
    self._db_file = value

@property
def multiplicon_graph(self):
    """ Digraph representation of relationships between multiplicons."""
    return self._multiplicon_graph

@property
def multiplicons(self):
    """ Multiplicon table from SQLite database. """
    # Full table dump; each row is a tuple in insertion order.
    sql = '''SELECT * FROM multiplicons'''
    cur = self._dbconn.cursor()
    cur.execute(sql)
    data = [r for r in cur.fetchall()]
    cur.close()
    return data

@property
def segments(self):
    """ Segments table from SQLite database. """
    # Full table dump; each row is a tuple in insertion order.
    sql = '''SELECT * FROM segments'''
    cur = self._dbconn.cursor()
    cur.execute(sql)
    data = [r for r in cur.fetchall()]
    cur.close()
    return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.get_multiplicon_intermediates
|
python
|
def get_multiplicon_intermediates(self, redundant=False):
    """ Yield the IDs of multiplicons that are neither seeds nor
    leaves of the multiplicon tree.

    Arguments:
    o redundant - if True, redundant multiplicons are also reported
    """
    graph = self._multiplicon_graph
    for m_id in graph.nodes():
        # Intermediates have both a parent and at least one child.
        if len(graph.in_edges(m_id)) and len(graph.out_edges(m_id)):
            if not self.is_redundant_multiplicon(m_id) or redundant:
                yield m_id
|
Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L174-L192
| null |
class IadhoreData(object):
    """ Implements an interface to i-ADHoRe output data.

    Multiplicon parent/child relationships are held in a NetworkX
    digraph; the multiplicon and segment tables are loaded into an
    SQLite3 database (in-memory by default).
    """

    def __init__(self, multiplicon_file=None, segment_file=None,
                 db_filename=":memory:"):
        """ Initialise data object
        Arguments:
        o multiplicon_file - location of iADHoRe multiplicon.txt
        o segment_file - location of iADHoRe segment.txt file
        o db_filename - location to write SQLite3 database (defaults to
        in-memory)
        """
        self._dbconn = None
        # Lazily-built set of redundant multiplicon IDs; stays None
        # until the first is_redundant_multiplicon() call populates it.
        self._redundant_multiplicon_cache = None
        self._multiplicon_file = multiplicon_file
        self._segment_file = segment_file
        self._db_file = db_filename
        self._multiplicon_graph = nx.DiGraph()
        # Create the database schema, then load both i-ADHoRe tables.
        self._dbsetup()
        self._parse_multiplicons()
        self._parse_segments()

    def _dbsetup(self):
        """ Create/open the local SQLite database and its schema. """
        self._dbconn = sqlite3.connect(self._db_file)
        # Table for the multiplicons output.
        self._dbconn.execute('''CREATE TABLE multiplicons
            (id, genome_x, list_x, parent, genome_y, list_y, level,
            number_of_anchorpoints, profile_length, begin_x, end_x,
            begin_y, end_y, is_redundant)''')
        # Table for the segments output ('order' is a reserved word,
        # hence the 'ord' column name). The original comment wrongly
        # said "multiplicons" here.
        self._dbconn.execute('''CREATE TABLE segments
            (id, multiplicon, genome, list, first, last, ord)''')
        self._dbconn.commit()

    def _parse_multiplicons(self):
        """ Read the multiplicon output file and parse it into (i) the
        parent->child multiplicon digraph and (ii) the SQLite database.
        """
        sql = '''INSERT INTO multiplicons
            (id, genome_x, list_x, parent, genome_y, list_y, level,
            number_of_anchorpoints, profile_length, begin_x, end_x,
            begin_y, end_y, is_redundant) VALUES
            (?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
        # 'r' replaces the 'rU' mode removed in Python 3.11; the file
        # handle is now closed deterministically.
        with open(self._multiplicon_file, 'r') as handle:
            reader = csv.reader(handle, delimiter='\t')
            for row in reader:
                if reader.line_num == 1:  # skip header
                    continue
                self._dbconn.execute(sql, row)
                m_id = int(row[0])
                self._multiplicon_graph.add_node(m_id)
                # A non-empty fourth column names the parent multiplicon.
                if len(row[3]):
                    self._multiplicon_graph.add_edge(int(row[3]), m_id)
        self._dbconn.commit()

    def _parse_segments(self):
        """ Read the segment output file and parse it into the SQLite
        database. The header row is skipped.
        """
        sql = '''INSERT INTO segments
            (id, multiplicon, genome, list, first, last, ord)
            VALUES (?,?,?,?,?,?,?)'''
        with open(self._segment_file, 'r') as handle:
            reader = csv.reader(handle, delimiter='\t')
            for row in reader:
                if reader.line_num == 1:  # skip header
                    continue
                self._dbconn.execute(sql, row)
        self._dbconn.commit()

    def get_multiplicon_leaves(self, redundant=False):
        """ Yield the IDs of multiplicons found at leaves of the tree
        (i.e. from which no further multiplicons were derived).
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            # A leaf has no outgoing edge in the parent->child digraph.
            if len(self._multiplicon_graph.out_edges(node)):
                continue
            if not self.is_redundant_multiplicon(node) or redundant:
                yield node

    def get_multiplicon_seeds(self, redundant=False):
        """ Yield the IDs of multiplicons that are initial seeding
        'pairs' in level 2 multiplicons (no incoming edge).
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            if len(self._multiplicon_graph.in_edges(node)):
                continue
            if not self.is_redundant_multiplicon(node) or redundant:
                yield node

    def get_multiplicon_properties(self, value):
        """ Return a dictionary describing multiplicon data:
        id, parent, level, number_of_anchorpoints, profile_length,
        is_redundant and the contributing genome segments.
        """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT id, parent, level, number_of_anchorpoints,
            profile_length, is_redundant
            FROM multiplicons WHERE id=:id''', {'id': str(value)})
        result = cur.fetchone()
        cur.close()
        return {'id': int(result[0]),
                # Empty parent field == no parent (seed multiplicon).
                'parent': int(result[1]) if len(result[1]) else None,
                'level': int(result[2]),
                'number_of_anchorpoints': int(result[3]),
                'profile_length': int(result[4]),
                # Redundant rows carry the string flag '-1'.
                'is_redundant': result[5] == '-1',
                'segments': self.get_multiplicon_segments(value)}

    def get_multiplicon_segments(self, value):
        """ Return a dictionary describing the genome segments that
        contribute to the named multiplicon, keyed by genome, with
        (start feature, end feature) tuples.
        """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT genome, first, last FROM segments
            WHERE multiplicon=:mp''', {'mp': str(value)})
        result = cur.fetchall()
        cur.close()
        # defaultdict(tuple) so a missing genome reads as ().
        segdict = collections.defaultdict(tuple)
        for genome, start, end in result:
            segdict[genome] = (start, end)
        return segdict

    def get_multiplicons_at_level(self, level, redundant=False):
        """ Return a list of IDs of multiplicons at the requested level.
        Arguments:
        o redundant - if true, include redundant multiplicons
        """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT id FROM multiplicons
            WHERE level=:level''', {'level': str(level)})
        result = [int(r[0]) for r in cur.fetchall()]
        cur.close()
        if redundant:
            return result
        return [r for r in result if not self.is_redundant_multiplicon(r)]

    def is_redundant_multiplicon(self, value):
        """ Return True if the passed multiplicon ID is redundant,
        False otherwise.
        - value, (int) multiplicon ID
        """
        # BUG FIX: __init__ pre-sets the cache to None, so the original
        # hasattr() guard never built the cache and membership was then
        # tested against None (TypeError). Build when absent or None.
        if getattr(self, '_redundant_multiplicon_cache', None) is None:
            sql = "SELECT id FROM multiplicons WHERE is_redundant='-1'"
            cur = self._dbconn.cursor()
            # No placeholders in the statement, so no parameters are
            # passed (the original supplied an unused {'id': ...} dict).
            cur.execute(sql)
            self._redundant_multiplicon_cache = set(
                int(r[0]) for r in cur.fetchall())
            cur.close()
        return value in self._redundant_multiplicon_cache

    def write(self, mfile="multiplicons.txt", sfile="segments.txt",
              clobber=False):
        """ Writes multiplicon and segment files to the named locations.
        - mfile, (str) location for multiplicons file
        - sfile, (str) location for segments file
        - clobber, (Boolean) True if we overwrite target files
        """
        if not clobber:
            # Refuse to overwrite existing output unless told otherwise.
            if os.path.isfile(mfile):
                raise IOError("Multiplicon file %s already exists." % mfile)
            if os.path.isfile(sfile):
                raise IOError("Segments file %s already exists." % sfile)
        self._write_multiplicons(mfile)
        self._write_segments(sfile)

    def _write_multiplicons(self, filename):
        """ Write the multiplicons table as tab-separated text.
        - filename, (str) location of output file
        """
        mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
                           'list_y', 'level', 'number_of_anchorpoints',
                           'profile_length', 'begin_x', 'end_x', 'begin_y',
                           'end_y', 'is_redundant'])
        with open(filename, 'w') as fhandle:
            fhandle.write(mhead + '\n')
            for mrow in self.multiplicons:
                fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')

    def _write_segments(self, filename):
        """ Write the segments table as tab-separated text.
        - filename, (str) location of output file
        """
        shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
                           'last', 'order'])
        with open(filename, 'w') as fhandle:
            fhandle.write(shead + '\n')
            for mrow in self.segments:
                fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')

    @property
    def multiplicon_file(self):
        """ Location of the i-ADHoRe multiplicon data output file."""
        return self._multiplicon_file

    @multiplicon_file.setter
    def multiplicon_file(self, value):
        """ Setter for _multiplicon_file attribute """
        # NOTE(review): assert is stripped under `python -O`.
        assert os.path.isfile(value), "%s is not a valid file" % value
        self._multiplicon_file = value

    @property
    def segment_file(self):
        """ Location of the i-ADHoRe segment data output file."""
        return self._segment_file

    @segment_file.setter
    def segment_file(self, value):
        """ Setter for _segment_file attribute """
        assert os.path.isfile(value), "%s is not a valid file" % value
        self._segment_file = value

    @property
    def db_file(self):
        """ Location of the SQLite database."""
        return self._db_file

    @db_file.setter
    def db_file(self, value):
        """ Setter for _db_file attribute """
        assert not os.path.isfile(value), "%s already exists" % value
        self._db_file = value

    @property
    def multiplicon_graph(self):
        """ Digraph representation of relationships between multiplicons."""
        return self._multiplicon_graph

    @property
    def multiplicons(self):
        """ Multiplicon table from SQLite database. """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT * FROM multiplicons''')
        data = list(cur.fetchall())
        cur.close()
        return data

    @property
    def segments(self):
        """ Segments table from SQLite database. """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT * FROM segments''')
        data = list(cur.fetchall())
        cur.close()
        return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.get_multiplicon_properties
|
python
|
def get_multiplicon_properties(self, value):
    """ Return a dictionary describing one multiplicon: id, parent,
    level, number_of_anchorpoints, profile_length, is_redundant and
    the contributing genome segments.
    """
    cur = self._dbconn.cursor()
    cur.execute('''SELECT id, parent, level, number_of_anchorpoints,
                   profile_length, is_redundant
                   FROM multiplicons WHERE id=:id''', {'id': str(value)})
    mp_id, parent, level, anchors, plen, redundant = cur.fetchone()
    cur.close()
    return {
        'id': int(mp_id),
        # An empty parent field means this multiplicon has no parent.
        'parent': int(parent) if len(parent) else None,
        'level': int(level),
        'number_of_anchorpoints': int(anchors),
        'profile_length': int(plen),
        # Redundant multiplicons are flagged '-1' in the source table.
        'is_redundant': redundant == '-1',
        'segments': self.get_multiplicon_segments(value),
    }
|
Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L194-L212
|
[
"def get_multiplicon_segments(self, value):\n \"\"\" Return a dictionary describing the genome segments that\n contribute to the named multiplicon, keyed by genome, with\n (start feature, end feature) tuples.\n \"\"\"\n sql = '''SELECT genome, first, last FROM segments\n WHERE multiplicon=:mp'''\n cur = self._dbconn.cursor()\n cur.execute(sql, {'mp': str(value)})\n result = cur.fetchall()\n cur.close()\n segdict = collections.defaultdict(tuple)\n for genome, start, end in result:\n segdict[genome] = (start, end)\n return segdict\n"
] |
class IadhoreData(object):
    """ Implements an interface to i-ADHoRe output data.

    Multiplicon parent/child relationships are held in a NetworkX
    digraph; the multiplicon and segment tables are loaded into an
    SQLite3 database (in-memory by default).
    """

    def __init__(self, multiplicon_file=None, segment_file=None,
                 db_filename=":memory:"):
        """ Initialise data object
        Arguments:
        o multiplicon_file - location of iADHoRe multiplicon.txt
        o segment_file - location of iADHoRe segment.txt file
        o db_filename - location to write SQLite3 database (defaults to
        in-memory)
        """
        self._dbconn = None
        # Lazily-built set of redundant multiplicon IDs; stays None
        # until the first is_redundant_multiplicon() call populates it.
        self._redundant_multiplicon_cache = None
        self._multiplicon_file = multiplicon_file
        self._segment_file = segment_file
        self._db_file = db_filename
        self._multiplicon_graph = nx.DiGraph()
        # Create the database schema, then load both i-ADHoRe tables.
        self._dbsetup()
        self._parse_multiplicons()
        self._parse_segments()

    def _dbsetup(self):
        """ Create/open the local SQLite database and its schema. """
        self._dbconn = sqlite3.connect(self._db_file)
        # Table for the multiplicons output.
        self._dbconn.execute('''CREATE TABLE multiplicons
            (id, genome_x, list_x, parent, genome_y, list_y, level,
            number_of_anchorpoints, profile_length, begin_x, end_x,
            begin_y, end_y, is_redundant)''')
        # Table for the segments output ('order' is a reserved word,
        # hence the 'ord' column name). The original comment wrongly
        # said "multiplicons" here.
        self._dbconn.execute('''CREATE TABLE segments
            (id, multiplicon, genome, list, first, last, ord)''')
        self._dbconn.commit()

    def _parse_multiplicons(self):
        """ Read the multiplicon output file and parse it into (i) the
        parent->child multiplicon digraph and (ii) the SQLite database.
        """
        sql = '''INSERT INTO multiplicons
            (id, genome_x, list_x, parent, genome_y, list_y, level,
            number_of_anchorpoints, profile_length, begin_x, end_x,
            begin_y, end_y, is_redundant) VALUES
            (?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
        # 'r' replaces the 'rU' mode removed in Python 3.11; the file
        # handle is now closed deterministically.
        with open(self._multiplicon_file, 'r') as handle:
            reader = csv.reader(handle, delimiter='\t')
            for row in reader:
                if reader.line_num == 1:  # skip header
                    continue
                self._dbconn.execute(sql, row)
                m_id = int(row[0])
                self._multiplicon_graph.add_node(m_id)
                # A non-empty fourth column names the parent multiplicon.
                if len(row[3]):
                    self._multiplicon_graph.add_edge(int(row[3]), m_id)
        self._dbconn.commit()

    def _parse_segments(self):
        """ Read the segment output file and parse it into the SQLite
        database. The header row is skipped.
        """
        sql = '''INSERT INTO segments
            (id, multiplicon, genome, list, first, last, ord)
            VALUES (?,?,?,?,?,?,?)'''
        with open(self._segment_file, 'r') as handle:
            reader = csv.reader(handle, delimiter='\t')
            for row in reader:
                if reader.line_num == 1:  # skip header
                    continue
                self._dbconn.execute(sql, row)
        self._dbconn.commit()

    def get_multiplicon_leaves(self, redundant=False):
        """ Yield the IDs of multiplicons found at leaves of the tree
        (i.e. from which no further multiplicons were derived).
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            # A leaf has no outgoing edge in the parent->child digraph.
            if len(self._multiplicon_graph.out_edges(node)):
                continue
            if not self.is_redundant_multiplicon(node) or redundant:
                yield node

    def get_multiplicon_seeds(self, redundant=False):
        """ Yield the IDs of multiplicons that are initial seeding
        'pairs' in level 2 multiplicons (no incoming edge).
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            if len(self._multiplicon_graph.in_edges(node)):
                continue
            if not self.is_redundant_multiplicon(node) or redundant:
                yield node

    def get_multiplicon_intermediates(self, redundant=False):
        """ Yield the IDs of multiplicons that are neither seeding
        'pairs' in level 2 multiplicons, nor leaves.
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            # Intermediates have both a parent and at least one child.
            if len(self._multiplicon_graph.in_edges(node)) and \
                    len(self._multiplicon_graph.out_edges(node)):
                if not self.is_redundant_multiplicon(node) or redundant:
                    yield node

    def get_multiplicon_segments(self, value):
        """ Return a dictionary describing the genome segments that
        contribute to the named multiplicon, keyed by genome, with
        (start feature, end feature) tuples.
        """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT genome, first, last FROM segments
            WHERE multiplicon=:mp''', {'mp': str(value)})
        result = cur.fetchall()
        cur.close()
        # defaultdict(tuple) so a missing genome reads as ().
        segdict = collections.defaultdict(tuple)
        for genome, start, end in result:
            segdict[genome] = (start, end)
        return segdict

    def get_multiplicons_at_level(self, level, redundant=False):
        """ Return a list of IDs of multiplicons at the requested level.
        Arguments:
        o redundant - if true, include redundant multiplicons
        """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT id FROM multiplicons
            WHERE level=:level''', {'level': str(level)})
        result = [int(r[0]) for r in cur.fetchall()]
        cur.close()
        if redundant:
            return result
        return [r for r in result if not self.is_redundant_multiplicon(r)]

    def is_redundant_multiplicon(self, value):
        """ Return True if the passed multiplicon ID is redundant,
        False otherwise.
        - value, (int) multiplicon ID
        """
        # BUG FIX: __init__ pre-sets the cache to None, so the original
        # hasattr() guard never built the cache and membership was then
        # tested against None (TypeError). Build when absent or None.
        if getattr(self, '_redundant_multiplicon_cache', None) is None:
            sql = "SELECT id FROM multiplicons WHERE is_redundant='-1'"
            cur = self._dbconn.cursor()
            # No placeholders in the statement, so no parameters are
            # passed (the original supplied an unused {'id': ...} dict).
            cur.execute(sql)
            self._redundant_multiplicon_cache = set(
                int(r[0]) for r in cur.fetchall())
            cur.close()
        return value in self._redundant_multiplicon_cache

    def write(self, mfile="multiplicons.txt", sfile="segments.txt",
              clobber=False):
        """ Writes multiplicon and segment files to the named locations.
        - mfile, (str) location for multiplicons file
        - sfile, (str) location for segments file
        - clobber, (Boolean) True if we overwrite target files
        """
        if not clobber:
            # Refuse to overwrite existing output unless told otherwise.
            if os.path.isfile(mfile):
                raise IOError("Multiplicon file %s already exists." % mfile)
            if os.path.isfile(sfile):
                raise IOError("Segments file %s already exists." % sfile)
        self._write_multiplicons(mfile)
        self._write_segments(sfile)

    def _write_multiplicons(self, filename):
        """ Write the multiplicons table as tab-separated text.
        - filename, (str) location of output file
        """
        mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
                           'list_y', 'level', 'number_of_anchorpoints',
                           'profile_length', 'begin_x', 'end_x', 'begin_y',
                           'end_y', 'is_redundant'])
        with open(filename, 'w') as fhandle:
            fhandle.write(mhead + '\n')
            for mrow in self.multiplicons:
                fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')

    def _write_segments(self, filename):
        """ Write the segments table as tab-separated text.
        - filename, (str) location of output file
        """
        shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
                           'last', 'order'])
        with open(filename, 'w') as fhandle:
            fhandle.write(shead + '\n')
            for mrow in self.segments:
                fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')

    @property
    def multiplicon_file(self):
        """ Location of the i-ADHoRe multiplicon data output file."""
        return self._multiplicon_file

    @multiplicon_file.setter
    def multiplicon_file(self, value):
        """ Setter for _multiplicon_file attribute """
        # NOTE(review): assert is stripped under `python -O`.
        assert os.path.isfile(value), "%s is not a valid file" % value
        self._multiplicon_file = value

    @property
    def segment_file(self):
        """ Location of the i-ADHoRe segment data output file."""
        return self._segment_file

    @segment_file.setter
    def segment_file(self, value):
        """ Setter for _segment_file attribute """
        assert os.path.isfile(value), "%s is not a valid file" % value
        self._segment_file = value

    @property
    def db_file(self):
        """ Location of the SQLite database."""
        return self._db_file

    @db_file.setter
    def db_file(self, value):
        """ Setter for _db_file attribute """
        assert not os.path.isfile(value), "%s already exists" % value
        self._db_file = value

    @property
    def multiplicon_graph(self):
        """ Digraph representation of relationships between multiplicons."""
        return self._multiplicon_graph

    @property
    def multiplicons(self):
        """ Multiplicon table from SQLite database. """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT * FROM multiplicons''')
        data = list(cur.fetchall())
        cur.close()
        return data

    @property
    def segments(self):
        """ Segments table from SQLite database. """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT * FROM segments''')
        data = list(cur.fetchall())
        cur.close()
        return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.get_multiplicon_segments
|
python
|
def get_multiplicon_segments(self, value):
    """ Return a mapping of genome -> (first feature, last feature)
    for the genome segments contributing to the named multiplicon.
    """
    cur = self._dbconn.cursor()
    cur.execute('''SELECT genome, first, last FROM segments
                   WHERE multiplicon=:mp''', {'mp': str(value)})
    rows = cur.fetchall()
    cur.close()
    # defaultdict(tuple) so a missing genome reads as an empty tuple.
    segments = collections.defaultdict(tuple)
    for genome, first, last in rows:
        segments[genome] = (first, last)
    return segments
|
Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L214-L228
| null |
class IadhoreData(object):
    """ Implements an interface to i-ADHoRe output data.

    Multiplicon parent/child relationships are held in a NetworkX
    digraph; the multiplicon and segment tables are loaded into an
    SQLite3 database (in-memory by default).
    """

    def __init__(self, multiplicon_file=None, segment_file=None,
                 db_filename=":memory:"):
        """ Initialise data object
        Arguments:
        o multiplicon_file - location of iADHoRe multiplicon.txt
        o segment_file - location of iADHoRe segment.txt file
        o db_filename - location to write SQLite3 database (defaults to
        in-memory)
        """
        self._dbconn = None
        # Lazily-built set of redundant multiplicon IDs; stays None
        # until the first is_redundant_multiplicon() call populates it.
        self._redundant_multiplicon_cache = None
        self._multiplicon_file = multiplicon_file
        self._segment_file = segment_file
        self._db_file = db_filename
        self._multiplicon_graph = nx.DiGraph()
        # Create the database schema, then load both i-ADHoRe tables.
        self._dbsetup()
        self._parse_multiplicons()
        self._parse_segments()

    def _dbsetup(self):
        """ Create/open the local SQLite database and its schema. """
        self._dbconn = sqlite3.connect(self._db_file)
        # Table for the multiplicons output.
        self._dbconn.execute('''CREATE TABLE multiplicons
            (id, genome_x, list_x, parent, genome_y, list_y, level,
            number_of_anchorpoints, profile_length, begin_x, end_x,
            begin_y, end_y, is_redundant)''')
        # Table for the segments output ('order' is a reserved word,
        # hence the 'ord' column name). The original comment wrongly
        # said "multiplicons" here.
        self._dbconn.execute('''CREATE TABLE segments
            (id, multiplicon, genome, list, first, last, ord)''')
        self._dbconn.commit()

    def _parse_multiplicons(self):
        """ Read the multiplicon output file and parse it into (i) the
        parent->child multiplicon digraph and (ii) the SQLite database.
        """
        sql = '''INSERT INTO multiplicons
            (id, genome_x, list_x, parent, genome_y, list_y, level,
            number_of_anchorpoints, profile_length, begin_x, end_x,
            begin_y, end_y, is_redundant) VALUES
            (?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
        # 'r' replaces the 'rU' mode removed in Python 3.11; the file
        # handle is now closed deterministically.
        with open(self._multiplicon_file, 'r') as handle:
            reader = csv.reader(handle, delimiter='\t')
            for row in reader:
                if reader.line_num == 1:  # skip header
                    continue
                self._dbconn.execute(sql, row)
                m_id = int(row[0])
                self._multiplicon_graph.add_node(m_id)
                # A non-empty fourth column names the parent multiplicon.
                if len(row[3]):
                    self._multiplicon_graph.add_edge(int(row[3]), m_id)
        self._dbconn.commit()

    def _parse_segments(self):
        """ Read the segment output file and parse it into the SQLite
        database. The header row is skipped.
        """
        sql = '''INSERT INTO segments
            (id, multiplicon, genome, list, first, last, ord)
            VALUES (?,?,?,?,?,?,?)'''
        with open(self._segment_file, 'r') as handle:
            reader = csv.reader(handle, delimiter='\t')
            for row in reader:
                if reader.line_num == 1:  # skip header
                    continue
                self._dbconn.execute(sql, row)
        self._dbconn.commit()

    def get_multiplicon_leaves(self, redundant=False):
        """ Yield the IDs of multiplicons found at leaves of the tree
        (i.e. from which no further multiplicons were derived).
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            # A leaf has no outgoing edge in the parent->child digraph.
            if len(self._multiplicon_graph.out_edges(node)):
                continue
            if not self.is_redundant_multiplicon(node) or redundant:
                yield node

    def get_multiplicon_seeds(self, redundant=False):
        """ Yield the IDs of multiplicons that are initial seeding
        'pairs' in level 2 multiplicons (no incoming edge).
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            if len(self._multiplicon_graph.in_edges(node)):
                continue
            if not self.is_redundant_multiplicon(node) or redundant:
                yield node

    def get_multiplicon_intermediates(self, redundant=False):
        """ Yield the IDs of multiplicons that are neither seeding
        'pairs' in level 2 multiplicons, nor leaves.
        Arguments:
        o redundant - if true, report redundant multiplicons
        """
        for node in self._multiplicon_graph.nodes():
            # Intermediates have both a parent and at least one child.
            if len(self._multiplicon_graph.in_edges(node)) and \
                    len(self._multiplicon_graph.out_edges(node)):
                if not self.is_redundant_multiplicon(node) or redundant:
                    yield node

    def get_multiplicon_properties(self, value):
        """ Return a dictionary describing multiplicon data:
        id, parent, level, number_of_anchorpoints, profile_length,
        is_redundant and the contributing genome segments.
        """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT id, parent, level, number_of_anchorpoints,
            profile_length, is_redundant
            FROM multiplicons WHERE id=:id''', {'id': str(value)})
        result = cur.fetchone()
        cur.close()
        return {'id': int(result[0]),
                # Empty parent field == no parent (seed multiplicon).
                'parent': int(result[1]) if len(result[1]) else None,
                'level': int(result[2]),
                'number_of_anchorpoints': int(result[3]),
                'profile_length': int(result[4]),
                # Redundant rows carry the string flag '-1'.
                'is_redundant': result[5] == '-1',
                # NOTE(review): get_multiplicon_segments is not defined
                # in this class body - presumably provided elsewhere;
                # confirm before relying on this method.
                'segments': self.get_multiplicon_segments(value)}

    def get_multiplicons_at_level(self, level, redundant=False):
        """ Return a list of IDs of multiplicons at the requested level.
        Arguments:
        o redundant - if true, include redundant multiplicons
        """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT id FROM multiplicons
            WHERE level=:level''', {'level': str(level)})
        result = [int(r[0]) for r in cur.fetchall()]
        cur.close()
        if redundant:
            return result
        return [r for r in result if not self.is_redundant_multiplicon(r)]

    def is_redundant_multiplicon(self, value):
        """ Return True if the passed multiplicon ID is redundant,
        False otherwise.
        - value, (int) multiplicon ID
        """
        # BUG FIX: __init__ pre-sets the cache to None, so the original
        # hasattr() guard never built the cache and membership was then
        # tested against None (TypeError). Build when absent or None.
        if getattr(self, '_redundant_multiplicon_cache', None) is None:
            sql = "SELECT id FROM multiplicons WHERE is_redundant='-1'"
            cur = self._dbconn.cursor()
            # No placeholders in the statement, so no parameters are
            # passed (the original supplied an unused {'id': ...} dict).
            cur.execute(sql)
            self._redundant_multiplicon_cache = set(
                int(r[0]) for r in cur.fetchall())
            cur.close()
        return value in self._redundant_multiplicon_cache

    def write(self, mfile="multiplicons.txt", sfile="segments.txt",
              clobber=False):
        """ Writes multiplicon and segment files to the named locations.
        - mfile, (str) location for multiplicons file
        - sfile, (str) location for segments file
        - clobber, (Boolean) True if we overwrite target files
        """
        if not clobber:
            # Refuse to overwrite existing output unless told otherwise.
            if os.path.isfile(mfile):
                raise IOError("Multiplicon file %s already exists." % mfile)
            if os.path.isfile(sfile):
                raise IOError("Segments file %s already exists." % sfile)
        self._write_multiplicons(mfile)
        self._write_segments(sfile)

    def _write_multiplicons(self, filename):
        """ Write the multiplicons table as tab-separated text.
        - filename, (str) location of output file
        """
        mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
                           'list_y', 'level', 'number_of_anchorpoints',
                           'profile_length', 'begin_x', 'end_x', 'begin_y',
                           'end_y', 'is_redundant'])
        with open(filename, 'w') as fhandle:
            fhandle.write(mhead + '\n')
            for mrow in self.multiplicons:
                fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')

    def _write_segments(self, filename):
        """ Write the segments table as tab-separated text.
        - filename, (str) location of output file
        """
        shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
                           'last', 'order'])
        with open(filename, 'w') as fhandle:
            fhandle.write(shead + '\n')
            for mrow in self.segments:
                fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')

    @property
    def multiplicon_file(self):
        """ Location of the i-ADHoRe multiplicon data output file."""
        return self._multiplicon_file

    @multiplicon_file.setter
    def multiplicon_file(self, value):
        """ Setter for _multiplicon_file attribute """
        # NOTE(review): assert is stripped under `python -O`.
        assert os.path.isfile(value), "%s is not a valid file" % value
        self._multiplicon_file = value

    @property
    def segment_file(self):
        """ Location of the i-ADHoRe segment data output file."""
        return self._segment_file

    @segment_file.setter
    def segment_file(self, value):
        """ Setter for _segment_file attribute """
        assert os.path.isfile(value), "%s is not a valid file" % value
        self._segment_file = value

    @property
    def db_file(self):
        """ Location of the SQLite database."""
        return self._db_file

    @db_file.setter
    def db_file(self, value):
        """ Setter for _db_file attribute """
        assert not os.path.isfile(value), "%s already exists" % value
        self._db_file = value

    @property
    def multiplicon_graph(self):
        """ Digraph representation of relationships between multiplicons."""
        return self._multiplicon_graph

    @property
    def multiplicons(self):
        """ Multiplicon table from SQLite database. """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT * FROM multiplicons''')
        data = list(cur.fetchall())
        cur.close()
        return data

    @property
    def segments(self):
        """ Segments table from SQLite database. """
        cur = self._dbconn.cursor()
        cur.execute('''SELECT * FROM segments''')
        data = list(cur.fetchall())
        cur.close()
        return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.get_multiplicons_at_level
|
python
|
def get_multiplicons_at_level(self, level, redundant=False):
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
|
Return a list of IDs of multiplicons at the requested level
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L230-L242
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.is_redundant_multiplicon
|
python
|
def is_redundant_multiplicon(self, value):
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
|
Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L244-L259
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.write
|
python
|
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
|
Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L261-L275
|
[
"def _write_multiplicons(self, filename):\n \"\"\" Write multiplicons to file.\n\n - filename, (str) location of output file\n \"\"\"\n # Column headers\n mhead = '\\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',\n 'list_y', 'level', 'number_of_anchorpoints',\n 'profile_length', 'begin_x', 'end_x', 'begin_y',\n 'end_y', 'is_redundant'])\n with open(filename, 'w') as fhandle:\n fhandle.write(mhead + '\\n')\n for mrow in self.multiplicons:\n fhandle.write('\\t'.join([str(e) for e in mrow]) + '\\n')\n",
"def _write_segments(self, filename):\n \"\"\" Write segments to file.\n\n - filename, (str) location of output file\n \"\"\"\n # Column headers\n shead = '\\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',\n 'last', 'order'])\n with open(filename, 'w') as fhandle:\n fhandle.write(shead + '\\n')\n for mrow in self.segments:\n fhandle.write('\\t'.join([str(e) for e in mrow]) + '\\n')\n"
] |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData._write_multiplicons
|
python
|
def _write_multiplicons(self, filename):
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
|
Write multiplicons to file.
- filename, (str) location of output file
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L277-L290
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData._write_segments
|
python
|
def _write_segments(self, filename):
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
|
Write segments to file.
- filename, (str) location of output file
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L292-L303
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.multiplicon_file
|
python
|
def multiplicon_file(self, value):
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
|
Setter for _multiplicon_file attribute
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L311-L314
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.segment_file
|
python
|
def segment_file(self, value):
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
|
Setter for _segment_file attribute
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L322-L325
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.db_file
|
python
|
def db_file(self, value):
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
|
Setter for _db_file attribute
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L333-L336
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
def multiplicons(self):
""" Multiplicon table from SQLite database. """
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
widdowquinn/pyADHoRe
|
pyadhore/iadhore.py
|
IadhoreData.multiplicons
|
python
|
def multiplicons(self):
sql = '''SELECT * FROM multiplicons'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
Multiplicon table from SQLite database.
|
train
|
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L344-L351
| null |
class IadhoreData(object):
""" Implements an interface to i-ADHoRe output data, using NetworkX to
hold a tree representation of multiplicon relationships, and SQLite3
to hold the output data tables
"""
def __init__(self, multiplicon_file=None, segment_file=None,
db_filename=":memory:"):
""" Initialise data object
Arguments:
o multiplicon_file - location of iADHoRe multiplicon.txt
o segment_file - location of iADHoRe segment.txt file
o db_filename - location to write SQLite3 database (defaults to
in-memory)
"""
# Attributes later populated in methods
self._dbconn = None
self._redundant_multiplicon_cache = None
# Get arguments and initialise
self._multiplicon_file = multiplicon_file
self._segment_file = segment_file
self._db_file = db_filename
self._multiplicon_graph = nx.DiGraph()
# Set up database
self._dbsetup()
# Load multiplicon and segment data into tree/SQL database
self._parse_multiplicons()
self._parse_segments()
def _dbsetup(self):
""" Create/open local SQLite database
"""
self._dbconn = sqlite3.connect(self._db_file)
# Create table for multiplicons
sql = '''CREATE TABLE multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant)'''
self._dbconn.execute(sql)
# Create table for multiplicons ('order' appears to be reserved)
sql = '''CREATE TABLE segments
(id, multiplicon, genome, list, first, last, ord)'''
self._dbconn.execute(sql)
self._dbconn.commit()
def _parse_multiplicons(self):
""" Read the multiplicon output file, and parse into a (i) tree using
NetworkX, and (ii) an SQLite database.
"""
# Parse data with csv reader
reader = csv.reader(open(self._multiplicon_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
# Add data to SQLite db
sql = '''INSERT INTO multiplicons
(id, genome_x, list_x, parent, genome_y, list_y, level,
number_of_anchorpoints, profile_length, begin_x, end_x,
begin_y, end_y, is_redundant) VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
# Add multiplicons to tree
m_id = int(row[0])
self._multiplicon_graph.add_node(m_id)
if len(row[3]):
self._multiplicon_graph.add_edge(int(row[3]), m_id)
self._dbconn.commit()
def _parse_segments(self):
""" Read the segment output file and parse into an SQLite database.
"""
reader = csv.reader(open(self._segment_file, 'rU'),
delimiter='\t')
for row in reader:
if reader.line_num == 1: # skip header
continue
sql = '''INSERT INTO segments
(id, multiplicon, genome, list, first, last, ord)
VALUES (?,?,?,?,?,?,?)'''
self._dbconn.execute(sql, row)
self._dbconn.commit()
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_seeds(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_intermediates(self, redundant=False):
""" Return a generator of the IDs of multiplicons that are neither
seeding 'pairs' in level 2 multiplicons, nor leaves.
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if len(self._multiplicon_graph.in_edges(node)) and \
len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue
def get_multiplicon_properties(self, value):
""" Return a dictionary describing multiplicon data:
id, parent, level, number_of_anchorpoints, profile_length,
is_redundant and the contributing genome segments
"""
sql = '''SELECT id, parent, level, number_of_anchorpoints,
profile_length, is_redundant
FROM multiplicons WHERE id=:id'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = cur.fetchone()
cur.close()
return {'id': int(result[0]),
'parent': int(result[1]) if len(result[1]) else None,
'level': int(result[2]),
'number_of_anchorpoints': int(result[3]),
'profile_length': int(result[4]),
'is_redundant': True if result[5] == '-1' else False,
'segments': self.get_multiplicon_segments(value)}
def get_multiplicon_segments(self, value):
""" Return a dictionary describing the genome segments that
contribute to the named multiplicon, keyed by genome, with
(start feature, end feature) tuples.
"""
sql = '''SELECT genome, first, last FROM segments
WHERE multiplicon=:mp'''
cur = self._dbconn.cursor()
cur.execute(sql, {'mp': str(value)})
result = cur.fetchall()
cur.close()
segdict = collections.defaultdict(tuple)
for genome, start, end in result:
segdict[genome] = (start, end)
return segdict
def get_multiplicons_at_level(self, level, redundant=False):
""" Return a list of IDs of multiplicons at the requested level
"""
sql = '''SELECT id FROM multiplicons
WHERE level=:level'''
cur = self._dbconn.cursor()
cur.execute(sql, {'level': str(level)})
result = [int(r[0]) for r in cur.fetchall()]
cur.close()
if redundant:
return result
else:
return [r for r in result if not self.is_redundant_multiplicon(r)]
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False
def write(self, mfile="multiplicons.txt", sfile="segments.txt",
clobber=False):
""" Writes multiplicon and segment files to the named locations.
- mfile, (str) location for multiplicons file
- sfile, (str) location for segments file
- clobber, (Boolean) True if we overwrite target files
"""
if not clobber:
if os.path.isfile(mfile):
raise IOError("Multiplicon file %s already exists." % mfile)
if os.path.isfile(sfile):
raise IOError("Segments file %s already exists." % sfile)
self._write_multiplicons(mfile)
self._write_segments(sfile)
def _write_multiplicons(self, filename):
""" Write multiplicons to file.
- filename, (str) location of output file
"""
# Column headers
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
with open(filename, 'w') as fhandle:
fhandle.write(mhead + '\n')
for mrow in self.multiplicons:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
def _write_segments(self, filename):
""" Write segments to file.
- filename, (str) location of output file
"""
# Column headers
shead = '\t'.join(['id', 'multiplicon', 'genome', 'list', 'first',
'last', 'order'])
with open(filename, 'w') as fhandle:
fhandle.write(shead + '\n')
for mrow in self.segments:
fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
@property
def multiplicon_file(self):
""" Location of the i-ADHoRe multiplicon data output file."""
return self._multiplicon_file
@multiplicon_file.setter
def multiplicon_file(self, value):
""" Setter for _multiplicon_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._multiplicon_file = value
@property
def segment_file(self):
""" Location of the i-ADHoRe segment data output file."""
return self._segment_file
@segment_file.setter
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value
@property
def db_file(self):
""" Location of the SQLite database."""
return self._db_file
@db_file.setter
def db_file(self, value):
""" Setter for _db_file attribute """
assert not os.path.isfile(value), "%s already exists" % value
self._db_file = value
@property
def multiplicon_graph(self):
""" Digraph representation of relationships between multiplicons."""
return self._multiplicon_graph
@property
@property
def segments(self):
""" Segments table from SQLite database. """
sql = '''SELECT * FROM segments'''
cur = self._dbconn.cursor()
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.close()
return data
|
mozilla-b2g/fxos-certsuite
|
mcts/securitysuite/ssl.py
|
nssversion.to_ints
|
python
|
def to_ints(version):
# Example strings: "NSS_3_7_9_RTM", "NSS_3_6_BRANCH_20021026", "NSS_3_6_BETA2",
# "3.18 Basic ECC Beta", "3.16.5 Basic ECC"
# normalize version strings
norm_version = version.replace('NSS_', '').replace('.', '_').replace(' ', '_').upper().split('_')
# Asserting minimumum length of 3 as in [major,minor,tag]
assert len(norm_version) >= 3
# Asserting the first two fields are numeric major and minor
assert norm_version[0].isdigit() and norm_version[1].isdigit()
# Asserting last field is always a non-numeric tag or a date tag
# CAVE: fails with obscure date dags like certdata.txt-NSS_3_4_20020403_2
assert not norm_version[-1].isdigit() or len(norm_version[-1]) > 2
# fill in missing point and pointpoint versions
if not (norm_version[2].isdigit() and len(norm_version[2]) < 4): # <4 to distinguish from numeric date tags
norm_version.insert(2, "0")
if not (norm_version[3].isdigit() and len(norm_version[3]) < 4):
norm_version.insert(3, "0")
# Strictly ordering by RTM > RC > BETA > *
# CAVE: Order rule may result in bogus sorting of obscure tags (WITH_CBKI*, TPATCH*, BRANCHPOINT*, ...)
# Recent versions are tagged non-obscurely and consistently
tag_value = 0
for v in norm_version[4:]:
if v.startswith('BETA'):
tag_value = 100
if len(v[4:]) == 1 or len(v[4:]) == 2:
try:
tag_value += int(v[4:])
except ValueError:
pass
for v in norm_version[4:]:
if v.startswith('RC'):
tag_value = 200
if len(v[3:]) == 1 or len(v[3:]) == 2:
try:
tag_value += int(v[3:])
except ValueError:
pass
for v in norm_version[4:]:
if v == 'RTM':
tag_value = 300
# Special case: "x.y.z Basic ECC" is counted as RTM
# TODO: research the set of potential version string formats reported by libnss.
if norm_version[-2] == 'BASIC' and norm_version[-1] == 'ECC' and norm_version[-3].isdigit():
tag_value = 300
major, minor, point, pointpoint = (int(x) for x in norm_version[:4])
return [major, minor, point, pointpoint, tag_value]
|
Turn version string into a numeric representation for easy comparison.
Undeclared point versions are assumed to be 0.
:param version: a NSS version string
:return: array of [major, minor, point, pointpoint, tag value]
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/securitysuite/ssl.py#L203-L265
| null |
class nssversion(ExtraTest):
"""
Test that logs nss component versions from device.
"""
group = "ssl"
module = sys.modules[__name__]
# TODO: This list's tail must be maintained regularly.
b2g_version_to_hginfo = {
'1.2': {
'hgtag': 'mozilla-b2g26_v1_2',
'release_date:': '2013-12-09',
'release_branch': 'RELEASE_BASE_20131202',
'release_nss_version': 'NSS_3_15_3_RTM'
},
'1.3': {
'hgtag': 'mozilla-b2g28_v1_3',
'release_date:': '2014-04-17',
'release_branch': 'B2G_1_3_20140317_MERGEDAY',
'release_nss_version': 'NSS_3_15_5_RTM'
},
'1.3t': {
'hgtag': 'mozilla-b2g28_v1_3t',
'release_date:': '2014-04-17',
'release_branch': 'B2G_1_3T_20140317_MERGEDAY',
'release_nss_version': 'NSS_3_15_5_RTM'
},
'1.4': {
'hgtag': 'mozilla-b2g30_v1_4',
'release_date:': '2014-06-09',
'release_branch': 'B2G_1_4_20140609_MERGEDAY',
'release_nss_version': 'NSS_3_16_RTM'
},
'2.0': {
'hgtag': 'mozilla-b2g32_v2_0',
'release_date:': '2014-09-01',
'release_branch': 'B2G_2_0_20140902_MERGEDAY',
'release_nss_version': 'NSS_3_16_4_RTM'
},
'2.0m': {
'hgtag': 'mozilla-b2g32_v2_0m',
'release_date:': '2014-09-01',
'release_branch': 'B2G_2_0_20140902_MERGEDAY',
'release_nss_version': 'NSS_3_16_4_RTM'
},
'2.1': {
'hgtag': 'mozilla-b2g34_v2_1',
'release_date:': '2014-11-21',
'release_branch': 'FIREFOX_RELEASE_34_BASE',
'release_nss_version': 'NSS_3_17_2_RTM'
},
'2.1s': {
'hgtag': 'mozilla-b2g34_v2_1s',
'release_date:': '2014-11-21',
'release_branch': 'FIREFOX_RELEASE_34_BASE',
'release_nss_version': 'NSS_3_17_2_RTM'
},
'2.2': {
'hgtag': 'mozilla-b2g37_v2_2',
'release_date:': '2015-06-08',
'release_branch': 'FIREFOX_RELEASE_37_BASE',
'release_nss_version': 'NSS_3_17_4_RTM'
},
'2.5': {
'hgtag': 'mozilla-b2g44_v2_5',
'release_date:': '',
'release_branch': 'FIREFOX_RELEASE_44_BASE',
'release_nss_version': 'NSS_3_20_0_RTM'
}
}
@staticmethod
@staticmethod
def first_older_than_second(version_a, version_b):
"""
Tests for the NSS version string in the first parameter being less
recent than the second (a < b). Tag order is RTM > RC > BETA > *.
Works with hg tags like "NSS_3_7_9_RTM" and version strings reported by
nsINSSVersion, like "3.18 Basic ECC Beta" (mixed, too).
:param version_a: a NSS version string
:param version_b: another NSS version string
:return: bool (a < b)
"""
a = nssversion.to_ints(version_a)
b = nssversion.to_ints(version_b)
# must be of equal length
assert len(a) == len(b)
# Compare each version component, bail out on difference
for i in xrange(len(a)):
if b[i] < a[i]:
return False
if b[i] > a[i]:
return True
return False
@staticmethod
def most_recent_among(versions):
"""
Compare a list of NSS versions and return the latest one.
Uses first_older_than_second() for comparison.
:param versions: an array of NSS version strings
:return: verbatim copy of the most recent version string
"""
latest = versions[0]
for v in versions[1:]:
if nssversion.first_older_than_second(latest, v):
latest = v
return latest
@classmethod
def run(cls, version=None):
"""
Test runner method; is called by parent class defined in suite.py.
:param version: B2G version string to test against
:return: bool PASS/FAIL status
"""
try:
dumper = certdump()
versions = dumper.nssversion_via_marionette()
except Exception as e: # TODO: too broad exception
cls.log_status('FAIL', 'Failed to gather information from the device via Marionette: %s' % e)
return False
if version is None:
cls.log_status('FAIL', 'NSS version check requires a B2G version.\nReported component versions:\n%s' % (
'\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
reported_version = versions['NSS_Version']
if version not in nssversion.b2g_version_to_hginfo:
cls.log_status('FAIL', 'No version comparison data for B2G %s.\nReported NSS component versions:\n%s' % (
version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
expected_version = nssversion.b2g_version_to_hginfo[version]['release_nss_version']
# Fail if reported version is a downgrade
if nssversion.first_older_than_second(reported_version, expected_version):
cls.log_status('FAIL', 'NSS downgrade detected. Expecting at least version %s.\n'
'Reported versions:\n%s' % (
expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
# Pass if NSS version was upgraded.
if nssversion.first_older_than_second(expected_version, reported_version):
cls.log_status('PASS', 'NSS more recent than release version %s. Reported component versions:\n%s' % (
expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return True
# Else device has reported the expected version.
cls.log_status('PASS', 'NSS version reported as expected. Reported component versions:\n%s' % (
'\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return True
|
mozilla-b2g/fxos-certsuite
|
mcts/securitysuite/ssl.py
|
nssversion.first_older_than_second
|
python
|
def first_older_than_second(version_a, version_b):
a = nssversion.to_ints(version_a)
b = nssversion.to_ints(version_b)
# must be of equal length
assert len(a) == len(b)
# Compare each version component, bail out on difference
for i in xrange(len(a)):
if b[i] < a[i]:
return False
if b[i] > a[i]:
return True
return False
|
Tests for the NSS version string in the first parameter being less
recent than the second (a < b). Tag order is RTM > RC > BETA > *.
Works with hg tags like "NSS_3_7_9_RTM" and version strings reported by
nsINSSVersion, like "3.18 Basic ECC Beta" (mixed, too).
:param version_a: a NSS version string
:param version_b: another NSS version string
:return: bool (a < b)
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/securitysuite/ssl.py#L268-L291
| null |
class nssversion(ExtraTest):
"""
Test that logs nss component versions from device.
"""
group = "ssl"
module = sys.modules[__name__]
# TODO: This list's tail must be maintained regularly.
b2g_version_to_hginfo = {
'1.2': {
'hgtag': 'mozilla-b2g26_v1_2',
'release_date:': '2013-12-09',
'release_branch': 'RELEASE_BASE_20131202',
'release_nss_version': 'NSS_3_15_3_RTM'
},
'1.3': {
'hgtag': 'mozilla-b2g28_v1_3',
'release_date:': '2014-04-17',
'release_branch': 'B2G_1_3_20140317_MERGEDAY',
'release_nss_version': 'NSS_3_15_5_RTM'
},
'1.3t': {
'hgtag': 'mozilla-b2g28_v1_3t',
'release_date:': '2014-04-17',
'release_branch': 'B2G_1_3T_20140317_MERGEDAY',
'release_nss_version': 'NSS_3_15_5_RTM'
},
'1.4': {
'hgtag': 'mozilla-b2g30_v1_4',
'release_date:': '2014-06-09',
'release_branch': 'B2G_1_4_20140609_MERGEDAY',
'release_nss_version': 'NSS_3_16_RTM'
},
'2.0': {
'hgtag': 'mozilla-b2g32_v2_0',
'release_date:': '2014-09-01',
'release_branch': 'B2G_2_0_20140902_MERGEDAY',
'release_nss_version': 'NSS_3_16_4_RTM'
},
'2.0m': {
'hgtag': 'mozilla-b2g32_v2_0m',
'release_date:': '2014-09-01',
'release_branch': 'B2G_2_0_20140902_MERGEDAY',
'release_nss_version': 'NSS_3_16_4_RTM'
},
'2.1': {
'hgtag': 'mozilla-b2g34_v2_1',
'release_date:': '2014-11-21',
'release_branch': 'FIREFOX_RELEASE_34_BASE',
'release_nss_version': 'NSS_3_17_2_RTM'
},
'2.1s': {
'hgtag': 'mozilla-b2g34_v2_1s',
'release_date:': '2014-11-21',
'release_branch': 'FIREFOX_RELEASE_34_BASE',
'release_nss_version': 'NSS_3_17_2_RTM'
},
'2.2': {
'hgtag': 'mozilla-b2g37_v2_2',
'release_date:': '2015-06-08',
'release_branch': 'FIREFOX_RELEASE_37_BASE',
'release_nss_version': 'NSS_3_17_4_RTM'
},
'2.5': {
'hgtag': 'mozilla-b2g44_v2_5',
'release_date:': '',
'release_branch': 'FIREFOX_RELEASE_44_BASE',
'release_nss_version': 'NSS_3_20_0_RTM'
}
}
@staticmethod
def to_ints(version):
"""
Turn version string into a numeric representation for easy comparison.
Undeclared point versions are assumed to be 0.
:param version: a NSS version string
:return: array of [major, minor, point, pointpoint, tag value]
"""
# Example strings: "NSS_3_7_9_RTM", "NSS_3_6_BRANCH_20021026", "NSS_3_6_BETA2",
# "3.18 Basic ECC Beta", "3.16.5 Basic ECC"
# normalize version strings
norm_version = version.replace('NSS_', '').replace('.', '_').replace(' ', '_').upper().split('_')
# Asserting minimumum length of 3 as in [major,minor,tag]
assert len(norm_version) >= 3
# Asserting the first two fields are numeric major and minor
assert norm_version[0].isdigit() and norm_version[1].isdigit()
# Asserting last field is always a non-numeric tag or a date tag
# CAVE: fails with obscure date dags like certdata.txt-NSS_3_4_20020403_2
assert not norm_version[-1].isdigit() or len(norm_version[-1]) > 2
# fill in missing point and pointpoint versions
if not (norm_version[2].isdigit() and len(norm_version[2]) < 4): # <4 to distinguish from numeric date tags
norm_version.insert(2, "0")
if not (norm_version[3].isdigit() and len(norm_version[3]) < 4):
norm_version.insert(3, "0")
# Strictly ordering by RTM > RC > BETA > *
# CAVE: Order rule may result in bogus sorting of obscure tags (WITH_CBKI*, TPATCH*, BRANCHPOINT*, ...)
# Recent versions are tagged non-obscurely and consistently
tag_value = 0
for v in norm_version[4:]:
if v.startswith('BETA'):
tag_value = 100
if len(v[4:]) == 1 or len(v[4:]) == 2:
try:
tag_value += int(v[4:])
except ValueError:
pass
for v in norm_version[4:]:
if v.startswith('RC'):
tag_value = 200
if len(v[3:]) == 1 or len(v[3:]) == 2:
try:
tag_value += int(v[3:])
except ValueError:
pass
for v in norm_version[4:]:
if v == 'RTM':
tag_value = 300
# Special case: "x.y.z Basic ECC" is counted as RTM
# TODO: research the set of potential version string formats reported by libnss.
if norm_version[-2] == 'BASIC' and norm_version[-1] == 'ECC' and norm_version[-3].isdigit():
tag_value = 300
major, minor, point, pointpoint = (int(x) for x in norm_version[:4])
return [major, minor, point, pointpoint, tag_value]
@staticmethod
@staticmethod
def most_recent_among(versions):
"""
Compare a list of NSS versions and return the latest one.
Uses first_older_than_second() for comparison.
:param versions: an array of NSS version strings
:return: verbatim copy of the most recent version string
"""
latest = versions[0]
for v in versions[1:]:
if nssversion.first_older_than_second(latest, v):
latest = v
return latest
@classmethod
def run(cls, version=None):
"""
Test runner method; is called by parent class defined in suite.py.
:param version: B2G version string to test against
:return: bool PASS/FAIL status
"""
try:
dumper = certdump()
versions = dumper.nssversion_via_marionette()
except Exception as e: # TODO: too broad exception
cls.log_status('FAIL', 'Failed to gather information from the device via Marionette: %s' % e)
return False
if version is None:
cls.log_status('FAIL', 'NSS version check requires a B2G version.\nReported component versions:\n%s' % (
'\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
reported_version = versions['NSS_Version']
if version not in nssversion.b2g_version_to_hginfo:
cls.log_status('FAIL', 'No version comparison data for B2G %s.\nReported NSS component versions:\n%s' % (
version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
expected_version = nssversion.b2g_version_to_hginfo[version]['release_nss_version']
# Fail if reported version is a downgrade
if nssversion.first_older_than_second(reported_version, expected_version):
cls.log_status('FAIL', 'NSS downgrade detected. Expecting at least version %s.\n'
'Reported versions:\n%s' % (
expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
# Pass if NSS version was upgraded.
if nssversion.first_older_than_second(expected_version, reported_version):
cls.log_status('PASS', 'NSS more recent than release version %s. Reported component versions:\n%s' % (
expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return True
# Else device has reported the expected version.
cls.log_status('PASS', 'NSS version reported as expected. Reported component versions:\n%s' % (
'\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return True
|
mozilla-b2g/fxos-certsuite
|
mcts/securitysuite/ssl.py
|
nssversion.most_recent_among
|
python
|
def most_recent_among(versions):
latest = versions[0]
for v in versions[1:]:
if nssversion.first_older_than_second(latest, v):
latest = v
return latest
|
Compare a list of NSS versions and return the latest one.
Uses first_older_than_second() for comparison.
:param versions: an array of NSS version strings
:return: verbatim copy of the most recent version string
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/securitysuite/ssl.py#L294-L305
| null |
class nssversion(ExtraTest):
"""
Test that logs nss component versions from device.
"""
group = "ssl"
module = sys.modules[__name__]
# TODO: This list's tail must be maintained regularly.
b2g_version_to_hginfo = {
'1.2': {
'hgtag': 'mozilla-b2g26_v1_2',
'release_date:': '2013-12-09',
'release_branch': 'RELEASE_BASE_20131202',
'release_nss_version': 'NSS_3_15_3_RTM'
},
'1.3': {
'hgtag': 'mozilla-b2g28_v1_3',
'release_date:': '2014-04-17',
'release_branch': 'B2G_1_3_20140317_MERGEDAY',
'release_nss_version': 'NSS_3_15_5_RTM'
},
'1.3t': {
'hgtag': 'mozilla-b2g28_v1_3t',
'release_date:': '2014-04-17',
'release_branch': 'B2G_1_3T_20140317_MERGEDAY',
'release_nss_version': 'NSS_3_15_5_RTM'
},
'1.4': {
'hgtag': 'mozilla-b2g30_v1_4',
'release_date:': '2014-06-09',
'release_branch': 'B2G_1_4_20140609_MERGEDAY',
'release_nss_version': 'NSS_3_16_RTM'
},
'2.0': {
'hgtag': 'mozilla-b2g32_v2_0',
'release_date:': '2014-09-01',
'release_branch': 'B2G_2_0_20140902_MERGEDAY',
'release_nss_version': 'NSS_3_16_4_RTM'
},
'2.0m': {
'hgtag': 'mozilla-b2g32_v2_0m',
'release_date:': '2014-09-01',
'release_branch': 'B2G_2_0_20140902_MERGEDAY',
'release_nss_version': 'NSS_3_16_4_RTM'
},
'2.1': {
'hgtag': 'mozilla-b2g34_v2_1',
'release_date:': '2014-11-21',
'release_branch': 'FIREFOX_RELEASE_34_BASE',
'release_nss_version': 'NSS_3_17_2_RTM'
},
'2.1s': {
'hgtag': 'mozilla-b2g34_v2_1s',
'release_date:': '2014-11-21',
'release_branch': 'FIREFOX_RELEASE_34_BASE',
'release_nss_version': 'NSS_3_17_2_RTM'
},
'2.2': {
'hgtag': 'mozilla-b2g37_v2_2',
'release_date:': '2015-06-08',
'release_branch': 'FIREFOX_RELEASE_37_BASE',
'release_nss_version': 'NSS_3_17_4_RTM'
},
'2.5': {
'hgtag': 'mozilla-b2g44_v2_5',
'release_date:': '',
'release_branch': 'FIREFOX_RELEASE_44_BASE',
'release_nss_version': 'NSS_3_20_0_RTM'
}
}
@staticmethod
def to_ints(version):
"""
Turn version string into a numeric representation for easy comparison.
Undeclared point versions are assumed to be 0.
:param version: a NSS version string
:return: array of [major, minor, point, pointpoint, tag value]
"""
# Example strings: "NSS_3_7_9_RTM", "NSS_3_6_BRANCH_20021026", "NSS_3_6_BETA2",
# "3.18 Basic ECC Beta", "3.16.5 Basic ECC"
# normalize version strings
norm_version = version.replace('NSS_', '').replace('.', '_').replace(' ', '_').upper().split('_')
# Asserting minimumum length of 3 as in [major,minor,tag]
assert len(norm_version) >= 3
# Asserting the first two fields are numeric major and minor
assert norm_version[0].isdigit() and norm_version[1].isdigit()
# Asserting last field is always a non-numeric tag or a date tag
# CAVE: fails with obscure date dags like certdata.txt-NSS_3_4_20020403_2
assert not norm_version[-1].isdigit() or len(norm_version[-1]) > 2
# fill in missing point and pointpoint versions
if not (norm_version[2].isdigit() and len(norm_version[2]) < 4): # <4 to distinguish from numeric date tags
norm_version.insert(2, "0")
if not (norm_version[3].isdigit() and len(norm_version[3]) < 4):
norm_version.insert(3, "0")
# Strictly ordering by RTM > RC > BETA > *
# CAVE: Order rule may result in bogus sorting of obscure tags (WITH_CBKI*, TPATCH*, BRANCHPOINT*, ...)
# Recent versions are tagged non-obscurely and consistently
tag_value = 0
for v in norm_version[4:]:
if v.startswith('BETA'):
tag_value = 100
if len(v[4:]) == 1 or len(v[4:]) == 2:
try:
tag_value += int(v[4:])
except ValueError:
pass
for v in norm_version[4:]:
if v.startswith('RC'):
tag_value = 200
if len(v[3:]) == 1 or len(v[3:]) == 2:
try:
tag_value += int(v[3:])
except ValueError:
pass
for v in norm_version[4:]:
if v == 'RTM':
tag_value = 300
# Special case: "x.y.z Basic ECC" is counted as RTM
# TODO: research the set of potential version string formats reported by libnss.
if norm_version[-2] == 'BASIC' and norm_version[-1] == 'ECC' and norm_version[-3].isdigit():
tag_value = 300
major, minor, point, pointpoint = (int(x) for x in norm_version[:4])
return [major, minor, point, pointpoint, tag_value]
@staticmethod
def first_older_than_second(version_a, version_b):
"""
Tests for the NSS version string in the first parameter being less
recent than the second (a < b). Tag order is RTM > RC > BETA > *.
Works with hg tags like "NSS_3_7_9_RTM" and version strings reported by
nsINSSVersion, like "3.18 Basic ECC Beta" (mixed, too).
:param version_a: a NSS version string
:param version_b: another NSS version string
:return: bool (a < b)
"""
a = nssversion.to_ints(version_a)
b = nssversion.to_ints(version_b)
# must be of equal length
assert len(a) == len(b)
# Compare each version component, bail out on difference
for i in xrange(len(a)):
if b[i] < a[i]:
return False
if b[i] > a[i]:
return True
return False
@staticmethod
@classmethod
def run(cls, version=None):
"""
Test runner method; is called by parent class defined in suite.py.
:param version: B2G version string to test against
:return: bool PASS/FAIL status
"""
try:
dumper = certdump()
versions = dumper.nssversion_via_marionette()
except Exception as e: # TODO: too broad exception
cls.log_status('FAIL', 'Failed to gather information from the device via Marionette: %s' % e)
return False
if version is None:
cls.log_status('FAIL', 'NSS version check requires a B2G version.\nReported component versions:\n%s' % (
'\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
reported_version = versions['NSS_Version']
if version not in nssversion.b2g_version_to_hginfo:
cls.log_status('FAIL', 'No version comparison data for B2G %s.\nReported NSS component versions:\n%s' % (
version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
expected_version = nssversion.b2g_version_to_hginfo[version]['release_nss_version']
# Fail if reported version is a downgrade
if nssversion.first_older_than_second(reported_version, expected_version):
cls.log_status('FAIL', 'NSS downgrade detected. Expecting at least version %s.\n'
'Reported versions:\n%s' % (
expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
# Pass if NSS version was upgraded.
if nssversion.first_older_than_second(expected_version, reported_version):
cls.log_status('PASS', 'NSS more recent than release version %s. Reported component versions:\n%s' % (
expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return True
# Else device has reported the expected version.
cls.log_status('PASS', 'NSS version reported as expected. Reported component versions:\n%s' % (
'\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return True
|
mozilla-b2g/fxos-certsuite
|
mcts/securitysuite/ssl.py
|
nssversion.run
|
python
|
def run(cls, version=None):
try:
dumper = certdump()
versions = dumper.nssversion_via_marionette()
except Exception as e: # TODO: too broad exception
cls.log_status('FAIL', 'Failed to gather information from the device via Marionette: %s' % e)
return False
if version is None:
cls.log_status('FAIL', 'NSS version check requires a B2G version.\nReported component versions:\n%s' % (
'\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
reported_version = versions['NSS_Version']
if version not in nssversion.b2g_version_to_hginfo:
cls.log_status('FAIL', 'No version comparison data for B2G %s.\nReported NSS component versions:\n%s' % (
version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
expected_version = nssversion.b2g_version_to_hginfo[version]['release_nss_version']
# Fail if reported version is a downgrade
if nssversion.first_older_than_second(reported_version, expected_version):
cls.log_status('FAIL', 'NSS downgrade detected. Expecting at least version %s.\n'
'Reported versions:\n%s' % (
expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return False
# Pass if NSS version was upgraded.
if nssversion.first_older_than_second(expected_version, reported_version):
cls.log_status('PASS', 'NSS more recent than release version %s. Reported component versions:\n%s' % (
expected_version, '\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return True
# Else device has reported the expected version.
cls.log_status('PASS', 'NSS version reported as expected. Reported component versions:\n%s' % (
'\n'.join(["%s: %s" % (k, versions[k]) for k in versions])))
return True
|
Test runner method; is called by parent class defined in suite.py.
:param version: B2G version string to test against
:return: bool PASS/FAIL status
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/securitysuite/ssl.py#L308-L353
|
[
"def nssversion_via_marionette(self):\n return run_marionette_script(certdump.js_nssversions(),\n chrome=True)\n"
] |
class nssversion(ExtraTest):
"""
Test that logs nss component versions from device.
"""
group = "ssl"
module = sys.modules[__name__]
# TODO: This list's tail must be maintained regularly.
b2g_version_to_hginfo = {
'1.2': {
'hgtag': 'mozilla-b2g26_v1_2',
'release_date:': '2013-12-09',
'release_branch': 'RELEASE_BASE_20131202',
'release_nss_version': 'NSS_3_15_3_RTM'
},
'1.3': {
'hgtag': 'mozilla-b2g28_v1_3',
'release_date:': '2014-04-17',
'release_branch': 'B2G_1_3_20140317_MERGEDAY',
'release_nss_version': 'NSS_3_15_5_RTM'
},
'1.3t': {
'hgtag': 'mozilla-b2g28_v1_3t',
'release_date:': '2014-04-17',
'release_branch': 'B2G_1_3T_20140317_MERGEDAY',
'release_nss_version': 'NSS_3_15_5_RTM'
},
'1.4': {
'hgtag': 'mozilla-b2g30_v1_4',
'release_date:': '2014-06-09',
'release_branch': 'B2G_1_4_20140609_MERGEDAY',
'release_nss_version': 'NSS_3_16_RTM'
},
'2.0': {
'hgtag': 'mozilla-b2g32_v2_0',
'release_date:': '2014-09-01',
'release_branch': 'B2G_2_0_20140902_MERGEDAY',
'release_nss_version': 'NSS_3_16_4_RTM'
},
'2.0m': {
'hgtag': 'mozilla-b2g32_v2_0m',
'release_date:': '2014-09-01',
'release_branch': 'B2G_2_0_20140902_MERGEDAY',
'release_nss_version': 'NSS_3_16_4_RTM'
},
'2.1': {
'hgtag': 'mozilla-b2g34_v2_1',
'release_date:': '2014-11-21',
'release_branch': 'FIREFOX_RELEASE_34_BASE',
'release_nss_version': 'NSS_3_17_2_RTM'
},
'2.1s': {
'hgtag': 'mozilla-b2g34_v2_1s',
'release_date:': '2014-11-21',
'release_branch': 'FIREFOX_RELEASE_34_BASE',
'release_nss_version': 'NSS_3_17_2_RTM'
},
'2.2': {
'hgtag': 'mozilla-b2g37_v2_2',
'release_date:': '2015-06-08',
'release_branch': 'FIREFOX_RELEASE_37_BASE',
'release_nss_version': 'NSS_3_17_4_RTM'
},
'2.5': {
'hgtag': 'mozilla-b2g44_v2_5',
'release_date:': '',
'release_branch': 'FIREFOX_RELEASE_44_BASE',
'release_nss_version': 'NSS_3_20_0_RTM'
}
}
@staticmethod
def to_ints(version):
"""
Turn version string into a numeric representation for easy comparison.
Undeclared point versions are assumed to be 0.
:param version: a NSS version string
:return: array of [major, minor, point, pointpoint, tag value]
"""
# Example strings: "NSS_3_7_9_RTM", "NSS_3_6_BRANCH_20021026", "NSS_3_6_BETA2",
# "3.18 Basic ECC Beta", "3.16.5 Basic ECC"
# normalize version strings
norm_version = version.replace('NSS_', '').replace('.', '_').replace(' ', '_').upper().split('_')
# Asserting minimumum length of 3 as in [major,minor,tag]
assert len(norm_version) >= 3
# Asserting the first two fields are numeric major and minor
assert norm_version[0].isdigit() and norm_version[1].isdigit()
# Asserting last field is always a non-numeric tag or a date tag
# CAVE: fails with obscure date dags like certdata.txt-NSS_3_4_20020403_2
assert not norm_version[-1].isdigit() or len(norm_version[-1]) > 2
# fill in missing point and pointpoint versions
if not (norm_version[2].isdigit() and len(norm_version[2]) < 4): # <4 to distinguish from numeric date tags
norm_version.insert(2, "0")
if not (norm_version[3].isdigit() and len(norm_version[3]) < 4):
norm_version.insert(3, "0")
# Strictly ordering by RTM > RC > BETA > *
# CAVE: Order rule may result in bogus sorting of obscure tags (WITH_CBKI*, TPATCH*, BRANCHPOINT*, ...)
# Recent versions are tagged non-obscurely and consistently
tag_value = 0
for v in norm_version[4:]:
if v.startswith('BETA'):
tag_value = 100
if len(v[4:]) == 1 or len(v[4:]) == 2:
try:
tag_value += int(v[4:])
except ValueError:
pass
for v in norm_version[4:]:
if v.startswith('RC'):
tag_value = 200
if len(v[3:]) == 1 or len(v[3:]) == 2:
try:
tag_value += int(v[3:])
except ValueError:
pass
for v in norm_version[4:]:
if v == 'RTM':
tag_value = 300
# Special case: "x.y.z Basic ECC" is counted as RTM
# TODO: research the set of potential version string formats reported by libnss.
if norm_version[-2] == 'BASIC' and norm_version[-1] == 'ECC' and norm_version[-3].isdigit():
tag_value = 300
major, minor, point, pointpoint = (int(x) for x in norm_version[:4])
return [major, minor, point, pointpoint, tag_value]
@staticmethod
def first_older_than_second(version_a, version_b):
"""
Tests for the NSS version string in the first parameter being less
recent than the second (a < b). Tag order is RTM > RC > BETA > *.
Works with hg tags like "NSS_3_7_9_RTM" and version strings reported by
nsINSSVersion, like "3.18 Basic ECC Beta" (mixed, too).
:param version_a: a NSS version string
:param version_b: another NSS version string
:return: bool (a < b)
"""
a = nssversion.to_ints(version_a)
b = nssversion.to_ints(version_b)
# must be of equal length
assert len(a) == len(b)
# Compare each version component, bail out on difference
for i in xrange(len(a)):
if b[i] < a[i]:
return False
if b[i] > a[i]:
return True
return False
@staticmethod
def most_recent_among(versions):
"""
Compare a list of NSS versions and return the latest one.
Uses first_older_than_second() for comparison.
:param versions: an array of NSS version strings
:return: verbatim copy of the most recent version string
"""
latest = versions[0]
for v in versions[1:]:
if nssversion.first_older_than_second(latest, v):
latest = v
return latest
@classmethod
|
mozilla-b2g/fxos-certsuite
|
mcts/securitysuite/filesystem.py
|
parse_ls
|
python
|
def parse_ls(out):
# assumed ls -lR line format:
# -rw-r--r-- root shell 0 2013-07-05 02:26 tasks
# drwxr-xr-x root root 2013-07-05 02:26 log
# brw------- root root 179, 0 2013-07-05 02:26 mmcblk0
# lrwxrwxrwx root root 2013-07-05 02:34 subsystem -> ../class/bdi
# CAVE: format may change through versions.
# TODO: implement plausibility test.
mode = r'^(.)'
field = r'([^ ]+) +'
dev = r'(\d+), +(\d+) '
date = r'(\d{4}\-\d{2}\-\d{2} \d{2}:\d{2}) '
name = r'(.+)$'
link = r'(.+) -> (.+)$'
logger = get_default_logger()
# adb returns newline as \r\n
# but mozdevice uses \n
for dirstr in out[2:-2].split('\r\n\r\n'):
lines = dirstr.split('\r\n')
dirname = lines[0][:-1]
if len(lines) == 2 and lines[1].startswith("opendir failed"):
continue
for filestr in lines[1:]:
if filestr.endswith(": No such file or directory"):
continue
if filestr.endswith(": Permission denied"):
continue
specs = None
if filestr[0] in 'dsp': # directory, socket, pipe
regexp = mode + field * 3 + date + name
m = re.search(regexp, filestr)
specs = {
'mode': m.group(1),
'perm': m.group(2),
'uid': m.group(3),
'gid': m.group(4),
'date': m.group(5),
'name': m.group(6)
}
elif filestr[0] == 'l': # symbolic link
regexp = mode + field * 3 + date + link
m = re.search(regexp, filestr)
specs = {
'mode': m.group(1),
'perm': m.group(2),
'uid': m.group(3),
'gid': m.group(4),
'date': m.group(5),
'name': m.group(6),
'link': m.group(7)
}
elif filestr[0] in 'cb': # device
regexp = mode + field * 3 + dev + date + name
m = re.search(regexp, filestr)
specs = {
'mode': m.group(1),
'perm': m.group(2),
'uid': m.group(3),
'gid': m.group(4),
'major': m.group(5),
'minor': m.group(6),
'date': m.group(7),
'name': m.group(8)
}
else: # rest
try:
regexp = mode + field * 4 + date + name
m = re.search(regexp, filestr)
specs = {
'mode': m.group(1),
'perm': m.group(2),
'uid': m.group(3),
'gid': m.group(4),
'size': m.group(5),
'date': m.group(6),
'name': m.group(7)
}
except:
logger.error("parse error on %s" % filestr)
try:
specs['name'] = '/' + os.path.relpath("%s/%s" % (dirname, specs['name']), '/')
if 'link' in specs.keys():
specs['link'] = '/' + os.path.relpath("%s/%s" % (dirname, specs['link']), '/')
except:
logger.warning("no name from %s" % filestr)
yield specs
|
Parser for Android's ls -lR output.
Takes a string, returns parsed structure.
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/securitysuite/filesystem.py#L23-L119
| null |
# -*- encoding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import re
import os
import sys
import subprocess
import mozdevice
# getter for shared logger instance
from mozlog.structured import get_default_logger
from mcts.utils.device.devicehelper import DeviceHelper
from mozdevice.adb import ADBError
# ######################################################################################################################
# shared module functions
#########################
#######################################################################################################################
# Test implementations
################################
# derived from shared test class
from suite import ExtraTest
#######################################################################################################################
# filesystem.wordwritable_info
class worldwritable_info(ExtraTest):
"""
Test that checks gonk file system for world-writable files.
"""
group = "filesystem"
module = sys.modules[__name__]
whitelist = {
'ok': [
'^/proc/.*$', # whitelisting for now
'^/sys/.*$', # whitelisting for now
'^/system/.*$', # /system/ is supposed to be read-only
'^/dev/null$',
'^/dev/zero$',
'^/dev/full$',
'^/dev/urandom$',
'^/dev/random$',
'^/dev/ashmem$',
'^/dev/ptmx$',
'^/dev/console$',
'^/dev/tty$',
'^/proc/.*/net/xt_qtaguid/ctrl$'
],
'unchecked': [
'^/dev/genlock$',
'^/dev/binder$',
'^/dev/kgsl-3d0$',
'^/dev/socket/keystore$',
'^/dev/socket/property_service$',
'^/dev/log/.*$',
'^/acct/uid/.*/cgroup.event_control$'
]
}
@classmethod
def whitelist_check(cls, name, flag='ok', whitelist=None):
if whitelist is None:
whitelist = cls.whitelist
r = re.compile('|'.join(whitelist[flag]))
return r.match(name) is not None
@classmethod
def run(cls, version=None):
logger = get_default_logger()
try:
device = DeviceHelper.getDevice()
except ADBError as e:
logger.error("Error connecting to device via adb (error: %s). Please be " \
"sure device is connected and 'remote debugging' is enabled." % \
e.msg)
raise
try:
out = device.shell_output('ls -alR /', root=True)
except ADBError as e:
cls.log_status('FAIL', 'Failed to gather filesystem information from device via adb: %s' % e.msg)
return False
found = []
for f in parse_ls(out):
if f['perm'][7] == 'w' and f['mode'] != 'l':
if not cls.whitelist_check(f['name']):
found.append(f['name'])
if len(found) > 0:
cls.log_status('PASS',
'Please ensure that the following world-writable files will not pose a security risk:\n%s' % '\n'.join(
found))
else:
cls.log_status('PASS', 'No unexpected suidroot executables found.')
return True
#######################################################################################################################
# filesystem.suidroot_info
class suidroot_info(ExtraTest):
"""
Test that checks gonk file system for suid root binaries.
"""
group = "filesystem"
module = sys.modules[__name__]
whitelist = {
'ok': [
'^/system/bin/run-as$'
],
'unchecked': [
'^/system/bin/diag_mdlog$'
]
}
@classmethod
def whitelist_check(cls, name, flag='ok', whitelist=None):
if whitelist is None:
whitelist = cls.whitelist
r = re.compile('|'.join(whitelist[flag]))
return r.match(name) is not None
@classmethod
def run(cls, version=None):
logger = get_default_logger()
try:
device = DeviceHelper.getDevice()
except ADBError as e:
logger.error("Error connecting to device via adb (error: %s). Please be " \
"sure device is connected and 'remote debugging' is enabled." % \
e.msg)
raise
try:
out = device.shell_output('ls -alR /', root=True)
except ADBError as e:
cls.log_status('FAIL', 'Failed to gather filesystem information from device via adb: %s' % e.msg)
return False
found = []
for f in parse_ls(out):
if f['perm'][2] == 's' and f['uid'] == 'root':
if not cls.whitelist_check(f['name']):
found.append(f['name'])
if len(found) > 0:
cls.log_status('PASS',
'Please ensure that the following suid root files are no security risk:\n%s' % '\n'.join(
found))
else:
cls.log_status('PASS', 'No unexpected suidroot executables found.')
return True
|
mozilla-b2g/fxos-certsuite
|
mcts/securitysuite/suite.py
|
securitycli
|
python
|
def securitycli():
parser = argparse.ArgumentParser(description="Runner for security test suite")
parser.add_argument("-l", "--list-test-groups", action="store_true",
help="List all logical test groups")
parser.add_argument("-a", "--list-all-tests", action="store_true",
help="List all tests")
parser.add_argument("-i", "--include", metavar="GROUP", action="append", default=[],
help="Only include specified group(s) in run, include several "
"groups by repeating flag")
parser.add_argument("--version", action="store", dest="version",
help="B2G version")
parser.add_argument("--ipython", dest="ipython", action="store_true",
help="drop to ipython session")
parser.add_argument('-H', '--host',
help='Hostname or ip for target device',
action='store', default='localhost')
parser.add_argument('-P', '--port',
help='Port for target device',
action='store', default=2828)
parser.add_argument('-m', '--mode',
help='Test mode (stingray, phone) default (phone)',
action='store', default='phone')
parser.add_argument("-v", dest="verbose", action="store_true",
help="Verbose output")
# add specialized mozilla logger options
commandline.add_logging_group(parser)
args = parser.parse_args()
# set up mozilla logger
logger = commandline.setup_logging("securitysuite", vars(args), {"raw": sys.stdout})
try:
if args.list_test_groups:
for group in ExtraTest.group_list(args.mode):
print group
elif args.list_all_tests:
for test in ExtraTest.test_list(args.mode):
print "%s.%s" % (test.group, test.__name__)
elif args.ipython:
from IPython import embed
embed()
elif args.mode == 'stingray':
logger.debug("security cli runnng with args %s" % args)
ExtraTest.run_groups(args.include,
version=args.version,
host=args.host, port=int(args.port),
mode=args.mode)
else:
logger.debug("security cli runnng with args %s" % args)
wait_for_adb_device()
if not adb_has_root():
logger.warning("adb has no root. Results will be incomplete.")
ExtraTest.run_groups(args.include, version=args.version)
except:
logger.critical(traceback.format_exc())
raise
|
Entry point for the runner defined in setup.py.
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/securitysuite/suite.py#L140-L202
|
[
"def wait_for_adb_device():\n try:\n device = DeviceHelper.getDevice()\n except ADBError:\n device = None\n print \"Waiting for adb connection...\"\n while device is None:\n try:\n device = DeviceHelper.getDevice()\n except ADBError:\n sleep(0.2)\n if len(device.devices()) < 1:\n print \"Waiting for adb device...\"\n while len(device.devices()) < 1:\n sleep(0.2)\n",
"def adb_has_root():\n # normally this should check via root=True to .shell_output, but doesn't work\n device = DeviceHelper.getDevice()\n return device.shell_output(\"id\").startswith(\"uid=0(root)\")\n",
"def group_list(mode='phone'):\n \"\"\"\n Returns a list of all groups in the test suite.\n \"\"\"\n if mode == 'stingray':\n return ['ssl']\n groups = []\n for t in ExtraTest.__subclasses__():\n if t.groupname() not in groups:\n groups.append(t.groupname())\n return groups\n",
"def test_list(group=None, mode='phone'):\n \"\"\"\n Returns a list of all tests, optionally filtered by group.\n \"\"\"\n if mode == 'stingray' and group is not None:\n group = 'ssl'\n if group is None:\n return ExtraTest.__subclasses__()\n else:\n tests = []\n for t in ExtraTest.__subclasses__():\n if t.groupname() == group:\n tests.append(t)\n return tests\n",
"def run_groups(groups=[], version=None, host='localhost', port=2828, mode='phone'):\n hasadb = mode == 'phone'\n logger = get_default_logger()\n if groups is None or len(groups) == 0: # run all groups\n logger.debug('running securitysuite tests for all groups %s' % str(ExtraTest.group_list(mode=mode)))\n groups = ExtraTest.group_list(mode=mode)\n else:\n logger.debug('running securitysuite tests for groups %s' % str(groups))\n logger.suite_start(tests=groups)\n\n # setup marionette before any test\n marionette = DeviceHelper.getMarionette(host=host, port=port)\n # setup device before any test\n device = DeviceHelper.getDevice()\n\n for g in groups:\n logger.debug(\"running securitysuite test group %s\" % g)\n logger.test_start(g)\n try:\n ExtraTest.run(g, version=version)\n logger.test_end(g, 'OK')\n except:\n logger.critical(traceback.format_exc())\n logger.test_end(g, 'FAIL')\n raise\n logger.suite_end()\n"
] |
# -*- encoding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import logging
import argparse
import traceback
from mozdevice import DeviceManagerADB, DMError, ADBError
from mozlog.structured import commandline, get_default_logger
from time import sleep
from mcts.utils.device.devicehelper import DeviceHelper
from mozdevice.adb import ADBError
# ######################################################################################################################
# Test class that all test must be derived from
###############################################
class ExtraTest(object):
"""
Parent class for all tests in this suite.
Every child must set its .group string and implement
its .run() method.
"""
@classmethod
def groupname(cls):
"""
Getter that returns a test's group name.
"""
if cls.group:
return cls.group
else:
return 'unknown'
@staticmethod
def group_list(mode='phone'):
"""
Returns a list of all groups in the test suite.
"""
if mode == 'stingray':
return ['ssl']
groups = []
for t in ExtraTest.__subclasses__():
if t.groupname() not in groups:
groups.append(t.groupname())
return groups
@staticmethod
def test_list(group=None, mode='phone'):
"""
Returns a list of all tests, optionally filtered by group.
"""
if mode == 'stingray' and group is not None:
group = 'ssl'
if group is None:
return ExtraTest.__subclasses__()
else:
tests = []
for t in ExtraTest.__subclasses__():
if t.groupname() == group:
tests.append(t)
return tests
@staticmethod
def run_groups(groups=[], version=None, host='localhost', port=2828, mode='phone'):
hasadb = mode == 'phone'
logger = get_default_logger()
if groups is None or len(groups) == 0: # run all groups
logger.debug('running securitysuite tests for all groups %s' % str(ExtraTest.group_list(mode=mode)))
groups = ExtraTest.group_list(mode=mode)
else:
logger.debug('running securitysuite tests for groups %s' % str(groups))
logger.suite_start(tests=groups)
# setup marionette before any test
marionette = DeviceHelper.getMarionette(host=host, port=port)
# setup device before any test
device = DeviceHelper.getDevice()
for g in groups:
logger.debug("running securitysuite test group %s" % g)
logger.test_start(g)
try:
ExtraTest.run(g, version=version)
logger.test_end(g, 'OK')
except:
logger.critical(traceback.format_exc())
logger.test_end(g, 'FAIL')
raise
logger.suite_end()
@classmethod
def run(cls, group=None, version=None):
"""
Runs all the tests, optionally just within the specified group.
"""
for t in cls.test_list(group):
t.run(version=version)
@classmethod
def log_status(cls, status, msg):
logger = get_default_logger()
logger.test_status(cls.groupname(), cls.__name__, status, message=msg)
#######################################################################################################################
# Shared module functionality
#############################
def wait_for_adb_device():
try:
device = DeviceHelper.getDevice()
except ADBError:
device = None
print "Waiting for adb connection..."
while device is None:
try:
device = DeviceHelper.getDevice()
except ADBError:
sleep(0.2)
if len(device.devices()) < 1:
print "Waiting for adb device..."
while len(device.devices()) < 1:
sleep(0.2)
def adb_has_root():
# normally this should check via root=True to .shell_output, but doesn't work
device = DeviceHelper.getDevice()
return device.shell_output("id").startswith("uid=0(root)")
#######################################################################################################################
# Command line handler
######################
if __name__ == "__main__":
securitycli()
|
mozilla-b2g/fxos-certsuite
|
mcts/utils/handlers/adb_b2g.py
|
ADBB2G.wait_for_device_ready
|
python
|
def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for device to become ready")
profiles = self.get_profiles()
assert len(profiles) == 1
profile_dir = profiles.itervalues().next()
prefs_file = posixpath.normpath(profile_dir + "/prefs.js")
current_date = int(self.shell_output('date +\"%s\"'))
set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)
try:
self.shell_output("touch -t %i %s" % (set_date, prefs_file))
except adb.ADBError:
# See Bug 1092383, the format for the touch command
# has changed for flame-kk builds.
set_date = datetime.datetime.fromtimestamp(set_date)
self.shell_output("touch -t %s %s" %
(set_date.strftime('%Y%m%d.%H%M%S'),
prefs_file))
def prefs_modified():
times = [None, None]
def inner():
try:
listing = self.shell_output("ls -l %s" % (prefs_file))
mode, user, group, size, date, time, name = listing.split(None, 6)
mtime = "%s %s" % (date, time)
except:
return False
if times[0] is None:
times[0] = mtime
else:
times[1] = mtime
if times[1] != times[0]:
return True
return False
return inner
poll_wait(prefs_modified(), timeout=timeout,
polling_interval=wait_polling_interval, after_first=after_first)
|
Wait for the device to become ready for reliable interaction via adb.
NOTE: if the device is *already* ready this method will timeout.
:param timeout: Maximum time to wait for the device to become ready
:param wait_polling_interval: Interval at which to poll for device readiness.
:param after_first: A function to run after first polling for device
readiness. This allows use cases such as stopping b2g
setting the unready state, and then restarting b2g.
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/utils/handlers/adb_b2g.py#L131-L189
|
[
"def poll_wait(func, polling_interval=1.0, timeout=30, after_first=None):\n start_time = time.time()\n ran_first = False\n\n current_time = time.time()\n while current_time - start_time < timeout:\n value = func()\n if value:\n return value\n\n if not ran_first and after_first is not None:\n after_first()\n\n ran_first = True\n\n sleep = max(current_time + polling_interval - time.time(), 0)\n time.sleep(sleep)\n current_time = time.time()\n\n raise WaitTimeout()\n",
"def get_profiles(self, profile_base=\"/data/b2g/mozilla\", timeout=None):\n \"\"\"Return a list of paths to gecko profiles on the device,\n\n :param timeout: Timeout of each adb command run\n :param profile_base: Base directory containing the profiles.ini file\n \"\"\"\n\n rv = {}\n\n if timeout is None:\n timeout = self._timeout\n\n profile_path = posixpath.join(profile_base, \"profiles.ini\")\n try:\n proc = self.shell(\"cat %s\" % profile_path, timeout=timeout)\n config = ConfigParser.ConfigParser()\n config.readfp(proc.stdout_file)\n for section in config.sections():\n items = dict(config.items(section))\n if \"name\" in items and \"path\" in items:\n path = items[\"path\"]\n if \"isrelative\" in items and int(items[\"isrelative\"]):\n path = posixpath.normpath(\"%s/%s\" % (profile_base, path))\n rv[items[\"name\"]] = path\n finally:\n proc.stdout_file.close()\n proc.stderr_file.close()\n\n return rv\n",
"def prefs_modified():\n times = [None, None]\n\n def inner():\n try:\n listing = self.shell_output(\"ls -l %s\" % (prefs_file))\n mode, user, group, size, date, time, name = listing.split(None, 6)\n mtime = \"%s %s\" % (date, time)\n except:\n return False\n if times[0] is None:\n times[0] = mtime\n else:\n times[1] = mtime\n if times[1] != times[0]:\n return True\n\n return False\n\n return inner\n"
] |
class ADBB2G(adb.ADBDevice):
def __init__(self, *args, **kwargs):
if "wait_polling_interval" in kwargs:
self._wait_polling_interval = kwargs.pop("wait_polling_interval")
else:
self._wait_polling_interval = 1.0
adb.ADBDevice.__init__(self, *args, **kwargs)
def wait_for_net(self, timeout=None, wait_polling_interval=None):
"""Wait for the device to be assigned an IP address.
:param timeout: Maximum time to wait for an IP address to be defined
:param wait_polling_interval: Interval at which to poll for ip address.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for network connection")
poll_wait(self.get_ip_address, timeout=timeout)
def stop(self, timeout=None):
self._logger.info("Stopping b2g process")
if timeout is None:
timeout = self._timeout
self.shell_bool("stop b2g")
def b2g_stopped():
processes = set(item[1].split("/")[-1] for item in self.get_process_list())
return "b2g" not in processes
poll_wait(b2g_stopped, timeout=timeout)
def start(self, wait=True, timeout=None, wait_polling_interval=None):
"""Start b2g, waiting for the adb connection to become stable.
:param wait:
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self._logger.info("Starting b2g process")
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
if wait:
self.wait_for_device_ready(timeout,
after_first=lambda:self.shell_bool("start b2g",
timeout=timeout))
else:
self.shell_bool("start b2g", timeout=timeout)
def restart(self, wait=True, timeout=None, wait_polling_interval=None):
"""Restart b2g, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self.stop(timeout=timeout)
self.start(wait, timeout=timeout, wait_polling_interval=wait_polling_interval)
def reboot(self, timeout=None, wait_polling_interval=None):
"""Reboot the device, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for reboot.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Rebooting device")
self.wait_for_device_ready(timeout,
after_first=lambda:self.command_output(["reboot"]))
def root(self, timeout=None, wait_polling_interval=None):
"""run adbd as root.
"""
self.command_output(["root"])
def get_profiles(self, profile_base="/data/b2g/mozilla", timeout=None):
"""Return a list of paths to gecko profiles on the device,
:param timeout: Timeout of each adb command run
:param profile_base: Base directory containing the profiles.ini file
"""
rv = {}
if timeout is None:
timeout = self._timeout
profile_path = posixpath.join(profile_base, "profiles.ini")
try:
proc = self.shell("cat %s" % profile_path, timeout=timeout)
config = ConfigParser.ConfigParser()
config.readfp(proc.stdout_file)
for section in config.sections():
items = dict(config.items(section))
if "name" in items and "path" in items:
path = items["path"]
if "isrelative" in items and int(items["isrelative"]):
path = posixpath.normpath("%s/%s" % (profile_base, path))
rv[items["name"]] = path
finally:
proc.stdout_file.close()
proc.stderr_file.close()
return rv
def devices(self, timeout=None):
"""Executes adb devices -l and returns a list of objects describing attached devices.
:param timeout: optional integer specifying the maximum time in
seconds for any spawned adb process to complete before
throwing an ADBTimeoutError. This timeout is per adb call. The
total time spent may exceed this value. If it is not
specified, the value set in the ADBHost constructor is used.
:returns: an object contain
:raises: * ADBTimeoutError
* ADBError
The output of adb devices -l ::
$ adb devices -l
List of devices attached
b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
is parsed and placed into an object as in
[{'device_serial': 'b313b945', 'state': 'device', 'product': 'd2vzw',
'usb': '1-7', 'device': 'd2vzw', 'model': 'SCH_I535' }]
"""
# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
# from Android system/core/adb/transport.c statename()
re_device_info = re.compile(r'([^\s]+)\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)')
devices = []
lines = self.command_output(["devices", "-l"], timeout=timeout).split('\n')
for line in lines:
if line == 'List of devices attached ':
continue
match = re_device_info.match(line)
if match:
device = {
'device_serial': match.group(1),
'state': match.group(2)
}
remainder = line[match.end(2):].strip()
if remainder:
try:
device.update(dict([j.split(':')
for j in remainder.split(' ')]))
except ValueError:
self._logger.warning('devices: Unable to parse '
'remainder for device %s' % line)
devices.append(device)
return devices
|
mozilla-b2g/fxos-certsuite
|
mcts/utils/handlers/adb_b2g.py
|
ADBB2G.wait_for_net
|
python
|
def wait_for_net(self, timeout=None, wait_polling_interval=None):
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for network connection")
poll_wait(self.get_ip_address, timeout=timeout)
|
Wait for the device to be assigned an IP address.
:param timeout: Maximum time to wait for an IP address to be defined
:param wait_polling_interval: Interval at which to poll for ip address.
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/utils/handlers/adb_b2g.py#L191-L203
|
[
"def poll_wait(func, polling_interval=1.0, timeout=30, after_first=None):\n start_time = time.time()\n ran_first = False\n\n current_time = time.time()\n while current_time - start_time < timeout:\n value = func()\n if value:\n return value\n\n if not ran_first and after_first is not None:\n after_first()\n\n ran_first = True\n\n sleep = max(current_time + polling_interval - time.time(), 0)\n time.sleep(sleep)\n current_time = time.time()\n\n raise WaitTimeout()\n"
] |
class ADBB2G(adb.ADBDevice):
def __init__(self, *args, **kwargs):
if "wait_polling_interval" in kwargs:
self._wait_polling_interval = kwargs.pop("wait_polling_interval")
else:
self._wait_polling_interval = 1.0
adb.ADBDevice.__init__(self, *args, **kwargs)
def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):
"""Wait for the device to become ready for reliable interaction via adb.
NOTE: if the device is *already* ready this method will timeout.
:param timeout: Maximum time to wait for the device to become ready
:param wait_polling_interval: Interval at which to poll for device readiness.
:param after_first: A function to run after first polling for device
readiness. This allows use cases such as stopping b2g
setting the unready state, and then restarting b2g.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for device to become ready")
profiles = self.get_profiles()
assert len(profiles) == 1
profile_dir = profiles.itervalues().next()
prefs_file = posixpath.normpath(profile_dir + "/prefs.js")
current_date = int(self.shell_output('date +\"%s\"'))
set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)
try:
self.shell_output("touch -t %i %s" % (set_date, prefs_file))
except adb.ADBError:
# See Bug 1092383, the format for the touch command
# has changed for flame-kk builds.
set_date = datetime.datetime.fromtimestamp(set_date)
self.shell_output("touch -t %s %s" %
(set_date.strftime('%Y%m%d.%H%M%S'),
prefs_file))
def prefs_modified():
times = [None, None]
def inner():
try:
listing = self.shell_output("ls -l %s" % (prefs_file))
mode, user, group, size, date, time, name = listing.split(None, 6)
mtime = "%s %s" % (date, time)
except:
return False
if times[0] is None:
times[0] = mtime
else:
times[1] = mtime
if times[1] != times[0]:
return True
return False
return inner
poll_wait(prefs_modified(), timeout=timeout,
polling_interval=wait_polling_interval, after_first=after_first)
def stop(self, timeout=None):
self._logger.info("Stopping b2g process")
if timeout is None:
timeout = self._timeout
self.shell_bool("stop b2g")
def b2g_stopped():
processes = set(item[1].split("/")[-1] for item in self.get_process_list())
return "b2g" not in processes
poll_wait(b2g_stopped, timeout=timeout)
def start(self, wait=True, timeout=None, wait_polling_interval=None):
"""Start b2g, waiting for the adb connection to become stable.
:param wait:
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self._logger.info("Starting b2g process")
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
if wait:
self.wait_for_device_ready(timeout,
after_first=lambda:self.shell_bool("start b2g",
timeout=timeout))
else:
self.shell_bool("start b2g", timeout=timeout)
def restart(self, wait=True, timeout=None, wait_polling_interval=None):
"""Restart b2g, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self.stop(timeout=timeout)
self.start(wait, timeout=timeout, wait_polling_interval=wait_polling_interval)
def reboot(self, timeout=None, wait_polling_interval=None):
"""Reboot the device, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for reboot.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Rebooting device")
self.wait_for_device_ready(timeout,
after_first=lambda:self.command_output(["reboot"]))
def root(self, timeout=None, wait_polling_interval=None):
"""run adbd as root.
"""
self.command_output(["root"])
def get_profiles(self, profile_base="/data/b2g/mozilla", timeout=None):
"""Return a list of paths to gecko profiles on the device,
:param timeout: Timeout of each adb command run
:param profile_base: Base directory containing the profiles.ini file
"""
rv = {}
if timeout is None:
timeout = self._timeout
profile_path = posixpath.join(profile_base, "profiles.ini")
try:
proc = self.shell("cat %s" % profile_path, timeout=timeout)
config = ConfigParser.ConfigParser()
config.readfp(proc.stdout_file)
for section in config.sections():
items = dict(config.items(section))
if "name" in items and "path" in items:
path = items["path"]
if "isrelative" in items and int(items["isrelative"]):
path = posixpath.normpath("%s/%s" % (profile_base, path))
rv[items["name"]] = path
finally:
proc.stdout_file.close()
proc.stderr_file.close()
return rv
def devices(self, timeout=None):
"""Executes adb devices -l and returns a list of objects describing attached devices.
:param timeout: optional integer specifying the maximum time in
seconds for any spawned adb process to complete before
throwing an ADBTimeoutError. This timeout is per adb call. The
total time spent may exceed this value. If it is not
specified, the value set in the ADBHost constructor is used.
:returns: an object contain
:raises: * ADBTimeoutError
* ADBError
The output of adb devices -l ::
$ adb devices -l
List of devices attached
b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
is parsed and placed into an object as in
[{'device_serial': 'b313b945', 'state': 'device', 'product': 'd2vzw',
'usb': '1-7', 'device': 'd2vzw', 'model': 'SCH_I535' }]
"""
# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
# from Android system/core/adb/transport.c statename()
re_device_info = re.compile(r'([^\s]+)\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)')
devices = []
lines = self.command_output(["devices", "-l"], timeout=timeout).split('\n')
for line in lines:
if line == 'List of devices attached ':
continue
match = re_device_info.match(line)
if match:
device = {
'device_serial': match.group(1),
'state': match.group(2)
}
remainder = line[match.end(2):].strip()
if remainder:
try:
device.update(dict([j.split(':')
for j in remainder.split(' ')]))
except ValueError:
self._logger.warning('devices: Unable to parse '
'remainder for device %s' % line)
devices.append(device)
return devices
|
mozilla-b2g/fxos-certsuite
|
mcts/utils/handlers/adb_b2g.py
|
ADBB2G.start
|
python
|
def start(self, wait=True, timeout=None, wait_polling_interval=None):
self._logger.info("Starting b2g process")
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
if wait:
self.wait_for_device_ready(timeout,
after_first=lambda:self.shell_bool("start b2g",
timeout=timeout))
else:
self.shell_bool("start b2g", timeout=timeout)
|
Start b2g, waiting for the adb connection to become stable.
:param wait:
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/utils/handlers/adb_b2g.py#L215-L235
|
[
"def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):\n \"\"\"Wait for the device to become ready for reliable interaction via adb.\n NOTE: if the device is *already* ready this method will timeout.\n\n :param timeout: Maximum time to wait for the device to become ready\n :param wait_polling_interval: Interval at which to poll for device readiness.\n :param after_first: A function to run after first polling for device\n readiness. This allows use cases such as stopping b2g\n setting the unready state, and then restarting b2g.\n \"\"\"\n\n if timeout is None:\n timeout = self._timeout\n if wait_polling_interval is None:\n wait_polling_interval = self._wait_polling_interval\n\n self._logger.info(\"Waiting for device to become ready\")\n profiles = self.get_profiles()\n assert len(profiles) == 1\n\n profile_dir = profiles.itervalues().next()\n prefs_file = posixpath.normpath(profile_dir + \"/prefs.js\")\n\n current_date = int(self.shell_output('date +\\\"%s\\\"'))\n set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)\n\n try:\n self.shell_output(\"touch -t %i %s\" % (set_date, prefs_file))\n except adb.ADBError:\n # See Bug 1092383, the format for the touch command\n # has changed for flame-kk builds.\n set_date = datetime.datetime.fromtimestamp(set_date)\n self.shell_output(\"touch -t %s %s\" %\n (set_date.strftime('%Y%m%d.%H%M%S'),\n prefs_file))\n\n def prefs_modified():\n times = [None, None]\n\n def inner():\n try:\n listing = self.shell_output(\"ls -l %s\" % (prefs_file))\n mode, user, group, size, date, time, name = listing.split(None, 6)\n mtime = \"%s %s\" % (date, time)\n except:\n return False\n if times[0] is None:\n times[0] = mtime\n else:\n times[1] = mtime\n if times[1] != times[0]:\n return True\n\n return False\n\n return inner\n\n poll_wait(prefs_modified(), timeout=timeout,\n polling_interval=wait_polling_interval, after_first=after_first)\n"
] |
class ADBB2G(adb.ADBDevice):
def __init__(self, *args, **kwargs):
if "wait_polling_interval" in kwargs:
self._wait_polling_interval = kwargs.pop("wait_polling_interval")
else:
self._wait_polling_interval = 1.0
adb.ADBDevice.__init__(self, *args, **kwargs)
def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):
"""Wait for the device to become ready for reliable interaction via adb.
NOTE: if the device is *already* ready this method will timeout.
:param timeout: Maximum time to wait for the device to become ready
:param wait_polling_interval: Interval at which to poll for device readiness.
:param after_first: A function to run after first polling for device
readiness. This allows use cases such as stopping b2g
setting the unready state, and then restarting b2g.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for device to become ready")
profiles = self.get_profiles()
assert len(profiles) == 1
profile_dir = profiles.itervalues().next()
prefs_file = posixpath.normpath(profile_dir + "/prefs.js")
current_date = int(self.shell_output('date +\"%s\"'))
set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)
try:
self.shell_output("touch -t %i %s" % (set_date, prefs_file))
except adb.ADBError:
# See Bug 1092383, the format for the touch command
# has changed for flame-kk builds.
set_date = datetime.datetime.fromtimestamp(set_date)
self.shell_output("touch -t %s %s" %
(set_date.strftime('%Y%m%d.%H%M%S'),
prefs_file))
def prefs_modified():
times = [None, None]
def inner():
try:
listing = self.shell_output("ls -l %s" % (prefs_file))
mode, user, group, size, date, time, name = listing.split(None, 6)
mtime = "%s %s" % (date, time)
except:
return False
if times[0] is None:
times[0] = mtime
else:
times[1] = mtime
if times[1] != times[0]:
return True
return False
return inner
poll_wait(prefs_modified(), timeout=timeout,
polling_interval=wait_polling_interval, after_first=after_first)
def wait_for_net(self, timeout=None, wait_polling_interval=None):
"""Wait for the device to be assigned an IP address.
:param timeout: Maximum time to wait for an IP address to be defined
:param wait_polling_interval: Interval at which to poll for ip address.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for network connection")
poll_wait(self.get_ip_address, timeout=timeout)
def stop(self, timeout=None):
self._logger.info("Stopping b2g process")
if timeout is None:
timeout = self._timeout
self.shell_bool("stop b2g")
def b2g_stopped():
processes = set(item[1].split("/")[-1] for item in self.get_process_list())
return "b2g" not in processes
poll_wait(b2g_stopped, timeout=timeout)
def restart(self, wait=True, timeout=None, wait_polling_interval=None):
"""Restart b2g, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self.stop(timeout=timeout)
self.start(wait, timeout=timeout, wait_polling_interval=wait_polling_interval)
def reboot(self, timeout=None, wait_polling_interval=None):
"""Reboot the device, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for reboot.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Rebooting device")
self.wait_for_device_ready(timeout,
after_first=lambda:self.command_output(["reboot"]))
def root(self, timeout=None, wait_polling_interval=None):
"""run adbd as root.
"""
self.command_output(["root"])
def get_profiles(self, profile_base="/data/b2g/mozilla", timeout=None):
"""Return a list of paths to gecko profiles on the device,
:param timeout: Timeout of each adb command run
:param profile_base: Base directory containing the profiles.ini file
"""
rv = {}
if timeout is None:
timeout = self._timeout
profile_path = posixpath.join(profile_base, "profiles.ini")
try:
proc = self.shell("cat %s" % profile_path, timeout=timeout)
config = ConfigParser.ConfigParser()
config.readfp(proc.stdout_file)
for section in config.sections():
items = dict(config.items(section))
if "name" in items and "path" in items:
path = items["path"]
if "isrelative" in items and int(items["isrelative"]):
path = posixpath.normpath("%s/%s" % (profile_base, path))
rv[items["name"]] = path
finally:
proc.stdout_file.close()
proc.stderr_file.close()
return rv
def devices(self, timeout=None):
"""Executes adb devices -l and returns a list of objects describing attached devices.
:param timeout: optional integer specifying the maximum time in
seconds for any spawned adb process to complete before
throwing an ADBTimeoutError. This timeout is per adb call. The
total time spent may exceed this value. If it is not
specified, the value set in the ADBHost constructor is used.
:returns: an object contain
:raises: * ADBTimeoutError
* ADBError
The output of adb devices -l ::
$ adb devices -l
List of devices attached
b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
is parsed and placed into an object as in
[{'device_serial': 'b313b945', 'state': 'device', 'product': 'd2vzw',
'usb': '1-7', 'device': 'd2vzw', 'model': 'SCH_I535' }]
"""
# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
# from Android system/core/adb/transport.c statename()
re_device_info = re.compile(r'([^\s]+)\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)')
devices = []
lines = self.command_output(["devices", "-l"], timeout=timeout).split('\n')
for line in lines:
if line == 'List of devices attached ':
continue
match = re_device_info.match(line)
if match:
device = {
'device_serial': match.group(1),
'state': match.group(2)
}
remainder = line[match.end(2):].strip()
if remainder:
try:
device.update(dict([j.split(':')
for j in remainder.split(' ')]))
except ValueError:
self._logger.warning('devices: Unable to parse '
'remainder for device %s' % line)
devices.append(device)
return devices
|
mozilla-b2g/fxos-certsuite
|
mcts/utils/handlers/adb_b2g.py
|
ADBB2G.restart
|
python
|
def restart(self, wait=True, timeout=None, wait_polling_interval=None):
self.stop(timeout=timeout)
self.start(wait, timeout=timeout, wait_polling_interval=wait_polling_interval)
|
Restart b2g, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/utils/handlers/adb_b2g.py#L237-L244
|
[
"def stop(self, timeout=None):\n self._logger.info(\"Stopping b2g process\")\n if timeout is None:\n timeout = self._timeout\n self.shell_bool(\"stop b2g\")\n def b2g_stopped():\n processes = set(item[1].split(\"/\")[-1] for item in self.get_process_list())\n return \"b2g\" not in processes\n poll_wait(b2g_stopped, timeout=timeout)\n",
"def start(self, wait=True, timeout=None, wait_polling_interval=None):\n \"\"\"Start b2g, waiting for the adb connection to become stable.\n\n :param wait:\n :param timeout: Maximum time to wait for restart.\n :param wait_polling_interval: Interval at which to poll for device readiness.\n \"\"\"\n self._logger.info(\"Starting b2g process\")\n\n if timeout is None:\n timeout = self._timeout\n\n if wait_polling_interval is None:\n wait_polling_interval = self._wait_polling_interval\n\n if wait:\n self.wait_for_device_ready(timeout,\n after_first=lambda:self.shell_bool(\"start b2g\",\n timeout=timeout))\n else:\n self.shell_bool(\"start b2g\", timeout=timeout)\n"
] |
class ADBB2G(adb.ADBDevice):
def __init__(self, *args, **kwargs):
if "wait_polling_interval" in kwargs:
self._wait_polling_interval = kwargs.pop("wait_polling_interval")
else:
self._wait_polling_interval = 1.0
adb.ADBDevice.__init__(self, *args, **kwargs)
def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):
"""Wait for the device to become ready for reliable interaction via adb.
NOTE: if the device is *already* ready this method will timeout.
:param timeout: Maximum time to wait for the device to become ready
:param wait_polling_interval: Interval at which to poll for device readiness.
:param after_first: A function to run after first polling for device
readiness. This allows use cases such as stopping b2g
setting the unready state, and then restarting b2g.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for device to become ready")
profiles = self.get_profiles()
assert len(profiles) == 1
profile_dir = profiles.itervalues().next()
prefs_file = posixpath.normpath(profile_dir + "/prefs.js")
current_date = int(self.shell_output('date +\"%s\"'))
set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)
try:
self.shell_output("touch -t %i %s" % (set_date, prefs_file))
except adb.ADBError:
# See Bug 1092383, the format for the touch command
# has changed for flame-kk builds.
set_date = datetime.datetime.fromtimestamp(set_date)
self.shell_output("touch -t %s %s" %
(set_date.strftime('%Y%m%d.%H%M%S'),
prefs_file))
def prefs_modified():
times = [None, None]
def inner():
try:
listing = self.shell_output("ls -l %s" % (prefs_file))
mode, user, group, size, date, time, name = listing.split(None, 6)
mtime = "%s %s" % (date, time)
except:
return False
if times[0] is None:
times[0] = mtime
else:
times[1] = mtime
if times[1] != times[0]:
return True
return False
return inner
poll_wait(prefs_modified(), timeout=timeout,
polling_interval=wait_polling_interval, after_first=after_first)
def wait_for_net(self, timeout=None, wait_polling_interval=None):
"""Wait for the device to be assigned an IP address.
:param timeout: Maximum time to wait for an IP address to be defined
:param wait_polling_interval: Interval at which to poll for ip address.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for network connection")
poll_wait(self.get_ip_address, timeout=timeout)
def stop(self, timeout=None):
self._logger.info("Stopping b2g process")
if timeout is None:
timeout = self._timeout
self.shell_bool("stop b2g")
def b2g_stopped():
processes = set(item[1].split("/")[-1] for item in self.get_process_list())
return "b2g" not in processes
poll_wait(b2g_stopped, timeout=timeout)
def start(self, wait=True, timeout=None, wait_polling_interval=None):
"""Start b2g, waiting for the adb connection to become stable.
:param wait:
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self._logger.info("Starting b2g process")
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
if wait:
self.wait_for_device_ready(timeout,
after_first=lambda:self.shell_bool("start b2g",
timeout=timeout))
else:
self.shell_bool("start b2g", timeout=timeout)
def reboot(self, timeout=None, wait_polling_interval=None):
"""Reboot the device, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for reboot.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Rebooting device")
self.wait_for_device_ready(timeout,
after_first=lambda:self.command_output(["reboot"]))
def root(self, timeout=None, wait_polling_interval=None):
"""run adbd as root.
"""
self.command_output(["root"])
def get_profiles(self, profile_base="/data/b2g/mozilla", timeout=None):
"""Return a list of paths to gecko profiles on the device,
:param timeout: Timeout of each adb command run
:param profile_base: Base directory containing the profiles.ini file
"""
rv = {}
if timeout is None:
timeout = self._timeout
profile_path = posixpath.join(profile_base, "profiles.ini")
try:
proc = self.shell("cat %s" % profile_path, timeout=timeout)
config = ConfigParser.ConfigParser()
config.readfp(proc.stdout_file)
for section in config.sections():
items = dict(config.items(section))
if "name" in items and "path" in items:
path = items["path"]
if "isrelative" in items and int(items["isrelative"]):
path = posixpath.normpath("%s/%s" % (profile_base, path))
rv[items["name"]] = path
finally:
proc.stdout_file.close()
proc.stderr_file.close()
return rv
def devices(self, timeout=None):
"""Executes adb devices -l and returns a list of objects describing attached devices.
:param timeout: optional integer specifying the maximum time in
seconds for any spawned adb process to complete before
throwing an ADBTimeoutError. This timeout is per adb call. The
total time spent may exceed this value. If it is not
specified, the value set in the ADBHost constructor is used.
:returns: an object contain
:raises: * ADBTimeoutError
* ADBError
The output of adb devices -l ::
$ adb devices -l
List of devices attached
b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
is parsed and placed into an object as in
[{'device_serial': 'b313b945', 'state': 'device', 'product': 'd2vzw',
'usb': '1-7', 'device': 'd2vzw', 'model': 'SCH_I535' }]
"""
# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
# from Android system/core/adb/transport.c statename()
re_device_info = re.compile(r'([^\s]+)\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)')
devices = []
lines = self.command_output(["devices", "-l"], timeout=timeout).split('\n')
for line in lines:
if line == 'List of devices attached ':
continue
match = re_device_info.match(line)
if match:
device = {
'device_serial': match.group(1),
'state': match.group(2)
}
remainder = line[match.end(2):].strip()
if remainder:
try:
device.update(dict([j.split(':')
for j in remainder.split(' ')]))
except ValueError:
self._logger.warning('devices: Unable to parse '
'remainder for device %s' % line)
devices.append(device)
return devices
|
mozilla-b2g/fxos-certsuite
|
mcts/utils/handlers/adb_b2g.py
|
ADBB2G.reboot
|
python
|
def reboot(self, timeout=None, wait_polling_interval=None):
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Rebooting device")
self.wait_for_device_ready(timeout,
after_first=lambda:self.command_output(["reboot"]))
|
Reboot the device, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for reboot.
:param wait_polling_interval: Interval at which to poll for device readiness.
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/utils/handlers/adb_b2g.py#L246-L259
|
[
"def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):\n \"\"\"Wait for the device to become ready for reliable interaction via adb.\n NOTE: if the device is *already* ready this method will timeout.\n\n :param timeout: Maximum time to wait for the device to become ready\n :param wait_polling_interval: Interval at which to poll for device readiness.\n :param after_first: A function to run after first polling for device\n readiness. This allows use cases such as stopping b2g\n setting the unready state, and then restarting b2g.\n \"\"\"\n\n if timeout is None:\n timeout = self._timeout\n if wait_polling_interval is None:\n wait_polling_interval = self._wait_polling_interval\n\n self._logger.info(\"Waiting for device to become ready\")\n profiles = self.get_profiles()\n assert len(profiles) == 1\n\n profile_dir = profiles.itervalues().next()\n prefs_file = posixpath.normpath(profile_dir + \"/prefs.js\")\n\n current_date = int(self.shell_output('date +\\\"%s\\\"'))\n set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)\n\n try:\n self.shell_output(\"touch -t %i %s\" % (set_date, prefs_file))\n except adb.ADBError:\n # See Bug 1092383, the format for the touch command\n # has changed for flame-kk builds.\n set_date = datetime.datetime.fromtimestamp(set_date)\n self.shell_output(\"touch -t %s %s\" %\n (set_date.strftime('%Y%m%d.%H%M%S'),\n prefs_file))\n\n def prefs_modified():\n times = [None, None]\n\n def inner():\n try:\n listing = self.shell_output(\"ls -l %s\" % (prefs_file))\n mode, user, group, size, date, time, name = listing.split(None, 6)\n mtime = \"%s %s\" % (date, time)\n except:\n return False\n if times[0] is None:\n times[0] = mtime\n else:\n times[1] = mtime\n if times[1] != times[0]:\n return True\n\n return False\n\n return inner\n\n poll_wait(prefs_modified(), timeout=timeout,\n polling_interval=wait_polling_interval, after_first=after_first)\n"
] |
class ADBB2G(adb.ADBDevice):
def __init__(self, *args, **kwargs):
if "wait_polling_interval" in kwargs:
self._wait_polling_interval = kwargs.pop("wait_polling_interval")
else:
self._wait_polling_interval = 1.0
adb.ADBDevice.__init__(self, *args, **kwargs)
def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):
"""Wait for the device to become ready for reliable interaction via adb.
NOTE: if the device is *already* ready this method will timeout.
:param timeout: Maximum time to wait for the device to become ready
:param wait_polling_interval: Interval at which to poll for device readiness.
:param after_first: A function to run after first polling for device
readiness. This allows use cases such as stopping b2g
setting the unready state, and then restarting b2g.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for device to become ready")
profiles = self.get_profiles()
assert len(profiles) == 1
profile_dir = profiles.itervalues().next()
prefs_file = posixpath.normpath(profile_dir + "/prefs.js")
current_date = int(self.shell_output('date +\"%s\"'))
set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)
try:
self.shell_output("touch -t %i %s" % (set_date, prefs_file))
except adb.ADBError:
# See Bug 1092383, the format for the touch command
# has changed for flame-kk builds.
set_date = datetime.datetime.fromtimestamp(set_date)
self.shell_output("touch -t %s %s" %
(set_date.strftime('%Y%m%d.%H%M%S'),
prefs_file))
def prefs_modified():
times = [None, None]
def inner():
try:
listing = self.shell_output("ls -l %s" % (prefs_file))
mode, user, group, size, date, time, name = listing.split(None, 6)
mtime = "%s %s" % (date, time)
except:
return False
if times[0] is None:
times[0] = mtime
else:
times[1] = mtime
if times[1] != times[0]:
return True
return False
return inner
poll_wait(prefs_modified(), timeout=timeout,
polling_interval=wait_polling_interval, after_first=after_first)
def wait_for_net(self, timeout=None, wait_polling_interval=None):
"""Wait for the device to be assigned an IP address.
:param timeout: Maximum time to wait for an IP address to be defined
:param wait_polling_interval: Interval at which to poll for ip address.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for network connection")
poll_wait(self.get_ip_address, timeout=timeout)
def stop(self, timeout=None):
self._logger.info("Stopping b2g process")
if timeout is None:
timeout = self._timeout
self.shell_bool("stop b2g")
def b2g_stopped():
processes = set(item[1].split("/")[-1] for item in self.get_process_list())
return "b2g" not in processes
poll_wait(b2g_stopped, timeout=timeout)
def start(self, wait=True, timeout=None, wait_polling_interval=None):
"""Start b2g, waiting for the adb connection to become stable.
:param wait:
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self._logger.info("Starting b2g process")
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
if wait:
self.wait_for_device_ready(timeout,
after_first=lambda:self.shell_bool("start b2g",
timeout=timeout))
else:
self.shell_bool("start b2g", timeout=timeout)
def restart(self, wait=True, timeout=None, wait_polling_interval=None):
"""Restart b2g, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self.stop(timeout=timeout)
self.start(wait, timeout=timeout, wait_polling_interval=wait_polling_interval)
def root(self, timeout=None, wait_polling_interval=None):
"""run adbd as root.
"""
self.command_output(["root"])
def get_profiles(self, profile_base="/data/b2g/mozilla", timeout=None):
"""Return a list of paths to gecko profiles on the device,
:param timeout: Timeout of each adb command run
:param profile_base: Base directory containing the profiles.ini file
"""
rv = {}
if timeout is None:
timeout = self._timeout
profile_path = posixpath.join(profile_base, "profiles.ini")
try:
proc = self.shell("cat %s" % profile_path, timeout=timeout)
config = ConfigParser.ConfigParser()
config.readfp(proc.stdout_file)
for section in config.sections():
items = dict(config.items(section))
if "name" in items and "path" in items:
path = items["path"]
if "isrelative" in items and int(items["isrelative"]):
path = posixpath.normpath("%s/%s" % (profile_base, path))
rv[items["name"]] = path
finally:
proc.stdout_file.close()
proc.stderr_file.close()
return rv
def devices(self, timeout=None):
"""Executes adb devices -l and returns a list of objects describing attached devices.
:param timeout: optional integer specifying the maximum time in
seconds for any spawned adb process to complete before
throwing an ADBTimeoutError. This timeout is per adb call. The
total time spent may exceed this value. If it is not
specified, the value set in the ADBHost constructor is used.
:returns: an object contain
:raises: * ADBTimeoutError
* ADBError
The output of adb devices -l ::
$ adb devices -l
List of devices attached
b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
is parsed and placed into an object as in
[{'device_serial': 'b313b945', 'state': 'device', 'product': 'd2vzw',
'usb': '1-7', 'device': 'd2vzw', 'model': 'SCH_I535' }]
"""
# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
# from Android system/core/adb/transport.c statename()
re_device_info = re.compile(r'([^\s]+)\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)')
devices = []
lines = self.command_output(["devices", "-l"], timeout=timeout).split('\n')
for line in lines:
if line == 'List of devices attached ':
continue
match = re_device_info.match(line)
if match:
device = {
'device_serial': match.group(1),
'state': match.group(2)
}
remainder = line[match.end(2):].strip()
if remainder:
try:
device.update(dict([j.split(':')
for j in remainder.split(' ')]))
except ValueError:
self._logger.warning('devices: Unable to parse '
'remainder for device %s' % line)
devices.append(device)
return devices
|
mozilla-b2g/fxos-certsuite
|
mcts/utils/handlers/adb_b2g.py
|
ADBB2G.get_profiles
|
python
|
def get_profiles(self, profile_base="/data/b2g/mozilla", timeout=None):
rv = {}
if timeout is None:
timeout = self._timeout
profile_path = posixpath.join(profile_base, "profiles.ini")
try:
proc = self.shell("cat %s" % profile_path, timeout=timeout)
config = ConfigParser.ConfigParser()
config.readfp(proc.stdout_file)
for section in config.sections():
items = dict(config.items(section))
if "name" in items and "path" in items:
path = items["path"]
if "isrelative" in items and int(items["isrelative"]):
path = posixpath.normpath("%s/%s" % (profile_base, path))
rv[items["name"]] = path
finally:
proc.stdout_file.close()
proc.stderr_file.close()
return rv
|
Return a list of paths to gecko profiles on the device,
:param timeout: Timeout of each adb command run
:param profile_base: Base directory containing the profiles.ini file
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/utils/handlers/adb_b2g.py#L266-L294
| null |
class ADBB2G(adb.ADBDevice):
def __init__(self, *args, **kwargs):
if "wait_polling_interval" in kwargs:
self._wait_polling_interval = kwargs.pop("wait_polling_interval")
else:
self._wait_polling_interval = 1.0
adb.ADBDevice.__init__(self, *args, **kwargs)
def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):
"""Wait for the device to become ready for reliable interaction via adb.
NOTE: if the device is *already* ready this method will timeout.
:param timeout: Maximum time to wait for the device to become ready
:param wait_polling_interval: Interval at which to poll for device readiness.
:param after_first: A function to run after first polling for device
readiness. This allows use cases such as stopping b2g
setting the unready state, and then restarting b2g.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for device to become ready")
profiles = self.get_profiles()
assert len(profiles) == 1
profile_dir = profiles.itervalues().next()
prefs_file = posixpath.normpath(profile_dir + "/prefs.js")
current_date = int(self.shell_output('date +\"%s\"'))
set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)
try:
self.shell_output("touch -t %i %s" % (set_date, prefs_file))
except adb.ADBError:
# See Bug 1092383, the format for the touch command
# has changed for flame-kk builds.
set_date = datetime.datetime.fromtimestamp(set_date)
self.shell_output("touch -t %s %s" %
(set_date.strftime('%Y%m%d.%H%M%S'),
prefs_file))
def prefs_modified():
times = [None, None]
def inner():
try:
listing = self.shell_output("ls -l %s" % (prefs_file))
mode, user, group, size, date, time, name = listing.split(None, 6)
mtime = "%s %s" % (date, time)
except:
return False
if times[0] is None:
times[0] = mtime
else:
times[1] = mtime
if times[1] != times[0]:
return True
return False
return inner
poll_wait(prefs_modified(), timeout=timeout,
polling_interval=wait_polling_interval, after_first=after_first)
def wait_for_net(self, timeout=None, wait_polling_interval=None):
"""Wait for the device to be assigned an IP address.
:param timeout: Maximum time to wait for an IP address to be defined
:param wait_polling_interval: Interval at which to poll for ip address.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for network connection")
poll_wait(self.get_ip_address, timeout=timeout)
def stop(self, timeout=None):
self._logger.info("Stopping b2g process")
if timeout is None:
timeout = self._timeout
self.shell_bool("stop b2g")
def b2g_stopped():
processes = set(item[1].split("/")[-1] for item in self.get_process_list())
return "b2g" not in processes
poll_wait(b2g_stopped, timeout=timeout)
def start(self, wait=True, timeout=None, wait_polling_interval=None):
"""Start b2g, waiting for the adb connection to become stable.
:param wait:
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self._logger.info("Starting b2g process")
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
if wait:
self.wait_for_device_ready(timeout,
after_first=lambda:self.shell_bool("start b2g",
timeout=timeout))
else:
self.shell_bool("start b2g", timeout=timeout)
def restart(self, wait=True, timeout=None, wait_polling_interval=None):
"""Restart b2g, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self.stop(timeout=timeout)
self.start(wait, timeout=timeout, wait_polling_interval=wait_polling_interval)
def reboot(self, timeout=None, wait_polling_interval=None):
"""Reboot the device, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for reboot.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Rebooting device")
self.wait_for_device_ready(timeout,
after_first=lambda:self.command_output(["reboot"]))
def root(self, timeout=None, wait_polling_interval=None):
"""run adbd as root.
"""
self.command_output(["root"])
def devices(self, timeout=None):
"""Executes adb devices -l and returns a list of objects describing attached devices.
:param timeout: optional integer specifying the maximum time in
seconds for any spawned adb process to complete before
throwing an ADBTimeoutError. This timeout is per adb call. The
total time spent may exceed this value. If it is not
specified, the value set in the ADBHost constructor is used.
:returns: an object contain
:raises: * ADBTimeoutError
* ADBError
The output of adb devices -l ::
$ adb devices -l
List of devices attached
b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
is parsed and placed into an object as in
[{'device_serial': 'b313b945', 'state': 'device', 'product': 'd2vzw',
'usb': '1-7', 'device': 'd2vzw', 'model': 'SCH_I535' }]
"""
# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
# from Android system/core/adb/transport.c statename()
re_device_info = re.compile(r'([^\s]+)\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)')
devices = []
lines = self.command_output(["devices", "-l"], timeout=timeout).split('\n')
for line in lines:
if line == 'List of devices attached ':
continue
match = re_device_info.match(line)
if match:
device = {
'device_serial': match.group(1),
'state': match.group(2)
}
remainder = line[match.end(2):].strip()
if remainder:
try:
device.update(dict([j.split(':')
for j in remainder.split(' ')]))
except ValueError:
self._logger.warning('devices: Unable to parse '
'remainder for device %s' % line)
devices.append(device)
return devices
|
mozilla-b2g/fxos-certsuite
|
mcts/utils/handlers/adb_b2g.py
|
ADBB2G.devices
|
python
|
def devices(self, timeout=None):
# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
# from Android system/core/adb/transport.c statename()
re_device_info = re.compile(r'([^\s]+)\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)')
devices = []
lines = self.command_output(["devices", "-l"], timeout=timeout).split('\n')
for line in lines:
if line == 'List of devices attached ':
continue
match = re_device_info.match(line)
if match:
device = {
'device_serial': match.group(1),
'state': match.group(2)
}
remainder = line[match.end(2):].strip()
if remainder:
try:
device.update(dict([j.split(':')
for j in remainder.split(' ')]))
except ValueError:
self._logger.warning('devices: Unable to parse '
'remainder for device %s' % line)
devices.append(device)
return devices
|
Executes adb devices -l and returns a list of objects describing attached devices.
:param timeout: optional integer specifying the maximum time in
seconds for any spawned adb process to complete before
throwing an ADBTimeoutError. This timeout is per adb call. The
total time spent may exceed this value. If it is not
specified, the value set in the ADBHost constructor is used.
:returns: an object contain
:raises: * ADBTimeoutError
* ADBError
The output of adb devices -l ::
$ adb devices -l
List of devices attached
b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
is parsed and placed into an object as in
[{'device_serial': 'b313b945', 'state': 'device', 'product': 'd2vzw',
'usb': '1-7', 'device': 'd2vzw', 'model': 'SCH_I535' }]
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/utils/handlers/adb_b2g.py#L296-L343
| null |
class ADBB2G(adb.ADBDevice):
def __init__(self, *args, **kwargs):
if "wait_polling_interval" in kwargs:
self._wait_polling_interval = kwargs.pop("wait_polling_interval")
else:
self._wait_polling_interval = 1.0
adb.ADBDevice.__init__(self, *args, **kwargs)
def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):
"""Wait for the device to become ready for reliable interaction via adb.
NOTE: if the device is *already* ready this method will timeout.
:param timeout: Maximum time to wait for the device to become ready
:param wait_polling_interval: Interval at which to poll for device readiness.
:param after_first: A function to run after first polling for device
readiness. This allows use cases such as stopping b2g
setting the unready state, and then restarting b2g.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for device to become ready")
profiles = self.get_profiles()
assert len(profiles) == 1
profile_dir = profiles.itervalues().next()
prefs_file = posixpath.normpath(profile_dir + "/prefs.js")
current_date = int(self.shell_output('date +\"%s\"'))
set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)
try:
self.shell_output("touch -t %i %s" % (set_date, prefs_file))
except adb.ADBError:
# See Bug 1092383, the format for the touch command
# has changed for flame-kk builds.
set_date = datetime.datetime.fromtimestamp(set_date)
self.shell_output("touch -t %s %s" %
(set_date.strftime('%Y%m%d.%H%M%S'),
prefs_file))
def prefs_modified():
times = [None, None]
def inner():
try:
listing = self.shell_output("ls -l %s" % (prefs_file))
mode, user, group, size, date, time, name = listing.split(None, 6)
mtime = "%s %s" % (date, time)
except:
return False
if times[0] is None:
times[0] = mtime
else:
times[1] = mtime
if times[1] != times[0]:
return True
return False
return inner
poll_wait(prefs_modified(), timeout=timeout,
polling_interval=wait_polling_interval, after_first=after_first)
def wait_for_net(self, timeout=None, wait_polling_interval=None):
"""Wait for the device to be assigned an IP address.
:param timeout: Maximum time to wait for an IP address to be defined
:param wait_polling_interval: Interval at which to poll for ip address.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for network connection")
poll_wait(self.get_ip_address, timeout=timeout)
def stop(self, timeout=None):
self._logger.info("Stopping b2g process")
if timeout is None:
timeout = self._timeout
self.shell_bool("stop b2g")
def b2g_stopped():
processes = set(item[1].split("/")[-1] for item in self.get_process_list())
return "b2g" not in processes
poll_wait(b2g_stopped, timeout=timeout)
def start(self, wait=True, timeout=None, wait_polling_interval=None):
"""Start b2g, waiting for the adb connection to become stable.
:param wait:
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self._logger.info("Starting b2g process")
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
if wait:
self.wait_for_device_ready(timeout,
after_first=lambda:self.shell_bool("start b2g",
timeout=timeout))
else:
self.shell_bool("start b2g", timeout=timeout)
def restart(self, wait=True, timeout=None, wait_polling_interval=None):
"""Restart b2g, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self.stop(timeout=timeout)
self.start(wait, timeout=timeout, wait_polling_interval=wait_polling_interval)
def reboot(self, timeout=None, wait_polling_interval=None):
"""Reboot the device, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for reboot.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Rebooting device")
self.wait_for_device_ready(timeout,
after_first=lambda:self.command_output(["reboot"]))
def root(self, timeout=None, wait_polling_interval=None):
"""run adbd as root.
"""
self.command_output(["root"])
def get_profiles(self, profile_base="/data/b2g/mozilla", timeout=None):
"""Return a list of paths to gecko profiles on the device,
:param timeout: Timeout of each adb command run
:param profile_base: Base directory containing the profiles.ini file
"""
rv = {}
if timeout is None:
timeout = self._timeout
profile_path = posixpath.join(profile_base, "profiles.ini")
try:
proc = self.shell("cat %s" % profile_path, timeout=timeout)
config = ConfigParser.ConfigParser()
config.readfp(proc.stdout_file)
for section in config.sections():
items = dict(config.items(section))
if "name" in items and "path" in items:
path = items["path"]
if "isrelative" in items and int(items["isrelative"]):
path = posixpath.normpath("%s/%s" % (profile_base, path))
rv[items["name"]] = path
finally:
proc.stdout_file.close()
proc.stderr_file.close()
return rv
|
mozilla-b2g/fxos-certsuite
|
mcts/certsuite/wait.py
|
Wait.until
|
python
|
def until(self, condition, is_true=None, message=""):
rv = None
last_exc = None
until = is_true or until_pred
start = self.clock.now
while not until(self.clock, self.end):
try:
rv = condition()
except (KeyboardInterrupt, SystemExit) as e:
raise e
except self.exceptions as e:
last_exc = sys.exc_info()
if isinstance(rv, bool) and not rv:
time.sleep(self.interval)
continue
if rv is not None:
return rv
self.clock.sleep(self.interval)
if message:
message = " with message: %s" % message
raise TimeoutException(
"Timed out after %s seconds%s" %
((self.clock.now - start), message), cause=last_exc)
|
Repeatedly runs condition until its return value evalutes to true,
or its timeout expires or the predicate evaluates to true.
This will poll at the given interval until the given timeout
is reached, or the predicate or conditions returns true. A
condition that returns null or does not evaluate to true will
fully elapse its timeout before raising a
``TimeoutException``.
If an exception is raised in the condition function and it's
not ignored, this function will raise immediately. If the
exception is ignored, it will continue polling for the
condition until it returns successfully or a
``TimeoutException`` is raised.
The return value of the callable `condition` will be returned
once it completes successfully.
:param condition: A callable function whose return value will
be returned by this function if it evalutes to true.
:param is_true: An optional predicate that will terminate and
return when it evalutes to False. It should be a function
that will be passed `clock` and an end time. The default
predicate will terminate a wait when the clock elapses the
timeout.
:param message: An optional message to include in the
exception's message if this function times out.
:returns: Return value of `condition`.
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/certsuite/wait.py#L78-L140
| null |
class Wait(object):
"""An explicit conditional utility class for waiting until a condition
evalutes to true or not null.
This will repeatedly evaluate a condition in anticipation for a
truthy return value, or its timeout to expire, or its waiting
predicate to become true.
A ``Wait`` instance defines the maximum amount of time to wait for
a condition, as well as the frequency with which to check the
condition. Furthermore, the user may configure the wait to ignore
specific types of exceptions whilst waiting.
"""
def __init__(self, timeout=DEFAULT_TIMEOUT,
interval=DEFAULT_INTERVAL, ignored_exceptions=None,
clock=None):
"""Configure the instance to have a custom timeout, interval, and a
list of ignored exceptions.
Optionally a different time implementation than the one
provided by the standard library (time, through
``SystemClock``) can also be provided.
Sample usage::
# Wait 30 seconds for condition to return "foo", checking
# it every 5 seconds.
wait = Wait(timeout=30, interval=5)
foo = wait.until(lambda: return get_foo())
:param timeout: How long to wait for the evaluated condition
to become true. The default timeout is
`wait.DEFAULT_TIMEOUT`.
:param interval: How often the condition should be evaluated.
In reality the interval may be greater as the cost of
evaluating the condition function is not factored in. The
default polling interval is `wait.DEFAULT_INTERVAL`.
:param ignored_exceptions: Ignore specific types of exceptions
whilst waiting for the condition. Any exceptions not
whitelisted will be allowed to propagate, terminating the
wait.
:param clock: Allows overriding the use of the runtime's
default time library. See ``wait.SystemClock`` for
implementation details.
"""
self.timeout = timeout
self.interval = interval
self.clock = clock or SystemClock()
self.end = self.clock.now + self.timeout
exceptions = []
if ignored_exceptions is not None:
if isinstance(ignored_exceptions, collections.Iterable):
exceptions.extend(iter(ignored_exceptions))
else:
exceptions.append(ignored_exceptions)
self.exceptions = tuple(set(exceptions))
|
mozilla-b2g/fxos-certsuite
|
mcts/tools/webidl/process_idl.py
|
main
|
python
|
def main(argv):
argparser = argparse.ArgumentParser()
argparser.add_argument("manifest", help="Manifest file for the idl")
argparser.add_argument("b2g", help="Path to b2g directory (e.g. ~/B2G")
args = argparser.parse_args(argv[1:])
with open(args.manifest, 'r') as f:
manifest = json.loads(f.read())
# import WebIDL using a path relative to the gecko tree
sys.path.append(os.path.join(args.b2g, 'gecko', 'dom', 'bindings', 'parser'))
import WebIDL
parser = WebIDL.Parser()
webidl_path = args.b2g
# embed idl files in individual script tags
for filename in manifest['files']:
path = os.path.realpath(os.path.join(webidl_path, filename))
with open(path, 'r') as f:
parser.parse(''.join([line for line in f.readlines() if not line.startswith('#')]))
results = parser.finish()
tested = []
untested = []
for result in results:
if isinstance(result, WebIDL.IDLImplementsStatement):
continue
if isinstance(result, WebIDL.IDLTypedefType):
tested.append(jsonify_typedef(result))
continue
if result.isInterface():
if result.isExternal():
continue
print(result.identifier.name)
if result.identifier.name in manifest['untested']:
untested.append(jsonify_interface(result))
else:
tested.append(jsonify_interface(result))
with open('merged_idl.js', 'w') as merged:
merged.write('TESTED_IDL=')
merged.write(json.dumps(tested))
merged.write(';\n')
merged.write('UNTESTED_IDL=')
merged.write(json.dumps(untested))
merged.write(';\n')
|
This parses a JSON manifest file containing a list of webidl files and
generates a file containing JavaScript arrays of JSON objects for
each webidl file.
usage: process_idl.py manifest.json ~/B2G
The generated js file can then be included with the test app.
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/tools/webidl/process_idl.py#L97-L158
| null |
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import json
import os
import re
import sys
# Map to types recognized by the idlharness.js tests
typenames = {
"Boolean" : "boolean",
"Double" : "double",
"UnrestrictedDouble" : "double",
}
def jsonify_interface(intf):
result = {}
result['name'] = intf.identifier.name
result['type'] = "interface"
result['extAttrs'] = []
# nothing further for externals
if intf.isExternal():
return result
# there doesn't seem to be a clean way to test for this
try:
result['partial'] = intf._isPartial
except AttributeError:
result['partial'] = False
members = []
for intf_member in intf.members:
member = {}
member['extAttrs'] = []
if intf_member.isAttr():
member['name'] = intf_member.identifier.name
member['type'] = 'attribute'
member['readonly'] = intf_member.readonly
member['idlType'] = jsonify_type(intf_member.type)
elif intf_member.isMethod():
member['name'] = intf_member.identifier.name
member['type'] = 'operation'
member['getter'] = intf_member.isGetter()
member['setter'] = intf_member.isSetter()
member['creator'] = intf_member.isCreator()
member['deleter'] = intf_member.isDeleter()
member['stringifier'] = intf_member.isStringifier()
member['jsonofier'] = intf_member.isJsonifier()
member['legacycaller'] = intf_member.isLegacycaller()
overloads = intf_member.signatures()
for overload in overloads:
ret = overload[0]
member['idlType'] = jsonify_type(ret)
args = overload[1]
arguments = []
for arg in args:
argument = {}
argument['name'] = arg.identifier.name
argument['optional'] = False #TODO
argument['variadic'] = False #TODO
argument['idlType'] = jsonify_type(arg.type)
arguments.append(argument)
member['arguments'] = arguments
# idlharness can only handle one overload at the moment
break
members.append(member)
result['members'] = members
return json.dumps(result)
def jsonify_type(t):
result = {}
result['sequence'] = t.isSequence()
result['nullable'] = t.nullable()
result['array'] = t.isArray()
result['union'] = t.isUnion()
result['idlType'] = typenames.get(str(t), str(t))
return result
def jsonify_typedef(typedef):
result = {}
result['name'] = typedef.identifier.name
result['type'] = "typedef"
result['extAttrs'] = []
return json.dumps(result)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
mozilla-b2g/fxos-certsuite
|
mcts/tools/webidl/manifest_parser.py
|
main
|
python
|
def main(argv):
argparser = argparse.ArgumentParser()
argparser.add_argument("gecko", help="/B2G/gecko/dom/webidl")
args = argparser.parse_args(argv[1:])
files = [ "gecko/dom/webidl/" + f for f in listdir(args.gecko) if isfile(join(args.gecko,f)) and f.endswith("webidl") ]
files.sort()
with open('manifest_generated.json', 'w') as merged:
merged.write('{\n "files": [\n')
merged.write(" \"" + "\",\n \"".join(files) + "\"\n")
merged.write(' ],\n "untested": [\n ],\n "skipped": [\n ]\n}\n')
|
This will generate a manifest file for you, which you then need to modify.
There are three categories: files, untested, skipped.
You can reference the current manifest.json.
usage: manifest_parser.py (GECKO LOCATION: /B2G/gecko/dom/webidl)
The generated file can then be used with process_idl.py
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/tools/webidl/manifest_parser.py#L15-L36
| null |
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import json
import re
import sys
from os import listdir
from os.path import isfile, join
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
mozilla-b2g/fxos-certsuite
|
mcts/certsuite/cert.py
|
run_marionette_script
|
python
|
def run_marionette_script(script, chrome=False, async=False, host='localhost', port=2828):
m = DeviceHelper.getMarionette(host, port)
m.start_session()
if chrome:
m.set_context(marionette.Marionette.CONTEXT_CHROME)
if not async:
result = m.execute_script(script)
else:
result = m.execute_async_script(script)
m.delete_session()
return result
|
Create a Marionette instance and run the provided script
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/certsuite/cert.py#L520-L531
|
[
"def getMarionette(host='localhost', port=2828):\n if not DeviceHelper.marionette:\n DeviceHelper.marionette = Marionette(host, port)\n\n return DeviceHelper.marionette\n"
] |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import ConfigParser
import json
import logging
import os
import sys
import pkg_resources
import re
import StringIO
import time
import traceback
import wait
from py.xml import html
from zipfile import ZipFile
import fxos_appgen
import marionette
import mozdevice
import moznetwork
import wptserve
from mozlog.structured import commandline
from omni_analyzer import OmniAnalyzer
from mcts.utils.device.devicehelper import DeviceHelper
"""Signalizes whether client has made initial connection to HTTP
server.
This is used for whilst waiting for the user to enter the correct
hostname and port to the device's browser.
"""
connected = False
headers = None
installed = False
webapi_results = None
webapi_results_embed_app = None
last_test_started = 'None'
logger = None
expected_results_path = '../static/expected_results'
# supported_versions = ["2.2", "2.1", "2.0", "1.4", "1.3"]
expected_result_folder = os.path.join('..', 'static', 'expected_results')
@wptserve.handlers.handler
def webapi_results_handler(request, response):
global headers
headers = request.headers
global webapi_results
webapi_results = json.loads(request.POST["results"])
response.headers.set('Access-Control-Allow-Origin', '*')
response.content = "ok"
@wptserve.handlers.handler
def webapi_results_embed_apps_handler(request, response):
global webapi_results_embed_app
webapi_results_embed_app = json.loads(request.POST["results"])
response.headers.set('Access-Control-Allow-Origin', '*')
response.content = "ok"
@wptserve.handlers.handler
def webapi_log_handler(request, response):
global last_test_started
global logger
log_string = request.POST["log"]
index = log_string.find('test started:')
if index > -1:
last_test_started = log_string[index + len('test started:'):]
logger.debug(log_string)
response.headers.set('Access-Control-Allow-Origin', '*')
response.content = "ok"
routes = [("POST", "/webapi_results", webapi_results_handler),
("POST", "/webapi_results_embed_apps", webapi_results_embed_apps_handler),
("POST", "/webapi_log", webapi_log_handler),
("GET", "/*", wptserve.handlers.file_handler)]
mcts_current_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
static_path = os.path.join(mcts_current_path, "static")
def read_manifest(app):
with open(os.path.join(app, 'manifest.webapp')) as f:
manifest = f.read()
return manifest
def package_app(path, extrafiles={}):
app_path = 'app.zip'
with ZipFile(app_path, 'w') as zip_file:
for root, dirs, files in os.walk(path):
for f in files:
if f in extrafiles:
continue
zip_file.write(os.path.join(root, f), f)
for f in extrafiles:
zip_file.writestr(f, extrafiles[f])
def install_app(logger, appname, version, apptype, apppath, all_perms,
extrafiles, launch=False):
logger.debug('uninstalling: %s' % appname)
fxos_appgen.uninstall_app(appname)
logger.debug('packaging: %s version: %s apptype: %s all_perms: %s' %
(appname, version, apptype, all_perms))
details = fxos_appgen.create_details(version, all_perms=all_perms)
manifest = json.dumps(fxos_appgen.create_manifest(appname, details, apptype, version))
files = extrafiles.copy()
files['manifest.webapp'] = manifest
package_app(apppath, files)
logger.debug('installing: %s' % appname)
fxos_appgen.install_app(appname, 'app.zip', script_timeout=120000)
if launch:
logger.debug('launching: %s' % appname)
fxos_appgen.launch_app(appname)
def test_id(suite, test, subtest):
return '%s.%s.%s' % (suite, test, subtest)
def log_pass(logger, testid, message=''):
logger.test_end(testid, 'PASS', expected='PASS', message=message)
def log_ok(logger, testid, message=''):
logger.test_end(testid, 'OK', expected='OK', message=message)
def log_fail(logger, testid, message=''):
logger.test_end(testid, 'FAIL', expected='PASS', message=message)
def test_omni_analyzer(logger, report, args):
testid = test_id('cert', 'omni-analyzer', 'check-omni-diff')
logger.test_start(testid)
omni_ref_path = pkg_resources.resource_filename(
__name__, os.path.join(expected_results_path, 'expected_omni_results', 'omni.ja.mcts'))
omni_analyzer = OmniAnalyzer(omni_ref_path, logger=logger)
if args.html_result_file is not None:
diff, is_run_success = omni_analyzer.run(html_format=True, results_file=os.path.join(os.path.dirname(args.html_result_file), 'omni_diff_report.html'))
else:
diff, is_run_success = omni_analyzer.run()
report["omni_result"] = diff
def test_webapi(logger, report, args, addr):
errors = False
logger.debug('Running webapi verifier tests')
for apptype in ['web', 'privileged', 'certified']:
global webapi_results
webapi_results = None
appname = '%s WebAPI Verifier' % apptype.capitalize()
sampleapppath = os.path.join(static_path, 'sample_apps')
apppath = os.path.join(sampleapppath, 'webapi-test-app')
install_app(logger, appname, args.version, apptype, apppath, True,
{'results_uri.js':
'RESULTS_URI="http://%s:%s/webapi_results";LOG_URI="http://%s:%s/webapi_log";' % (addr * 2)},
True)
try:
wait.Wait(timeout=120).until(lambda: webapi_results is not None)
except wait.TimeoutException:
logger.error('Timed out waiting for results for test: %s' % last_test_started)
errors = True
logger.debug('uninstalling: %s' % appname)
fxos_appgen.uninstall_app(appname)
if webapi_results is None:
continue
if "headers" not in report:
report["headers"] = headers
results_folder = 'webapi_ref/'
results_filename = '%s.json' % apptype
if args.generate_reference:
dirname = os.path.dirname(results_folder + results_filename)
if not os.path.exists(dirname) and dirname <> "":
os.makedirs(dirname)
with open(results_folder + results_filename, 'w') as f:
f.write(json.dumps(webapi_results, sort_keys=True, indent=2))
else:
file_path = pkg_resources.resource_filename(
__name__, os.path.sep.join([expected_results_path, 'expected_webapi_results', results_filename]))
parse_webapi_results(file_path, webapi_results, '%s-' % apptype, logger, report)
logger.debug('Done.')
if errors:
logger.error('Test webapi with errors')
def test_permissions(logger, report, args, addr):
errors = False
#logger.test_start('permissions')
logger.debug('Running permissions tests')
permissions = get_permissions()
# test default permissions
for apptype in ['web', 'privileged', 'certified']:
logger.debug('Testing default permissions: %s' % apptype)
results = {}
expected_webapi_results = None
appname = 'Default Permissions Test App'
fxos_appgen.uninstall_app(appname)
installed_appname = appname.lower().replace(" ", "-")
fxos_appgen.generate_app(appname, install=True, app_type=apptype,
all_perm=True)
for permission in permissions:
result = get_permission(permission, installed_appname)
results[permission] = result
results_folder = 'permissions_ref/'
results_filename = '%s.json' % apptype
if args.generate_reference:
dirname = os.path.dirname(results_folder + results_filename)
if not os.path.exists(dirname) and dirname <> "":
os.makedirs(dirname)
with open(results_folder + results_filename, 'w') as f:
f.write(json.dumps(results, sort_keys=True, indent=2))
else:
file_path = pkg_resources.resource_filename(__name__,
os.path.sep.join([expected_results_path, 'expected_permissions_results',
results_filename]))
parse_permissions_results(file_path, results, '%s-' % apptype,
logger, report)
fxos_appgen.uninstall_app(appname)
# test individual permissions
logger.debug('Testing individual permissions')
results = {}
# first install test app for embed-apps permission test
embed_appname = 'Embed Apps Test App'
sampleapppath = os.path.join(static_path, 'sample_apps')
apppath = os.path.join(sampleapppath, 'embed-apps-test-app')
install_app(logger, embed_appname, args.version, 'certified', apppath, True,
{'results_uri.js': 'RESULTS_URI="http://%s:%s/webapi_results_embed_apps";' % addr},
False)
appname = 'Permissions Test App'
installed_appname = appname.lower().replace(" ", "-")
sampleapppath = os.path.join(static_path, 'sample_apps')
apppath = os.path.join(sampleapppath, 'permissions-test-app')
install_app(logger, appname, args.version, 'web', apppath, False,
{'results_uri.js':
'RESULTS_URI="http://%s:%s/webapi_results";LOG_URI="http://%s:%s/webapi_log";' % (addr * 2)})
for permission in [None] + permissions:
global webapi_results
global webapi_results_embed_app
webapi_results = None
webapi_results_embed_app = None
# if we try to launch after killing too quickly, the app seems
# to not fully launch
time.sleep(5)
if permission is not None:
logger.debug('testing permission: %s' % permission)
set_permission(permission, u'allow', installed_appname)
else:
logger.debug('testing permission: None')
fxos_appgen.launch_app(appname)
try:
wait.Wait(timeout=60).until(lambda: webapi_results is not None)
# embed-apps results are posted to a separate URL
if webapi_results_embed_app:
webapi_results['embed-apps'] = webapi_results_embed_app['embed-apps']
else:
webapi_results['embed-apps'] = False
if permission is None:
expected_webapi_results = webapi_results
else:
results[permission] = diff_results(expected_webapi_results, webapi_results)
except wait.TimeoutException:
logger.error('Timed out waiting for results')
errors = True
if permission is not None:
results[permission] = 'timed out'
else:
# If we timeout on our baseline results there is
# no point in proceeding.
logger.error('Could not get baseline results for permissions. Skipping tests.')
break
kill('app://' + installed_appname)
if permission is not None:
set_permission(permission, u'deny', installed_appname)
logger.debug('uninstalling: %s' % appname)
fxos_appgen.uninstall_app(appname)
# we test open-remote-window separately as opening a remote
# window might stop the test app
results['open-remote-window'] = test_open_remote_window(logger,
args.version, addr)
results_folder = 'permissions_ref/'
results_filename = 'permissions.json'
if args.generate_reference:
dirname = os.path.dirname(results_folder + results_filename)
if not os.path.exists(dirname) and dirname <> "":
os.makedirs(dirname)
with open(results_folder + results_filename, 'w') as f:
f.write(json.dumps(results, sort_keys=True, indent=2))
else:
file_path = pkg_resources.resource_filename(__name__,
os.path.sep.join([expected_results_path, 'expected_permissions_results',
results_filename]))
parse_permissions_results(file_path, results, 'individual-',
logger, report)
logger.debug('Done.')
if errors:
logger.error('Test individual with errors')
# clean up embed-apps test app
logger.debug('uninstalling: %s' % embed_appname)
fxos_appgen.uninstall_app(embed_appname)
def test_crash_reporter(logger, report):
testid = test_id('cert','crash-reporter', 'crash-report-toggle')
logger.test_start(testid)
logger.debug('start checking test reporter')
crash_report_toggle = (report.get('application_ini', {})
.get('Crash Reporter', {})
.get('enabled'))
if crash_report_toggle == '1':
log_pass(logger, testid)
else:
log_fail(logger, testid, 'crash report toggle = %s' % crash_report_toggle)
def test_user_agent(logger, report):
testid = test_id('cert','user-agent', 'user-agent-string')
logger.test_start(testid)
logger.debug('Running user agent tests')
user_agent = run_marionette_script("return navigator.userAgent;")
# See https://developer.mozilla.org/en-US/docs/Gecko_user_agent_string_reference#Firefox_OS
# and https://wiki.mozilla.org/B2G/User_Agent/Device_Model_Inclusion_Requirements
logger.debug('UserAgent: %s' % user_agent)
ua_rexp = re.compile("Mozilla/(\d+\.\d+) \((Mobile|Tablet)(;.*)?; rv:(\d+\.\d+)\) Gecko/(\d+\.\d+) Firefox/(\d+\.\d+)")
m = ua_rexp.match(user_agent)
valid = True
if m is None or len(m.groups()) != 6:
# no match
valid = False
message = 'Did not match regular expression'
elif m.groups()[2] != None:
# Specified a device string, strip leading ';' and any leading/trailing whitespace
device = m.groups()[2][1:].strip()
# Do not use slash ("/"), semicolon (";"), round brackets or any whitespace.
device_rexp = re.compile('[/;\(\)\s]')
m = device_rexp.search(device)
if m:
valid = False
message = 'Device identifier: "%s" contains forbidden characters' % device
if valid:
log_pass(logger, testid)
else:
log_ok(logger, testid, 'current user-agent string: %s: %s' % (user_agent, message))
def test_open_remote_window(logger, version, addr):
global webapi_results
results = {}
for value in ['deny', 'allow']:
result = False
webapi_results = None
appname = 'Open Remote Window Test App'
installed_appname = appname.lower().replace(" ", "-")
sampleapppath = os.path.join(static_path, 'sample_apps')
apppath = os.path.join(sampleapppath, 'open-remote-window-test-app')
install_app(logger, appname, version, 'web', apppath, False,
{'results_uri.js':
'RESULTS_URI="http://%s:%s/webapi_results";' % addr})
set_permission('open-remote-window', value, installed_appname)
fxos_appgen.launch_app(appname)
try:
wait.Wait(timeout=30).until(lambda: webapi_results is not None)
except wait.TimeoutException:
results[value] = 'timed out'
if webapi_results is not None:
result = webapi_results['open-remote-window']
# launching here will force the remote window (if any) to be hidden
# but will not retrigger the test.
fxos_appgen.launch_app(appname)
logger.debug('uninstalling: %s' % appname)
fxos_appgen.uninstall_app(appname)
results['open-remote-window-' + value] = result
return results
def diff_results(a, b):
a_set = set(a.keys())
b_set = set(b.keys())
result = list(b_set.difference(a_set))
same_keys = a_set.intersection(b_set)
for key in same_keys:
if type(a[key]) is dict:
if type(b[key]) is not dict:
result.append(key + ' (expected object)')
else:
result.extend([key + '.' + item for item in diff_results(a[key], b[key])])
return result
def log_results(diff, logger, report, test_group, name):
testid = test_id('cert', test_group, name)
if diff:
report[name.replace('-', '_')] = diff
for result in diff:
logger.test_start(testid)
try:
log_fail(logger, testid, 'Unexpected result for: %s' % result['name'])
except TypeError:
log_fail(logger, testid, 'Unexpected result for: %s' % result)
else:
logger.test_start(testid)
log_pass(logger, testid)
def parse_webapi_results(expected_results_path, results, prefix, logger, report):
with open(expected_results_path) as f:
expected_results = json.load(f)
#compute difference in window functions
expected_window = expected_results["windowList"]
window = results["windowList"]
missing_window = diff_results(window, expected_window)
log_results(missing_window, logger, report, 'webapi', prefix + 'missing-window-functions')
added_window = diff_results(expected_window, window)
log_results(added_window, logger, report, 'webapi', prefix + 'added-window-functions')
# compute differences in WebIDL results
expected_webidl = {}
for result in expected_results['webIDLResults']:
expected_webidl[result['name']] = result
unexpected_webidl_results = []
added_webidl_results = []
for result in results['webIDLResults']:
try:
if expected_webidl[result['name']]['result'] != result['result']:
unexpected_webidl_results.append(result)
del expected_webidl[result['name']]
except KeyError:
added_webidl_results.append(result)
# since we delete found results above, anything here is missing
missing_webidl_results = list(expected_webidl.values())
log_results(unexpected_webidl_results, logger, report, 'webapi', prefix + 'unexpected-webidl-results')
log_results(added_webidl_results, logger, report, 'webapi', prefix + 'added-webidl-results')
log_results(missing_webidl_results, logger, report, 'webapi', prefix + 'missing-webidl-results')
def parse_permissions_results(expected_results_path, results, prefix, logger, report):
with open(expected_results_path) as f:
expected_results = json.load(f)
# compute differences in permissions results
unexpected_results = diff_results(expected_results, results)
log_results(unexpected_results, logger, report, 'permissions', prefix + 'unexpected-permissions-results')
return not unexpected_results
def kill(name):
"""Kill the specified app"""
script = """
let manager = window.wrappedJSObject.appWindowManager || new window.wrappedJSObject.AppWindowManager();
manager.kill('%s');
"""
return run_marionette_script(script % name)
def get_permission(permission, app):
# The object created to wrap PermissionSettingsModule is to work around
# an intermittent bug where it will sometimes be undefined.
script = """
const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
var a = {b: Cu.import("resource://gre/modules/PermissionSettings.jsm")};
return a.b.PermissionSettingsModule.getPermission('%s', '%s/manifest.webapp', '%s', '', false);
"""
app_url = 'app://' + app
return run_marionette_script(script % (permission, app_url, app_url), True)
def get_permissions():
"""Return permissions in PermissionsTable.jsm"""
script = """
const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
Cu.import("resource://gre/modules/PermissionsTable.jsm");
result = []
for (permission in PermissionsTable) {
result.push(permission);
}
return result;
"""
return run_marionette_script(script, True)
def set_permission(permission, value, app):
"""Set a permission for the specified app
Value should be 'deny' or 'allow'
"""
# The object created to wrap PermissionSettingsModule is to work around
# an intermittent bug where it will sometimes be undefined.
script = """
const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
var a = {b: Cu.import("resource://gre/modules/PermissionSettings.jsm")};
return a.b.PermissionSettingsModule.addPermission({
type: '%s',
origin: '%s',
manifestURL: '%s/manifest.webapp',
value: '%s',
browserFlag: false
});
"""
app_url = 'app://' + app
run_marionette_script(script % (permission, app_url, app_url, value), True)
def make_html_report(path, report):
def tabelize(value):
try:
rows = []
for key in value.keys():
rows.append(html.tr(html.td(html.pre(key)), html.td(tabelize(value[key]))))
return html.table(rows)
except AttributeError:
if type(value) == type([]):
return html.table(map(tabelize, value))
else:
return html.pre(value)
body_els = []
keys = report.keys()
keys.sort()
links = []
for key in keys:
links.append(html.li(html.a(key, href="#" + key)))
body_els.append(html.ul(links))
for key in keys:
body_els.append(html.a(html.h1(key), id=key))
body_els.append(tabelize(report[key]))
with open(path, 'w') as f:
doc = html.html(html.head(html.style('table, td {border: 1px solid;}')), html.body(body_els))
f.write(str(doc))
def get_application_ini(dm):
# application.ini information
#appinicontents = dm.pullFile('/system/b2g/application.ini')
#sf = StringIO.StringIO(appinicontents)
#config = ConfigParser.ConfigParser()
#config.readfp(sf)
ini_file = '_app.ini'
dm.pull('/system/b2g/application.ini', ini_file)
config = ConfigParser.RawConfigParser()
config.read(ini_file)
application_ini = {}
for section in config.sections():
application_ini[section] = dict(config.items(section))
return application_ini
def get_buildprop(dm):
# get build properties
buildprops = {}
buildpropoutput = dm.shell_output("cat /system/build.prop")
for buildprop in [line for line in buildpropoutput.splitlines() if '=' \
in line]:
eq = buildprop.find('=')
prop = buildprop[:eq]
val = buildprop[eq + 1:]
buildprops[prop] = val
return buildprops
def get_processes_running(dm):
return map(lambda p: {'name': p[1], 'user': p[2]}, dm.get_process_list())
def get_kernel_version(dm):
return dm.shell_output("cat /proc/version")
def _run(args, logger):
# This function is to simply make the cli() function easier to handle
test_groups = [
'omni-analyzer',
'permissions',
'webapi',
'user-agent',
'crash-reporter'
]
if args.list_test_groups:
for t in test_groups:
print t
return 0
skip_tests = []
test_groups = set(args.include if args.include else test_groups)
if args.device_profile:
skiplist = []
with open(args.device_profile, 'r') as device_profile_file:
skiplist = json.load(device_profile_file)['result']['cert']
skip_tests = [x for x in test_groups if x in skiplist]
test_groups = [x for x in test_groups if x not in skiplist]
report = {'buildprops': {}}
logging.basicConfig()
# Step 1: Get device information
try:
dm = DeviceHelper.getDevice()
except mozdevice.DMError as e:
print "Error connecting to device via adb (error: %s). Please be " \
"sure device is connected and 'remote debugging' is enabled." % \
e.msg
raise
# wait here to make sure marionette is running
logger.debug('Attempting to set up port forwarding for marionette')
retries = 0
while retries < 5:
try:
m = DeviceHelper.getMarionette()
m.start_session()
m.delete_session()
break
except (IOError, TypeError):
time.sleep(5)
retries += 1
else:
raise Exception("Couldn't connect to marionette after %d attempts. " \
"Is the marionette extension installed?" % retries)
# if args.version not in supported_versions:
# print "%s is not a valid version. Please enter one of %s" % \
# (args.version, supported_versions)
# raise Exception("%s is not a valid version" % args.version)
result_file_path = args.result_file
if not result_file_path:
result_file_path = "results.json"
# Make sure we can write to the results file before running tests.
# This will also ensure this file exists in case we error out later on.
try:
result_file = open(result_file_path, "w")
result_file.close()
except IOError as e:
print 'Could not open result file for writing: %s errno: %d' % (result_file_path, e.errno)
raise
report['buildprops'] = get_buildprop(dm)
report['processes_running'] = get_processes_running(dm)
report['kernel_version'] = get_kernel_version(dm)
report['application_ini'] = get_application_ini(dm)
logger.suite_start(tests=[])
# record skipped test to report
for test in skip_tests:
logger.test_start(test)
logger.test_end(test, 'SKIP', message='Skipped by device profile')
# run the omni.ja analyzer
if 'omni-analyzer' in test_groups:
test_omni_analyzer(logger, report, args)
# start webserver
if 'webapi' in test_groups or 'permissions' in test_groups:
httpd = wptserve.server.WebTestHttpd(
host=moznetwork.get_ip(), port=8000, routes=routes, doc_root=static_path)
httpd.start()
addr = (httpd.host, httpd.port)
# run webapi and webidl tests
if 'webapi' in test_groups:
test_webapi(logger, report, args, addr)
if 'permissions' in test_groups:
test_permissions(logger, report, args, addr)
if 'user-agent' in test_groups:
test_user_agent(logger, report)
if 'crash-reporter' in test_groups:
test_crash_reporter(logger, report)
logger.suite_end()
with open(result_file_path, "w") as result_file:
result_file.write(json.dumps(report, indent=2))
logger.debug('Results have been stored in: %s' % result_file_path)
if args.html_result_file is not None:
make_html_report(args.html_result_file, report)
logger.debug('HTML Results have been stored in: %s' % args.html_result_file)
def cli():
global logger
global webapi_results
global webapi_results_embed_app
reload(sys)
sys.setdefaultencoding('utf-8')
parser = argparse.ArgumentParser()
parser.add_argument("--version",
help="version of FxOS under test",
default="2.2",
action="store")
parser.add_argument("--debug",
help="enable debug logging",
action="store_true")
parser.add_argument("--list-test-groups",
help="print test groups available to run",
action="store_true")
parser.add_argument("--include",
metavar="TEST-GROUP",
help="include this test group",
action="append")
parser.add_argument("--result-file",
help="absolute file path to store the resulting json." \
"Defaults to results.json on your current path",
action="store")
parser.add_argument("--html-result-file",
help="absolute file path to store the resulting html.",
action="store")
parser.add_argument("--generate-reference",
help="Generate expected result files",
action="store_true")
parser.add_argument('-p', "--device-profile", action="store", type=os.path.abspath,
help="specify the device profile file path which could include skipped test case information")
commandline.add_logging_group(parser)
args = parser.parse_args()
if not args.debug:
logging.disable(logging.ERROR)
logger = commandline.setup_logging("certsuite", vars(args), {})
try:
_run(args, logger)
except:
logger.critical(traceback.format_exc())
raise
if __name__ == "__main__":
cli()
|
mozilla-b2g/fxos-certsuite
|
mcts/certsuite/cert.py
|
set_permission
|
python
|
def set_permission(permission, value, app):
# The object created to wrap PermissionSettingsModule is to work around
# an intermittent bug where it will sometimes be undefined.
script = """
const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
var a = {b: Cu.import("resource://gre/modules/PermissionSettings.jsm")};
return a.b.PermissionSettingsModule.addPermission({
type: '%s',
origin: '%s',
manifestURL: '%s/manifest.webapp',
value: '%s',
browserFlag: false
});
"""
app_url = 'app://' + app
run_marionette_script(script % (permission, app_url, app_url, value), True)
|
Set a permission for the specified app
Value should be 'deny' or 'allow'
|
train
|
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/certsuite/cert.py#L572-L590
|
[
"def run_marionette_script(script, chrome=False, async=False, host='localhost', port=2828):\n \"\"\"Create a Marionette instance and run the provided script\"\"\"\n m = DeviceHelper.getMarionette(host, port)\n m.start_session()\n if chrome:\n m.set_context(marionette.Marionette.CONTEXT_CHROME)\n if not async:\n result = m.execute_script(script)\n else:\n result = m.execute_async_script(script)\n m.delete_session()\n return result\n"
] |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import ConfigParser
import json
import logging
import os
import sys
import pkg_resources
import re
import StringIO
import time
import traceback
import wait
from py.xml import html
from zipfile import ZipFile
import fxos_appgen
import marionette
import mozdevice
import moznetwork
import wptserve
from mozlog.structured import commandline
from omni_analyzer import OmniAnalyzer
from mcts.utils.device.devicehelper import DeviceHelper
"""Signalizes whether client has made initial connection to HTTP
server.
This is used for whilst waiting for the user to enter the correct
hostname and port to the device's browser.
"""
connected = False
headers = None
installed = False
webapi_results = None
webapi_results_embed_app = None
last_test_started = 'None'
logger = None
expected_results_path = '../static/expected_results'
# supported_versions = ["2.2", "2.1", "2.0", "1.4", "1.3"]
expected_result_folder = os.path.join('..', 'static', 'expected_results')
@wptserve.handlers.handler
def webapi_results_handler(request, response):
    """Receive WebAPI results posted by the device and cache them.

    Side effects: updates the module-level ``headers`` and
    ``webapi_results`` globals that the test driver polls.
    """
    global headers, webapi_results
    headers = request.headers
    webapi_results = json.loads(request.POST["results"])
    # The posting app is served from a different origin; allow it to read the reply.
    response.headers.set('Access-Control-Allow-Origin', '*')
    response.content = "ok"
@wptserve.handlers.handler
def webapi_results_embed_apps_handler(request, response):
    """Receive results posted by the embedded (embed-apps) test app.

    Side effect: updates the module-level ``webapi_results_embed_app`` global.
    """
    global webapi_results_embed_app
    webapi_results_embed_app = json.loads(request.POST["results"])
    # Cross-origin POST from the device app; permit it to see the response.
    response.headers.set('Access-Control-Allow-Origin', '*')
    response.content = "ok"
@wptserve.handlers.handler
def webapi_log_handler(request, response):
    """Forward log lines posted by the device app into the structured logger.

    Also remembers the most recent 'test started:' marker in the
    module-level ``last_test_started`` so timeouts can name the culprit.
    """
    global last_test_started
    global logger
    log_string = request.POST["log"]
    marker = 'test started:'
    index = log_string.find(marker)
    if index > -1:
        last_test_started = log_string[index + len(marker):]
    logger.debug(log_string)
    response.headers.set('Access-Control-Allow-Origin', '*')
    response.content = "ok"
routes = [("POST", "/webapi_results", webapi_results_handler),
("POST", "/webapi_results_embed_apps", webapi_results_embed_apps_handler),
("POST", "/webapi_log", webapi_log_handler),
("GET", "/*", wptserve.handlers.file_handler)]
mcts_current_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
static_path = os.path.join(mcts_current_path, "static")
def read_manifest(app):
    """Return the raw text of ``manifest.webapp`` inside the app directory *app*."""
    manifest_path = os.path.join(app, 'manifest.webapp')
    with open(manifest_path) as manifest_file:
        return manifest_file.read()
def package_app(path, extrafiles=None):
    """Zip the files under *path* into ``./app.zip`` (flattened to basenames).

    Args:
        path: Directory tree whose files are added to the archive.
        extrafiles: Optional mapping of archive name -> content string; these
            are written last and shadow any on-disk file with the same basename.
    """
    # Fixed: mutable default argument ({}) replaced by the None sentinel.
    extrafiles = {} if extrafiles is None else extrafiles
    app_path = 'app.zip'
    with ZipFile(app_path, 'w') as zip_file:
        for root, dirs, files in os.walk(path):
            for filename in files:
                # Skip disk files that extrafiles will provide instead.
                if filename in extrafiles:
                    continue
                # NOTE(review): archive name is the bare basename, so equal
                # names in different subdirectories collide — TODO confirm intended.
                zip_file.write(os.path.join(root, filename), filename)
        for name in extrafiles:
            zip_file.writestr(name, extrafiles[name])
def install_app(logger, appname, version, apptype, apppath, all_perms,
                extrafiles, launch=False):
    """Package and (re)install a test app on the device via fxos_appgen.

    Uninstalls any previous copy first; optionally launches the app after
    installation.
    """
    logger.debug('uninstalling: %s' % appname)
    fxos_appgen.uninstall_app(appname)
    logger.debug('packaging: %s version: %s apptype: %s all_perms: %s' %
                 (appname, version, apptype, all_perms))
    details = fxos_appgen.create_details(version, all_perms=all_perms)
    manifest = json.dumps(fxos_appgen.create_manifest(appname, details, apptype, version))
    # Merge the generated manifest in with the caller's extra files.
    files = extrafiles.copy()
    files['manifest.webapp'] = manifest
    package_app(apppath, files)
    logger.debug('installing: %s' % appname)
    fxos_appgen.install_app(appname, 'app.zip', script_timeout=120000)
    if launch:
        logger.debug('launching: %s' % appname)
        fxos_appgen.launch_app(appname)
def test_id(suite, test, subtest):
return '%s.%s.%s' % (suite, test, subtest)
def _log_test_end(logger, testid, status, expected, message):
    # Single funnel for emitting structured test-end records.
    logger.test_end(testid, status, expected=expected, message=message)


def log_pass(logger, testid, message=''):
    """Record *testid* as a PASS (expected PASS)."""
    _log_test_end(logger, testid, 'PASS', 'PASS', message)


def log_ok(logger, testid, message=''):
    """Record *testid* as OK (expected OK, informational outcome)."""
    _log_test_end(logger, testid, 'OK', 'OK', message)


def log_fail(logger, testid, message=''):
    """Record *testid* as a FAIL against an expected PASS."""
    _log_test_end(logger, testid, 'FAIL', 'PASS', message)
def test_omni_analyzer(logger, report, args):
    """Diff the device's omni.ja against the packaged reference and record the result."""
    testid = test_id('cert', 'omni-analyzer', 'check-omni-diff')
    logger.test_start(testid)
    omni_ref_path = pkg_resources.resource_filename(
        __name__,
        os.path.join(expected_results_path, 'expected_omni_results', 'omni.ja.mcts'))
    analyzer = OmniAnalyzer(omni_ref_path, logger=logger)
    if args.html_result_file is None:
        diff, is_run_success = analyzer.run()
    else:
        # Place the HTML diff next to the main HTML report.
        html_path = os.path.join(os.path.dirname(args.html_result_file),
                                 'omni_diff_report.html')
        diff, is_run_success = analyzer.run(html_format=True, results_file=html_path)
    report["omni_result"] = diff
def test_webapi(logger, report, args, addr):
    """Install the WebAPI verifier app for each app type and collect its results.

    *addr* is the (host, port) pair of the local wptserve instance the device
    posts its results back to.
    """
    errors = False
    logger.debug('Running webapi verifier tests')
    for apptype in ['web', 'privileged', 'certified']:
        global webapi_results
        webapi_results = None
        appname = '%s WebAPI Verifier' % apptype.capitalize()
        sampleapppath = os.path.join(static_path, 'sample_apps')
        apppath = os.path.join(sampleapppath, 'webapi-test-app')
        install_app(logger, appname, args.version, apptype, apppath, True,
                    {'results_uri.js':
                     'RESULTS_URI="http://%s:%s/webapi_results";LOG_URI="http://%s:%s/webapi_log";' % (addr * 2)},
                    True)
        try:
            # The handler for /webapi_results flips webapi_results non-None.
            wait.Wait(timeout=120).until(lambda: webapi_results is not None)
        except wait.TimeoutException:
            logger.error('Timed out waiting for results for test: %s' % last_test_started)
            errors = True
        logger.debug('uninstalling: %s' % appname)
        fxos_appgen.uninstall_app(appname)
        if webapi_results is None:
            continue
        if "headers" not in report:
            report["headers"] = headers
        results_folder = 'webapi_ref/'
        results_filename = '%s.json' % apptype
        if args.generate_reference:
            dirname = os.path.dirname(results_folder + results_filename)
            if not os.path.exists(dirname) and dirname != "":
                os.makedirs(dirname)
            with open(results_folder + results_filename, 'w') as f:
                f.write(json.dumps(webapi_results, sort_keys=True, indent=2))
        else:
            file_path = pkg_resources.resource_filename(
                __name__,
                os.path.sep.join([expected_results_path, 'expected_webapi_results', results_filename]))
            parse_webapi_results(file_path, webapi_results, '%s-' % apptype, logger, report)
    logger.debug('Done.')
    if errors:
        logger.error('Test webapi with errors')
def test_permissions(logger, report, args, addr):
    """Exercise default permissions per app type, then each permission individually.

    Results are either written out as new reference files
    (``args.generate_reference``) or diffed against the packaged expectations.
    """
    errors = False
    #logger.test_start('permissions')
    logger.debug('Running permissions tests')
    permissions = get_permissions()
    # Phase 1: default permissions for each app type.
    for apptype in ['web', 'privileged', 'certified']:
        logger.debug('Testing default permissions: %s' % apptype)
        results = {}
        expected_webapi_results = None
        appname = 'Default Permissions Test App'
        fxos_appgen.uninstall_app(appname)
        installed_appname = appname.lower().replace(" ", "-")
        fxos_appgen.generate_app(appname, install=True, app_type=apptype,
                                 all_perm=True)
        for permission in permissions:
            result = get_permission(permission, installed_appname)
            results[permission] = result
        results_folder = 'permissions_ref/'
        results_filename = '%s.json' % apptype
        if args.generate_reference:
            dirname = os.path.dirname(results_folder + results_filename)
            if not os.path.exists(dirname) and dirname != "":
                os.makedirs(dirname)
            with open(results_folder + results_filename, 'w') as f:
                f.write(json.dumps(results, sort_keys=True, indent=2))
        else:
            file_path = pkg_resources.resource_filename(
                __name__,
                os.path.sep.join([expected_results_path, 'expected_permissions_results',
                                  results_filename]))
            parse_permissions_results(file_path, results, '%s-' % apptype,
                                      logger, report)
        fxos_appgen.uninstall_app(appname)
    # Phase 2: individual permissions.
    logger.debug('Testing individual permissions')
    results = {}
    # embed-apps needs its own helper app installed first.
    embed_appname = 'Embed Apps Test App'
    sampleapppath = os.path.join(static_path, 'sample_apps')
    apppath = os.path.join(sampleapppath, 'embed-apps-test-app')
    install_app(logger, embed_appname, args.version, 'certified', apppath, True,
                {'results_uri.js': 'RESULTS_URI="http://%s:%s/webapi_results_embed_apps";' % addr},
                False)
    appname = 'Permissions Test App'
    installed_appname = appname.lower().replace(" ", "-")
    sampleapppath = os.path.join(static_path, 'sample_apps')
    apppath = os.path.join(sampleapppath, 'permissions-test-app')
    install_app(logger, appname, args.version, 'web', apppath, False,
                {'results_uri.js':
                 'RESULTS_URI="http://%s:%s/webapi_results";LOG_URI="http://%s:%s/webapi_log";' % (addr * 2)})
    # None first establishes the baseline; each real permission is diffed
    # against that baseline.
    for permission in [None] + permissions:
        global webapi_results
        global webapi_results_embed_app
        webapi_results = None
        webapi_results_embed_app = None
        # if we try to launch after killing too quickly, the app seems
        # to not fully launch
        time.sleep(5)
        if permission is not None:
            logger.debug('testing permission: %s' % permission)
            set_permission(permission, u'allow', installed_appname)
        else:
            logger.debug('testing permission: None')
        fxos_appgen.launch_app(appname)
        try:
            wait.Wait(timeout=60).until(lambda: webapi_results is not None)
            # embed-apps results are posted to a separate URL
            if webapi_results_embed_app:
                webapi_results['embed-apps'] = webapi_results_embed_app['embed-apps']
            else:
                webapi_results['embed-apps'] = False
            if permission is None:
                expected_webapi_results = webapi_results
            else:
                results[permission] = diff_results(expected_webapi_results, webapi_results)
        except wait.TimeoutException:
            logger.error('Timed out waiting for results')
            errors = True
            if permission is not None:
                results[permission] = 'timed out'
            else:
                # If we timeout on our baseline results there is
                # no point in proceeding.
                logger.error('Could not get baseline results for permissions. Skipping tests.')
                break
        kill('app://' + installed_appname)
        if permission is not None:
            set_permission(permission, u'deny', installed_appname)
    logger.debug('uninstalling: %s' % appname)
    fxos_appgen.uninstall_app(appname)
    # we test open-remote-window separately as opening a remote
    # window might stop the test app
    results['open-remote-window'] = test_open_remote_window(logger,
                                                            args.version, addr)
    results_folder = 'permissions_ref/'
    results_filename = 'permissions.json'
    if args.generate_reference:
        dirname = os.path.dirname(results_folder + results_filename)
        if not os.path.exists(dirname) and dirname != "":
            os.makedirs(dirname)
        with open(results_folder + results_filename, 'w') as f:
            f.write(json.dumps(results, sort_keys=True, indent=2))
    else:
        file_path = pkg_resources.resource_filename(
            __name__,
            os.path.sep.join([expected_results_path, 'expected_permissions_results',
                              results_filename]))
        parse_permissions_results(file_path, results, 'individual-',
                                  logger, report)
    logger.debug('Done.')
    if errors:
        logger.error('Test individual with errors')
    # clean up embed-apps test app
    logger.debug('uninstalling: %s' % embed_appname)
    fxos_appgen.uninstall_app(embed_appname)
def test_crash_reporter(logger, report):
    """Pass if application.ini reports the crash reporter as enabled ('1')."""
    testid = test_id('cert', 'crash-reporter', 'crash-report-toggle')
    logger.test_start(testid)
    logger.debug('start checking test reporter')
    crash_report_toggle = (report.get('application_ini', {})
                                 .get('Crash Reporter', {})
                                 .get('enabled'))
    if crash_report_toggle != '1':
        log_fail(logger, testid, 'crash report toggle = %s' % crash_report_toggle)
    else:
        log_pass(logger, testid)
def test_user_agent(logger, report):
    """Validate the device's user-agent string against the FxOS UA format.

    See the Gecko UA string reference and the B2G device-model inclusion
    requirements for the expected shape.
    """
    testid = test_id('cert', 'user-agent', 'user-agent-string')
    logger.test_start(testid)
    logger.debug('Running user agent tests')
    user_agent = run_marionette_script("return navigator.userAgent;")
    logger.debug('UserAgent: %s' % user_agent)
    ua_rexp = re.compile("Mozilla/(\d+\.\d+) \((Mobile|Tablet)(;.*)?; rv:(\d+\.\d+)\) Gecko/(\d+\.\d+) Firefox/(\d+\.\d+)")
    m = ua_rexp.match(user_agent)
    valid = True
    if m is None or len(m.groups()) != 6:
        valid = False
        message = 'Did not match regular expression'
    elif m.groups()[2] is not None:
        # An optional device string was given: strip the leading ';' and
        # surrounding whitespace, then reject forbidden characters
        # (slash, semicolon, round brackets, whitespace).
        device = m.groups()[2][1:].strip()
        device_rexp = re.compile('[/;\(\)\s]')
        m = device_rexp.search(device)
        if m:
            valid = False
            message = 'Device identifier: "%s" contains forbidden characters' % device
    if valid:
        log_pass(logger, testid)
    else:
        log_ok(logger, testid, 'current user-agent string: %s: %s' % (user_agent, message))
def test_open_remote_window(logger, version, addr):
    """Test the open-remote-window permission under both 'deny' and 'allow'.

    Returns a dict keyed 'open-remote-window-<value>' -> result (or 'timed out').
    """
    global webapi_results
    results = {}
    for value in ['deny', 'allow']:
        result = False
        webapi_results = None
        appname = 'Open Remote Window Test App'
        installed_appname = appname.lower().replace(" ", "-")
        sampleapppath = os.path.join(static_path, 'sample_apps')
        apppath = os.path.join(sampleapppath, 'open-remote-window-test-app')
        install_app(logger, appname, version, 'web', apppath, False,
                    {'results_uri.js':
                     'RESULTS_URI="http://%s:%s/webapi_results";' % addr})
        set_permission('open-remote-window', value, installed_appname)
        fxos_appgen.launch_app(appname)
        try:
            wait.Wait(timeout=30).until(lambda: webapi_results is not None)
        except wait.TimeoutException:
            results[value] = 'timed out'
        if webapi_results is not None:
            result = webapi_results['open-remote-window']
        # launching here will force the remote window (if any) to be hidden
        # but will not retrigger the test.
        fxos_appgen.launch_app(appname)
        logger.debug('uninstalling: %s' % appname)
        fxos_appgen.uninstall_app(appname)
        results['open-remote-window-' + value] = result
    return results
def diff_results(a, b):
    """Return keys present in *b* but not in *a*, recursing into shared dicts.

    A shared key whose value is a dict in *a* but not in *b* is reported as
    ``"<key> (expected object)"``; nested additions come back as dotted paths.
    """
    keys_a, keys_b = set(a), set(b)
    differences = list(keys_b - keys_a)
    for key in keys_a & keys_b:
        if type(a[key]) is not dict:
            continue
        if type(b[key]) is dict:
            differences.extend('%s.%s' % (key, sub)
                               for sub in diff_results(a[key], b[key]))
        else:
            differences.append(key + ' (expected object)')
    return differences
def log_results(diff, logger, report, test_group, name):
    """Log one FAIL per entry in *diff* (or a single PASS if *diff* is empty).

    Non-empty diffs are also stored in *report* under the underscored *name*.
    """
    testid = test_id('cert', test_group, name)
    if not diff:
        logger.test_start(testid)
        log_pass(logger, testid)
        return
    report[name.replace('-', '_')] = diff
    for result in diff:
        logger.test_start(testid)
        try:
            # Structured entries carry a 'name'; plain strings raise TypeError.
            log_fail(logger, testid, 'Unexpected result for: %s' % result['name'])
        except TypeError:
            log_fail(logger, testid, 'Unexpected result for: %s' % result)
def parse_webapi_results(expected_results_path, results, prefix, logger, report):
    """Diff collected WebAPI results against the stored expectations and log them."""
    with open(expected_results_path) as f:
        expected_results = json.load(f)
    # Window-function differences in both directions.
    expected_window = expected_results["windowList"]
    window = results["windowList"]
    missing_window = diff_results(window, expected_window)
    log_results(missing_window, logger, report, 'webapi', prefix + 'missing-window-functions')
    added_window = diff_results(expected_window, window)
    log_results(added_window, logger, report, 'webapi', prefix + 'added-window-functions')
    # WebIDL differences: index the expectations by test name, then consume
    # them as actual results are matched.
    expected_webidl = {}
    for result in expected_results['webIDLResults']:
        expected_webidl[result['name']] = result
    unexpected_webidl_results = []
    added_webidl_results = []
    for result in results['webIDLResults']:
        try:
            if expected_webidl[result['name']]['result'] != result['result']:
                unexpected_webidl_results.append(result)
            del expected_webidl[result['name']]
        except KeyError:
            added_webidl_results.append(result)
    # since we delete found results above, anything here is missing
    missing_webidl_results = list(expected_webidl.values())
    log_results(unexpected_webidl_results, logger, report, 'webapi', prefix + 'unexpected-webidl-results')
    log_results(added_webidl_results, logger, report, 'webapi', prefix + 'added-webidl-results')
    log_results(missing_webidl_results, logger, report, 'webapi', prefix + 'missing-webidl-results')
def parse_permissions_results(expected_results_path, results, prefix, logger, report):
    """Diff permission results against expectations; return True if they match."""
    with open(expected_results_path) as f:
        expected_results = json.load(f)
    unexpected_results = diff_results(expected_results, results)
    log_results(unexpected_results, logger, report, 'permissions',
                prefix + 'unexpected-permissions-results')
    # Empty diff means the device matched the reference exactly.
    return not unexpected_results
def run_marionette_script(script, chrome=False, async=False, host='localhost', port=2828):
"""Create a Marionette instance and run the provided script"""
m = DeviceHelper.getMarionette(host, port)
m.start_session()
if chrome:
m.set_context(marionette.Marionette.CONTEXT_CHROME)
if not async:
result = m.execute_script(script)
else:
result = m.execute_async_script(script)
m.delete_session()
return result
def kill(name):
    """Kill the app whose origin is *name* (e.g. ``app://foo``) via the window manager."""
    script = """
    let manager = window.wrappedJSObject.appWindowManager || new window.wrappedJSObject.AppWindowManager();
    manager.kill('%s');
    """
    filled = script % name
    return run_marionette_script(filled)
def get_permission(permission, app):
    """Return the current value of *permission* for the app named *app*."""
    # The object created to wrap PermissionSettingsModule is to work around
    # an intermittent bug where it will sometimes be undefined.
    script = """
    const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
    var a = {b: Cu.import("resource://gre/modules/PermissionSettings.jsm")};
    return a.b.PermissionSettingsModule.getPermission('%s', '%s/manifest.webapp', '%s', '', false);
    """
    app_url = 'app://' + app
    filled = script % (permission, app_url, app_url)
    return run_marionette_script(filled, True)
def get_permissions():
    """Return the list of permission names declared in PermissionsTable.jsm."""
    script = """
    const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
    Cu.import("resource://gre/modules/PermissionsTable.jsm");
    result = []
    for (permission in PermissionsTable) {
      result.push(permission);
    }
    return result;
    """
    # Must run in chrome context to reach the Gecko module.
    return run_marionette_script(script, True)
def make_html_report(path, report):
    """Render *report* (nested dicts/lists/scalars) as a simple HTML page at *path*."""
    def tabelize(value):
        # Dict-likes become nested tables; lists become one-column tables;
        # everything else is rendered preformatted.
        try:
            rows = []
            for key in value.keys():
                rows.append(html.tr(html.td(html.pre(key)),
                                    html.td(tabelize(value[key]))))
            return html.table(rows)
        except AttributeError:
            if type(value) == type([]):
                return html.table(map(tabelize, value))
            return html.pre(value)

    ordered_keys = sorted(report.keys())
    # Table of contents linking to each section anchor.
    links = [html.li(html.a(key, href="#" + key)) for key in ordered_keys]
    body_els = [html.ul(links)]
    for key in ordered_keys:
        body_els.append(html.a(html.h1(key), id=key))
        body_els.append(tabelize(report[key]))
    doc = html.html(html.head(html.style('table, td {border: 1px solid;}')),
                    html.body(body_els))
    with open(path, 'w') as f:
        f.write(str(doc))
def get_application_ini(dm):
    """Pull /system/b2g/application.ini off the device and parse it.

    Returns a dict mapping section name -> {option: value}.
    NOTE(review): leaves the pulled '_app.ini' file in the working directory.
    """
    ini_file = '_app.ini'
    dm.pull('/system/b2g/application.ini', ini_file)
    config = ConfigParser.RawConfigParser()
    config.read(ini_file)
    return dict((section, dict(config.items(section)))
                for section in config.sections())
def get_buildprop(dm):
    """Parse ``/system/build.prop`` on the device into a {prop: value} dict.

    Lines without '=' are ignored; only the first '=' splits key from value.
    """
    props = {}
    raw = dm.shell_output("cat /system/build.prop")
    for line in raw.splitlines():
        key, sep, value = line.partition('=')
        if sep:
            props[key] = value
    return props
def get_processes_running(dm):
    """Return [{'name': ..., 'user': ...}] for each process reported by the device."""
    return [{'name': proc[1], 'user': proc[2]} for proc in dm.get_process_list()]
def get_kernel_version(dm):
    """Return the contents of ``/proc/version`` from the device."""
    return dm.shell_output("cat /proc/version")
def _run(args, logger):
# This function is to simply make the cli() function easier to handle
test_groups = [
'omni-analyzer',
'permissions',
'webapi',
'user-agent',
'crash-reporter'
]
if args.list_test_groups:
for t in test_groups:
print t
return 0
skip_tests = []
test_groups = set(args.include if args.include else test_groups)
if args.device_profile:
skiplist = []
with open(args.device_profile, 'r') as device_profile_file:
skiplist = json.load(device_profile_file)['result']['cert']
skip_tests = [x for x in test_groups if x in skiplist]
test_groups = [x for x in test_groups if x not in skiplist]
report = {'buildprops': {}}
logging.basicConfig()
# Step 1: Get device information
try:
dm = DeviceHelper.getDevice()
except mozdevice.DMError as e:
print "Error connecting to device via adb (error: %s). Please be " \
"sure device is connected and 'remote debugging' is enabled." % \
e.msg
raise
# wait here to make sure marionette is running
logger.debug('Attempting to set up port forwarding for marionette')
retries = 0
while retries < 5:
try:
m = DeviceHelper.getMarionette()
m.start_session()
m.delete_session()
break
except (IOError, TypeError):
time.sleep(5)
retries += 1
else:
raise Exception("Couldn't connect to marionette after %d attempts. " \
"Is the marionette extension installed?" % retries)
# if args.version not in supported_versions:
# print "%s is not a valid version. Please enter one of %s" % \
# (args.version, supported_versions)
# raise Exception("%s is not a valid version" % args.version)
result_file_path = args.result_file
if not result_file_path:
result_file_path = "results.json"
# Make sure we can write to the results file before running tests.
# This will also ensure this file exists in case we error out later on.
try:
result_file = open(result_file_path, "w")
result_file.close()
except IOError as e:
print 'Could not open result file for writing: %s errno: %d' % (result_file_path, e.errno)
raise
report['buildprops'] = get_buildprop(dm)
report['processes_running'] = get_processes_running(dm)
report['kernel_version'] = get_kernel_version(dm)
report['application_ini'] = get_application_ini(dm)
logger.suite_start(tests=[])
# record skipped test to report
for test in skip_tests:
logger.test_start(test)
logger.test_end(test, 'SKIP', message='Skipped by device profile')
# run the omni.ja analyzer
if 'omni-analyzer' in test_groups:
test_omni_analyzer(logger, report, args)
# start webserver
if 'webapi' in test_groups or 'permissions' in test_groups:
httpd = wptserve.server.WebTestHttpd(
host=moznetwork.get_ip(), port=8000, routes=routes, doc_root=static_path)
httpd.start()
addr = (httpd.host, httpd.port)
# run webapi and webidl tests
if 'webapi' in test_groups:
test_webapi(logger, report, args, addr)
if 'permissions' in test_groups:
test_permissions(logger, report, args, addr)
if 'user-agent' in test_groups:
test_user_agent(logger, report)
if 'crash-reporter' in test_groups:
test_crash_reporter(logger, report)
logger.suite_end()
with open(result_file_path, "w") as result_file:
result_file.write(json.dumps(report, indent=2))
logger.debug('Results have been stored in: %s' % result_file_path)
if args.html_result_file is not None:
make_html_report(args.html_result_file, report)
logger.debug('HTML Results have been stored in: %s' % args.html_result_file)
def cli():
    """Command-line entry point: parse arguments, set up logging, run the suite."""
    global logger
    global webapi_results
    global webapi_results_embed_app
    # NOTE(review): Python 2 default-encoding hack; has no Python 3 equivalent.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    parser = argparse.ArgumentParser()
    parser.add_argument("--version",
                        help="version of FxOS under test",
                        default="2.2",
                        action="store")
    parser.add_argument("--debug",
                        help="enable debug logging",
                        action="store_true")
    parser.add_argument("--list-test-groups",
                        help="print test groups available to run",
                        action="store_true")
    parser.add_argument("--include",
                        metavar="TEST-GROUP",
                        help="include this test group",
                        action="append")
    parser.add_argument("--result-file",
                        help="absolute file path to store the resulting json." \
                             "Defaults to results.json on your current path",
                        action="store")
    parser.add_argument("--html-result-file",
                        help="absolute file path to store the resulting html.",
                        action="store")
    parser.add_argument("--generate-reference",
                        help="Generate expected result files",
                        action="store_true")
    parser.add_argument('-p', "--device-profile", action="store", type=os.path.abspath,
                        help="specify the device profile file path which could include skipped test case information")
    commandline.add_logging_group(parser)
    args = parser.parse_args()
    if not args.debug:
        logging.disable(logging.ERROR)
    logger = commandline.setup_logging("certsuite", vars(args), {})
    try:
        _run(args, logger)
    except:
        # Deliberately broad: record any failure, then re-raise for the caller.
        logger.critical(traceback.format_exc())
        raise
if __name__ == "__main__":
cli()
|
sixty-north/added-value
|
source/added_value/repr_role.py
|
make_repr_node
|
python
|
def make_repr_node(rawtext, app, prefixed_name, obj, parent, modname, options):
    """Render *obj* to a docutils Text node using ``repr``.

    Only *obj* and *rawtext* are consumed; the remaining parameters match the
    role-function signature expected by the caller.
    """
    return nodes.Text(repr(obj), rawsource=rawtext)
|
Render a Python object to text using the repr() function.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param prefixed_name: The dotted Python name for obj.
:param obj: The Python object to be rendered to text.
:param parent: The parent Python object of obj.
:param module: The name of the module containing obj.
:param options: Options dictionary passed to role func.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/repr_role.py#L6-L19
| null |
from docutils import nodes
from added_value.pyobj_role import make_pyobj_role
repr_role = make_pyobj_role(make_repr_node)
|
sixty-north/added-value
|
docs/code/steel.py
|
Table.cells
|
python
|
def cells(self):
    """A dict of dicts, ``{row_key: {column_key: cell}}``, covering every cell."""
    return {
        row_key: dict(zip(self._column_keys, row_cells))
        for row_key, row_cells in self._rows_mapping.items()
    }
|
A dictionary of dictionaries containing all cells.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/docs/code/steel.py#L53-L57
| null |
class Table:
    """A 2-D table addressed by row key and column key.

    Rows are supplied as a mapping from row key to a sequence of cell values;
    every row must have exactly one value per column key.
    """

    def __init__(self, column_keys, rows_mapping):
        self._column_keys = list(column_keys)
        self._column_indexes = {key: index for index, key in enumerate(self._column_keys)}
        # Fixed: measure the materialized list, so an iterator argument for
        # column_keys (already consumed by list() above) does not break len().
        num_columns = len(self._column_keys)
        self._row_keys = list(rows_mapping.keys())
        if not all(len(row) == num_columns for row in rows_mapping.values()):
            raise ValueError("Not all row mapping values have a length of {}".format(num_columns))
        self._rows_mapping = rows_mapping

    @property
    def column_keys(self):
        """The column keys, in column order."""
        return self._column_keys

    @property
    def row_keys(self):
        """The row keys, in row order."""
        return self._row_keys

    def has_row_key(self, row_key):
        """Return True if *row_key* names a row of this table."""
        # Fixed: O(1) mapping membership instead of scanning .keys().
        return row_key in self._rows_mapping

    def has_column_key(self, column_key):
        """Return True if *column_key* names a column of this table."""
        # Fixed: O(1) lookup via the index mapping instead of a list scan.
        return column_key in self._column_indexes

    def row(self, row_key):
        """Return the sequence of cells for *row_key*.

        Raises:
            ValueError: if *row_key* is not a row of this table.
        """
        try:
            return self._rows_mapping[row_key]
        except KeyError:
            raise ValueError("No such row key {!r} for {!r}".format(row_key, self))

    def column(self, column_key):
        """Return the cells of *column_key*, one per row, in row order.

        Raises:
            ValueError: if *column_key* is not a column of this table.
        """
        try:
            column_index = self._column_indexes[column_key]
        except KeyError:
            raise ValueError("No such column key {!r} for {!r}".format(column_key, self))
        return [row[column_index] for row in self._rows_mapping.values()]

    def cell(self, row_key, column_key):
        """Return the single cell at (*row_key*, *column_key*).

        Raises:
            ValueError: if either key is absent.
        """
        try:
            column_index = self._column_indexes[column_key]
        except KeyError:
            raise ValueError("No such column key {!r} for {!r}".format(column_key, self))
        try:
            return self._rows_mapping[row_key][column_index]
        except KeyError:
            raise ValueError("No such row key {!r} for {!r}".format(row_key, self))

    # TODO: __getitem__ indexes and slices

    @property
    def transpose(self):
        """A new Table with rows and columns swapped.

        NOTE(review): relies on ``self._rows_mapping`` preserving insertion
        order to align with ``row_keys`` (guaranteed for dicts on
        Python 3.7+) — confirm for other mapping types.
        """
        return Table(
            self._row_keys,
            {column_key: self.column(column_key) for column_key in self.column_keys}
        )
|
sixty-north/added-value
|
source/added_value/tabulator.py
|
tabulate_body
|
python
|
def tabulate_body(
    obj,
    level_keys,
    v_level_indexes,
    h_level_indexes,
    v_level_sort_keys=None,
    h_level_sort_keys=None,
):
    """Project nested data *obj* into a 2-D grid of cells.

    Args:
        obj: The nested mapping/sequence structure to tabulate.
        level_keys: One sequence of candidate keys per nesting level.
        v_level_indexes: Level indexes assigned to the vertical axis.
        h_level_indexes: Level indexes assigned to the horizontal axis.
        v_level_sort_keys: Optional per-level sort keys for the vertical axis.
        h_level_sort_keys: Optional per-level sort keys for the horizontal axis.

    Returns:
        (table, v_key_tuples, h_key_tuples) where cells that cannot be
        resolved in *obj* hold the MISSING sentinel.
    """
    v_key_sorted = make_sorter(v_level_sort_keys, v_level_indexes)
    h_key_sorted = make_sorter(h_level_sort_keys, h_level_indexes)
    h_level_keys = [level_keys[level] for level in h_level_indexes]
    v_level_keys = [level_keys[level] for level in v_level_indexes]
    h_key_tuples = h_key_sorted(product(*h_level_keys))
    v_key_tuples = v_key_sorted(product(*v_level_keys))
    table = [[MISSING] * len(h_key_tuples) for _ in v_key_tuples]
    for h_index, h_keys in enumerate(h_key_tuples):
        for v_index, v_keys in enumerate(v_key_tuples):
            # Interleave the horizontal and vertical keys back into a full
            # key path through the nesting levels.
            key_path = [None] * len(level_keys)
            merge_into_by_index(key_path, h_level_indexes, h_keys)
            merge_into_by_index(key_path, v_level_indexes, v_keys)
            # NOTE(review): this loop duplicates the vertical merge above;
            # kept for behavioral fidelity.
            for v_level, v_key in zip(v_level_indexes, v_keys):
                key_path[v_level] = v_key
            item = obj
            for key in key_path:
                try:
                    item = item[key]
                except (IndexError, KeyError):
                    break
            else:  # no-break: the whole path resolved
                table[v_index][h_index] = item
    return table, v_key_tuples, h_key_tuples
|
Args:
v_level_indexes: A sequence of level indexes.
h_level_indexes: A sequence of level indexes.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/tabulator.py#L61-L105
|
[
"def make_sorter(level_sort_keys, level_indexes):\n if level_sort_keys is not None:\n if len(level_sort_keys) != len(level_indexes):\n raise ValueError(\n \"level_sort_keys with length {} does not correspond to level_indexes with length {}\".format(\n len(level_sort_keys), len(level_indexes)\n )\n )\n\n def key_sorted(level_keys):\n return tuplesorted(level_keys, *level_sort_keys)\n\n else:\n key_sorted = list\n return key_sorted\n",
"def merge_into_by_index(sequence, indexes, values):\n for index, value in zip(indexes, values):\n sequence[index] = value\n",
"def key_sorted(level_keys):\n return tuplesorted(level_keys, *level_sort_keys)\n"
] |
from collections import Mapping, deque
from itertools import product, chain, repeat
from added_value.items_table_directive import NonStringIterable
from added_value.multisort import tuplesorted
from added_value.sorted_frozen_set import SortedFrozenSet
from added_value.toposet import TopoSet
from added_value.util import unchain, empty_iterable
depth_marker = object()
ROOT = object()
LEAF = object()
_UNSET = object()
def breadth_first(obj, leaves=False):
    """Collect the keys of *obj* level by level (breadth-first).

    Mappings contribute their keys; non-string iterables contribute their
    integer indexes; scalars contribute themselves only when *leaves* is true.
    Returns a list containing one list of keys per depth level (the final,
    always-empty level is dropped).
    """
    queue = deque()
    queue.append(obj)
    queue.append(None)  # sentinel marking the end of a level
    level_keys = []
    current_level_keys = TopoSet()
    while len(queue) > 0:
        node = queue.popleft()
        if node is None:
            # Level boundary reached: bank the keys and start the next level.
            level_keys.append(current_level_keys)
            current_level_keys = TopoSet()
            queue.append(None)
            if queue[0] is None:
                # Two consecutive sentinels: no more nodes anywhere.
                break
            continue
        if isinstance(node, Mapping):
            current_level_keys.update(node.keys())
            for value in node.values():
                queue.append(value)
        elif isinstance(node, NonStringIterable):
            current_level_keys.update(range(len(node)))
            for value in node:
                queue.append(value)
        elif leaves:
            current_level_keys.add(node)
    return [list(s) for s in level_keys[:-1]]
class Missing(object):
    """Sentinel for absent table cells: prints as '' but reprs as 'Missing'."""

    def __str__(self):
        return ""

    def __repr__(self):
        return type(self).__name__


# Shared singleton used throughout the module to mark absent cells.
MISSING = Missing()
def make_sorter(level_sort_keys, level_indexes):
    """Return a callable that orders sequences of level-key tuples.

    With *level_sort_keys* None the sorter is simply ``list`` (input order
    preserved); otherwise each sort key corresponds positionally to a level
    index and tuples are ordered with ``tuplesorted``.

    Raises:
        ValueError: if the two sequences have different lengths.
    """
    if level_sort_keys is None:
        return list
    if len(level_sort_keys) != len(level_indexes):
        raise ValueError(
            "level_sort_keys with length {} does not correspond to level_indexes with length {}".format(
                len(level_sort_keys), len(level_indexes)
            )
        )

    def key_sorted(level_keys):
        return tuplesorted(level_keys, *level_sort_keys)

    return key_sorted
def strip_missing_rows(table, row_keys):
    """Drop rows that are entirely MISSING, along with their matching row keys.

    Returns (rows, keys) where each kept row is a fresh list copy.
    """
    kept_rows = []
    kept_keys = []
    for row, key_tuple in zip(table, row_keys):
        if all(cell is MISSING for cell in row):
            continue
        kept_rows.append(list(row))
        kept_keys.append(key_tuple)
    return kept_rows, kept_keys
def strip_missing_columns(table, h_key_tuples):
    """Drop columns that are entirely MISSING, along with their column keys.

    Implemented by transposing, stripping rows, and transposing back.
    """
    columns = transpose(table)
    kept_columns, kept_keys = strip_missing_rows(columns, h_key_tuples)
    return transpose(kept_columns), kept_keys
def merge_into_by_index(sequence, indexes, values):
    """Write each value into *sequence* at its positionally-paired index (in place)."""
    for position, value in zip(indexes, values):
        sequence[position] = value
def is_rectangular(seq_of_seqs):
    """True if every inner sequence has the same length (vacuously true when empty)."""
    distinct_lengths = {len(inner) for inner in seq_of_seqs}
    return len(distinct_lengths) <= 1
def size_h(rows_of_columns):
    """Number of columns, taken from the first row (0 for an empty table)."""
    if not rows_of_columns:
        return 0
    return len(rows_of_columns[0])
def size_v(rows_of_columns):
    """Number of non-empty rows."""
    return len([row for row in rows_of_columns if len(row) != 0])
def size(rows_of_columns):
    """Return ``(non-empty row count, column count of the first row)``."""
    return size_v(rows_of_columns), size_h(rows_of_columns)
def transpose(rows_of_columns):
    """Swap rows and columns; truncates to the shortest row (zip semantics)."""
    return [list(column) for column in zip(*rows_of_columns)]
def assemble_table(
    table_body, v_key_tuples, h_key_tuples, v_level_titles=None, h_level_titles=None, empty=""
):
    """Combine a table body with its row/column key tuples and optional titles.

    Args:
        table_body: A rectangular list of rows of cell values.
        v_key_tuples: One key tuple per body row; becomes the stub columns.
        h_key_tuples: One key tuple per body column; becomes the header rows
            (after transposition).
        v_level_titles: Optional titles, one per stub column.
        h_level_titles: Optional titles, one per header row.
        empty: Fill value used in the vertical-titles row.

    Returns:
        A rectangular list of rows: the header rows, an optional row of
        vertical titles, then each body row prefixed with its key tuple.

    Raises:
        ValueError: If any input is not rectangular, or if the dimensions of
            the inputs are mutually incompatible.
    """
    # --- Shape validation -------------------------------------------------
    if not is_rectangular(table_body):
        raise ValueError("table_body {} is not rectangular".format(table_body))
    if not is_rectangular(v_key_tuples):
        raise ValueError("v_key_tuples {} is not rectangular".format(v_key_tuples))
    if not is_rectangular(h_key_tuples):
        raise ValueError("h_key_tuples {} is not rectangular".format(h_key_tuples))
    if size_v(v_key_tuples) > 0 and (size_v(table_body) != size_v(v_key_tuples)):
        raise ValueError("table body and v_key_tuples have incompatible dimensions")
    # Transposed so that each inner sequence is one header ROW of the table.
    h_key_tuples_transposed = transpose(h_key_tuples)
    if size_h(h_key_tuples_transposed) > 0 and (
        size_h(table_body) != size_h(h_key_tuples_transposed)
    ):
        raise ValueError("table body and h_key_tuples have incompatible dimensions")
    if (v_level_titles is not None) and (len(v_level_titles) != size_h(v_key_tuples)):
        raise ValueError("v_level_titles and v_key_tuples have incompatible dimensions")
    if (h_level_titles is not None) and (len(h_level_titles) != size_v(h_key_tuples_transposed)):
        raise ValueError("h_level_titles and h_key_tuples have incompatible dimensions")
    # --- Header rows ------------------------------------------------------
    # `unchain` presumably boxes each title in its own iterable so it can be
    # chained in front of its header row; with no titles an empty iterable per
    # row contributes nothing to the chain.  TODO confirm against unchain().
    boxed_h_level_titles = (
        unchain(h_level_titles)
        if (h_level_titles is not None)
        else repeat(empty_iterable(), size_v(h_key_tuples_transposed))
    )
    num_h_level_title_columns = int(bool(h_level_titles))
    # The stub must be wide enough for the row keys and any title column.
    num_stub_columns = max(size_h(v_key_tuples), num_h_level_title_columns)
    table = []
    num_empty_columns = num_stub_columns - num_h_level_title_columns
    for boxed_h_level_title, h_key_row in zip(boxed_h_level_titles, h_key_tuples_transposed):
        row = list(chain(repeat(" ", num_empty_columns), boxed_h_level_title, h_key_row))
        table.append(row)
    # --- Optional vertical-titles row -------------------------------------
    if v_level_titles is not None:
        v_level_titles_row = v_level_titles + [empty] * size_h(table_body)
        table.append(v_level_titles_row)
    # --- Body rows, each prefixed with its stub keys ----------------------
    for v_key_row, table_row in zip(v_key_tuples, table_body):
        row = list(v_key_row)
        row.extend(table_row)
        table.append(row)
    assert is_rectangular(table)
    return table
def tabulate(
    obj,
    v_level_indexes=None,
    h_level_indexes=None,
    v_level_visibility=None,
    h_level_visibility=None,
    v_level_sort_keys=None,
    h_level_sort_keys=None,
    v_level_titles=None,
    h_level_titles=None,
    empty="",
):
    """Render a nested data structure into a two-dimensional table.

    Args:
        obj: A non-string sequence or mapping, containing further sequences
            and mappings nested to arbitrarily many levels; the leaf items
            (neither sequences nor mappings, strings excluded) become cells.
        v_level_indexes: Optional iterable of zero-based level indexes whose
            keys are laid out down the vertical axis.  Together with
            h_level_indexes these must cover every level exactly once, with
            no index appearing on both axes.  If None, the levels absent
            from h_level_indexes are used; if both are None, the levels
            alternate between the two axes.
        h_level_indexes: Optional iterable of zero-based level indexes whose
            keys are laid out along the horizontal axis, complementary to
            v_level_indexes and subject to the same rules.
        v_level_visibility: Optional iterable of booleans, one per entry of
            v_level_indexes, selecting which index levels appear in the
            table stub columns.
        h_level_visibility: Optional iterable of booleans, one per entry of
            h_level_indexes, selecting which index levels appear in the
            table header rows.
        v_level_sort_keys: Optional iterable of sort keys, one per entry of
            v_level_indexes; if None, keys keep their discovery order.
        h_level_sort_keys: Optional iterable of sort keys, one per entry of
            h_level_indexes; if None, keys keep their discovery order.
        v_level_titles: Optional iterable of title strings, one per entry of
            v_level_indexes, displayed against the row keys of each level.
        h_level_titles: Optional iterable of title strings, one per entry of
            h_level_indexes, displayed against the column keys of each level.
        empty: Value used to fill empty cells.

    Returns:
        A list of lists representing the rows of cells.

    Example:
        tabulate(dict_of_dicts, [0, 1], [])
    """
    # Discover the keys present at each nesting level, then reconcile the
    # requested axis assignments against the number of levels found.
    level_keys = breadth_first(obj)
    v_level_indexes, h_level_indexes = validate_level_indexes(
        len(level_keys), v_level_indexes, h_level_indexes
    )
    # By default every index level is shown on its axis.
    if v_level_visibility is None:
        v_level_visibility = [True] * len(v_level_indexes)
    if h_level_visibility is None:
        h_level_visibility = [True] * len(h_level_indexes)
    # Build the raw grid of cell values plus the key tuples labelling each
    # row and column.
    body, row_keys, column_keys = tabulate_body(
        obj, level_keys, v_level_indexes, h_level_indexes, v_level_sort_keys, h_level_sort_keys
    )
    # Remove rows/columns with no values at all, then suppress any index
    # levels the caller asked to hide.
    body, row_keys = strip_missing_rows(body, row_keys)
    body, column_keys = strip_missing_columns(body, column_keys)
    row_keys = strip_hidden(row_keys, v_level_visibility)
    column_keys = strip_hidden(column_keys, h_level_visibility)
    # Stitch headers, stub and body into one rectangular table.
    return assemble_table(
        body, row_keys, column_keys, v_level_titles, h_level_titles, empty=empty
    )
def validate_level_indexes(num_levels, v_level_indexes, h_level_indexes):
    """Ensure that v_level_indexes and h_level_indexes are consistent.

    Args:
        num_levels: The number of levels of keys in the data structure being tabulated.
        v_level_indexes: A sequence of level indexes between zero and num_levels for
            the vertical axis, or None.
        h_level_indexes: A sequence of level indexes between zero and num_levels for
            the horizontal axis, or None.

    Returns:
        A 2-tuple containing v_level_indexes and h_level_indexes sequences.

    Raises:
        ValueError: If v_level_indexes contains duplicate values.
        ValueError: If h_level_indexes contains duplicate values.
        ValueError: If v_level_indexes contains out of range values.
        ValueError: If h_level_indexes contains out of range values.
        ValueError: If taken together v_level_indexes and h_level_indexes
            do not include all levels from zero up to, but not including,
            num_levels.
        ValueError: If v_level_indexes and h_level_indexes have items in
            common.
    """
    if num_levels < 1:
        raise ValueError("num_levels {} is less than one".format(num_levels))

    all_levels = SortedFrozenSet(range(num_levels))

    # Fill in defaulted axes BEFORE building the level sets below.  The
    # previous code built the sets first, so when exactly one axis was None
    # its set was stale/empty and the duplicate and coverage checks raised
    # spurious ValueErrors.
    if (h_level_indexes is None) and (v_level_indexes is None):
        v_level_indexes = range(0, num_levels, 2)
        h_level_indexes = range(1, num_levels, 2)
    elif h_level_indexes is None:
        h_level_indexes = all_levels - SortedFrozenSet(v_level_indexes)
    elif v_level_indexes is None:
        v_level_indexes = all_levels - SortedFrozenSet(h_level_indexes)

    h_level_set = SortedFrozenSet(h_level_indexes)
    v_level_set = SortedFrozenSet(v_level_indexes)

    if len(h_level_indexes) != len(h_level_set):
        raise ValueError("h_level_indexes contains duplicate values")
    if h_level_set and ((h_level_set[0] < 0) or (h_level_set[-1] >= num_levels)):
        raise ValueError("h_level_indexes contains out of range values")
    if len(v_level_indexes) != len(v_level_set):
        raise ValueError("v_level_indexes contains duplicate values")
    if v_level_set and ((v_level_set[0] < 0) or (v_level_set[-1] >= num_levels)):
        raise ValueError("v_level_indexes contains out of range values")

    unmentioned_levels = all_levels - v_level_set - h_level_set
    if len(unmentioned_levels) > 0:
        raise ValueError(
            "v_level_indexes and h_level_indexes do not together include levels {}".format(
                ", ".join(map(str, unmentioned_levels))
            )
        )
    if not h_level_set.isdisjoint(v_level_set):
        raise ValueError("h_level_indexes and v_level_indexes are not disjoint")
    v_level_indexes = list(v_level_indexes)
    h_level_indexes = list(h_level_indexes)
    return v_level_indexes, h_level_indexes
def strip_hidden(key_tuples, visibilities):
    """Filter each key tuple according to per-level visibility flags.

    Args:
        key_tuples: A sequence of equal-length tuples (i.e. rectangular).
        visibilities: A sequence of booleans, one per item of each tuple.

    Returns:
        A list, equal in length to key_tuples, of tuples containing only the
        items whose corresponding visibility flag is True.

    Raises:
        ValueError: If any tuple's length differs from len(visibilities).
    """
    stripped = []
    for keys in key_tuples:
        if len(keys) != len(visibilities):
            raise ValueError(
                "length of key tuple {} is not equal to length of visibilities {}".format(
                    keys, visibilities
                )
            )
        stripped.append(tuple(k for k, show in zip(keys, visibilities) if show))
    return stripped
# TODO: Multidimensional arrays. e.g. ndarray
|
sixty-north/added-value
|
source/added_value/tabulator.py
|
tabulate
|
python
|
def tabulate(
obj,
v_level_indexes=None,
h_level_indexes=None,
v_level_visibility=None,
h_level_visibility=None,
v_level_sort_keys=None,
h_level_sort_keys=None,
v_level_titles=None,
h_level_titles=None,
empty="",
):
level_keys = breadth_first(obj)
v_level_indexes, h_level_indexes = validate_level_indexes(
len(level_keys), v_level_indexes, h_level_indexes
)
if v_level_visibility is None:
v_level_visibility = [True] * len(v_level_indexes)
if h_level_visibility is None:
h_level_visibility = [True] * len(h_level_indexes)
table, v_key_tuples, h_key_tuples = tabulate_body(
obj, level_keys, v_level_indexes, h_level_indexes, v_level_sort_keys, h_level_sort_keys
)
table, v_key_tuples = strip_missing_rows(table, v_key_tuples)
table, h_key_tuples = strip_missing_columns(table, h_key_tuples)
v_key_tuples = strip_hidden(v_key_tuples, v_level_visibility)
h_key_tuples = strip_hidden(h_key_tuples, h_level_visibility)
return assemble_table(
table, v_key_tuples, h_key_tuples, v_level_titles, h_level_titles, empty=empty
)
|
Render a nested data structure into a two-dimensional table.
Args:
obj: The indexable data structure to be rendered, which can
either be a non-string sequence or a mapping containing other
sequences and mappings nested to arbitrarily many levels,
with all the leaf items (which are neither sequences nor
mappings, excluding strings).
v_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the vertical axis of the table. Taken together
with the levels in h_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both v_level_indexes and
h_level_indexes, but all level indexes must appear in
either v_level_indexes or h_level_indexes. If None,
the levels not used in h_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
h_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the horizontal axis of the table. Taken together
with the levels in v_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both h_level_indexes and
v_level_indexes, but all level indexes must appear in
either h_level_indexes or v_level_indexes. If None,
the levels not used in v_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
v_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in v_level_indexes, and
controls whether than level of index is included in
the table stub columns. This iterable must contain
the same number of items as v_level_indexes.
h_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in h_level_indexes, and
controls whether than level of index is included in
the table header rows. This iterable must contain
the same number of items as h_level_indexes.
v_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
h_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
v_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in v_level_indexes,
and which will be displayed against the row keys for that level.
If None, no titles will be included.
h_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in h_level_indexes,
and which will be displayed against the column keys for that level.
If None, no titles will be included.
empty: An optional string value to use for empty cells.
Returns:
A list of lists representing the rows of cells.
Example:
tabulate(dict_of_dicts, [0, 1], [])
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/tabulator.py#L225-L335
|
[
"def breadth_first(obj, leaves=False):\n queue = deque()\n queue.append(obj)\n queue.append(None)\n level_keys = []\n current_level_keys = TopoSet()\n while len(queue) > 0:\n node = queue.popleft()\n if node is None:\n level_keys.append(current_level_keys)\n current_level_keys = TopoSet()\n queue.append(None)\n if queue[0] is None:\n break\n else:\n continue\n if isinstance(node, Mapping):\n current_level_keys.update(node.keys())\n for value in node.values():\n queue.append(value)\n elif isinstance(node, NonStringIterable):\n current_level_keys.update(range(len(node)))\n for value in node:\n queue.append(value)\n else:\n if leaves:\n current_level_keys.add(node)\n\n return [\n list(s) for s in level_keys[:-1]\n ] # Why the slice? Remove leaves? Is the last always empty?\n",
"def tabulate_body(\n obj,\n level_keys,\n v_level_indexes,\n h_level_indexes,\n v_level_sort_keys=None,\n h_level_sort_keys=None,\n):\n \"\"\"\n Args:\n v_level_indexes: A sequence of level indexes.\n h_level_indexes: A sequence of level indexes.\n \"\"\"\n v_key_sorted = make_sorter(v_level_sort_keys, v_level_indexes)\n h_key_sorted = make_sorter(h_level_sort_keys, h_level_indexes)\n\n h_level_keys = [level_keys[level] for level in h_level_indexes]\n v_level_keys = [level_keys[level] for level in v_level_indexes]\n\n h_key_tuples = h_key_sorted(product(*h_level_keys))\n v_key_tuples = v_key_sorted(product(*v_level_keys))\n\n h_size = len(h_key_tuples)\n v_size = len(v_key_tuples)\n\n table = [[MISSING for _ in range(h_size)] for _ in range(v_size)]\n\n for h_index, h_keys in enumerate(h_key_tuples):\n for v_index, v_keys in enumerate(v_key_tuples):\n key_path = [None] * len(level_keys)\n merge_into_by_index(key_path, h_level_indexes, h_keys)\n merge_into_by_index(key_path, v_level_indexes, v_keys)\n for v_level, v_key in zip(v_level_indexes, v_keys):\n key_path[v_level] = v_key\n\n item = obj\n for key in key_path:\n try:\n item = item[key]\n except (IndexError, KeyError):\n break\n else: # no-break\n table[v_index][h_index] = item\n\n return table, v_key_tuples, h_key_tuples\n",
"def strip_missing_rows(table, row_keys):\n stripped_table = []\n stripped_v_key_tuples = []\n for row, v_key_tuple in zip(table, row_keys):\n if any(cell is not MISSING for cell in row):\n stripped_table.append(list(row))\n stripped_v_key_tuples.append(v_key_tuple)\n return stripped_table, stripped_v_key_tuples\n",
"def strip_missing_columns(table, h_key_tuples):\n transposed_table = transpose(table)\n stripped_transposed_table, stripped_h_key_tuples = strip_missing_rows(\n transposed_table, h_key_tuples\n )\n stripped_table = transpose(stripped_transposed_table)\n return stripped_table, stripped_h_key_tuples\n",
"def assemble_table(\n table_body, v_key_tuples, h_key_tuples, v_level_titles=None, h_level_titles=None, empty=\"\"\n):\n if not is_rectangular(table_body):\n raise ValueError(\"table_body {} is not rectangular\".format(table_body))\n if not is_rectangular(v_key_tuples):\n raise ValueError(\"v_key_tuples {} is not rectangular\".format(v_key_tuples))\n if not is_rectangular(h_key_tuples):\n raise ValueError(\"h_key_tuples {} is not rectangular\".format(h_key_tuples))\n if size_v(v_key_tuples) > 0 and (size_v(table_body) != size_v(v_key_tuples)):\n raise ValueError(\"table body and v_key_tuples have incompatible dimensions\")\n h_key_tuples_transposed = transpose(h_key_tuples)\n if size_h(h_key_tuples_transposed) > 0 and (\n size_h(table_body) != size_h(h_key_tuples_transposed)\n ):\n raise ValueError(\"table body and h_key_tuples have incompatible dimensions\")\n\n if (v_level_titles is not None) and (len(v_level_titles) != size_h(v_key_tuples)):\n raise ValueError(\"v_level_titles and v_key_tuples have incompatible dimensions\")\n\n if (h_level_titles is not None) and (len(h_level_titles) != size_v(h_key_tuples_transposed)):\n raise ValueError(\"h_level_titles and h_key_tuples have incompatible dimensions\")\n\n boxed_h_level_titles = (\n unchain(h_level_titles)\n if (h_level_titles is not None)\n else repeat(empty_iterable(), size_v(h_key_tuples_transposed))\n )\n\n num_h_level_title_columns = int(bool(h_level_titles))\n num_stub_columns = max(size_h(v_key_tuples), num_h_level_title_columns)\n table = []\n\n num_empty_columns = num_stub_columns - num_h_level_title_columns\n for boxed_h_level_title, h_key_row in zip(boxed_h_level_titles, h_key_tuples_transposed):\n row = list(chain(repeat(\" \", num_empty_columns), boxed_h_level_title, h_key_row))\n table.append(row)\n\n if v_level_titles is not None:\n v_level_titles_row = v_level_titles + [empty] * size_h(table_body)\n table.append(v_level_titles_row)\n\n for v_key_row, table_row in zip(v_key_tuples, 
table_body):\n row = list(v_key_row)\n row.extend(table_row)\n table.append(row)\n\n assert is_rectangular(table)\n return table\n",
"def validate_level_indexes(num_levels, v_level_indexes, h_level_indexes):\n \"\"\"Ensure that v_level_indexes and h_level_indexes are consistent.\n\n Args:\n num_levels: The number of levels of keys in the data structure being tabulated.\n v_level_indexes: A sequence of level indexes between zero and num_levels for\n the vertical axis, or None.\n h_level_indexes: A sequence of level indexes between zero and num_levels for for\n the horizontal axis, or None.\n\n Returns:\n A 2-tuple containing v_level_indexes and h_level_indexes sequences.\n\n Raises:\n ValueError: If v_level_indexes contains duplicate values.\n ValueError: If h_level_indexes contains duplicate values.\n ValueError: If v_level_indexes contains out of range values.\n ValueError: If h_level_indexes contains out of range values.\n ValueError: If taken together v_level_indexes and h_level_indexes\n do not include all levels from zero to up to, but not including\n num_levels.\n ValueError: If v_level_indexes and h_level_indexes have items in\n common.\n \"\"\"\n if num_levels < 1:\n raise ValueError(\"num_levels {} is less than one\".format(num_levels))\n\n all_levels = SortedFrozenSet(range(num_levels))\n\n if (h_level_indexes is None) and (v_level_indexes is None):\n v_level_indexes = range(0, num_levels, 2)\n h_level_indexes = range(1, num_levels, 2)\n\n h_level_set = SortedFrozenSet(h_level_indexes)\n v_level_set = SortedFrozenSet(v_level_indexes)\n\n if h_level_indexes is None:\n h_level_indexes = all_levels - v_level_set\n if v_level_indexes is None:\n v_level_indexes = all_levels - h_level_set\n\n if len(h_level_indexes) != len(h_level_set):\n raise ValueError(\"h_level_indexes contains duplicate values\")\n if h_level_set and ((h_level_set[0] < 0) or (h_level_set[-1] >= num_levels)):\n raise ValueError(\"h_level_indexes contains out of range values\")\n if len(v_level_indexes) != len(v_level_set):\n raise ValueError(\"v_level_indexes contains duplicate values\")\n if v_level_set and 
((v_level_set[0] < 0) or (v_level_set[-1] >= num_levels)):\n raise ValueError(\"v_level_indexes contains out of range values\")\n\n unmentioned_levels = all_levels - v_level_set - h_level_set\n if len(unmentioned_levels) > 0:\n raise ValueError(\n \"v_level_indexes and h_level_indexes do not together include levels {}\".format(\n \", \".join(map(str, unmentioned_levels))\n )\n )\n if not h_level_set.isdisjoint(v_level_set):\n raise ValueError(\"h_level_indexes and v_level_indexes are not disjoint\")\n v_level_indexes = list(v_level_indexes)\n h_level_indexes = list(h_level_indexes)\n return v_level_indexes, h_level_indexes\n",
"def strip_hidden(key_tuples, visibilities):\n \"\"\"Filter each tuple according to visibility.\n\n Args:\n key_tuples: A sequence of tuples of equal length (i.e. rectangular)\n visibilities: A sequence of booleans equal in length to the tuples contained in key_tuples.\n\n Returns:\n A sequence equal in length to key_tuples where the items are tuples with a length corresponding\n to the number of items in visibility which are True.\n \"\"\"\n result = []\n for key_tuple in key_tuples:\n if len(key_tuple) != len(visibilities):\n raise ValueError(\n \"length of key tuple {} is not equal to length of visibilities {}\".format(\n key_tuple, visibilities\n )\n )\n filtered_tuple = tuple(item for item, visible in zip(key_tuple, visibilities) if visible)\n result.append(filtered_tuple)\n return result\n"
] |
from collections import deque

try:
    # Python 3.3+: the ABCs live in collections.abc; the aliases in the
    # top-level collections module were removed in Python 3.10.
    from collections.abc import Mapping
except ImportError:  # Python 2 fallback
    from collections import Mapping
from itertools import product, chain, repeat

from added_value.items_table_directive import NonStringIterable
from added_value.multisort import tuplesorted
from added_value.sorted_frozen_set import SortedFrozenSet
from added_value.toposet import TopoSet
from added_value.util import unchain, empty_iterable

# Module-level sentinel objects (unique identities used as markers).
depth_marker = object()
ROOT = object()
LEAF = object()
_UNSET = object()
def breadth_first(obj, leaves=False):
    """Collect the keys/indexes present at each nesting level of *obj*.

    Performs a breadth-first traversal of a nested structure of mappings and
    non-string iterables.  Mappings contribute their keys; other iterables
    contribute their integer indexes.

    Args:
        obj: The nested structure to traverse.
        leaves: If True, leaf values themselves are also recorded as keys at
            the level where they appear.

    Returns:
        A list with one entry per nesting level, each a list of the keys
        seen at that level, in the order the TopoSet retains them.
    """
    queue = deque()
    queue.append(obj)
    # None acts as a level separator: everything queued before it belongs to
    # the current depth.
    queue.append(None)
    level_keys = []
    current_level_keys = TopoSet()
    while len(queue) > 0:
        node = queue.popleft()
        if node is None:
            # End of the current level: bank its keys and start a new one.
            level_keys.append(current_level_keys)
            current_level_keys = TopoSet()
            queue.append(None)
            if queue[0] is None:
                # Two consecutive separators: nothing was queued for the
                # next level, so the traversal is complete.
                break
            else:
                continue
        if isinstance(node, Mapping):
            current_level_keys.update(node.keys())
            for value in node.values():
                queue.append(value)
        elif isinstance(node, NonStringIterable):
            current_level_keys.update(range(len(node)))
            for value in node:
                queue.append(value)
        else:
            if leaves:
                current_level_keys.add(node)
    return [
        list(s) for s in level_keys[:-1]
    ]  # NOTE(review): the slice drops the deepest collected level —
    # presumably the always-empty leaf level; confirm before relying on it.
class Missing(object):
    """Sentinel type for absent table cells; renders as an empty string."""

    def __repr__(self):
        return type(self).__name__

    def __str__(self):
        return ""


MISSING = Missing()
def tabulate_body(
    obj,
    level_keys,
    v_level_indexes,
    h_level_indexes,
    v_level_sort_keys=None,
    h_level_sort_keys=None,
):
    """Build the grid of cell values for the table body.

    Args:
        obj: The nested data structure being tabulated.
        level_keys: A sequence, per nesting level, of the keys found there.
        v_level_indexes: A sequence of level indexes for the vertical axis.
        h_level_indexes: A sequence of level indexes for the horizontal axis.
        v_level_sort_keys: Optional sort keys, one per vertical level.
        h_level_sort_keys: Optional sort keys, one per horizontal level.

    Returns:
        A 3-tuple (table, v_key_tuples, h_key_tuples): table is a list of
        rows of cell values (MISSING where obj has no entry for a key path),
        and the key-tuple lists label the rows and columns respectively.
    """
    v_key_sorted = make_sorter(v_level_sort_keys, v_level_indexes)
    h_key_sorted = make_sorter(h_level_sort_keys, h_level_indexes)

    h_level_keys = [level_keys[level] for level in h_level_indexes]
    v_level_keys = [level_keys[level] for level in v_level_indexes]

    # Every combination of per-level keys names one column (or row).
    h_key_tuples = h_key_sorted(product(*h_level_keys))
    v_key_tuples = v_key_sorted(product(*v_level_keys))

    h_size = len(h_key_tuples)
    v_size = len(v_key_tuples)

    table = [[MISSING for _ in range(h_size)] for _ in range(v_size)]

    for h_index, h_keys in enumerate(h_key_tuples):
        for v_index, v_keys in enumerate(v_key_tuples):
            # Interleave horizontal and vertical keys back into a single
            # lookup path ordered by nesting level.  (The original code also
            # re-assigned the vertical keys in a second explicit loop, which
            # duplicated merge_into_by_index exactly; removed.)
            key_path = [None] * len(level_keys)
            merge_into_by_index(key_path, h_level_indexes, h_keys)
            merge_into_by_index(key_path, v_level_indexes, v_keys)

            item = obj
            for key in key_path:
                try:
                    item = item[key]
                except (IndexError, KeyError):
                    break  # Path absent in obj; leave the cell MISSING.
            else:  # no-break
                table[v_index][h_index] = item

    return table, v_key_tuples, h_key_tuples
def make_sorter(level_sort_keys, level_indexes):
    """Build a callable that orders the key tuples of one table axis.

    Args:
        level_sort_keys: A sequence of sort keys, one per level index, or
            None to leave keys in their existing order.
        level_indexes: The level indexes of the axis being sorted.

    Returns:
        A callable taking an iterable of key tuples and returning them as a
        sorted (or merely materialised) list.

    Raises:
        ValueError: If level_sort_keys is given but its length does not match
            level_indexes.
    """
    if level_sort_keys is None:
        # No sort keys: just materialise the tuples in their given order.
        return list
    if len(level_sort_keys) != len(level_indexes):
        raise ValueError(
            "level_sort_keys with length {} does not correspond to level_indexes with length {}".format(
                len(level_sort_keys), len(level_indexes)
            )
        )

    def key_sorted(level_keys):
        return tuplesorted(level_keys, *level_sort_keys)

    return key_sorted
def strip_missing_rows(table, row_keys):
    """Drop table rows (and their key tuples) whose cells are all MISSING."""
    kept_rows = []
    kept_keys = []
    for cells, key_tuple in zip(table, row_keys):
        if all(cell is MISSING for cell in cells):
            continue  # Entirely-missing row: omit it from the result.
        kept_rows.append(list(cells))
        kept_keys.append(key_tuple)
    return kept_rows, kept_keys
def strip_missing_columns(table, h_key_tuples):
    """Drop table columns (and their key tuples) whose cells are all MISSING."""
    # Transpose so columns become rows, reuse the row-stripping logic, then
    # transpose back to the original orientation.
    columns = transpose(table)
    kept_columns, kept_h_key_tuples = strip_missing_rows(columns, h_key_tuples)
    return transpose(kept_columns), kept_h_key_tuples
def merge_into_by_index(sequence, indexes, values):
    """Write each value into *sequence* at the corresponding index, in place."""
    for position, item in zip(indexes, values):
        sequence[position] = item
def is_rectangular(seq_of_seqs):
    """Return True if every inner sequence has the same length (or none exist)."""
    distinct_lengths = {len(inner) for inner in seq_of_seqs}
    return len(distinct_lengths) <= 1
def size_h(rows_of_columns):
    """Number of columns, taken from the first row (0 when there are no rows)."""
    if not rows_of_columns:
        return 0
    return len(rows_of_columns[0])
def size_v(rows_of_columns):
    """Number of rows containing at least one cell (empty rows are ignored)."""
    count = 0
    for row in rows_of_columns:
        if row:
            count += 1
    return count
def size(rows_of_columns):
    """Return the (num_rows, num_columns) dimensions of a table."""
    return (size_v(rows_of_columns), size_h(rows_of_columns))
def transpose(rows_of_columns):
    """Swap rows and columns, producing a list of row-lists."""
    return [list(column) for column in zip(*rows_of_columns)]
def assemble_table(
    table_body, v_key_tuples, h_key_tuples, v_level_titles=None, h_level_titles=None, empty=""
):
    """Combine a table body with its row/column key tuples and optional titles.

    Args:
        table_body: A rectangular list of rows of cell values.
        v_key_tuples: One key tuple per body row; becomes the stub columns.
        h_key_tuples: One key tuple per body column; becomes the header rows
            (after transposition).
        v_level_titles: Optional titles, one per stub column.
        h_level_titles: Optional titles, one per header row.
        empty: Fill value used in the vertical-titles row.

    Returns:
        A rectangular list of rows: the header rows, an optional row of
        vertical titles, then each body row prefixed with its key tuple.

    Raises:
        ValueError: If any input is not rectangular, or if the dimensions of
            the inputs are mutually incompatible.
    """
    # --- Shape validation -------------------------------------------------
    if not is_rectangular(table_body):
        raise ValueError("table_body {} is not rectangular".format(table_body))
    if not is_rectangular(v_key_tuples):
        raise ValueError("v_key_tuples {} is not rectangular".format(v_key_tuples))
    if not is_rectangular(h_key_tuples):
        raise ValueError("h_key_tuples {} is not rectangular".format(h_key_tuples))
    if size_v(v_key_tuples) > 0 and (size_v(table_body) != size_v(v_key_tuples)):
        raise ValueError("table body and v_key_tuples have incompatible dimensions")
    # Transposed so that each inner sequence is one header ROW of the table.
    h_key_tuples_transposed = transpose(h_key_tuples)
    if size_h(h_key_tuples_transposed) > 0 and (
        size_h(table_body) != size_h(h_key_tuples_transposed)
    ):
        raise ValueError("table body and h_key_tuples have incompatible dimensions")
    if (v_level_titles is not None) and (len(v_level_titles) != size_h(v_key_tuples)):
        raise ValueError("v_level_titles and v_key_tuples have incompatible dimensions")
    if (h_level_titles is not None) and (len(h_level_titles) != size_v(h_key_tuples_transposed)):
        raise ValueError("h_level_titles and h_key_tuples have incompatible dimensions")
    # --- Header rows ------------------------------------------------------
    # `unchain` presumably boxes each title in its own iterable so it can be
    # chained in front of its header row; with no titles an empty iterable per
    # row contributes nothing to the chain.  TODO confirm against unchain().
    boxed_h_level_titles = (
        unchain(h_level_titles)
        if (h_level_titles is not None)
        else repeat(empty_iterable(), size_v(h_key_tuples_transposed))
    )
    num_h_level_title_columns = int(bool(h_level_titles))
    # The stub must be wide enough for the row keys and any title column.
    num_stub_columns = max(size_h(v_key_tuples), num_h_level_title_columns)
    table = []
    num_empty_columns = num_stub_columns - num_h_level_title_columns
    for boxed_h_level_title, h_key_row in zip(boxed_h_level_titles, h_key_tuples_transposed):
        row = list(chain(repeat(" ", num_empty_columns), boxed_h_level_title, h_key_row))
        table.append(row)
    # --- Optional vertical-titles row -------------------------------------
    if v_level_titles is not None:
        v_level_titles_row = v_level_titles + [empty] * size_h(table_body)
        table.append(v_level_titles_row)
    # --- Body rows, each prefixed with its stub keys ----------------------
    for v_key_row, table_row in zip(v_key_tuples, table_body):
        row = list(v_key_row)
        row.extend(table_row)
        table.append(row)
    assert is_rectangular(table)
    return table
def validate_level_indexes(num_levels, v_level_indexes, h_level_indexes):
    """Ensure that v_level_indexes and h_level_indexes are consistent.

    Args:
        num_levels: The number of levels of keys in the data structure being tabulated.
        v_level_indexes: A sequence of level indexes between zero and num_levels for
            the vertical axis, or None.
        h_level_indexes: A sequence of level indexes between zero and num_levels for
            the horizontal axis, or None.

    Returns:
        A 2-tuple containing v_level_indexes and h_level_indexes sequences.

    Raises:
        ValueError: If v_level_indexes contains duplicate values.
        ValueError: If h_level_indexes contains duplicate values.
        ValueError: If v_level_indexes contains out of range values.
        ValueError: If h_level_indexes contains out of range values.
        ValueError: If taken together v_level_indexes and h_level_indexes
            do not include all levels from zero up to, but not including,
            num_levels.
        ValueError: If v_level_indexes and h_level_indexes have items in
            common.
    """
    if num_levels < 1:
        raise ValueError("num_levels {} is less than one".format(num_levels))

    all_levels = SortedFrozenSet(range(num_levels))

    # Fill in defaulted axes BEFORE building the level sets below.  The
    # previous code built the sets first, so when exactly one axis was None
    # its set was stale/empty and the duplicate and coverage checks raised
    # spurious ValueErrors.
    if (h_level_indexes is None) and (v_level_indexes is None):
        v_level_indexes = range(0, num_levels, 2)
        h_level_indexes = range(1, num_levels, 2)
    elif h_level_indexes is None:
        h_level_indexes = all_levels - SortedFrozenSet(v_level_indexes)
    elif v_level_indexes is None:
        v_level_indexes = all_levels - SortedFrozenSet(h_level_indexes)

    h_level_set = SortedFrozenSet(h_level_indexes)
    v_level_set = SortedFrozenSet(v_level_indexes)

    if len(h_level_indexes) != len(h_level_set):
        raise ValueError("h_level_indexes contains duplicate values")
    if h_level_set and ((h_level_set[0] < 0) or (h_level_set[-1] >= num_levels)):
        raise ValueError("h_level_indexes contains out of range values")
    if len(v_level_indexes) != len(v_level_set):
        raise ValueError("v_level_indexes contains duplicate values")
    if v_level_set and ((v_level_set[0] < 0) or (v_level_set[-1] >= num_levels)):
        raise ValueError("v_level_indexes contains out of range values")

    unmentioned_levels = all_levels - v_level_set - h_level_set
    if len(unmentioned_levels) > 0:
        raise ValueError(
            "v_level_indexes and h_level_indexes do not together include levels {}".format(
                ", ".join(map(str, unmentioned_levels))
            )
        )
    if not h_level_set.isdisjoint(v_level_set):
        raise ValueError("h_level_indexes and v_level_indexes are not disjoint")
    v_level_indexes = list(v_level_indexes)
    h_level_indexes = list(h_level_indexes)
    return v_level_indexes, h_level_indexes
def strip_hidden(key_tuples, visibilities):
    """Filter each key tuple according to per-level visibility flags.

    Args:
        key_tuples: A sequence of equal-length tuples (i.e. rectangular).
        visibilities: A sequence of booleans, one per item of each tuple.

    Returns:
        A list, equal in length to key_tuples, of tuples containing only the
        items whose corresponding visibility flag is True.

    Raises:
        ValueError: If any tuple's length differs from len(visibilities).
    """
    stripped = []
    for keys in key_tuples:
        if len(keys) != len(visibilities):
            raise ValueError(
                "length of key tuple {} is not equal to length of visibilities {}".format(
                    keys, visibilities
                )
            )
        stripped.append(tuple(k for k, show in zip(keys, visibilities) if show))
    return stripped
# TODO: Multidimensional arrays. e.g. ndarray
|
sixty-north/added-value
|
source/added_value/tabulator.py
|
validate_level_indexes
|
python
|
def validate_level_indexes(num_levels, v_level_indexes, h_level_indexes):
    """Ensure that v_level_indexes and h_level_indexes are consistent.

    Args:
        num_levels: The number of key levels in the structure being tabulated.
        v_level_indexes: A sequence of level indexes for the vertical axis, or None.
        h_level_indexes: A sequence of level indexes for the horizontal axis, or None.

    Returns:
        A 2-tuple of (v_level_indexes, h_level_indexes) as lists, with any
        None argument replaced by the levels the other axis does not claim.

    Raises:
        ValueError: If either sequence contains duplicate or out-of-range
            values, if the two sequences share a level, or if together they
            do not cover every level from zero up to (but not including)
            num_levels.
    """
    if num_levels < 1:
        raise ValueError("num_levels {} is less than one".format(num_levels))
    all_levels = SortedFrozenSet(range(num_levels))
    # Neither axis specified: alternate levels between vertical (even
    # indexes) and horizontal (odd indexes).
    if (h_level_indexes is None) and (v_level_indexes is None):
        v_level_indexes = range(0, num_levels, 2)
        h_level_indexes = range(1, num_levels, 2)
    h_level_set = SortedFrozenSet(h_level_indexes)
    v_level_set = SortedFrozenSet(v_level_indexes)
    # Exactly one axis specified: the other defaults to the remaining levels.
    if h_level_indexes is None:
        h_level_indexes = all_levels - v_level_set
    if v_level_indexes is None:
        v_level_indexes = all_levels - h_level_set
    # A set shorter than its source sequence means duplicates were present.
    if len(h_level_indexes) != len(h_level_set):
        raise ValueError("h_level_indexes contains duplicate values")
    # SortedFrozenSet is ordered, so checking the extremes suffices.
    if h_level_set and ((h_level_set[0] < 0) or (h_level_set[-1] >= num_levels)):
        raise ValueError("h_level_indexes contains out of range values")
    if len(v_level_indexes) != len(v_level_set):
        raise ValueError("v_level_indexes contains duplicate values")
    if v_level_set and ((v_level_set[0] < 0) or (v_level_set[-1] >= num_levels)):
        raise ValueError("v_level_indexes contains out of range values")
    unmentioned_levels = all_levels - v_level_set - h_level_set
    if len(unmentioned_levels) > 0:
        raise ValueError(
            "v_level_indexes and h_level_indexes do not together include levels {}".format(
                ", ".join(map(str, unmentioned_levels))
            )
        )
    if not h_level_set.isdisjoint(v_level_set):
        raise ValueError("h_level_indexes and v_level_indexes are not disjoint")
    v_level_indexes = list(v_level_indexes)
    h_level_indexes = list(h_level_indexes)
    return v_level_indexes, h_level_indexes
|
Ensure that v_level_indexes and h_level_indexes are consistent.
Args:
num_levels: The number of levels of keys in the data structure being tabulated.
v_level_indexes: A sequence of level indexes between zero and num_levels for
the vertical axis, or None.
h_level_indexes: A sequence of level indexes between zero and num_levels for
the horizontal axis, or None.
Returns:
A 2-tuple containing v_level_indexes and h_level_indexes sequences.
Raises:
ValueError: If v_level_indexes contains duplicate values.
ValueError: If h_level_indexes contains duplicate values.
ValueError: If v_level_indexes contains out of range values.
ValueError: If h_level_indexes contains out of range values.
ValueError: If, taken together, v_level_indexes and h_level_indexes
do not include all levels from zero up to, but not including,
num_levels.
ValueError: If v_level_indexes and h_level_indexes have items in
common.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/tabulator.py#L338-L399
| null |
from collections import Mapping, deque
from itertools import product, chain, repeat
from added_value.items_table_directive import NonStringIterable
from added_value.multisort import tuplesorted
from added_value.sorted_frozen_set import SortedFrozenSet
from added_value.toposet import TopoSet
from added_value.util import unchain, empty_iterable
depth_marker = object()
ROOT = object()
LEAF = object()
_UNSET = object()
def breadth_first(obj, leaves=False):
    """Collect the keys of a nested mapping/sequence structure, level by level.

    Walks obj breadth-first. Mapping nodes contribute their keys and
    non-string iterable nodes contribute their integer indexes to the
    current level's key set; leaf values are included only when ``leaves``
    is true.

    Args:
        obj: A nested structure of mappings and non-string iterables.
        leaves: If True, leaf values are also recorded at their level.

    Returns:
        A list with one list of keys per level (the final level is dropped
        by the trailing slice — see the existing question comment below).
    """
    queue = deque()
    queue.append(obj)
    queue.append(None)  # None is a marker separating one level from the next
    level_keys = []
    current_level_keys = TopoSet()
    while len(queue) > 0:
        node = queue.popleft()
        if node is None:
            # Finished a level: archive its keys and push a fresh marker.
            level_keys.append(current_level_keys)
            current_level_keys = TopoSet()
            queue.append(None)
            if queue[0] is None:
                # Two consecutive markers: no nodes remain, traversal done.
                break
            else:
                continue
        if isinstance(node, Mapping):
            current_level_keys.update(node.keys())
            for value in node.values():
                queue.append(value)
        elif isinstance(node, NonStringIterable):
            current_level_keys.update(range(len(node)))
            for value in node:
                queue.append(value)
        else:
            if leaves:
                current_level_keys.add(node)
    return [
        list(s) for s in level_keys[:-1]
    ]  # Why the slice? Remove leaves? Is the last always empty?
class Missing(object):
    """Sentinel type for absent table cells; renders as an empty string."""
    def __str__(self):
        return ""
    def __repr__(self):
        return type(self).__name__
# Shared singleton used throughout the module to mark missing cells.
MISSING = Missing()
def tabulate_body(
    obj,
    level_keys,
    v_level_indexes,
    h_level_indexes,
    v_level_sort_keys=None,
    h_level_sort_keys=None,
):
    """Build the table body together with its row and column key tuples.

    Args:
        obj: The nested mapping/sequence structure being tabulated.
        level_keys: One collection of keys per nesting level of obj.
        v_level_indexes: A sequence of level indexes for the vertical axis.
        h_level_indexes: A sequence of level indexes for the horizontal axis.
        v_level_sort_keys: Optional sort keys, one per vertical level.
        h_level_sort_keys: Optional sort keys, one per horizontal level.

    Returns:
        A 3-tuple (table, v_key_tuples, h_key_tuples) where table is a list
        of row lists, each cell holding the looked-up item or MISSING.
    """
    v_key_sorted = make_sorter(v_level_sort_keys, v_level_indexes)
    h_key_sorted = make_sorter(h_level_sort_keys, h_level_indexes)
    h_level_keys = [level_keys[level] for level in h_level_indexes]
    v_level_keys = [level_keys[level] for level in v_level_indexes]
    # Cartesian products enumerate every column/row key combination.
    h_key_tuples = h_key_sorted(product(*h_level_keys))
    v_key_tuples = v_key_sorted(product(*v_level_keys))
    h_size = len(h_key_tuples)
    v_size = len(v_key_tuples)
    table = [[MISSING for _ in range(h_size)] for _ in range(v_size)]
    for h_index, h_keys in enumerate(h_key_tuples):
        for v_index, v_keys in enumerate(v_key_tuples):
            # Interleave the horizontal and vertical keys back into original
            # level order to form the full lookup path into obj.
            # (A redundant loop that re-assigned the vertical keys after
            # merge_into_by_index had already done so has been removed.)
            key_path = [None] * len(level_keys)
            merge_into_by_index(key_path, h_level_indexes, h_keys)
            merge_into_by_index(key_path, v_level_indexes, v_keys)
            item = obj
            for key in key_path:
                try:
                    item = item[key]
                except (IndexError, KeyError):
                    break
            else:  # no-break: the whole path resolved to a leaf item
                table[v_index][h_index] = item
    return table, v_key_tuples, h_key_tuples
def make_sorter(level_sort_keys, level_indexes):
    """Return a callable that orders sequences of key tuples.

    When level_sort_keys is None the tuples keep their given order (the
    callable is simply ``list``); otherwise tuplesorted is applied with one
    sort key per level.

    Raises:
        ValueError: If level_sort_keys is given but differs in length from
            level_indexes.
    """
    if level_sort_keys is None:
        return list
    if len(level_sort_keys) != len(level_indexes):
        raise ValueError(
            "level_sort_keys with length {} does not correspond to level_indexes with length {}".format(
                len(level_sort_keys), len(level_indexes)
            )
        )
    def key_sorted(level_keys):
        return tuplesorted(level_keys, *level_sort_keys)
    return key_sorted
def strip_missing_rows(table, row_keys):
    """Drop table rows (and their key tuples) whose cells are all MISSING."""
    kept_rows = []
    kept_keys = []
    for row, keys in zip(table, row_keys):
        if all(cell is MISSING for cell in row):
            continue
        kept_rows.append(list(row))
        kept_keys.append(keys)
    return kept_rows, kept_keys
def strip_missing_columns(table, h_key_tuples):
    """Drop table columns (and their key tuples) whose cells are all MISSING.

    Implemented by transposing, stripping rows, and transposing back.
    """
    columns, kept_key_tuples = strip_missing_rows(transpose(table), h_key_tuples)
    return transpose(columns), kept_key_tuples
def merge_into_by_index(sequence, indexes, values):
    """Assign each value into sequence at its corresponding index, in place."""
    for position, value in zip(indexes, values):
        sequence[position] = value
def is_rectangular(seq_of_seqs):
    """Return True if every inner sequence has the same length (or none exist)."""
    distinct_lengths = {len(seq) for seq in seq_of_seqs}
    return len(distinct_lengths) <= 1
def size_h(rows_of_columns):
    """Return the column count, taken from the first row (0 if there are no rows)."""
    if not rows_of_columns:
        return 0
    return len(rows_of_columns[0])
def size_v(rows_of_columns):
    """Return the number of non-empty rows."""
    count = 0
    for row in rows_of_columns:
        if len(row) > 0:
            count += 1
    return count
def size(rows_of_columns):
    """Return the (vertical, horizontal) dimensions as a 2-tuple."""
    rows = size_v(rows_of_columns)
    columns = size_h(rows_of_columns)
    return rows, columns
def transpose(rows_of_columns):
    """Swap rows and columns, returning a list of lists."""
    return [list(column) for column in zip(*rows_of_columns)]
def assemble_table(
    table_body, v_key_tuples, h_key_tuples, v_level_titles=None, h_level_titles=None, empty=""
):
    """Combine a table body with its key headers and optional titles.

    Args:
        table_body: A rectangular list of row lists of cell values.
        v_key_tuples: One key tuple per body row (the stub columns).
        h_key_tuples: One key tuple per body column (the header rows).
        v_level_titles: Optional titles, one per vertical key level.
        h_level_titles: Optional titles, one per horizontal key level.
        empty: Fill value for cells with no content.

    Returns:
        A rectangular list of row lists: header rows, an optional vertical
        titles row, then the stub columns joined to the body rows.

    Raises:
        ValueError: If any input is non-rectangular or the dimensions of the
            body, keys, and titles are mutually incompatible.
    """
    if not is_rectangular(table_body):
        raise ValueError("table_body {} is not rectangular".format(table_body))
    if not is_rectangular(v_key_tuples):
        raise ValueError("v_key_tuples {} is not rectangular".format(v_key_tuples))
    if not is_rectangular(h_key_tuples):
        raise ValueError("h_key_tuples {} is not rectangular".format(h_key_tuples))
    if size_v(v_key_tuples) > 0 and (size_v(table_body) != size_v(v_key_tuples)):
        raise ValueError("table body and v_key_tuples have incompatible dimensions")
    # Transposed so each horizontal key level becomes a header row.
    h_key_tuples_transposed = transpose(h_key_tuples)
    if size_h(h_key_tuples_transposed) > 0 and (
        size_h(table_body) != size_h(h_key_tuples_transposed)
    ):
        raise ValueError("table body and h_key_tuples have incompatible dimensions")
    if (v_level_titles is not None) and (len(v_level_titles) != size_h(v_key_tuples)):
        raise ValueError("v_level_titles and v_key_tuples have incompatible dimensions")
    if (h_level_titles is not None) and (len(h_level_titles) != size_v(h_key_tuples_transposed)):
        raise ValueError("h_level_titles and h_key_tuples have incompatible dimensions")
    # Each header row is prefixed by a one-item box holding its title, or by
    # an empty box when there are no titles.
    boxed_h_level_titles = (
        unchain(h_level_titles)
        if (h_level_titles is not None)
        else repeat(empty_iterable(), size_v(h_key_tuples_transposed))
    )
    num_h_level_title_columns = int(bool(h_level_titles))
    # The stub is wide enough for the row keys and the header-title column.
    num_stub_columns = max(size_h(v_key_tuples), num_h_level_title_columns)
    table = []
    num_empty_columns = num_stub_columns - num_h_level_title_columns
    for boxed_h_level_title, h_key_row in zip(boxed_h_level_titles, h_key_tuples_transposed):
        row = list(chain(repeat(" ", num_empty_columns), boxed_h_level_title, h_key_row))
        table.append(row)
    if v_level_titles is not None:
        v_level_titles_row = v_level_titles + [empty] * size_h(table_body)
        table.append(v_level_titles_row)
    # Body rows: stub (row keys) followed by the data cells.
    for v_key_row, table_row in zip(v_key_tuples, table_body):
        row = list(v_key_row)
        row.extend(table_row)
        table.append(row)
    assert is_rectangular(table)
    return table
def tabulate(
    obj,
    v_level_indexes=None,
    h_level_indexes=None,
    v_level_visibility=None,
    h_level_visibility=None,
    v_level_sort_keys=None,
    h_level_sort_keys=None,
    v_level_titles=None,
    h_level_titles=None,
    empty="",
):
    """Render a nested data structure into a two-dimensional table.

    Args:
        obj: The indexable data structure to be rendered, which can
            either be a non-string sequence or a mapping containing other
            sequences and mappings nested to arbitrarily many levels,
            with all the leaf items (which are neither sequences nor
            mappings, excluding strings).
        v_level_indexes: An iterable of the zero-based indexes of
            the levels for which the keys/indexes will be displayed
            along the vertical axis of the table. Taken together
            with the levels in h_level_indexes these must represent
            the complete set of levels in the obj data structure. No
            level index should appear in both v_level_indexes and
            h_level_indexes, but all level indexes must appear in
            either v_level_indexes or h_level_indexes. If None,
            the levels not used in h_level_indexes will be used.
            If both v_level_indexes and h_level_indexes are None,
            alternating indexes will be used as v_level and h_level
            indexes.
        h_level_indexes: An iterable of the zero-based indexes of
            the levels for which the keys/indexes will be displayed
            along the horizontal axis of the table. Taken together
            with the levels in v_level_indexes these must represent
            the complete set of levels in the obj data structure. No
            level index should appear in both h_level_indexes and
            v_level_indexes, but all level indexes must appear in
            either h_level_indexes or v_level_indexes. If None,
            the levels not used in v_level_indexes will be used.
            If both v_level_indexes and h_level_indexes are None,
            alternating indexes will be used as v_level and h_level
            indexes.
        v_level_visibility: An optional iterable of booleans, where each
            item corresponds to a level in v_level_indexes, and
            controls whether that level of index is included in
            the table stub columns. This iterable must contain
            the same number of items as v_level_indexes.
        h_level_visibility: An optional iterable of booleans, where each
            item corresponds to a level in h_level_indexes, and
            controls whether that level of index is included in
            the table header rows. This iterable must contain
            the same number of items as h_level_indexes.
        v_level_sort_keys: An optional iterable of Keys, where each
            key corresponds to a level in v_level_indexes, and
            controls how that key is sorted. If None, keys are sorted
            as-is.
        h_level_sort_keys: An optional iterable of Keys, where each
            key corresponds to a level in h_level_indexes, and
            controls how that key is sorted. If None, keys are sorted
            as-is.
        v_level_titles: An optional iterable of strings, where each
            string is a title which corresponds to a level in v_level_indexes,
            and which will be displayed against the row keys for that level.
            If None, no titles will be included.
        h_level_titles: An optional iterable of strings, where each
            string is a title which corresponds to a level in h_level_indexes,
            and which will be displayed against the column keys for that level.
            If None, no titles will be included.
        empty: An optional string value to use for empty cells.

    Returns:
        A list of lists representing the rows of cells.

    Example:
        tabulate(dict_of_dicts, [0, 1], [])
    """
    # Discover the keys at every nesting level, then normalise/validate the
    # requested axis assignments.
    level_keys = breadth_first(obj)
    v_level_indexes, h_level_indexes = validate_level_indexes(
        len(level_keys), v_level_indexes, h_level_indexes
    )
    if v_level_visibility is None:
        v_level_visibility = [True] * len(v_level_indexes)
    if h_level_visibility is None:
        h_level_visibility = [True] * len(h_level_indexes)
    # Build the raw body, then drop fully-missing rows/columns and hidden
    # key levels before final assembly.
    table, v_key_tuples, h_key_tuples = tabulate_body(
        obj, level_keys, v_level_indexes, h_level_indexes, v_level_sort_keys, h_level_sort_keys
    )
    table, v_key_tuples = strip_missing_rows(table, v_key_tuples)
    table, h_key_tuples = strip_missing_columns(table, h_key_tuples)
    v_key_tuples = strip_hidden(v_key_tuples, v_level_visibility)
    h_key_tuples = strip_hidden(h_key_tuples, h_level_visibility)
    return assemble_table(
        table, v_key_tuples, h_key_tuples, v_level_titles, h_level_titles, empty=empty
    )
def strip_hidden(key_tuples, visibilities):
"""Filter each tuple according to visibility.
Args:
key_tuples: A sequence of tuples of equal length (i.e. rectangular)
visibilities: A sequence of booleans equal in length to the tuples contained in key_tuples.
Returns:
A sequence equal in length to key_tuples where the items are tuples with a length corresponding
to the number of items in visibility which are True.
"""
result = []
for key_tuple in key_tuples:
if len(key_tuple) != len(visibilities):
raise ValueError(
"length of key tuple {} is not equal to length of visibilities {}".format(
key_tuple, visibilities
)
)
filtered_tuple = tuple(item for item, visible in zip(key_tuple, visibilities) if visible)
result.append(filtered_tuple)
return result
# TODO: Multidimensional arrays. e.g. ndarray
|
sixty-north/added-value
|
source/added_value/tabulator.py
|
strip_hidden
|
python
|
def strip_hidden(key_tuples, visibilities):
result = []
for key_tuple in key_tuples:
if len(key_tuple) != len(visibilities):
raise ValueError(
"length of key tuple {} is not equal to length of visibilities {}".format(
key_tuple, visibilities
)
)
filtered_tuple = tuple(item for item, visible in zip(key_tuple, visibilities) if visible)
result.append(filtered_tuple)
return result
|
Filter each tuple according to visibility.
Args:
key_tuples: A sequence of tuples of equal length (i.e. rectangular)
visibilities: A sequence of booleans equal in length to the tuples contained in key_tuples.
Returns:
A sequence equal in length to key_tuples where the items are tuples with a length corresponding
to the number of items in visibility which are True.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/tabulator.py#L402-L423
| null |
from collections import Mapping, deque
from itertools import product, chain, repeat
from added_value.items_table_directive import NonStringIterable
from added_value.multisort import tuplesorted
from added_value.sorted_frozen_set import SortedFrozenSet
from added_value.toposet import TopoSet
from added_value.util import unchain, empty_iterable
depth_marker = object()
ROOT = object()
LEAF = object()
_UNSET = object()
def breadth_first(obj, leaves=False):
queue = deque()
queue.append(obj)
queue.append(None)
level_keys = []
current_level_keys = TopoSet()
while len(queue) > 0:
node = queue.popleft()
if node is None:
level_keys.append(current_level_keys)
current_level_keys = TopoSet()
queue.append(None)
if queue[0] is None:
break
else:
continue
if isinstance(node, Mapping):
current_level_keys.update(node.keys())
for value in node.values():
queue.append(value)
elif isinstance(node, NonStringIterable):
current_level_keys.update(range(len(node)))
for value in node:
queue.append(value)
else:
if leaves:
current_level_keys.add(node)
return [
list(s) for s in level_keys[:-1]
] # Why the slice? Remove leaves? Is the last always empty?
class Missing(object):
def __str__(self):
return ""
def __repr__(self):
return self.__class__.__name__
MISSING = Missing()
def tabulate_body(
obj,
level_keys,
v_level_indexes,
h_level_indexes,
v_level_sort_keys=None,
h_level_sort_keys=None,
):
"""
Args:
v_level_indexes: A sequence of level indexes.
h_level_indexes: A sequence of level indexes.
"""
v_key_sorted = make_sorter(v_level_sort_keys, v_level_indexes)
h_key_sorted = make_sorter(h_level_sort_keys, h_level_indexes)
h_level_keys = [level_keys[level] for level in h_level_indexes]
v_level_keys = [level_keys[level] for level in v_level_indexes]
h_key_tuples = h_key_sorted(product(*h_level_keys))
v_key_tuples = v_key_sorted(product(*v_level_keys))
h_size = len(h_key_tuples)
v_size = len(v_key_tuples)
table = [[MISSING for _ in range(h_size)] for _ in range(v_size)]
for h_index, h_keys in enumerate(h_key_tuples):
for v_index, v_keys in enumerate(v_key_tuples):
key_path = [None] * len(level_keys)
merge_into_by_index(key_path, h_level_indexes, h_keys)
merge_into_by_index(key_path, v_level_indexes, v_keys)
for v_level, v_key in zip(v_level_indexes, v_keys):
key_path[v_level] = v_key
item = obj
for key in key_path:
try:
item = item[key]
except (IndexError, KeyError):
break
else: # no-break
table[v_index][h_index] = item
return table, v_key_tuples, h_key_tuples
def make_sorter(level_sort_keys, level_indexes):
if level_sort_keys is not None:
if len(level_sort_keys) != len(level_indexes):
raise ValueError(
"level_sort_keys with length {} does not correspond to level_indexes with length {}".format(
len(level_sort_keys), len(level_indexes)
)
)
def key_sorted(level_keys):
return tuplesorted(level_keys, *level_sort_keys)
else:
key_sorted = list
return key_sorted
def strip_missing_rows(table, row_keys):
stripped_table = []
stripped_v_key_tuples = []
for row, v_key_tuple in zip(table, row_keys):
if any(cell is not MISSING for cell in row):
stripped_table.append(list(row))
stripped_v_key_tuples.append(v_key_tuple)
return stripped_table, stripped_v_key_tuples
def strip_missing_columns(table, h_key_tuples):
transposed_table = transpose(table)
stripped_transposed_table, stripped_h_key_tuples = strip_missing_rows(
transposed_table, h_key_tuples
)
stripped_table = transpose(stripped_transposed_table)
return stripped_table, stripped_h_key_tuples
def merge_into_by_index(sequence, indexes, values):
for index, value in zip(indexes, values):
sequence[index] = value
def is_rectangular(seq_of_seqs):
return len(set(map(len, seq_of_seqs))) <= 1
def size_h(rows_of_columns):
try:
first_row = rows_of_columns[0]
except IndexError:
return 0
else:
return len(first_row)
def size_v(rows_of_columns):
return sum(1 for row in rows_of_columns if len(row) != 0)
def size(rows_of_columns):
return size_v(rows_of_columns), size_h(rows_of_columns)
def transpose(rows_of_columns):
return list(map(list, zip(*rows_of_columns)))
def assemble_table(
table_body, v_key_tuples, h_key_tuples, v_level_titles=None, h_level_titles=None, empty=""
):
if not is_rectangular(table_body):
raise ValueError("table_body {} is not rectangular".format(table_body))
if not is_rectangular(v_key_tuples):
raise ValueError("v_key_tuples {} is not rectangular".format(v_key_tuples))
if not is_rectangular(h_key_tuples):
raise ValueError("h_key_tuples {} is not rectangular".format(h_key_tuples))
if size_v(v_key_tuples) > 0 and (size_v(table_body) != size_v(v_key_tuples)):
raise ValueError("table body and v_key_tuples have incompatible dimensions")
h_key_tuples_transposed = transpose(h_key_tuples)
if size_h(h_key_tuples_transposed) > 0 and (
size_h(table_body) != size_h(h_key_tuples_transposed)
):
raise ValueError("table body and h_key_tuples have incompatible dimensions")
if (v_level_titles is not None) and (len(v_level_titles) != size_h(v_key_tuples)):
raise ValueError("v_level_titles and v_key_tuples have incompatible dimensions")
if (h_level_titles is not None) and (len(h_level_titles) != size_v(h_key_tuples_transposed)):
raise ValueError("h_level_titles and h_key_tuples have incompatible dimensions")
boxed_h_level_titles = (
unchain(h_level_titles)
if (h_level_titles is not None)
else repeat(empty_iterable(), size_v(h_key_tuples_transposed))
)
num_h_level_title_columns = int(bool(h_level_titles))
num_stub_columns = max(size_h(v_key_tuples), num_h_level_title_columns)
table = []
num_empty_columns = num_stub_columns - num_h_level_title_columns
for boxed_h_level_title, h_key_row in zip(boxed_h_level_titles, h_key_tuples_transposed):
row = list(chain(repeat(" ", num_empty_columns), boxed_h_level_title, h_key_row))
table.append(row)
if v_level_titles is not None:
v_level_titles_row = v_level_titles + [empty] * size_h(table_body)
table.append(v_level_titles_row)
for v_key_row, table_row in zip(v_key_tuples, table_body):
row = list(v_key_row)
row.extend(table_row)
table.append(row)
assert is_rectangular(table)
return table
def tabulate(
obj,
v_level_indexes=None,
h_level_indexes=None,
v_level_visibility=None,
h_level_visibility=None,
v_level_sort_keys=None,
h_level_sort_keys=None,
v_level_titles=None,
h_level_titles=None,
empty="",
):
"""Render a nested data structure into a two-dimensional table.
Args:
obj: The indexable data structure to be rendered, which can
either be a non-string sequence or a mapping containing other
sequences and mappings nested to arbitrarily many levels,
with all the leaf items (which are neither sequences nor
mappings, excluding strings).
v_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the vertical axis of the table. Taken together
with the levels in h_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both v_level_indexes and
h_level_indexes, but all level indexes must appear in
either v_level_indexes or h_level_indexes. If None,
the levels not used in h_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
h_level_indexes: An iterable of the zero-based indexes of
the levels for which the keys/indexes will be displayed
along the horizontal axis of the table. Taken together
with the levels in v_levels these must represent the
complete set of levels in the obj data structure. No
level index should appear in both h_level_indexes and
v_level_indexes, but all level indexes must appear in
either h_level_indexes or v_level_indexes. If None,
the levels not used in v_level_indexes will be used.
If both v_level_indexes and h_level_indexes are not
alternate indexes will be used as v_level and h_level
indexes.
v_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in v_level_indexes, and
controls whether than level of index is included in
the table stub columns. This iterable must contain
the same number of items as v_level_indexes.
h_level_visibility: An optional iterable of booleans, where each
item corresponds to a level in h_level_indexes, and
controls whether than level of index is included in
the table header rows. This iterable must contain
the same number of items as h_level_indexes.
v_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
h_level_sort_keys: An optional iterable of Keys, where each
key corresponds to a level in v_level_indexes, and
controls how that key is sorted. If None, keys are sorted
as-is.
v_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in v_level_indexes,
and which will be displayed against the row keys for that level.
If None, no titles will be included.
h_level_titles: An optional iterable of strings, where each
string is a title which corresponds to a level in h_level_indexes,
and which will be displayed against the column keys for that level.
If None, no titles will be included.
empty: An optional string value to use for empty cells.
Returns:
A list of lists representing the rows of cells.
Example:
tabulate(dict_of_dicts, [0, 1], [])
"""
level_keys = breadth_first(obj)
v_level_indexes, h_level_indexes = validate_level_indexes(
len(level_keys), v_level_indexes, h_level_indexes
)
if v_level_visibility is None:
v_level_visibility = [True] * len(v_level_indexes)
if h_level_visibility is None:
h_level_visibility = [True] * len(h_level_indexes)
table, v_key_tuples, h_key_tuples = tabulate_body(
obj, level_keys, v_level_indexes, h_level_indexes, v_level_sort_keys, h_level_sort_keys
)
table, v_key_tuples = strip_missing_rows(table, v_key_tuples)
table, h_key_tuples = strip_missing_columns(table, h_key_tuples)
v_key_tuples = strip_hidden(v_key_tuples, v_level_visibility)
h_key_tuples = strip_hidden(h_key_tuples, h_level_visibility)
return assemble_table(
table, v_key_tuples, h_key_tuples, v_level_titles, h_level_titles, empty=empty
)
def validate_level_indexes(num_levels, v_level_indexes, h_level_indexes):
"""Ensure that v_level_indexes and h_level_indexes are consistent.
Args:
num_levels: The number of levels of keys in the data structure being tabulated.
v_level_indexes: A sequence of level indexes between zero and num_levels for
the vertical axis, or None.
h_level_indexes: A sequence of level indexes between zero and num_levels for for
the horizontal axis, or None.
Returns:
A 2-tuple containing v_level_indexes and h_level_indexes sequences.
Raises:
ValueError: If v_level_indexes contains duplicate values.
ValueError: If h_level_indexes contains duplicate values.
ValueError: If v_level_indexes contains out of range values.
ValueError: If h_level_indexes contains out of range values.
ValueError: If taken together v_level_indexes and h_level_indexes
do not include all levels from zero to up to, but not including
num_levels.
ValueError: If v_level_indexes and h_level_indexes have items in
common.
"""
if num_levels < 1:
raise ValueError("num_levels {} is less than one".format(num_levels))
all_levels = SortedFrozenSet(range(num_levels))
if (h_level_indexes is None) and (v_level_indexes is None):
v_level_indexes = range(0, num_levels, 2)
h_level_indexes = range(1, num_levels, 2)
h_level_set = SortedFrozenSet(h_level_indexes)
v_level_set = SortedFrozenSet(v_level_indexes)
if h_level_indexes is None:
h_level_indexes = all_levels - v_level_set
if v_level_indexes is None:
v_level_indexes = all_levels - h_level_set
if len(h_level_indexes) != len(h_level_set):
raise ValueError("h_level_indexes contains duplicate values")
if h_level_set and ((h_level_set[0] < 0) or (h_level_set[-1] >= num_levels)):
raise ValueError("h_level_indexes contains out of range values")
if len(v_level_indexes) != len(v_level_set):
raise ValueError("v_level_indexes contains duplicate values")
if v_level_set and ((v_level_set[0] < 0) or (v_level_set[-1] >= num_levels)):
raise ValueError("v_level_indexes contains out of range values")
unmentioned_levels = all_levels - v_level_set - h_level_set
if len(unmentioned_levels) > 0:
raise ValueError(
"v_level_indexes and h_level_indexes do not together include levels {}".format(
", ".join(map(str, unmentioned_levels))
)
)
if not h_level_set.isdisjoint(v_level_set):
raise ValueError("h_level_indexes and v_level_indexes are not disjoint")
v_level_indexes = list(v_level_indexes)
h_level_indexes = list(h_level_indexes)
return v_level_indexes, h_level_indexes
# TODO: Multidimensional arrays. e.g. ndarray
|
sixty-north/added-value
|
source/added_value/util.py
|
pairwise_longest
|
python
|
def pairwise_longest(iterable, fillvalue=None):
    """s -> (s0,s1), (s1,s2), ..., (s_last, fillvalue)"""
    first, second = tee(iterable)
    next(second, None)
    return zip_longest(first, second, fillvalue=fillvalue)
|
s -> (s0,s1), (s1,s2), (s2, s3), ...
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/util.py#L19-L23
| null |
from itertools import chain, repeat, islice, zip_longest, tee, groupby
def pad_infinite(iterable, padding=None):
    """Yield the items of iterable, then repeat padding forever."""
    yield from iterable
    while True:
        yield padding
def pad(iterable, size, padding=None):
    """Return an iterator of exactly `size` items, padded at the end as needed."""
    padded = pad_infinite(iterable, padding)
    return islice(padded, size)
def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2, s3), ..."""
    left, right = tee(iterable)
    next(right, None)
    return zip(left, right)
def is_sorted(iterable, key=lambda x: x):
    """Return True if the key-mapped items are in non-decreasing order."""
    keyed = (key(item) for item in iterable)
    return all(earlier <= later for earlier, later in pairwise(keyed))
# def unchain(iterable):
# """Convert an iterable into an infinite series of lists of containing zero or one items.
# """
# if iterable is not None:
# for item in iterable:
# yield [item]
# while True:
# yield []
def one(item):
    """A generator yielding exactly the single given item."""
    yield from (item,)
def extend(iterable, item_factory=lambda: None):
    """Yield iterable's items, then the results of calling item_factory forever."""
    # iter() with a freshly created sentinel object can never terminate,
    # because item_factory can never return that exact object.
    endless = iter(item_factory, object())
    return chain(iterable, endless)
def unchain(iterable, box=None):
    """Box each item of iterable individually.

    Args:
        iterable: Any iterable.
        box: A callable applied to each single item; defaults to wrapping
            the item in a one-element list.

    Returns:
        A lazy iterator of box(item) for each item of iterable.
    """
    if box is None:
        box = lambda item: [item]
    # The previous implementation wrapped this in chain(), which is a no-op
    # when given a single iterable; map alone produces the same stream.
    return map(box, iterable)
def extended_unchain(iterable, box=list):
    """Convert an iterable into an infinite series of boxed items.

    Each item is wrapped via ``box``; once the input is exhausted, ``box()``
    is called repeatedly to yield empty containers forever.
    """
    return extend(unchain(iterable, box), box)
def empty_iterable():
    """A generator that yields nothing."""
    return
    yield  # unreachable; its presence makes this function a generator
def run_length_encode(items):
    """Lazily yield (item, run_length) pairs for consecutive equal items."""
    for value, run in groupby(items):
        yield value, sum(1 for _ in run)
def key_for_value(d, v):
    """Return the first key in d whose value equals v.

    Raises StopIteration when no value matches (matching the original
    next()-based behaviour).
    """
    for key, value in d.items():
        if value == v:
            return key
    raise StopIteration
|
sixty-north/added-value
|
setup.py
|
read_version
|
python
|
def read_version():
    """Read the version string (``__version__``) from ``source/added_value/version.py``."""
    version_file = local_file('source', 'added_value', 'version.py')
    local_vars = {}
    # Execute version.py in an empty namespace so only its assignments land
    # in local_vars; avoids importing the package during setup.
    with open(version_file) as handle:
        exec(handle.read(), {}, local_vars)  # pylint: disable=exec-used
    return local_vars['__version__']
|
Read the `(version-string, version-info)` from `added_value/version.py`.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/setup.py#L25-L31
|
[
"def local_file(*name):\n return os.path.join(\n os.path.dirname(__file__),\n *name)\n"
] |
# -*- coding: utf-8 -*-
import os
import io
from setuptools import setup, find_packages
with open('README.rst', 'r') as readme:
long_description = readme.read()
def local_file(*name):
    """Build a path to *name* relative to this file's directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, *name)
def read(name, **kwargs):
    """Return the full text content of the file at path *name*.

    Keyword Args:
        encoding: Text encoding to open the file with (default "utf8").
    """
    with io.open(
        name,
        encoding=kwargs.get("encoding", "utf8")
    ) as handle:
        return handle.read()
install_requires = [
'docutils',
'sphinx',
'natsort',
'six',
]
setup(
name='added-value',
packages=find_packages(where='source'),
package_dir={'': 'source'},
version = read_version(),
url='https://github.com/sixty-north/added-value',
download_url="https://pypi.python.org/pypi/added-value",
license='BSD',
author='Robert Smallshire',
author_email='rob@sixty-north.com',
description='Sphinx "added-value" extension',
long_description=long_description,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Documentation',
'Topic :: Utilities',
],
platforms='any',
include_package_data=True,
install_requires=install_requires,
requires=install_requires,
extras_require={
'test': ['pytest', 'pytest-cov', 'coveralls', 'beautifulsoup4', 'hypothesis'],
'docs': ['sphinx', 'sphinx_rtd_theme'],
'deploy': ['bumpversion', 'twine', 'wheel'],
},
project_urls={
"Source": "https://github.com/sixty-north/added-value",
"Documentation": "https://added-value.readthedocs.io/en/latest/",
}
)
|
sixty-north/added-value
|
source/added_value/toposet.py
|
TopoSet.update
|
python
|
def update(self, iterable):
        """Update with an ordered iterable of items.

        The relative order of the items is recorded as successor edges, so it
        is respected in the topological ordering (in the absence of cycles).
        """
        # _FILL terminates the pair chain for the final item; any cached sort
        # results are invalidated.
        for pair in pairwise_longest(iterable, fillvalue=_FILL):
            self._edges.append(pair)
        self._results = None
|
Update with an ordered iterable of items.
Args:
iterable: An ordered iterable of items. The relative
order of the items in this iterable will be respected
in the TopoSet (in the absence of cycles).
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/toposet.py#L42-L52
|
[
"def pairwise_longest(iterable, fillvalue=None):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = tee(iterable)\n next(b, None)\n return zip_longest(a, b, fillvalue=fillvalue)\n"
] |
class TopoSet(MutableSet):
    """A topologically sorted set.

    Membership is stored as a list of ordered-pair edges; the sorted view is
    computed lazily (via ``topological_sort``) and cached in ``self._results``.
    Items involved in cycles are reported separately via ``cyclic``.
    """
    def __init__(self, iterable=None):
        self._results = None     # cached Results(sorted=..., cyclic=...), or None
        self._edges = []         # ordered (predecessor, successor) pairs
        self._discarded = set()  # discards deferred until the next sort
        if iterable is not None:
            # update() is defined on this class elsewhere in the module.
            self.update(iterable)
    def __len__(self):
        return len(self._topo_sorted.sorted) + len(self._topo_sorted.cyclic)
    def __contains__(self, item):
        for pair in self._edges:
            if item in pair:
                return True
        return False
    @property
    def _topo_sorted(self):
        # Apply any deferred discards before (re)computing the sort.
        if self._discarded:
            self._do_discard()
            self._results = None
        if self._results is None:
            sorted, cyclic = topological_sort(self._edges)
            # _FILL padding markers are implementation detail; strip them.
            self._results = Results(
                sorted=[item for item in sorted if item is not _FILL],
                cyclic=[item for item in cyclic if item is not _FILL],
            )
        return self._results
    def add(self, value):
        self.update([value])
    def discard(self, value):
        # Discards are batched; flush eagerly if the backlog outgrows the
        # edge list. NOTE(review): _discarded does not appear to be cleared
        # after _do_discard — confirm whether repeated flushes are intended.
        self._discarded.add(value)
        if len(self._discarded) > len(self._edges):
            self._do_discard()
    def _do_discard(self):
        # Remove each discarded value from the edge list, bridging its
        # predecessors directly to its successors to preserve ordering.
        for value in self._discarded:
            sources = [source for source, target in self._edges if target == value]
            targets = [target for source, target in self._edges if source == value]
            if sources or targets:
                self._edges = list(filter(lambda pair: value not in pair, self._edges))
                self._edges.extend(product(sources, targets))
    def has_cycles(self):
        return bool(self._topo_sorted.cyclic)
    def __iter__(self):
        # Acyclic items first, in topological order, then cyclic items.
        for item in self._topo_sorted.sorted:
            yield item
        for item in self._topo_sorted.cyclic:
            yield item
    @property
    def sorted(self):
        return self._topo_sorted.sorted
    @property
    def cyclic(self):
        return self._topo_sorted.cyclic
    def __repr__(self):
        name = type(self).__name__
        iterable = ", ".join(map(repr, self)) if len(self) > 0 else ""
        return "{}([{}])".format(name, iterable)
|
sixty-north/added-value
|
source/added_value/pyobj_role.py
|
pyobj_role
|
python
|
def pyobj_role(make_node, name, rawtext, text, lineno, inliner, options=None, content=None):
    """Role implementation that imports a Python object and renders it.

    *make_node* is a callable (rawtext, app, prefixed_name, obj, parent,
    modname, options) -> node.  Returns the usual docutils role 2-tuple
    of (node list, system-message list); on import failure a problematic
    node and an error message are returned instead.
    """
    options = {} if options is None else options
    content = [] if content is None else content
    try:
        resolved = import_by_name(text)
    except ImportError:
        message = inliner.reporter.error("Could not locate Python object {}".format(text), line=lineno)
        problem = inliner.problematic(rawtext, rawtext, message)
        return [problem], [message]
    prefixed_name, obj, parent, modname = resolved
    sphinx_app = inliner.document.settings.env.app
    rendered = make_node(rawtext, sphinx_app, prefixed_name, obj, parent, modname, options)
    return [rendered], []
|
Include a Python object's value, rendered with the supplied make_node factory.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param make_node: A callable which accepts (rawtext, app, prefixed_name, obj, parent, modname, options) and which returns a node
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/pyobj_role.py#L10-L40
| null |
from functools import partial
from sphinx.ext.autosummary import import_by_name
def make_pyobj_role(make_node):
    """Build a docutils role function by binding *make_node* into pyobj_role."""
    return partial(pyobj_role, make_node)
|
sixty-north/added-value
|
source/added_value/toposort.py
|
topological_sort
|
python
|
def topological_sort(dependency_pairs):
    """Sort values subject to dependency constraints.

    Args:
        dependency_pairs: An iterable of (head, tail) pairs, each
            meaning *head* must precede *tail*.

    Returns:
        Results(sorted, cyclic): the topologically ordered values and
        the values left inside cycles.
    """
    in_degree = defaultdict(int)   # number of arrows pointing in
    successors = defaultdict(list)  # arrows going out of each node
    first_seen = []                 # unique heads in first-seen order
    for head, tail in dependency_pairs:
        in_degree[tail] += 1
        if head in successors:
            successors[head].append(tail)
        else:
            successors[head] = [tail]
            first_seen.append(head)
    # Seed with nodes that nothing points at, then peel layers off.
    ordered = [node for node in first_seen if node not in in_degree]
    for node in ordered:
        for succ in successors[node]:
            in_degree[succ] -= 1
            if not in_degree[succ]:
                ordered.append(succ)
    cyclic = [node for node, count in in_degree.items() if count]
    return Results(ordered, cyclic)
|
Sort values subject to dependency constraints
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/toposort.py#L6-L26
| null |
from collections import defaultdict, namedtuple
Results = namedtuple("Results", ["sorted", "cyclic"])
|
sixty-north/added-value
|
source/added_value/items_table_directive.py
|
ItemsTableDirective.interpret_obj
|
python
|
def interpret_obj(
    self,
    obj,
    v_level_indexes,
    h_level_indexes,
    v_level_visibility,
    h_level_visibility,
    v_level_sort_keys,
    h_level_sort_keys,
    v_level_titles,
    h_level_titles,
):
    """Interpret the given Python object as a rectangular table.

    Args:
        obj: A non-string iterable (sequence or mapping) to tabulate.
        v_level_indexes / h_level_indexes: level indexes shown on the
            vertical / horizontal axis (passed through to tabulate()).
        v_level_visibility / h_level_visibility: per-level visibility flags.
        v_level_sort_keys / h_level_sort_keys: per-level sort keys.
        v_level_titles / h_level_titles: per-level axis titles.

    Returns:
        A 2-tuple (rows, num_cols) where rows is a rectangular list of
        row lists of cells.

    Raises:
        The directive error (via self.error) if obj is not a non-string
        iterable.
    """
    if not isinstance(obj, NonStringIterable):
        raise self.error("Cannot make a table from object {!r}".format(obj))
    rectangular_rows = tabulate(
        obj,
        v_level_indexes=v_level_indexes,
        h_level_indexes=h_level_indexes,
        v_level_visibility=v_level_visibility,
        h_level_visibility=h_level_visibility,
        v_level_sort_keys=v_level_sort_keys,
        h_level_sort_keys=h_level_sort_keys,
        v_level_titles=v_level_titles,
        h_level_titles=h_level_titles,
    )
    # tabulate() is expected to produce rows of uniform length.
    assert is_rectangular(rectangular_rows)
    num_rows, num_cols = size(rectangular_rows)
    return rectangular_rows, num_cols
|
Interpret the given Python object as a table.
Args:
obj: A sequence (later a mapping, too)
Returns:
A list of lists represents rows of cells.
Raises:
TypeError: If the type couldn't be interpreted as a table.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/items_table_directive.py#L243-L282
|
[
"def size(rows_of_columns):\n return size_v(rows_of_columns), size_h(rows_of_columns)\n",
"def tabulate(\n obj,\n v_level_indexes=None,\n h_level_indexes=None,\n v_level_visibility=None,\n h_level_visibility=None,\n v_level_sort_keys=None,\n h_level_sort_keys=None,\n v_level_titles=None,\n h_level_titles=None,\n empty=\"\",\n):\n \"\"\"Render a nested data structure into a two-dimensional table.\n\n Args:\n obj: The indexable data structure to be rendered, which can\n either be a non-string sequence or a mapping containing other\n sequences and mappings nested to arbitrarily many levels,\n with all the leaf items (which are neither sequences nor\n mappings, excluding strings).\n\n v_level_indexes: An iterable of the zero-based indexes of\n the levels for which the keys/indexes will be displayed\n along the vertical axis of the table. Taken together\n with the levels in h_levels these must represent the\n complete set of levels in the obj data structure. No\n level index should appear in both v_level_indexes and\n h_level_indexes, but all level indexes must appear in\n either v_level_indexes or h_level_indexes. If None,\n the levels not used in h_level_indexes will be used.\n If both v_level_indexes and h_level_indexes are not\n alternate indexes will be used as v_level and h_level\n indexes.\n\n h_level_indexes: An iterable of the zero-based indexes of\n the levels for which the keys/indexes will be displayed\n along the horizontal axis of the table. Taken together\n with the levels in v_levels these must represent the\n complete set of levels in the obj data structure. No\n level index should appear in both h_level_indexes and\n v_level_indexes, but all level indexes must appear in\n either h_level_indexes or v_level_indexes. 
If None,\n the levels not used in v_level_indexes will be used.\n If both v_level_indexes and h_level_indexes are not\n alternate indexes will be used as v_level and h_level\n indexes.\n\n v_level_visibility: An optional iterable of booleans, where each\n item corresponds to a level in v_level_indexes, and\n controls whether than level of index is included in\n the table stub columns. This iterable must contain\n the same number of items as v_level_indexes.\n\n h_level_visibility: An optional iterable of booleans, where each\n item corresponds to a level in h_level_indexes, and\n controls whether than level of index is included in\n the table header rows. This iterable must contain\n the same number of items as h_level_indexes.\n\n v_level_sort_keys: An optional iterable of Keys, where each\n key corresponds to a level in v_level_indexes, and\n controls how that key is sorted. If None, keys are sorted\n as-is.\n\n h_level_sort_keys: An optional iterable of Keys, where each\n key corresponds to a level in v_level_indexes, and\n controls how that key is sorted. 
If None, keys are sorted\n as-is.\n\n v_level_titles: An optional iterable of strings, where each\n string is a title which corresponds to a level in v_level_indexes,\n and which will be displayed against the row keys for that level.\n If None, no titles will be included.\n\n h_level_titles: An optional iterable of strings, where each\n string is a title which corresponds to a level in h_level_indexes,\n and which will be displayed against the column keys for that level.\n If None, no titles will be included.\n\n empty: An optional string value to use for empty cells.\n\n Returns:\n A list of lists representing the rows of cells.\n\n Example:\n\n tabulate(dict_of_dicts, [0, 1], [])\n\n \"\"\"\n level_keys = breadth_first(obj)\n\n v_level_indexes, h_level_indexes = validate_level_indexes(\n len(level_keys), v_level_indexes, h_level_indexes\n )\n\n if v_level_visibility is None:\n v_level_visibility = [True] * len(v_level_indexes)\n if h_level_visibility is None:\n h_level_visibility = [True] * len(h_level_indexes)\n\n table, v_key_tuples, h_key_tuples = tabulate_body(\n obj, level_keys, v_level_indexes, h_level_indexes, v_level_sort_keys, h_level_sort_keys\n )\n\n table, v_key_tuples = strip_missing_rows(table, v_key_tuples)\n table, h_key_tuples = strip_missing_columns(table, h_key_tuples)\n v_key_tuples = strip_hidden(v_key_tuples, v_level_visibility)\n h_key_tuples = strip_hidden(h_key_tuples, h_level_visibility)\n return assemble_table(\n table, v_key_tuples, h_key_tuples, v_level_titles, h_level_titles, empty=empty\n )\n",
"def is_rectangular(seq_of_seqs):\n return len(set(map(len, seq_of_seqs))) <= 1\n"
] |
class ItemsTableDirective(Directive):
"""Format a data structure as a table.
If the items of the sequence are themselves sequences, they will formatted as rows.
"""
required_arguments = 1
has_content = False
option_spec = {
TITLE_OPTION: unchanged_required,
HEADER_ROWS_OPTION: directives.nonnegative_int,
STUB_COLUMNS_OPTION: directives.nonnegative_int,
HEADER_OPTION: unchanged,
H_LEVEL_TITLES_OPTION: unchanged,
V_LEVEL_TITLES_OPTION: unchanged,
H_LEVEL_INDEXES_OPTION: unchanged_required,
V_LEVEL_INDEXES_OPTION: unchanged_required,
H_LEVEL_VISIBILITY_OPTION: unchanged,
V_LEVEL_VISIBILITY_OPTION: unchanged,
H_LEVEL_SORT_ORDERS_OPTION: unchanged,
V_LEVEL_SORT_ORDERS_OPTION: unchanged,
CELL_FORMATS_OPTION: unchanged_required,
WIDTHS_OPTION: directives.value_or(("auto", "grid"), directives.positive_int_list),
CLASS_OPTION: directives.class_option,
ALIGN_OPTION: align,
NAME_OPTION: unchanged,
}
@property
def widths(self):
return self.options.get(WIDTHS_OPTION, "")
@property
def header_rows(self):
return self.options.get(HEADER_ROWS_OPTION, 0)
@property
def stub_columns(self):
return self.options.get(STUB_COLUMNS_OPTION, 0)
@property
def v_level_titles(self):
if V_LEVEL_TITLES_OPTION not in self.options:
return None
titles = self.options[V_LEVEL_TITLES_OPTION]
titles_stream = StringIO(titles)
reader = csv.reader(titles_stream, delimiter=",", quotechar='"', skipinitialspace=True, doublequote=True)
titles_row = next(reader)
stripped_titles = [cell.strip() for cell in titles_row]
return stripped_titles
@property
def h_level_titles(self):
if H_LEVEL_TITLES_OPTION not in self.options:
return None
titles = self.options[H_LEVEL_TITLES_OPTION]
titles_stream = StringIO(titles)
reader = csv.reader(titles_stream, delimiter=",", quotechar='"', skipinitialspace=True, doublequote=True)
titles_row = next(reader)
stripped_titles = [cell.strip() for cell in titles_row]
return stripped_titles
@property
def v_level_indexes(self):
text = self.options.get(V_LEVEL_INDEXES_OPTION, "")
try:
items = list(map(int, filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(V_LEVEL_INDEXES_OPTION, text)
)
return items or None
@property
def h_level_indexes(self):
text = self.options.get(H_LEVEL_INDEXES_OPTION, "")
try:
items = list(map(int, filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(H_LEVEL_INDEXES_OPTION, text)
)
return items or None
@property
def v_level_visibility(self):
text = self.options.get(V_LEVEL_VISIBILITY_OPTION, "")
try:
visibilities = list(map(lambda s: s.strip().lower(), filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(V_LEVEL_VISIBILITY_OPTION, text)
)
if not visibilities:
return None
try:
return [VISIBILITIES[visibility] for visibility in visibilities]
except KeyError:
raise self.error(
"Could not interpret option {} {!r}. Items must each be one of {}".format(
V_LEVEL_VISIBILITY_OPTION,
text,
list_conjunction(list(map(repr, VISIBILITIES.keys())), "or"),
)
)
@property
def h_level_visibility(self):
text = self.options.get(H_LEVEL_VISIBILITY_OPTION, "")
try:
visibilities = list(map(lambda s: s.strip().lower(), filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(H_LEVEL_VISIBILITY_OPTION, text)
)
if not visibilities:
return None
try:
return [VISIBILITIES[visibility] for visibility in visibilities]
except KeyError:
raise self.error(
"Could not interpret option {} {!r}. Items must each be one of {}".format(
H_LEVEL_VISIBILITY_OPTION,
text,
list_conjunction(list(map(repr, VISIBILITIES.keys())), "or"),
)
)
@property
def v_level_sort_orders(self):
text = self.options.get(V_LEVEL_SORT_ORDERS_OPTION, "")
try:
orders = list(map(lambda s: s.strip(), filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(V_LEVEL_SORT_ORDERS_OPTION, text)
)
if not orders:
return None
try:
return [SORT_ORDERS[order] for order in orders]
except KeyError:
raise self.error(
"Could not interpret option {} {!r}. Items must each be one of {}".format(
V_LEVEL_SORT_ORDERS_OPTION, text, ", ".join(SORT_ORDERS.keys())
)
)
@property
def h_level_sort_orders(self):
text = self.options.get(H_LEVEL_SORT_ORDERS_OPTION, "")
try:
orders = list(map(lambda s: s.strip(), filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(H_LEVEL_SORT_ORDERS_OPTION, text)
)
if not orders:
return None
try:
return [SORT_ORDERS[order] for order in orders]
except KeyError:
raise self.error(
"Could not interpret option {} {!r}. Items must each be one of {}".format(
H_LEVEL_SORT_ORDERS_OPTION, text, ", ".join(SORT_ORDERS.keys())
)
)
def get_column_widths(self, max_cols):
if isinstance(self.widths, list):
if len(self.widths) != max_cols:
raise self.error(
"{!s} widths tabulate not match the number of columns {!s} in table. ({} directive).".format(
self.widths, max_cols, self.name
)
)
col_widths = self.widths
elif max_cols:
col_widths = [100.0 / max_cols] * max_cols
else:
raise self.error(
"Something went wrong calculating column widths. ({} directive).".format(self.name)
)
return col_widths
def process_header_option(self):
table_head = []
max_header_cols = 0
if HEADER_OPTION in self.options:
# TODO: Have the option for this to be a Python object too.
header = self.options[HEADER_OPTION]
header_stream = StringIO(header)
reader = csv.reader(header_stream, delimiter=",", quotechar='"')
header_row = next(reader)
stripped_header_row = [cell.strip() for cell in header_row]
table_head.append(stripped_header_row)
max_header_cols = len(header_row)
return table_head, max_header_cols
def augment_cells(self, rows, source, *, span):
return self.augment_cells_span(rows, source) if span else self.augment_cells_no_span(rows, source)
def augment_cells_span(self, rows, source):
# TODO: Hardwired str transform.
# 4-tuple: morerows, morecols, offset, cellblock
# - morerows: The number of additional rows this cells spans
# - morecols: The number of additional columns this cell spans
# - offset: Offset from the line-number at the start of the table
# - cellblock: The contents of the cell
return [
[(0, span - 1, 0, StringList(str(cell).splitlines(), source=source))
for cell, span in run_length_encode(row)]
for row in rows
]
def augment_cells_no_span(self, rows, source):
"""Convert each cell into a tuple suitable for consumption by build_table.
"""
# TODO: Hardwired str transform.
# 4-tuple: morerows, morecols, offset, cellblock
# - morerows: The number of additional rows this cells spans
# - morecols: The number of additional columns this cell spans
# - offset: Offset from the line-number at the start of the table
# - cellblock: The contents of the cell
return [
[(0, 0, 0, StringList(str(cell).splitlines(), source=source)) for cell in row]
for row in rows
]
def run(self):
obj_name = self.arguments[0]
try:
prefixed_name, obj, parent, modname = import_by_name(obj_name)
except ImportError:
raise self.error(
"Could not locate Python object {} ({} directive).".format(obj_name, self.name)
)
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.interpret_obj(
obj,
self.v_level_indexes,
self.h_level_indexes,
self.v_level_visibility,
self.h_level_visibility,
self.v_level_sort_orders,
self.h_level_sort_orders,
self.v_level_titles,
self.h_level_titles,
)
max_cols = max(max_cols, max_header_cols)
col_widths = self.get_column_widths(max_cols)
table_head.extend(rows[: self.header_rows])
table_body = rows[self.header_rows :]
table_head = self.augment_cells(table_head, source=prefixed_name, span=True)
table_body = self.augment_cells(table_body, source=prefixed_name, span=False)
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(
table, self.content_offset, self.stub_columns, widths=self.widths
)
table_node["classes"] += self.options.get("class", [])
if "align" in self.options:
table_node["align"] = self.options.get("align")
self.add_name(table_node)
return [table_node]
|
sixty-north/added-value
|
source/added_value/items_table_directive.py
|
ItemsTableDirective.augment_cells_no_span
|
python
|
def augment_cells_no_span(self, rows, source):
    """Convert each cell into the 4-tuple form consumed by build_table.

    Each cell becomes (morerows, morecols, offset, cellblock) with no
    row or column spanning.
    """
    # TODO: Hardwired str transform.
    def as_cell(value):
        # morerows=0, morecols=0 (no spanning), offset=0 from table start.
        return (0, 0, 0, StringList(str(value).splitlines(), source=source))

    return [[as_cell(cell) for cell in row] for row in rows]
|
Convert each cell into a tuple suitable for consumption by build_table.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/items_table_directive.py#L300-L312
| null |
class ItemsTableDirective(Directive):
"""Format a data structure as a table.
If the items of the sequence are themselves sequences, they will formatted as rows.
"""
required_arguments = 1
has_content = False
option_spec = {
TITLE_OPTION: unchanged_required,
HEADER_ROWS_OPTION: directives.nonnegative_int,
STUB_COLUMNS_OPTION: directives.nonnegative_int,
HEADER_OPTION: unchanged,
H_LEVEL_TITLES_OPTION: unchanged,
V_LEVEL_TITLES_OPTION: unchanged,
H_LEVEL_INDEXES_OPTION: unchanged_required,
V_LEVEL_INDEXES_OPTION: unchanged_required,
H_LEVEL_VISIBILITY_OPTION: unchanged,
V_LEVEL_VISIBILITY_OPTION: unchanged,
H_LEVEL_SORT_ORDERS_OPTION: unchanged,
V_LEVEL_SORT_ORDERS_OPTION: unchanged,
CELL_FORMATS_OPTION: unchanged_required,
WIDTHS_OPTION: directives.value_or(("auto", "grid"), directives.positive_int_list),
CLASS_OPTION: directives.class_option,
ALIGN_OPTION: align,
NAME_OPTION: unchanged,
}
@property
def widths(self):
return self.options.get(WIDTHS_OPTION, "")
@property
def header_rows(self):
return self.options.get(HEADER_ROWS_OPTION, 0)
@property
def stub_columns(self):
return self.options.get(STUB_COLUMNS_OPTION, 0)
@property
def v_level_titles(self):
if V_LEVEL_TITLES_OPTION not in self.options:
return None
titles = self.options[V_LEVEL_TITLES_OPTION]
titles_stream = StringIO(titles)
reader = csv.reader(titles_stream, delimiter=",", quotechar='"', skipinitialspace=True, doublequote=True)
titles_row = next(reader)
stripped_titles = [cell.strip() for cell in titles_row]
return stripped_titles
@property
def h_level_titles(self):
if H_LEVEL_TITLES_OPTION not in self.options:
return None
titles = self.options[H_LEVEL_TITLES_OPTION]
titles_stream = StringIO(titles)
reader = csv.reader(titles_stream, delimiter=",", quotechar='"', skipinitialspace=True, doublequote=True)
titles_row = next(reader)
stripped_titles = [cell.strip() for cell in titles_row]
return stripped_titles
@property
def v_level_indexes(self):
text = self.options.get(V_LEVEL_INDEXES_OPTION, "")
try:
items = list(map(int, filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(V_LEVEL_INDEXES_OPTION, text)
)
return items or None
@property
def h_level_indexes(self):
text = self.options.get(H_LEVEL_INDEXES_OPTION, "")
try:
items = list(map(int, filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(H_LEVEL_INDEXES_OPTION, text)
)
return items or None
@property
def v_level_visibility(self):
text = self.options.get(V_LEVEL_VISIBILITY_OPTION, "")
try:
visibilities = list(map(lambda s: s.strip().lower(), filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(V_LEVEL_VISIBILITY_OPTION, text)
)
if not visibilities:
return None
try:
return [VISIBILITIES[visibility] for visibility in visibilities]
except KeyError:
raise self.error(
"Could not interpret option {} {!r}. Items must each be one of {}".format(
V_LEVEL_VISIBILITY_OPTION,
text,
list_conjunction(list(map(repr, VISIBILITIES.keys())), "or"),
)
)
@property
def h_level_visibility(self):
text = self.options.get(H_LEVEL_VISIBILITY_OPTION, "")
try:
visibilities = list(map(lambda s: s.strip().lower(), filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(H_LEVEL_VISIBILITY_OPTION, text)
)
if not visibilities:
return None
try:
return [VISIBILITIES[visibility] for visibility in visibilities]
except KeyError:
raise self.error(
"Could not interpret option {} {!r}. Items must each be one of {}".format(
H_LEVEL_VISIBILITY_OPTION,
text,
list_conjunction(list(map(repr, VISIBILITIES.keys())), "or"),
)
)
@property
def v_level_sort_orders(self):
text = self.options.get(V_LEVEL_SORT_ORDERS_OPTION, "")
try:
orders = list(map(lambda s: s.strip(), filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(V_LEVEL_SORT_ORDERS_OPTION, text)
)
if not orders:
return None
try:
return [SORT_ORDERS[order] for order in orders]
except KeyError:
raise self.error(
"Could not interpret option {} {!r}. Items must each be one of {}".format(
V_LEVEL_SORT_ORDERS_OPTION, text, ", ".join(SORT_ORDERS.keys())
)
)
@property
def h_level_sort_orders(self):
text = self.options.get(H_LEVEL_SORT_ORDERS_OPTION, "")
try:
orders = list(map(lambda s: s.strip(), filter(None, text.split(","))))
except ValueError:
raise self.error(
"Could not interpret option {} {!r}".format(H_LEVEL_SORT_ORDERS_OPTION, text)
)
if not orders:
return None
try:
return [SORT_ORDERS[order] for order in orders]
except KeyError:
raise self.error(
"Could not interpret option {} {!r}. Items must each be one of {}".format(
H_LEVEL_SORT_ORDERS_OPTION, text, ", ".join(SORT_ORDERS.keys())
)
)
def get_column_widths(self, max_cols):
if isinstance(self.widths, list):
if len(self.widths) != max_cols:
raise self.error(
"{!s} widths tabulate not match the number of columns {!s} in table. ({} directive).".format(
self.widths, max_cols, self.name
)
)
col_widths = self.widths
elif max_cols:
col_widths = [100.0 / max_cols] * max_cols
else:
raise self.error(
"Something went wrong calculating column widths. ({} directive).".format(self.name)
)
return col_widths
def process_header_option(self):
table_head = []
max_header_cols = 0
if HEADER_OPTION in self.options:
# TODO: Have the option for this to be a Python object too.
header = self.options[HEADER_OPTION]
header_stream = StringIO(header)
reader = csv.reader(header_stream, delimiter=",", quotechar='"')
header_row = next(reader)
stripped_header_row = [cell.strip() for cell in header_row]
table_head.append(stripped_header_row)
max_header_cols = len(header_row)
return table_head, max_header_cols
def interpret_obj(
self,
obj,
v_level_indexes,
h_level_indexes,
v_level_visibility,
h_level_visibility,
v_level_sort_keys,
h_level_sort_keys,
v_level_titles,
h_level_titles,
):
"""Interpret the given Python object as a table.
Args:
obj: A sequence (later a mapping, too)
Returns:
A list of lists represents rows of cells.
Raises:
TypeError: If the type couldn't be interpreted as a table.
"""
if not isinstance(obj, NonStringIterable):
raise self.error("Cannot make a table from object {!r}".format(obj))
rectangular_rows = tabulate(
obj,
v_level_indexes=v_level_indexes,
h_level_indexes=h_level_indexes,
v_level_visibility=v_level_visibility,
h_level_visibility=h_level_visibility,
v_level_sort_keys=v_level_sort_keys,
h_level_sort_keys=h_level_sort_keys,
v_level_titles=v_level_titles,
h_level_titles=h_level_titles,
)
assert is_rectangular(rectangular_rows)
num_rows, num_cols = size(rectangular_rows)
return rectangular_rows, num_cols
def augment_cells(self, rows, source, *, span):
return self.augment_cells_span(rows, source) if span else self.augment_cells_no_span(rows, source)
def augment_cells_span(self, rows, source):
# TODO: Hardwired str transform.
# 4-tuple: morerows, morecols, offset, cellblock
# - morerows: The number of additional rows this cells spans
# - morecols: The number of additional columns this cell spans
# - offset: Offset from the line-number at the start of the table
# - cellblock: The contents of the cell
return [
[(0, span - 1, 0, StringList(str(cell).splitlines(), source=source))
for cell, span in run_length_encode(row)]
for row in rows
]
def run(self):
obj_name = self.arguments[0]
try:
prefixed_name, obj, parent, modname = import_by_name(obj_name)
except ImportError:
raise self.error(
"Could not locate Python object {} ({} directive).".format(obj_name, self.name)
)
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.interpret_obj(
obj,
self.v_level_indexes,
self.h_level_indexes,
self.v_level_visibility,
self.h_level_visibility,
self.v_level_sort_orders,
self.h_level_sort_orders,
self.v_level_titles,
self.h_level_titles,
)
max_cols = max(max_cols, max_header_cols)
col_widths = self.get_column_widths(max_cols)
table_head.extend(rows[: self.header_rows])
table_body = rows[self.header_rows :]
table_head = self.augment_cells(table_head, source=prefixed_name, span=True)
table_body = self.augment_cells(table_body, source=prefixed_name, span=False)
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(
table, self.content_offset, self.stub_columns, widths=self.widths
)
table_node["classes"] += self.options.get("class", [])
if "align" in self.options:
table_node["align"] = self.options.get("align")
self.add_name(table_node)
return [table_node]
|
sixty-north/added-value
|
source/added_value/str_role.py
|
make_str_node
|
python
|
def make_str_node(rawtext, app, prefixed_name, obj, parent, modname, options):
    """Render *obj* to a docutils Text node using str().

    :param rawtext: Text being replaced with the node.
    :param app: Sphinx application context (unused here).
    :param prefixed_name: The dotted Python name for obj.
    :param obj: The Python object to be rendered to text.
    :param parent: The parent Python object of obj (unused here).
    :param modname: The name of the module containing obj (unused here).
    :param options: Options dictionary passed to the role func (unused here).
    """
    return nodes.Text(str(obj), rawsource=rawtext)
|
Render a Python object to text using the str() function.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param prefixed_name: The dotted Python name for obj.
:param obj: The Python object to be rendered to text.
:param parent: The parent Python object of obj.
:param module: The name of the module containing obj.
:param options: Options dictionary passed to role func.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/str_role.py#L6-L19
| null |
from docutils import nodes
from added_value.pyobj_role import make_pyobj_role
str_role = make_pyobj_role(make_str_node)
|
sixty-north/added-value
|
source/added_value/multisort.py
|
multisorted
|
python
|
def multisorted(items, *keys):
    """Sort by multiple attributes.

    Args:
        items: An iterable series to be sorted.
        *keys: Key objects (exposing ``func`` and ``reverse``) which
            extract key values from the items.  The first key is the
            most significant and the last the least significant.  With
            no keys, items are sorted in ascending natural order.

    Returns:
        A list of items sorted according to *keys*.
    """
    if not keys:  # idiomatic emptiness test instead of len(...) == 0
        keys = [asc()]
    # Apply the least significant key first; sorted() is stable, so each
    # more significant pass preserves the ordering of earlier passes.
    for key in reversed(keys):
        items = sorted(items, key=key.func, reverse=key.reverse)
    return items
|
Sort by multiple attributes.
Args:
items: An iterable series to be sorted.
*keys: Key objects which extract key values from the items.
The first key will be the most significant, and the
last key the least significant. If no key functions
are provided, the items will be sorted in ascending
natural order.
Returns:
A list of items sorted according to keys.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/multisort.py#L42-L59
|
[
"def asc(func=identity):\n \"\"\"Obtain a key for ascending sort.\"\"\"\n return Key(func, reverse=False)\n"
] |
from natsort import natsort_keygen
class Key(object):
def __init__(self, func, reverse=False):
self._func = func
self._reverse = reverse
@property
def func(self):
return self._func
@property
def reverse(self):
return self._reverse
def identity(x):
return x
def asc(func=identity):
"""Obtain a key for ascending sort."""
return Key(func, reverse=False)
def dec(func=identity):
"""Obtain a key for descending sort."""
return Key(func, reverse=True)
# Returns the same key value for all x, so
# stable sort will maintain order.
_as_is = Key(func=lambda x: 0)
def as_is():
"""Obtain a key for a neutral sort."""
return _as_is
def tuplesorted(items, *keys):
"""Sort by tuples with a different key for each item.
Args:
items: An iterable series of sequences (typically tuples)
*keys: Key objects which transform individual elements of
each tuple into sort keys. The zeroth object
transforms the zeroth element of each tuple, the first
key object transforms the first element of each tuple,
and so on.
Returns:
A list of items sorted according to keys.
"""
# Transform the keys so each works on one item of the tuple
tuple_keys = [
Key(func=lambda t, i=index, k=key: k.func(t[i]), reverse=key.reverse)
for index, key in enumerate(keys)
]
return multisorted(items, *tuple_keys)
|
sixty-north/added-value
|
source/added_value/multisort.py
|
tuplesorted
|
python
|
def tuplesorted(items, *keys):
    """Sort sequences (typically tuples) with a different key per position.

    Args:
        items: An iterable series of sequences (typically tuples).
        *keys: Key objects; the zeroth transforms the zeroth element of
            each tuple, the first transforms the first element, and so on.

    Returns:
        A list of items sorted according to *keys*.
    """
    def positional(index, key):
        # Bind index/key eagerly so each derived key targets one tuple slot.
        return Key(func=lambda t: key.func(t[index]), reverse=key.reverse)

    derived_keys = [positional(i, k) for i, k in enumerate(keys)]
    return multisorted(items, *derived_keys)
|
Sort by tuples with a different key for each item.
Args:
items: An iterable series of sequences (typically tuples)
*keys: Key objects which transform individual elements of
each tuple into sort keys. The zeroth object
transforms the zeroth element of each tuple, the first
key object transforms the first element of each tuple,
and so on.
Returns:
A list of items sorted according to keys.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/multisort.py#L62-L81
|
[
"def multisorted(items, *keys):\n \"\"\"Sort by multiple attributes.\n\n Args:\n items: An iterable series to be sorted.\n *keys: Key objects which extract key values from the items.\n The first key will be the most significant, and the\n last key the least significant. If no key functions\n are provided, the items will be sorted in ascending\n natural order.\n Returns:\n A list of items sorted according to keys.\n \"\"\"\n if len(keys) == 0:\n keys = [asc()]\n for key in reversed(keys):\n items = sorted(items, key=key.func, reverse=key.reverse)\n return items\n"
] |
from natsort import natsort_keygen
class Key(object):
    """Pairs a key-extraction callable with a sort direction."""

    def __init__(self, func, reverse=False):
        # Keep both pieces private; they are exposed read-only below.
        self._func = func
        self._reverse = reverse

    @property
    def func(self):
        """The callable that extracts a sort key from an item."""
        return self._func

    @property
    def reverse(self):
        """True when sorting with this key should be descending."""
        return self._reverse
def identity(x):
    # Default key extractor: returns the item unchanged.
    return x
def asc(func=identity):
    """Obtain a key for ascending sort."""
    # Ascending is sorted()'s default direction, so reverse stays False.
    return Key(func, reverse=False)
def dec(func=identity):
    """Obtain a key for descending sort."""
    # reverse=True flips sorted()'s default ascending order.
    return Key(func, reverse=True)
# Returns the same key value for all x, so
# stable sort will maintain order.
_as_is = Key(func=lambda x: 0)
def as_is():
    """Obtain a key for a neutral sort."""
    # Shares one module-level Key whose constant output makes a stable
    # sort leave the existing order untouched for this criterion.
    return _as_is
def multisorted(items, *keys):
    """Sort by multiple attributes.

    Args:
        items: An iterable series to be sorted.
        *keys: Key objects which extract key values from the items.
            The first key is the most significant, the last the least
            significant. With no keys, items are sorted in ascending
            natural order.

    Returns:
        A list of items sorted according to keys.
    """
    criteria = keys if keys else [asc()]
    # sorted() is stable, so applying the least-significant criterion
    # first and the most-significant last yields a multi-key ordering.
    result = items
    for criterion in reversed(criteria):
        result = sorted(result, key=criterion.func, reverse=criterion.reverse)
    return result
|
sixty-north/added-value
|
source/added_value/format_role.py
|
format_role
|
python
|
def format_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Render a Python object's value into the document via format().

    Role text is "dotted.name, format_spec". Returns a 2-tuple of
    (nodes to insert, system messages); either list may be empty.
    """
    if options is None:
        options = {}
    if content is None:
        content = []
    # Split on the first comma only; the format spec may be empty.
    name, _, format_spec = tuple(field.strip() for field in text.partition(","))
    try:
        prefixed_name, obj, parent, modname = import_by_name(name)
    except ImportError:
        # Report the failure inline rather than aborting the build.
        msg = inliner.reporter.error("Could not locate Python object {}".format(text), line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    app = inliner.document.settings.env.app  # NOTE(review): assigned but unused here — confirm whether needed
    try:
        formatted_value = format(obj, format_spec)
    except ValueError as value_error:
        # format() raises ValueError for an invalid format spec.
        msg = inliner.reporter.error(
            "Format error in {}: {}".format(text, value_error), line=lineno
        )
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    node = nodes.Text(formatted_value, rawsource=rawtext)
    return [node], []
|
Include Python object value, rendering it to text using str.
Returns a 2-part tuple containing a list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param make_node: A callable which accepts (rawtext, app, prefixed_name, obj, parent, modname, options) and which returns a node
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/format_role.py#L5-L47
| null |
from docutils import nodes
from sphinx.ext.autosummary import import_by_name
|
sixty-north/added-value
|
source/added_value/any_items_role.py
|
make_any_items_node
|
python
|
def make_any_items_node(rawtext, app, prefixed_name, obj, parent, modname, options):
    """Render sequence *obj* as a comma-separated list joined with "or"."""
    # e.g. ["a", "b", "c"] -> "a, b, or c" (per list_conjunction's rules).
    text = list_conjunction(obj, "or")
    # Plain docutils text node; rawsource preserves the original markup.
    node = nodes.Text(text, rawsource=rawtext)
    return node
|
Render a Python sequence as a comma-separated list, with an "or" for the final item.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param prefixed_name: The dotted Python name for obj.
:param obj: The Python object to be rendered to text.
:param parent: The parent Python object of obj.
:param modname: The name of the module containing obj.
:param options: Options dictionary passed to role func.
|
train
|
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/any_items_role.py#L7-L20
|
[
"def list_conjunction(sequence, word):\n if len(sequence) == 0:\n text = \"\"\n elif len(sequence) == 1:\n text = str(sequence)\n elif len(sequence) == 2:\n text = \"{!s} {} {!s}\".format(sequence[0], word, sequence[1])\n else:\n all_but_last = \", \".join(map(str, sequence[:-1]))\n last = sequence[-1]\n text = \"{}, {} {!s}\".format(all_but_last, word, last)\n return text\n"
] |
from docutils import nodes
from added_value.grammatical_conjunctions import list_conjunction
from added_value.pyobj_role import make_pyobj_role
any_items_role = make_pyobj_role(make_any_items_node)
|
msiedlarek/wiring
|
wiring/interface.py
|
get_implemented_interfaces
|
python
|
def get_implemented_interfaces(cls):
    """
    Returns a set of :term:`interfaces <interface>` declared as implemented by
    class `cls`.
    """
    # A class with its own declaration wins outright.
    if hasattr(cls, '__interfaces__'):
        return cls.__interfaces__
    # Otherwise merge whatever the ancestors declare, walking the MRO
    # (skipping cls itself) recursively.
    collected = set()
    for ancestor in inspect.getmro(cls)[1:]:
        collected = collected.union(get_implemented_interfaces(ancestor))
    return collected
|
Returns a set of :term:`interfaces <interface>` declared as implemented by
class `cls`.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/interface.py#L287-L301
| null |
import collections
import inspect
import operator
import six
__all__ = (
'InterfaceComplianceError',
'MissingAttributeError',
'MethodValidationError',
'Attribute',
'Method',
'Interface',
'get_implemented_interfaces',
'set_implemented_interfaces',
'add_implemented_interfaces',
'implements',
'implements_only',
'isimplementation',
)
class InterfaceComplianceError(Exception):
"""
Common base for all interface compliance validation errors.
"""
class MissingAttributeError(InterfaceComplianceError):
"""
Exception raised when an object is validated against :py:class:`Interface`
(by :py:meth:`Interface.check_compliance`) and is found to be missing
a required attribute.
"""
def __init__(self, attribute_name):
self.attribute_name = attribute_name
"""Name of the missing attribute."""
def __str__(self):
return "Validated object is missing `{attribute}` attribute.".format(
attribute=self.attribute_name
)
class MethodValidationError(InterfaceComplianceError):
"""
Exception raised when a function is validated against :py:class:`Method`
specification (e.g. by :py:meth:`Interface.check_compliance`) and some of
the arguments differ.
"""
def __init__(self, function, expected_argspec, observed_argspec):
self.function = function
"""
Function object that didn't pass the check.
"""
self.expected_argspec = expected_argspec
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying expected function arguments.
"""
self.observed_argspec = observed_argspec
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying arguments that the validated
function actually takes.
"""
def __str__(self):
return (
"Function `{function}` does not comply with interface definition."
" Expected arguments: {expected}"
" Observed arguments: {observed}"
).format(
function=self.function.__name__,
expected=inspect.formatargspec(*self.expected_argspec),
observed=inspect.formatargspec(*self.observed_argspec)
)
class Attribute(object):
"""
This class stores a specification of an object attribute, namely its
docstring. It is used by :py:class:`InterfaceMetaclass` to store
information about required attributes of an :term:`interface`.
"""
def __init__(self, docstring=None):
self.docstring = docstring
"""
Docstring of a described attribute.
"""
def __repr__(self):
if self.docstring:
return '<Attribute("{}")>'.format(self.docstring)
else:
return '<Attribute()>'
class Method(Attribute):
"""
This class stores a specification of a method, describing its arguments and
holding its docstring. It is used by :py:class:`InterfaceMetaclass` to
store information about required methods of an :term:`interface`.
"""
def __init__(self, argument_specification, docstring=None):
super(Method, self).__init__(docstring)
self.argument_specification = argument_specification
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying arguments taken by described method.
These will not include implied `self` argument.
"""
def __repr__(self):
return '<Method{}>'.format(
inspect.formatargspec(*self.argument_specification)
)
def check_compliance(self, function):
"""
Checks if a given `function` complies with this specification. If an
inconsistency is detected a :py:exc:`MethodValidationError` exception
is raised.
.. note::
This method will not work as expected when `function` is an unbound
method (``SomeClass.some_method``), as in Python 3 there is no way
to recognize that this is in fact a method. Therefore, the implied
`self` argument will not be ignored.
:raises:
:py:exc:`MethodValidationError`
"""
argument_specification = _get_argument_specification(function)
if inspect.ismethod(function):
# Remove implied `self` argument from specification if function is
# a method.
argument_specification = argument_specification._replace(
args=argument_specification.args[1:]
)
if argument_specification != self.argument_specification:
raise MethodValidationError(
function,
self.argument_specification,
argument_specification
)
class InterfaceMetaclass(type):
"""
This metaclass analyzes declared attributes and methods of new
:term:`interface` classes and turns them into a dictionary of
:py:class:`Attribute` and :py:class:`Method` specifications which is stored
as `attributes` dictionary of an interface class. It also handles
inheritance of those declarations.
It also collects (and stores in `implied` attribute) a set of this and all
base interfaces, as it never changes after the class is declared and is
very commonly needed.
"""
def __new__(cls, interface_name, bases, attributes):
ignored_attribute_names = (
'__module__',
'__qualname__',
'__locals__',
'__doc__',
)
ignored_attributes = {}
processed_attributes = {}
# Filter out private attributes which should not be treated as
# interface declarations.
for name, value in six.iteritems(attributes):
if (isinstance(value, classmethod) or
isinstance(value, staticmethod) or
name in ignored_attribute_names):
ignored_attributes[name] = value
else:
processed_attributes[name] = value
interface = super(InterfaceMetaclass, cls).__new__(
cls,
interface_name,
bases,
ignored_attributes
)
# Precalculate a tuple of this and all base interfaces in method
# resolution order.
interface.implied = tuple((
ancestor for ancestor in inspect.getmro(interface)
if cls._is_interface_class(ancestor)
))
interface.attributes = {}
for base in reversed(bases):
if cls._is_interface_class(base):
interface.attributes.update(base.attributes)
for name, value in six.iteritems(processed_attributes):
if isinstance(value, Attribute):
interface.attributes[name] = value
elif inspect.isfunction(value):
docstring = inspect.getdoc(value)
argument_specification = _get_argument_specification(value)
interface.attributes[name] = Method(
argument_specification,
docstring=docstring
)
else:
if isinstance(value, six.string_types):
docstring = value
else:
docstring = None
interface.attributes[name] = Attribute(docstring=docstring)
return interface
@classmethod
def _is_interface_class(cls, other_class):
if not isinstance(other_class, cls):
# Other class didn't come from this metaclass.
return False
if all(map(lambda b: not isinstance(b, cls), other_class.__bases__)):
# Other class is Interface class from this module.
return False
return True
@six.add_metaclass(InterfaceMetaclass)
class Interface(object):
__doc__ = """
A base class for :term:`interface` classes, using the
:py:class:`InterfaceMetaclass`.
""" + InterfaceMetaclass.__doc__
implied = frozenset()
"""
A `frozenset` of this and all base :term:`interfaces <interface>`.
"""
attributes = {}
"""
Dictionary describing provided attributes, including methods. Keys are
attribute names and values are :py:class:`Attribute` or :py:class:`Method`
instances.
"""
@classmethod
def check_compliance(cls, instance):
"""
Checks if given `instance` complies with this :term:`interface`. If
`instance` is found to be invalid a :py:exc:`InterfaceComplianceError`
subclass is raised.
`instance`'s class doesn't have to declare it implements an interface
to be validated against it.
.. note::
Classes cannot be validated against an interface, because instance
attributes couldn't be checked.
:raises:
:py:exc:`MissingAttributeError`,
:py:exc:`MethodValidationError`
"""
if inspect.isclass(instance):
raise TypeError(
"Only instances, not classes, can be validated against an"
" interface."
)
for name, value in six.iteritems(cls.attributes):
if not hasattr(instance, name):
raise MissingAttributeError(name)
if isinstance(value, Method):
value.check_compliance(getattr(instance, name))
def set_implemented_interfaces(cls, interfaces):
"""
Declares :term:`interfaces <interface>` as implemented by class `cls`.
Those already declared are overridden.
"""
setattr(
cls,
'__interfaces__',
frozenset(
six.moves.reduce(
lambda x, y: x.union(y),
map(operator.attrgetter('implied'), interfaces),
set()
)
)
)
def add_implemented_interfaces(cls, interfaces):
"""
Adds :term:`interfaces <interface>` to those already declared as
implemented by class `cls`.
"""
implemented = set(
six.moves.reduce(
lambda x, y: x.union(y),
map(operator.attrgetter('implied'), interfaces),
set()
)
)
implemented.update(*map(
get_implemented_interfaces,
inspect.getmro(cls)
))
setattr(cls, '__interfaces__', frozenset(implemented))
def implements(*interfaces):
"""
Decorator declaring :term:`interfaces <interface>` implemented by
a decorated class.
"""
def wrapper(cls):
add_implemented_interfaces(cls, interfaces)
return cls
return wrapper
def implements_only(*interfaces):
"""
Decorator declaring :term:`interfaces <interface>` implemented by
a decorated class. Previous declarations including inherited declarations
are overridden.
"""
def wrapper(cls):
set_implemented_interfaces(cls, interfaces)
return cls
return wrapper
def isimplementation(obj, interfaces):
    """
    Returns `True` if `obj` is a class implementing all of `interfaces` or an
    instance of such class.

    `interfaces` can be a single :term:`interface` class or an iterable of
    interface classes.
    """
    if not inspect.isclass(obj):
        # Fix: the recursive result was previously computed and discarded,
        # so the instance branch was dead code and execution fell through
        # to the class-only check below.
        return isimplementation(obj.__class__, interfaces)
    # NOTE(review): `collections.Iterable` is removed in Python 3.10+; on a
    # Python-3-only codebase this should be `collections.abc.Iterable`.
    if not isinstance(interfaces, collections.Iterable):
        interfaces = [interfaces]
    return frozenset(interfaces).issubset(
        get_implemented_interfaces(obj)
    )
if six.PY3:
_get_argument_specification = inspect.getfullargspec
else:
_get_argument_specification = inspect.getargspec
|
msiedlarek/wiring
|
wiring/interface.py
|
set_implemented_interfaces
|
python
|
def set_implemented_interfaces(cls, interfaces):
    """
    Declares :term:`interfaces <interface>` as implemented by class `cls`,
    replacing any previous declaration.
    """
    # Union the full implication sets of all given interfaces and freeze
    # the result; assignment overrides any inherited declaration.
    merged = set()
    for interface in interfaces:
        merged |= set(interface.implied)
    cls.__interfaces__ = frozenset(merged)
|
Declares :term:`interfaces <interface>` as implemented by class `cls`.
Those already declared are overridden.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/interface.py#L304-L319
| null |
import collections
import inspect
import operator
import six
__all__ = (
'InterfaceComplianceError',
'MissingAttributeError',
'MethodValidationError',
'Attribute',
'Method',
'Interface',
'get_implemented_interfaces',
'set_implemented_interfaces',
'add_implemented_interfaces',
'implements',
'implements_only',
'isimplementation',
)
class InterfaceComplianceError(Exception):
"""
Common base for all interface compliance validation errors.
"""
class MissingAttributeError(InterfaceComplianceError):
"""
Exception raised when an object is validated against :py:class:`Interface`
(by :py:meth:`Interface.check_compliance`) and is found to be missing
a required attribute.
"""
def __init__(self, attribute_name):
self.attribute_name = attribute_name
"""Name of the missing attribute."""
def __str__(self):
return "Validated object is missing `{attribute}` attribute.".format(
attribute=self.attribute_name
)
class MethodValidationError(InterfaceComplianceError):
"""
Exception raised when a function is validated against :py:class:`Method`
specification (e.g. by :py:meth:`Interface.check_compliance`) and some of
the arguments differ.
"""
def __init__(self, function, expected_argspec, observed_argspec):
self.function = function
"""
Function object that didn't pass the check.
"""
self.expected_argspec = expected_argspec
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying expected function arguments.
"""
self.observed_argspec = observed_argspec
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying arguments that the validated
function actually takes.
"""
def __str__(self):
return (
"Function `{function}` does not comply with interface definition."
" Expected arguments: {expected}"
" Observed arguments: {observed}"
).format(
function=self.function.__name__,
expected=inspect.formatargspec(*self.expected_argspec),
observed=inspect.formatargspec(*self.observed_argspec)
)
class Attribute(object):
"""
This class stores a specification of an object attribute, namely its
docstring. It is used by :py:class:`InterfaceMetaclass` to store
information about required attributes of an :term:`interface`.
"""
def __init__(self, docstring=None):
self.docstring = docstring
"""
Docstring of a described attribute.
"""
def __repr__(self):
if self.docstring:
return '<Attribute("{}")>'.format(self.docstring)
else:
return '<Attribute()>'
class Method(Attribute):
"""
This class stores a specification of a method, describing its arguments and
holding its docstring. It is used by :py:class:`InterfaceMetaclass` to
store information about required methods of an :term:`interface`.
"""
def __init__(self, argument_specification, docstring=None):
super(Method, self).__init__(docstring)
self.argument_specification = argument_specification
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying arguments taken by described method.
These will not include implied `self` argument.
"""
def __repr__(self):
return '<Method{}>'.format(
inspect.formatargspec(*self.argument_specification)
)
def check_compliance(self, function):
"""
Checks if a given `function` complies with this specification. If an
inconsistency is detected a :py:exc:`MethodValidationError` exception
is raised.
.. note::
This method will not work as expected when `function` is an unbound
method (``SomeClass.some_method``), as in Python 3 there is no way
to recognize that this is in fact a method. Therefore, the implied
`self` argument will not be ignored.
:raises:
:py:exc:`MethodValidationError`
"""
argument_specification = _get_argument_specification(function)
if inspect.ismethod(function):
# Remove implied `self` argument from specification if function is
# a method.
argument_specification = argument_specification._replace(
args=argument_specification.args[1:]
)
if argument_specification != self.argument_specification:
raise MethodValidationError(
function,
self.argument_specification,
argument_specification
)
class InterfaceMetaclass(type):
"""
This metaclass analyzes declared attributes and methods of new
:term:`interface` classes and turns them into a dictionary of
:py:class:`Attribute` and :py:class:`Method` specifications which is stored
as `attributes` dictionary of an interface class. It also handles
inheritance of those declarations.
It also collects (and stores in `implied` attribute) a set of this and all
base interfaces, as it never changes after the class is declared and is
very commonly needed.
"""
def __new__(cls, interface_name, bases, attributes):
ignored_attribute_names = (
'__module__',
'__qualname__',
'__locals__',
'__doc__',
)
ignored_attributes = {}
processed_attributes = {}
# Filter out private attributes which should not be treated as
# interface declarations.
for name, value in six.iteritems(attributes):
if (isinstance(value, classmethod) or
isinstance(value, staticmethod) or
name in ignored_attribute_names):
ignored_attributes[name] = value
else:
processed_attributes[name] = value
interface = super(InterfaceMetaclass, cls).__new__(
cls,
interface_name,
bases,
ignored_attributes
)
# Precalculate a tuple of this and all base interfaces in method
# resolution order.
interface.implied = tuple((
ancestor for ancestor in inspect.getmro(interface)
if cls._is_interface_class(ancestor)
))
interface.attributes = {}
for base in reversed(bases):
if cls._is_interface_class(base):
interface.attributes.update(base.attributes)
for name, value in six.iteritems(processed_attributes):
if isinstance(value, Attribute):
interface.attributes[name] = value
elif inspect.isfunction(value):
docstring = inspect.getdoc(value)
argument_specification = _get_argument_specification(value)
interface.attributes[name] = Method(
argument_specification,
docstring=docstring
)
else:
if isinstance(value, six.string_types):
docstring = value
else:
docstring = None
interface.attributes[name] = Attribute(docstring=docstring)
return interface
@classmethod
def _is_interface_class(cls, other_class):
if not isinstance(other_class, cls):
# Other class didn't come from this metaclass.
return False
if all(map(lambda b: not isinstance(b, cls), other_class.__bases__)):
# Other class is Interface class from this module.
return False
return True
@six.add_metaclass(InterfaceMetaclass)
class Interface(object):
__doc__ = """
A base class for :term:`interface` classes, using the
:py:class:`InterfaceMetaclass`.
""" + InterfaceMetaclass.__doc__
implied = frozenset()
"""
A `frozenset` of this and all base :term:`interfaces <interface>`.
"""
attributes = {}
"""
Dictionary describing provided attributes, including methods. Keys are
attribute names and values are :py:class:`Attribute` or :py:class:`Method`
instances.
"""
@classmethod
def check_compliance(cls, instance):
"""
Checks if given `instance` complies with this :term:`interface`. If
`instance` is found to be invalid a :py:exc:`InterfaceComplianceError`
subclass is raised.
`instance`'s class doesn't have to declare it implements an interface
to be validated against it.
.. note::
Classes cannot be validated against an interface, because instance
attributes couldn't be checked.
:raises:
:py:exc:`MissingAttributeError`,
:py:exc:`MethodValidationError`
"""
if inspect.isclass(instance):
raise TypeError(
"Only instances, not classes, can be validated against an"
" interface."
)
for name, value in six.iteritems(cls.attributes):
if not hasattr(instance, name):
raise MissingAttributeError(name)
if isinstance(value, Method):
value.check_compliance(getattr(instance, name))
def get_implemented_interfaces(cls):
"""
Returns a set of :term:`interfaces <interface>` declared as implemented by
class `cls`.
"""
if hasattr(cls, '__interfaces__'):
return cls.__interfaces__
return six.moves.reduce(
lambda x, y: x.union(y),
map(
get_implemented_interfaces,
inspect.getmro(cls)[1:]
),
set()
)
def add_implemented_interfaces(cls, interfaces):
"""
Adds :term:`interfaces <interface>` to those already declared as
implemented by class `cls`.
"""
implemented = set(
six.moves.reduce(
lambda x, y: x.union(y),
map(operator.attrgetter('implied'), interfaces),
set()
)
)
implemented.update(*map(
get_implemented_interfaces,
inspect.getmro(cls)
))
setattr(cls, '__interfaces__', frozenset(implemented))
def implements(*interfaces):
"""
Decorator declaring :term:`interfaces <interface>` implemented by
a decorated class.
"""
def wrapper(cls):
add_implemented_interfaces(cls, interfaces)
return cls
return wrapper
def implements_only(*interfaces):
"""
Decorator declaring :term:`interfaces <interface>` implemented by
a decorated class. Previous declarations including inherited declarations
are overridden.
"""
def wrapper(cls):
set_implemented_interfaces(cls, interfaces)
return cls
return wrapper
def isimplementation(obj, interfaces):
    """
    Returns `True` if `obj` is a class implementing all of `interfaces` or an
    instance of such class.

    `interfaces` can be a single :term:`interface` class or an iterable of
    interface classes.
    """
    if not inspect.isclass(obj):
        # Fix: the recursive result was previously computed and discarded,
        # so the instance branch was dead code and execution fell through
        # to the class-only check below.
        return isimplementation(obj.__class__, interfaces)
    # NOTE(review): `collections.Iterable` is removed in Python 3.10+; on a
    # Python-3-only codebase this should be `collections.abc.Iterable`.
    if not isinstance(interfaces, collections.Iterable):
        interfaces = [interfaces]
    return frozenset(interfaces).issubset(
        get_implemented_interfaces(obj)
    )
if six.PY3:
_get_argument_specification = inspect.getfullargspec
else:
_get_argument_specification = inspect.getargspec
|
msiedlarek/wiring
|
wiring/interface.py
|
add_implemented_interfaces
|
python
|
def add_implemented_interfaces(cls, interfaces):
    """
    Adds :term:`interfaces <interface>` to those already declared as
    implemented by class `cls`.
    """
    # Start from the implication sets of the new interfaces...
    combined = set()
    for interface in interfaces:
        combined |= set(interface.implied)
    # ...then fold in every declaration visible anywhere on the MRO.
    for ancestor in inspect.getmro(cls):
        combined |= set(get_implemented_interfaces(ancestor))
    cls.__interfaces__ = frozenset(combined)
|
Adds :term:`interfaces <interface>` to those already declared as
implemented by class `cls`.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/interface.py#L322-L338
| null |
import collections
import inspect
import operator
import six
__all__ = (
'InterfaceComplianceError',
'MissingAttributeError',
'MethodValidationError',
'Attribute',
'Method',
'Interface',
'get_implemented_interfaces',
'set_implemented_interfaces',
'add_implemented_interfaces',
'implements',
'implements_only',
'isimplementation',
)
class InterfaceComplianceError(Exception):
"""
Common base for all interface compliance validation errors.
"""
class MissingAttributeError(InterfaceComplianceError):
"""
Exception raised when an object is validated against :py:class:`Interface`
(by :py:meth:`Interface.check_compliance`) and is found to be missing
a required attribute.
"""
def __init__(self, attribute_name):
self.attribute_name = attribute_name
"""Name of the missing attribute."""
def __str__(self):
return "Validated object is missing `{attribute}` attribute.".format(
attribute=self.attribute_name
)
class MethodValidationError(InterfaceComplianceError):
"""
Exception raised when a function is validated against :py:class:`Method`
specification (e.g. by :py:meth:`Interface.check_compliance`) and some of
the arguments differ.
"""
def __init__(self, function, expected_argspec, observed_argspec):
self.function = function
"""
Function object that didn't pass the check.
"""
self.expected_argspec = expected_argspec
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying expected function arguments.
"""
self.observed_argspec = observed_argspec
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying arguments that the validated
function actually takes.
"""
def __str__(self):
return (
"Function `{function}` does not comply with interface definition."
" Expected arguments: {expected}"
" Observed arguments: {observed}"
).format(
function=self.function.__name__,
expected=inspect.formatargspec(*self.expected_argspec),
observed=inspect.formatargspec(*self.observed_argspec)
)
class Attribute(object):
"""
This class stores a specification of an object attribute, namely its
docstring. It is used by :py:class:`InterfaceMetaclass` to store
information about required attributes of an :term:`interface`.
"""
def __init__(self, docstring=None):
self.docstring = docstring
"""
Docstring of a described attribute.
"""
def __repr__(self):
if self.docstring:
return '<Attribute("{}")>'.format(self.docstring)
else:
return '<Attribute()>'
class Method(Attribute):
"""
This class stores a specification of a method, describing its arguments and
holding its docstring. It is used by :py:class:`InterfaceMetaclass` to
store information about required methods of an :term:`interface`.
"""
def __init__(self, argument_specification, docstring=None):
super(Method, self).__init__(docstring)
self.argument_specification = argument_specification
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying arguments taken by described method.
These will not include implied `self` argument.
"""
def __repr__(self):
return '<Method{}>'.format(
inspect.formatargspec(*self.argument_specification)
)
def check_compliance(self, function):
"""
Checks if a given `function` complies with this specification. If an
inconsistency is detected a :py:exc:`MethodValidationError` exception
is raised.
.. note::
This method will not work as expected when `function` is an unbound
method (``SomeClass.some_method``), as in Python 3 there is no way
to recognize that this is in fact a method. Therefore, the implied
`self` argument will not be ignored.
:raises:
:py:exc:`MethodValidationError`
"""
argument_specification = _get_argument_specification(function)
if inspect.ismethod(function):
# Remove implied `self` argument from specification if function is
# a method.
argument_specification = argument_specification._replace(
args=argument_specification.args[1:]
)
if argument_specification != self.argument_specification:
raise MethodValidationError(
function,
self.argument_specification,
argument_specification
)
class InterfaceMetaclass(type):
"""
This metaclass analyzes declared attributes and methods of new
:term:`interface` classes and turns them into a dictionary of
:py:class:`Attribute` and :py:class:`Method` specifications which is stored
as `attributes` dictionary of an interface class. It also handles
inheritance of those declarations.
It also collects (and stores in `implied` attribute) a set of this and all
base interfaces, as it never changes after the class is declared and is
very commonly needed.
"""
def __new__(cls, interface_name, bases, attributes):
ignored_attribute_names = (
'__module__',
'__qualname__',
'__locals__',
'__doc__',
)
ignored_attributes = {}
processed_attributes = {}
# Filter out private attributes which should not be treated as
# interface declarations.
for name, value in six.iteritems(attributes):
if (isinstance(value, classmethod) or
isinstance(value, staticmethod) or
name in ignored_attribute_names):
ignored_attributes[name] = value
else:
processed_attributes[name] = value
interface = super(InterfaceMetaclass, cls).__new__(
cls,
interface_name,
bases,
ignored_attributes
)
# Precalculate a tuple of this and all base interfaces in method
# resolution order.
interface.implied = tuple((
ancestor for ancestor in inspect.getmro(interface)
if cls._is_interface_class(ancestor)
))
interface.attributes = {}
for base in reversed(bases):
if cls._is_interface_class(base):
interface.attributes.update(base.attributes)
for name, value in six.iteritems(processed_attributes):
if isinstance(value, Attribute):
interface.attributes[name] = value
elif inspect.isfunction(value):
docstring = inspect.getdoc(value)
argument_specification = _get_argument_specification(value)
interface.attributes[name] = Method(
argument_specification,
docstring=docstring
)
else:
if isinstance(value, six.string_types):
docstring = value
else:
docstring = None
interface.attributes[name] = Attribute(docstring=docstring)
return interface
@classmethod
def _is_interface_class(cls, other_class):
if not isinstance(other_class, cls):
# Other class didn't come from this metaclass.
return False
if all(map(lambda b: not isinstance(b, cls), other_class.__bases__)):
# Other class is Interface class from this module.
return False
return True
@six.add_metaclass(InterfaceMetaclass)
class Interface(object):
__doc__ = """
A base class for :term:`interface` classes, using the
:py:class:`InterfaceMetaclass`.
""" + InterfaceMetaclass.__doc__
implied = frozenset()
"""
A `frozenset` of this and all base :term:`interfaces <interface>`.
"""
attributes = {}
"""
Dictionary describing provided attributes, including methods. Keys are
attribute names and values are :py:class:`Attribute` or :py:class:`Method`
instances.
"""
@classmethod
def check_compliance(cls, instance):
"""
Checks if given `instance` complies with this :term:`interface`. If
`instance` is found to be invalid a :py:exc:`InterfaceComplianceError`
subclass is raised.
`instance`'s class doesn't have to declare it implements an interface
to be validated against it.
.. note::
Classes cannot be validated against an interface, because instance
attributes couldn't be checked.
:raises:
:py:exc:`MissingAttributeError`,
:py:exc:`MethodValidationError`
"""
if inspect.isclass(instance):
raise TypeError(
"Only instances, not classes, can be validated against an"
" interface."
)
for name, value in six.iteritems(cls.attributes):
if not hasattr(instance, name):
raise MissingAttributeError(name)
if isinstance(value, Method):
value.check_compliance(getattr(instance, name))
def get_implemented_interfaces(cls):
"""
Returns a set of :term:`interfaces <interface>` declared as implemented by
class `cls`.
"""
if hasattr(cls, '__interfaces__'):
return cls.__interfaces__
return six.moves.reduce(
lambda x, y: x.union(y),
map(
get_implemented_interfaces,
inspect.getmro(cls)[1:]
),
set()
)
def set_implemented_interfaces(cls, interfaces):
"""
Declares :term:`interfaces <interface>` as implemented by class `cls`.
Those already declared are overriden.
"""
setattr(
cls,
'__interfaces__',
frozenset(
six.moves.reduce(
lambda x, y: x.union(y),
map(operator.attrgetter('implied'), interfaces),
set()
)
)
)
def implements(*interfaces):
"""
Decorator declaring :term:`interfaces <interface>` implemented by
a decorated class.
"""
def wrapper(cls):
add_implemented_interfaces(cls, interfaces)
return cls
return wrapper
def implements_only(*interfaces):
"""
Decorator declaring :term:`interfaces <interface>` implemented by
a decorated class. Previous declarations including inherited declarations
are overridden.
"""
def wrapper(cls):
set_implemented_interfaces(cls, interfaces)
return cls
return wrapper
def isimplementation(obj, interfaces):
"""
Returns `True` if `obj` is a class implementing all of `interfaces` or an
instance of such class.
`interfaces` can be a single :term:`interface` class or an iterable of
interface classes.
"""
if not inspect.isclass(obj):
isimplementation(obj.__class__, interfaces)
if not isinstance(interfaces, collections.Iterable):
interfaces = [interfaces]
return frozenset(interfaces).issubset(
get_implemented_interfaces(obj)
)
if six.PY3:
_get_argument_specification = inspect.getfullargspec
else:
_get_argument_specification = inspect.getargspec
|
msiedlarek/wiring
|
wiring/interface.py
|
isimplementation
|
python
|
def isimplementation(obj, interfaces):
if not inspect.isclass(obj):
isimplementation(obj.__class__, interfaces)
if not isinstance(interfaces, collections.Iterable):
interfaces = [interfaces]
return frozenset(interfaces).issubset(
get_implemented_interfaces(obj)
)
|
Returns `True` if `obj` is a class implementing all of `interfaces` or an
instance of such class.
`interfaces` can be a single :term:`interface` class or an iterable of
interface classes.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/interface.py#L364-L378
|
[
"def get_implemented_interfaces(cls):\n \"\"\"\n Returns a set of :term:`interfaces <interface>` declared as implemented by\n class `cls`.\n \"\"\"\n if hasattr(cls, '__interfaces__'):\n return cls.__interfaces__\n return six.moves.reduce(\n lambda x, y: x.union(y),\n map(\n get_implemented_interfaces,\n inspect.getmro(cls)[1:]\n ),\n set()\n )\n",
"def isimplementation(obj, interfaces):\n \"\"\"\n Returns `True` if `obj` is a class implementing all of `interfaces` or an\n instance of such class.\n\n `interfaces` can be a single :term:`interface` class or an iterable of\n interface classes.\n \"\"\"\n if not inspect.isclass(obj):\n isimplementation(obj.__class__, interfaces)\n if not isinstance(interfaces, collections.Iterable):\n interfaces = [interfaces]\n return frozenset(interfaces).issubset(\n get_implemented_interfaces(obj)\n )\n"
] |
import collections
import inspect
import operator
import six
__all__ = (
'InterfaceComplianceError',
'MissingAttributeError',
'MethodValidationError',
'Attribute',
'Method',
'Interface',
'get_implemented_interfaces',
'set_implemented_interfaces',
'add_implemented_interfaces',
'implements',
'implements_only',
'isimplementation',
)
class InterfaceComplianceError(Exception):
"""
Common base for all interface compliance validation errors.
"""
class MissingAttributeError(InterfaceComplianceError):
"""
Exception raised when an object is validated against :py:class:`Interface`
(by :py:meth:`Interface.check_compliance`) and is found to be missing
a required attribute.
"""
def __init__(self, attribute_name):
self.attribute_name = attribute_name
"""Name of the missing attribute."""
def __str__(self):
return "Validated object is missing `{attribute}` attribute.".format(
attribute=self.attribute_name
)
class MethodValidationError(InterfaceComplianceError):
"""
Exception raised when a function is validated against :py:class:`Method`
specification (e.g. by :py:meth:`Interface.check_compliance`) and some of
the arguments differ.
"""
def __init__(self, function, expected_argspec, observed_argspec):
self.function = function
"""
Function object that didn't pass the check.
"""
self.expected_argspec = expected_argspec
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying expected function arguments.
"""
self.observed_argspec = observed_argspec
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying arguments that the validated
function actually takes.
"""
def __str__(self):
return (
"Function `{function}` does not comply with interface definition."
" Expected arguments: {expected}"
" Observed arguments: {observed}"
).format(
function=self.function.__name__,
expected=inspect.formatargspec(*self.expected_argspec),
observed=inspect.formatargspec(*self.observed_argspec)
)
class Attribute(object):
"""
This class stores a specification of an object attribute, namely its
docstring. It is used by :py:class:`InterfaceMetaclass` to store
information about required attributes of an :term:`interface`.
"""
def __init__(self, docstring=None):
self.docstring = docstring
"""
Docstring of a described attribute.
"""
def __repr__(self):
if self.docstring:
return '<Attribute("{}")>'.format(self.docstring)
else:
return '<Attribute()>'
class Method(Attribute):
"""
This class stores a specification of a method, describing its arguments and
holding its docstring. It is used by :py:class:`InterfaceMetaclass` to
store information about required methods of an :term:`interface`.
"""
def __init__(self, argument_specification, docstring=None):
super(Method, self).__init__(docstring)
self.argument_specification = argument_specification
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying arguments taken by described method.
These will not include implied `self` argument.
"""
def __repr__(self):
return '<Method{}>'.format(
inspect.formatargspec(*self.argument_specification)
)
def check_compliance(self, function):
"""
Checks if a given `function` complies with this specification. If an
inconsistency is detected a :py:exc:`MethodValidationError` exception
is raised.
.. note::
This method will not work as expected when `function` is an unbound
method (``SomeClass.some_method``), as in Python 3 there is no way
to recognize that this is in fact a method. Therefore, the implied
`self` argument will not be ignored.
:raises:
:py:exc:`MethodValidationError`
"""
argument_specification = _get_argument_specification(function)
if inspect.ismethod(function):
# Remove implied `self` argument from specification if function is
# a method.
argument_specification = argument_specification._replace(
args=argument_specification.args[1:]
)
if argument_specification != self.argument_specification:
raise MethodValidationError(
function,
self.argument_specification,
argument_specification
)
class InterfaceMetaclass(type):
"""
This metaclass analyzes declared attributes and methods of new
:term:`interface` classes and turns them into a dictionary of
:py:class:`Attribute` and :py:class:`Method` specifications which is stored
as `attributes` dictionary of an interface class. It also handles
inheritance of those declarations.
It also collects (and stores in `implied` attribute) a set of this and all
base interfaces, as it never changes after the class is declared and is
very commonly needed.
"""
def __new__(cls, interface_name, bases, attributes):
ignored_attribute_names = (
'__module__',
'__qualname__',
'__locals__',
'__doc__',
)
ignored_attributes = {}
processed_attributes = {}
# Filter out private attributes which should not be treated as
# interface declarations.
for name, value in six.iteritems(attributes):
if (isinstance(value, classmethod) or
isinstance(value, staticmethod) or
name in ignored_attribute_names):
ignored_attributes[name] = value
else:
processed_attributes[name] = value
interface = super(InterfaceMetaclass, cls).__new__(
cls,
interface_name,
bases,
ignored_attributes
)
# Precalculate a tuple of this and all base interfaces in method
# resolution order.
interface.implied = tuple((
ancestor for ancestor in inspect.getmro(interface)
if cls._is_interface_class(ancestor)
))
interface.attributes = {}
for base in reversed(bases):
if cls._is_interface_class(base):
interface.attributes.update(base.attributes)
for name, value in six.iteritems(processed_attributes):
if isinstance(value, Attribute):
interface.attributes[name] = value
elif inspect.isfunction(value):
docstring = inspect.getdoc(value)
argument_specification = _get_argument_specification(value)
interface.attributes[name] = Method(
argument_specification,
docstring=docstring
)
else:
if isinstance(value, six.string_types):
docstring = value
else:
docstring = None
interface.attributes[name] = Attribute(docstring=docstring)
return interface
@classmethod
def _is_interface_class(cls, other_class):
if not isinstance(other_class, cls):
# Other class didn't came from this metaclass.
return False
if all(map(lambda b: not isinstance(b, cls), other_class.__bases__)):
# Other class is Interface class from this module.
return False
return True
@six.add_metaclass(InterfaceMetaclass)
class Interface(object):
__doc__ = """
A base class for :term:`interface` classes, using the
:py:class:`InterfaceMetaclass`.
""" + InterfaceMetaclass.__doc__
implied = frozenset()
"""
A `frozenset` of this and all base :term:`interfaces <interface>`.
"""
attributes = {}
"""
Dictionary describing provided attributes, including methods. Keys are
attribute names and values are :py:class:`Attribute` or :py:class:`Method`
instances.
"""
@classmethod
def check_compliance(cls, instance):
"""
Checks if given `instance` complies with this :term:`interface`. If
`instance` is found to be invalid a :py:exc:`InterfaceComplianceError`
subclass is raised.
`instance`'s class doesn't have to declare it implements an interface
to be validated against it.
.. note::
Classes cannot be validated against an interface, because instance
attributes couldn't be checked.
:raises:
:py:exc:`MissingAttributeError`,
:py:exc:`MethodValidationError`
"""
if inspect.isclass(instance):
raise TypeError(
"Only instances, not classes, can be validated against an"
" interface."
)
for name, value in six.iteritems(cls.attributes):
if not hasattr(instance, name):
raise MissingAttributeError(name)
if isinstance(value, Method):
value.check_compliance(getattr(instance, name))
def get_implemented_interfaces(cls):
"""
Returns a set of :term:`interfaces <interface>` declared as implemented by
class `cls`.
"""
if hasattr(cls, '__interfaces__'):
return cls.__interfaces__
return six.moves.reduce(
lambda x, y: x.union(y),
map(
get_implemented_interfaces,
inspect.getmro(cls)[1:]
),
set()
)
def set_implemented_interfaces(cls, interfaces):
"""
Declares :term:`interfaces <interface>` as implemented by class `cls`.
Those already declared are overriden.
"""
setattr(
cls,
'__interfaces__',
frozenset(
six.moves.reduce(
lambda x, y: x.union(y),
map(operator.attrgetter('implied'), interfaces),
set()
)
)
)
def add_implemented_interfaces(cls, interfaces):
"""
Adds :term:`interfaces <interface>` to those already declared as
implemented by class `cls`.
"""
implemented = set(
six.moves.reduce(
lambda x, y: x.union(y),
map(operator.attrgetter('implied'), interfaces),
set()
)
)
implemented.update(*map(
get_implemented_interfaces,
inspect.getmro(cls)
))
setattr(cls, '__interfaces__', frozenset(implemented))
def implements(*interfaces):
"""
Decorator declaring :term:`interfaces <interface>` implemented by
a decorated class.
"""
def wrapper(cls):
add_implemented_interfaces(cls, interfaces)
return cls
return wrapper
def implements_only(*interfaces):
"""
Decorator declaring :term:`interfaces <interface>` implemented by
a decorated class. Previous declarations including inherited declarations
are overridden.
"""
def wrapper(cls):
set_implemented_interfaces(cls, interfaces)
return cls
return wrapper
if six.PY3:
_get_argument_specification = inspect.getfullargspec
else:
_get_argument_specification = inspect.getargspec
|
msiedlarek/wiring
|
wiring/interface.py
|
Method.check_compliance
|
python
|
def check_compliance(self, function):
argument_specification = _get_argument_specification(function)
if inspect.ismethod(function):
# Remove implied `self` argument from specification if function is
# a method.
argument_specification = argument_specification._replace(
args=argument_specification.args[1:]
)
if argument_specification != self.argument_specification:
raise MethodValidationError(
function,
self.argument_specification,
argument_specification
)
|
Checks if a given `function` complies with this specification. If an
inconsistency is detected a :py:exc:`MethodValidationError` exception
is raised.
.. note::
This method will not work as expected when `function` is an unbound
method (``SomeClass.some_method``), as in Python 3 there is no way
to recognize that this is in fact a method. Therefore, the implied
`self` argument will not be ignored.
:raises:
:py:exc:`MethodValidationError`
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/interface.py#L124-L151
| null |
class Method(Attribute):
"""
This class stores a specification of a method, describing its arguments and
holding its docstring. It is used by :py:class:`InterfaceMetaclass` to
store information about required methods of an :term:`interface`.
"""
def __init__(self, argument_specification, docstring=None):
super(Method, self).__init__(docstring)
self.argument_specification = argument_specification
"""
An `inspect.ArgSpec` or `inspect.FullArgSpec` (depending on Python
version) named tuple specifying arguments taken by described method.
These will not include implied `self` argument.
"""
def __repr__(self):
return '<Method{}>'.format(
inspect.formatargspec(*self.argument_specification)
)
|
msiedlarek/wiring
|
wiring/graph.py
|
Graph.acquire
|
python
|
def acquire(self, specification, arguments=None):
if arguments is None:
realized_dependencies = {}
else:
realized_dependencies = copy.copy(arguments)
provider = self.providers[specification]
scope = None
if provider.scope is not None:
try:
scope = self.scopes[provider.scope]
except KeyError:
raise UnknownScopeError(provider.scope)
if scope is not None and specification in scope:
return scope[specification]
dependencies = six.iteritems(provider.dependencies)
for argument, dependency_specification in dependencies:
if argument not in realized_dependencies:
if isinstance(dependency_specification, Factory):
realized_dependencies[argument] = self.FactoryProxy(
self,
dependency_specification.specification
)
else:
realized_dependencies[argument] = self.acquire(
dependency_specification
)
args = []
kwargs = {}
for argument, value in six.iteritems(realized_dependencies):
if isinstance(argument, six.integer_types):
# Integer keys are for positional arguments.
if len(args) <= argument:
args.extend([None] * (argument + 1 - len(args)))
args[argument] = value
elif isinstance(argument, six.string_types):
# String keys are for keyword arguments.
kwargs[argument] = value
else:
raise TypeError(
"{} is not a valid argument key".format(repr(argument))
)
instance = provider(*args, **kwargs)
if scope is not None:
scope[specification] = instance
return instance
|
Returns an object for `specification` injecting its provider
with a mix of its :term:`dependencies <dependency>` and given
`arguments`. If there is a conflict between the injectable
dependencies and `arguments`, the value from `arguments` is
used.
When one of `arguments` keys is neither an integer nor a string
a `TypeError` is raised.
:param specification:
An object :term:`specification`.
:param arguments:
A dictionary of arguments given to the object :term:`provider`,
overriding those that would be injected or filling in for those
that wouldn't. Positional arguments should be stored under 0-based
integer keys.
:raises:
TypeError
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/graph.py#L150-L222
|
[
"def acquire(self, specification, arguments=None):\n \"\"\"\n Returns an object for `specification` injecting its provider\n with a mix of its :term:`dependencies <dependency>` and given\n `arguments`. If there is a conflict between the injectable\n dependencies and `arguments`, the value from `arguments` is\n used.\n\n When one of `arguments` keys is neither an integer nor a string\n a `TypeError` is raised.\n\n :param specification:\n An object :term:`specification`.\n :param arguments:\n A dictionary of arguments given to the object :term:`provider`,\n overriding those that would be injected or filling in for those\n that wouldn't. Positional arguments should be stored under 0-based\n integer keys.\n :raises:\n TypeError\n \"\"\"\n if arguments is None:\n realized_dependencies = {}\n else:\n realized_dependencies = copy.copy(arguments)\n\n provider = self.providers[specification]\n\n scope = None\n if provider.scope is not None:\n try:\n scope = self.scopes[provider.scope]\n except KeyError:\n raise UnknownScopeError(provider.scope)\n\n if scope is not None and specification in scope:\n return scope[specification]\n\n dependencies = six.iteritems(provider.dependencies)\n for argument, dependency_specification in dependencies:\n if argument not in realized_dependencies:\n if isinstance(dependency_specification, Factory):\n realized_dependencies[argument] = self.FactoryProxy(\n self,\n dependency_specification.specification\n )\n else:\n realized_dependencies[argument] = self.acquire(\n dependency_specification\n )\n\n args = []\n kwargs = {}\n for argument, value in six.iteritems(realized_dependencies):\n if isinstance(argument, six.integer_types):\n # Integer keys are for positional arguments.\n if len(args) <= argument:\n args.extend([None] * (argument + 1 - len(args)))\n args[argument] = value\n elif isinstance(argument, six.string_types):\n # String keys are for keyword arguments.\n kwargs[argument] = value\n else:\n raise TypeError(\n \"{} is not a valid 
argument key\".format(repr(argument))\n )\n\n instance = provider(*args, **kwargs)\n\n if scope is not None:\n scope[specification] = instance\n\n return instance\n"
] |
class Graph(object):
"""
Respresents an :term:`object graph`. Contains registered scopes and
providers, and can be used to validate and resolve provider dependencies
and creating provided objects.
"""
class FactoryProxy(object):
"""
A proxy object injected when `Factory(<specification>)` is requested as
a dependency.
"""
def __init__(self, graph, specification):
self.graph = graph
self.specification = specification
def __call__(self, *args, **kwargs):
return self.graph.get(self.specification, *args, **kwargs)
def __init__(self):
self.providers = {}
"""
Dictionary mapping :term:`specifications <specification>` to
:py:interface:`wiring.providers.IProvider` implementers that can
provide the specified object.
"""
self.scopes = {}
"""
Dictionary mapping :term:`scope` types to their instances. Scope
instances must conform to :py:interface:`wiring.scopes.IScope`
interface.
"""
self.register_scope(SingletonScope, SingletonScope())
self.register_scope(ProcessScope, ProcessScope())
self.register_scope(ThreadScope, ThreadScope())
def get(self, specification, *args, **kwargs):
"""
A more convenient version of :py:meth:`acquire()` for when you can
provide positional arguments in a right order.
"""
arguments = dict(enumerate(args))
arguments.update(kwargs)
return self.acquire(specification, arguments=arguments)
def register_provider(self, specification, provider):
"""
Registers a :term:`provider` (a :py:class:`wiring.providers.Provider`
instance) to be called when an object specified by
:term:`specification` is needed. If there was already a provider for
this specification it is overriden.
"""
if provider.scope is not None and provider.scope not in self.scopes:
raise UnknownScopeError(provider.scope)
self.providers[specification] = provider
def unregister_provider(self, specification):
"""
Removes :term:`provider` for given `specification` from the graph.
"""
del self.providers[specification]
def register_factory(self, specification, factory, scope=None):
"""
Shortcut for creating and registering
a :py:class:`wiring.providers.FactoryProvider`.
"""
self.register_provider(
specification,
FactoryProvider(factory, scope=scope)
)
def register_function(self, specification, function, scope=None):
"""
Shortcut for creating and registering
a :py:class:`wiring.providers.FunctionProvider`.
"""
self.register_provider(
specification,
FunctionProvider(function, scope=scope)
)
def register_instance(self, specification, instance):
"""
Registers given `instance` to be used as-is when an object specified by
given :term:`specification` is needed. If there was already a provider
for this specification it is overriden.
"""
self.register_provider(specification, InstanceProvider(instance))
def register_scope(self, scope_type, instance):
"""
Register instance of a :term:`scope` for given scope type. This scope
may be later referred to by providers using this type.
"""
self.scopes[scope_type] = instance
def unregister_scope(self, scope_type):
"""
Removes a :term:`scope` type from the graph.
"""
del self.scopes[scope_type]
def validate(self):
"""
Asserts that every declared :term:`specification` can actually be
realized, meaning that all of its :term:`dependencies <dependency>` are
present and there are no self-dependencies or :term:`dependency cycles
<dependency cycle>`. If such a problem is found, a proper exception
(deriving from :py:class:`GraphValidationError`) is raised.
:raises:
:py:exc:`MissingDependencyError`,
:py:exc:`SelfDependencyError`,
:py:exc:`DependencyCycleError`
"""
# This method uses Tarjan's strongly connected components algorithm
# with added self-dependency check to find dependency cyclces.
# Index is just an integer, it's wrapped in a list as a workaround for
# Python 2's lack of `nonlocal` keyword, so the nested
# `strongconnect()` may modify it.
index = [0]
indices = {}
lowlinks = {}
stack = []
def strongconnect(specification):
# Set the depth index for the node to the smallest unused index.
indices[specification] = index[0]
lowlinks[specification] = index[0]
index[0] += 1
stack.append(specification)
provider = self.providers[specification]
dependencies = six.itervalues(provider.dependencies)
for dependency in dependencies:
if isinstance(dependency, Factory):
dependency = dependency.specification
if dependency not in self.providers:
raise MissingDependencyError(specification, dependency)
if dependency == specification:
raise SelfDependencyError(specification)
if dependency not in indices:
# Dependency has not yet been visited; recurse on it.
strongconnect(dependency)
lowlinks[specification] = min(
lowlinks[specification],
lowlinks[dependency]
)
elif dependency in stack:
# Dependency is in stack and hence in the current strongly
# connected component.
lowlinks[specification] = min(
lowlinks[specification],
indices[dependency]
)
if lowlinks[specification] == indices[specification]:
component = []
while True:
component.append(stack.pop())
if component[-1] == specification:
break
if len(component) > 1:
raise DependencyCycleError(reversed(component))
for specification, provider in six.iteritems(self.providers):
if specification not in indices:
strongconnect(specification)
|
msiedlarek/wiring
|
wiring/graph.py
|
Graph.get
|
python
|
def get(self, specification, *args, **kwargs):
arguments = dict(enumerate(args))
arguments.update(kwargs)
return self.acquire(specification, arguments=arguments)
|
A more convenient version of :py:meth:`acquire()` for when you can
provide positional arguments in a right order.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/graph.py#L224-L231
|
[
"def acquire(self, specification, arguments=None):\n \"\"\"\n Returns an object for `specification` injecting its provider\n with a mix of its :term:`dependencies <dependency>` and given\n `arguments`. If there is a conflict between the injectable\n dependencies and `arguments`, the value from `arguments` is\n used.\n\n When one of `arguments` keys is neither an integer nor a string\n a `TypeError` is raised.\n\n :param specification:\n An object :term:`specification`.\n :param arguments:\n A dictionary of arguments given to the object :term:`provider`,\n overriding those that would be injected or filling in for those\n that wouldn't. Positional arguments should be stored under 0-based\n integer keys.\n :raises:\n TypeError\n \"\"\"\n if arguments is None:\n realized_dependencies = {}\n else:\n realized_dependencies = copy.copy(arguments)\n\n provider = self.providers[specification]\n\n scope = None\n if provider.scope is not None:\n try:\n scope = self.scopes[provider.scope]\n except KeyError:\n raise UnknownScopeError(provider.scope)\n\n if scope is not None and specification in scope:\n return scope[specification]\n\n dependencies = six.iteritems(provider.dependencies)\n for argument, dependency_specification in dependencies:\n if argument not in realized_dependencies:\n if isinstance(dependency_specification, Factory):\n realized_dependencies[argument] = self.FactoryProxy(\n self,\n dependency_specification.specification\n )\n else:\n realized_dependencies[argument] = self.acquire(\n dependency_specification\n )\n\n args = []\n kwargs = {}\n for argument, value in six.iteritems(realized_dependencies):\n if isinstance(argument, six.integer_types):\n # Integer keys are for positional arguments.\n if len(args) <= argument:\n args.extend([None] * (argument + 1 - len(args)))\n args[argument] = value\n elif isinstance(argument, six.string_types):\n # String keys are for keyword arguments.\n kwargs[argument] = value\n else:\n raise TypeError(\n \"{} is not a valid 
argument key\".format(repr(argument))\n )\n\n instance = provider(*args, **kwargs)\n\n if scope is not None:\n scope[specification] = instance\n\n return instance\n"
] |
class Graph(object):
"""
Respresents an :term:`object graph`. Contains registered scopes and
providers, and can be used to validate and resolve provider dependencies
and creating provided objects.
"""
class FactoryProxy(object):
"""
A proxy object injected when `Factory(<specification>)` is requested as
a dependency.
"""
def __init__(self, graph, specification):
self.graph = graph
self.specification = specification
def __call__(self, *args, **kwargs):
return self.graph.get(self.specification, *args, **kwargs)
def __init__(self):
self.providers = {}
"""
Dictionary mapping :term:`specifications <specification>` to
:py:interface:`wiring.providers.IProvider` implementers that can
provide the specified object.
"""
self.scopes = {}
"""
Dictionary mapping :term:`scope` types to their instances. Scope
instances must conform to :py:interface:`wiring.scopes.IScope`
interface.
"""
self.register_scope(SingletonScope, SingletonScope())
self.register_scope(ProcessScope, ProcessScope())
self.register_scope(ThreadScope, ThreadScope())
def acquire(self, specification, arguments=None):
"""
Returns an object for `specification` injecting its provider
with a mix of its :term:`dependencies <dependency>` and given
`arguments`. If there is a conflict between the injectable
dependencies and `arguments`, the value from `arguments` is
used.
When one of `arguments` keys is neither an integer nor a string
a `TypeError` is raised.
:param specification:
An object :term:`specification`.
:param arguments:
A dictionary of arguments given to the object :term:`provider`,
overriding those that would be injected or filling in for those
that wouldn't. Positional arguments should be stored under 0-based
integer keys.
:raises:
TypeError
"""
if arguments is None:
realized_dependencies = {}
else:
realized_dependencies = copy.copy(arguments)
provider = self.providers[specification]
scope = None
if provider.scope is not None:
try:
scope = self.scopes[provider.scope]
except KeyError:
raise UnknownScopeError(provider.scope)
if scope is not None and specification in scope:
return scope[specification]
dependencies = six.iteritems(provider.dependencies)
for argument, dependency_specification in dependencies:
if argument not in realized_dependencies:
if isinstance(dependency_specification, Factory):
realized_dependencies[argument] = self.FactoryProxy(
self,
dependency_specification.specification
)
else:
realized_dependencies[argument] = self.acquire(
dependency_specification
)
args = []
kwargs = {}
for argument, value in six.iteritems(realized_dependencies):
if isinstance(argument, six.integer_types):
# Integer keys are for positional arguments.
if len(args) <= argument:
args.extend([None] * (argument + 1 - len(args)))
args[argument] = value
elif isinstance(argument, six.string_types):
# String keys are for keyword arguments.
kwargs[argument] = value
else:
raise TypeError(
"{} is not a valid argument key".format(repr(argument))
)
instance = provider(*args, **kwargs)
if scope is not None:
scope[specification] = instance
return instance
def register_provider(self, specification, provider):
"""
Registers a :term:`provider` (a :py:class:`wiring.providers.Provider`
instance) to be called when an object specified by
:term:`specification` is needed. If there was already a provider for
this specification it is overriden.
"""
if provider.scope is not None and provider.scope not in self.scopes:
raise UnknownScopeError(provider.scope)
self.providers[specification] = provider
def unregister_provider(self, specification):
"""
Removes :term:`provider` for given `specification` from the graph.
"""
del self.providers[specification]
def register_factory(self, specification, factory, scope=None):
"""
Shortcut for creating and registering
a :py:class:`wiring.providers.FactoryProvider`.
"""
self.register_provider(
specification,
FactoryProvider(factory, scope=scope)
)
def register_function(self, specification, function, scope=None):
"""
Shortcut for creating and registering
a :py:class:`wiring.providers.FunctionProvider`.
"""
self.register_provider(
specification,
FunctionProvider(function, scope=scope)
)
def register_instance(self, specification, instance):
"""
Registers given `instance` to be used as-is when an object specified by
given :term:`specification` is needed. If there was already a provider
for this specification it is overriden.
"""
self.register_provider(specification, InstanceProvider(instance))
def register_scope(self, scope_type, instance):
"""
Register instance of a :term:`scope` for given scope type. This scope
may be later referred to by providers using this type.
"""
self.scopes[scope_type] = instance
def unregister_scope(self, scope_type):
"""
Removes a :term:`scope` type from the graph.
"""
del self.scopes[scope_type]
def validate(self):
"""
Asserts that every declared :term:`specification` can actually be
realized, meaning that all of its :term:`dependencies <dependency>` are
present and there are no self-dependencies or :term:`dependency cycles
<dependency cycle>`. If such a problem is found, a proper exception
(deriving from :py:class:`GraphValidationError`) is raised.
:raises:
:py:exc:`MissingDependencyError`,
:py:exc:`SelfDependencyError`,
:py:exc:`DependencyCycleError`
"""
# This method uses Tarjan's strongly connected components algorithm
# with added self-dependency check to find dependency cyclces.
# Index is just an integer, it's wrapped in a list as a workaround for
# Python 2's lack of `nonlocal` keyword, so the nested
# `strongconnect()` may modify it.
index = [0]
indices = {}
lowlinks = {}
stack = []
def strongconnect(specification):
# Set the depth index for the node to the smallest unused index.
indices[specification] = index[0]
lowlinks[specification] = index[0]
index[0] += 1
stack.append(specification)
provider = self.providers[specification]
dependencies = six.itervalues(provider.dependencies)
for dependency in dependencies:
if isinstance(dependency, Factory):
dependency = dependency.specification
if dependency not in self.providers:
raise MissingDependencyError(specification, dependency)
if dependency == specification:
raise SelfDependencyError(specification)
if dependency not in indices:
# Dependency has not yet been visited; recurse on it.
strongconnect(dependency)
lowlinks[specification] = min(
lowlinks[specification],
lowlinks[dependency]
)
elif dependency in stack:
# Dependency is in stack and hence in the current strongly
# connected component.
lowlinks[specification] = min(
lowlinks[specification],
indices[dependency]
)
if lowlinks[specification] == indices[specification]:
component = []
while True:
component.append(stack.pop())
if component[-1] == specification:
break
if len(component) > 1:
raise DependencyCycleError(reversed(component))
for specification, provider in six.iteritems(self.providers):
if specification not in indices:
strongconnect(specification)
|
msiedlarek/wiring
|
wiring/graph.py
|
Graph.register_provider
|
python
|
def register_provider(self, specification, provider):
if provider.scope is not None and provider.scope not in self.scopes:
raise UnknownScopeError(provider.scope)
self.providers[specification] = provider
|
Registers a :term:`provider` (a :py:class:`wiring.providers.Provider`
instance) to be called when an object specified by
:term:`specification` is needed. If there was already a provider for
this specification it is overridden.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/graph.py#L233-L242
| null |
class Graph(object):
"""
Represents an :term:`object graph`. Contains registered scopes and
providers, and can be used to validate and resolve provider dependencies
and creating provided objects.
"""
class FactoryProxy(object):
"""
A proxy object injected when `Factory(<specification>)` is requested as
a dependency.
"""
def __init__(self, graph, specification):
self.graph = graph
self.specification = specification
def __call__(self, *args, **kwargs):
return self.graph.get(self.specification, *args, **kwargs)
def __init__(self):
self.providers = {}
"""
Dictionary mapping :term:`specifications <specification>` to
:py:interface:`wiring.providers.IProvider` implementers that can
provide the specified object.
"""
self.scopes = {}
"""
Dictionary mapping :term:`scope` types to their instances. Scope
instances must conform to :py:interface:`wiring.scopes.IScope`
interface.
"""
self.register_scope(SingletonScope, SingletonScope())
self.register_scope(ProcessScope, ProcessScope())
self.register_scope(ThreadScope, ThreadScope())
def acquire(self, specification, arguments=None):
"""
Returns an object for `specification` injecting its provider
with a mix of its :term:`dependencies <dependency>` and given
`arguments`. If there is a conflict between the injectable
dependencies and `arguments`, the value from `arguments` is
used.
When one of `arguments` keys is neither an integer nor a string
a `TypeError` is raised.
:param specification:
An object :term:`specification`.
:param arguments:
A dictionary of arguments given to the object :term:`provider`,
overriding those that would be injected or filling in for those
that wouldn't. Positional arguments should be stored under 0-based
integer keys.
:raises:
TypeError
"""
if arguments is None:
realized_dependencies = {}
else:
realized_dependencies = copy.copy(arguments)
provider = self.providers[specification]
scope = None
if provider.scope is not None:
try:
scope = self.scopes[provider.scope]
except KeyError:
raise UnknownScopeError(provider.scope)
if scope is not None and specification in scope:
return scope[specification]
dependencies = six.iteritems(provider.dependencies)
for argument, dependency_specification in dependencies:
if argument not in realized_dependencies:
if isinstance(dependency_specification, Factory):
realized_dependencies[argument] = self.FactoryProxy(
self,
dependency_specification.specification
)
else:
realized_dependencies[argument] = self.acquire(
dependency_specification
)
args = []
kwargs = {}
for argument, value in six.iteritems(realized_dependencies):
if isinstance(argument, six.integer_types):
# Integer keys are for positional arguments.
if len(args) <= argument:
args.extend([None] * (argument + 1 - len(args)))
args[argument] = value
elif isinstance(argument, six.string_types):
# String keys are for keyword arguments.
kwargs[argument] = value
else:
raise TypeError(
"{} is not a valid argument key".format(repr(argument))
)
instance = provider(*args, **kwargs)
if scope is not None:
scope[specification] = instance
return instance
def get(self, specification, *args, **kwargs):
"""
A more convenient version of :py:meth:`acquire()` for when you can
provide positional arguments in a right order.
"""
arguments = dict(enumerate(args))
arguments.update(kwargs)
return self.acquire(specification, arguments=arguments)
def unregister_provider(self, specification):
"""
Removes :term:`provider` for given `specification` from the graph.
"""
del self.providers[specification]
def register_factory(self, specification, factory, scope=None):
"""
Shortcut for creating and registering
a :py:class:`wiring.providers.FactoryProvider`.
"""
self.register_provider(
specification,
FactoryProvider(factory, scope=scope)
)
def register_function(self, specification, function, scope=None):
"""
Shortcut for creating and registering
a :py:class:`wiring.providers.FunctionProvider`.
"""
self.register_provider(
specification,
FunctionProvider(function, scope=scope)
)
def register_instance(self, specification, instance):
"""
Registers given `instance` to be used as-is when an object specified by
given :term:`specification` is needed. If there was already a provider
for this specification it is overridden.
"""
self.register_provider(specification, InstanceProvider(instance))
def register_scope(self, scope_type, instance):
"""
Register instance of a :term:`scope` for given scope type. This scope
may be later referred to by providers using this type.
"""
self.scopes[scope_type] = instance
def unregister_scope(self, scope_type):
"""
Removes a :term:`scope` type from the graph.
"""
del self.scopes[scope_type]
def validate(self):
"""
Asserts that every declared :term:`specification` can actually be
realized, meaning that all of its :term:`dependencies <dependency>` are
present and there are no self-dependencies or :term:`dependency cycles
<dependency cycle>`. If such a problem is found, a proper exception
(deriving from :py:class:`GraphValidationError`) is raised.
:raises:
:py:exc:`MissingDependencyError`,
:py:exc:`SelfDependencyError`,
:py:exc:`DependencyCycleError`
"""
# This method uses Tarjan's strongly connected components algorithm
# with added self-dependency check to find dependency cycles.
# Index is just an integer, it's wrapped in a list as a workaround for
# Python 2's lack of `nonlocal` keyword, so the nested
# `strongconnect()` may modify it.
index = [0]
indices = {}
lowlinks = {}
stack = []
def strongconnect(specification):
# Set the depth index for the node to the smallest unused index.
indices[specification] = index[0]
lowlinks[specification] = index[0]
index[0] += 1
stack.append(specification)
provider = self.providers[specification]
dependencies = six.itervalues(provider.dependencies)
for dependency in dependencies:
if isinstance(dependency, Factory):
dependency = dependency.specification
if dependency not in self.providers:
raise MissingDependencyError(specification, dependency)
if dependency == specification:
raise SelfDependencyError(specification)
if dependency not in indices:
# Dependency has not yet been visited; recurse on it.
strongconnect(dependency)
lowlinks[specification] = min(
lowlinks[specification],
lowlinks[dependency]
)
elif dependency in stack:
# Dependency is in stack and hence in the current strongly
# connected component.
lowlinks[specification] = min(
lowlinks[specification],
indices[dependency]
)
if lowlinks[specification] == indices[specification]:
component = []
while True:
component.append(stack.pop())
if component[-1] == specification:
break
if len(component) > 1:
raise DependencyCycleError(reversed(component))
for specification, provider in six.iteritems(self.providers):
if specification not in indices:
strongconnect(specification)
|
msiedlarek/wiring
|
wiring/graph.py
|
Graph.register_factory
|
python
|
def register_factory(self, specification, factory, scope=None):
self.register_provider(
specification,
FactoryProvider(factory, scope=scope)
)
|
Shortcut for creating and registering
a :py:class:`wiring.providers.FactoryProvider`.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/graph.py#L250-L258
|
[
"def register_provider(self, specification, provider):\n \"\"\"\n Registers a :term:`provider` (a :py:class:`wiring.providers.Provider`\n instance) to be called when an object specified by\n :term:`specification` is needed. If there was already a provider for\n this specification it is overriden.\n \"\"\"\n if provider.scope is not None and provider.scope not in self.scopes:\n raise UnknownScopeError(provider.scope)\n self.providers[specification] = provider\n"
] |
class Graph(object):
"""
Represents an :term:`object graph`. Contains registered scopes and
providers, and can be used to validate and resolve provider dependencies
and creating provided objects.
"""
class FactoryProxy(object):
"""
A proxy object injected when `Factory(<specification>)` is requested as
a dependency.
"""
def __init__(self, graph, specification):
self.graph = graph
self.specification = specification
def __call__(self, *args, **kwargs):
return self.graph.get(self.specification, *args, **kwargs)
def __init__(self):
self.providers = {}
"""
Dictionary mapping :term:`specifications <specification>` to
:py:interface:`wiring.providers.IProvider` implementers that can
provide the specified object.
"""
self.scopes = {}
"""
Dictionary mapping :term:`scope` types to their instances. Scope
instances must conform to :py:interface:`wiring.scopes.IScope`
interface.
"""
self.register_scope(SingletonScope, SingletonScope())
self.register_scope(ProcessScope, ProcessScope())
self.register_scope(ThreadScope, ThreadScope())
def acquire(self, specification, arguments=None):
"""
Returns an object for `specification` injecting its provider
with a mix of its :term:`dependencies <dependency>` and given
`arguments`. If there is a conflict between the injectable
dependencies and `arguments`, the value from `arguments` is
used.
When one of `arguments` keys is neither an integer nor a string
a `TypeError` is raised.
:param specification:
An object :term:`specification`.
:param arguments:
A dictionary of arguments given to the object :term:`provider`,
overriding those that would be injected or filling in for those
that wouldn't. Positional arguments should be stored under 0-based
integer keys.
:raises:
TypeError
"""
if arguments is None:
realized_dependencies = {}
else:
realized_dependencies = copy.copy(arguments)
provider = self.providers[specification]
scope = None
if provider.scope is not None:
try:
scope = self.scopes[provider.scope]
except KeyError:
raise UnknownScopeError(provider.scope)
if scope is not None and specification in scope:
return scope[specification]
dependencies = six.iteritems(provider.dependencies)
for argument, dependency_specification in dependencies:
if argument not in realized_dependencies:
if isinstance(dependency_specification, Factory):
realized_dependencies[argument] = self.FactoryProxy(
self,
dependency_specification.specification
)
else:
realized_dependencies[argument] = self.acquire(
dependency_specification
)
args = []
kwargs = {}
for argument, value in six.iteritems(realized_dependencies):
if isinstance(argument, six.integer_types):
# Integer keys are for positional arguments.
if len(args) <= argument:
args.extend([None] * (argument + 1 - len(args)))
args[argument] = value
elif isinstance(argument, six.string_types):
# String keys are for keyword arguments.
kwargs[argument] = value
else:
raise TypeError(
"{} is not a valid argument key".format(repr(argument))
)
instance = provider(*args, **kwargs)
if scope is not None:
scope[specification] = instance
return instance
def get(self, specification, *args, **kwargs):
"""
A more convenient version of :py:meth:`acquire()` for when you can
provide positional arguments in a right order.
"""
arguments = dict(enumerate(args))
arguments.update(kwargs)
return self.acquire(specification, arguments=arguments)
def register_provider(self, specification, provider):
"""
Registers a :term:`provider` (a :py:class:`wiring.providers.Provider`
instance) to be called when an object specified by
:term:`specification` is needed. If there was already a provider for
this specification it is overridden.
"""
if provider.scope is not None and provider.scope not in self.scopes:
raise UnknownScopeError(provider.scope)
self.providers[specification] = provider
def unregister_provider(self, specification):
"""
Removes :term:`provider` for given `specification` from the graph.
"""
del self.providers[specification]
def register_function(self, specification, function, scope=None):
"""
Shortcut for creating and registering
a :py:class:`wiring.providers.FunctionProvider`.
"""
self.register_provider(
specification,
FunctionProvider(function, scope=scope)
)
def register_instance(self, specification, instance):
"""
Registers given `instance` to be used as-is when an object specified by
given :term:`specification` is needed. If there was already a provider
for this specification it is overridden.
"""
self.register_provider(specification, InstanceProvider(instance))
def register_scope(self, scope_type, instance):
"""
Register instance of a :term:`scope` for given scope type. This scope
may be later referred to by providers using this type.
"""
self.scopes[scope_type] = instance
def unregister_scope(self, scope_type):
"""
Removes a :term:`scope` type from the graph.
"""
del self.scopes[scope_type]
def validate(self):
"""
Asserts that every declared :term:`specification` can actually be
realized, meaning that all of its :term:`dependencies <dependency>` are
present and there are no self-dependencies or :term:`dependency cycles
<dependency cycle>`. If such a problem is found, a proper exception
(deriving from :py:class:`GraphValidationError`) is raised.
:raises:
:py:exc:`MissingDependencyError`,
:py:exc:`SelfDependencyError`,
:py:exc:`DependencyCycleError`
"""
# This method uses Tarjan's strongly connected components algorithm
# with added self-dependency check to find dependency cycles.
# Index is just an integer, it's wrapped in a list as a workaround for
# Python 2's lack of `nonlocal` keyword, so the nested
# `strongconnect()` may modify it.
index = [0]
indices = {}
lowlinks = {}
stack = []
def strongconnect(specification):
# Set the depth index for the node to the smallest unused index.
indices[specification] = index[0]
lowlinks[specification] = index[0]
index[0] += 1
stack.append(specification)
provider = self.providers[specification]
dependencies = six.itervalues(provider.dependencies)
for dependency in dependencies:
if isinstance(dependency, Factory):
dependency = dependency.specification
if dependency not in self.providers:
raise MissingDependencyError(specification, dependency)
if dependency == specification:
raise SelfDependencyError(specification)
if dependency not in indices:
# Dependency has not yet been visited; recurse on it.
strongconnect(dependency)
lowlinks[specification] = min(
lowlinks[specification],
lowlinks[dependency]
)
elif dependency in stack:
# Dependency is in stack and hence in the current strongly
# connected component.
lowlinks[specification] = min(
lowlinks[specification],
indices[dependency]
)
if lowlinks[specification] == indices[specification]:
component = []
while True:
component.append(stack.pop())
if component[-1] == specification:
break
if len(component) > 1:
raise DependencyCycleError(reversed(component))
for specification, provider in six.iteritems(self.providers):
if specification not in indices:
strongconnect(specification)
|
msiedlarek/wiring
|
wiring/graph.py
|
Graph.register_function
|
python
|
def register_function(self, specification, function, scope=None):
self.register_provider(
specification,
FunctionProvider(function, scope=scope)
)
|
Shortcut for creating and registering
a :py:class:`wiring.providers.FunctionProvider`.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/graph.py#L260-L268
|
[
"def register_provider(self, specification, provider):\n \"\"\"\n Registers a :term:`provider` (a :py:class:`wiring.providers.Provider`\n instance) to be called when an object specified by\n :term:`specification` is needed. If there was already a provider for\n this specification it is overriden.\n \"\"\"\n if provider.scope is not None and provider.scope not in self.scopes:\n raise UnknownScopeError(provider.scope)\n self.providers[specification] = provider\n"
] |
class Graph(object):
"""
Represents an :term:`object graph`. Contains registered scopes and
providers, and can be used to validate and resolve provider dependencies
and creating provided objects.
"""
class FactoryProxy(object):
"""
A proxy object injected when `Factory(<specification>)` is requested as
a dependency.
"""
def __init__(self, graph, specification):
self.graph = graph
self.specification = specification
def __call__(self, *args, **kwargs):
return self.graph.get(self.specification, *args, **kwargs)
def __init__(self):
self.providers = {}
"""
Dictionary mapping :term:`specifications <specification>` to
:py:interface:`wiring.providers.IProvider` implementers that can
provide the specified object.
"""
self.scopes = {}
"""
Dictionary mapping :term:`scope` types to their instances. Scope
instances must conform to :py:interface:`wiring.scopes.IScope`
interface.
"""
self.register_scope(SingletonScope, SingletonScope())
self.register_scope(ProcessScope, ProcessScope())
self.register_scope(ThreadScope, ThreadScope())
def acquire(self, specification, arguments=None):
"""
Returns an object for `specification` injecting its provider
with a mix of its :term:`dependencies <dependency>` and given
`arguments`. If there is a conflict between the injectable
dependencies and `arguments`, the value from `arguments` is
used.
When one of `arguments` keys is neither an integer nor a string
a `TypeError` is raised.
:param specification:
An object :term:`specification`.
:param arguments:
A dictionary of arguments given to the object :term:`provider`,
overriding those that would be injected or filling in for those
that wouldn't. Positional arguments should be stored under 0-based
integer keys.
:raises:
TypeError
"""
if arguments is None:
realized_dependencies = {}
else:
realized_dependencies = copy.copy(arguments)
provider = self.providers[specification]
scope = None
if provider.scope is not None:
try:
scope = self.scopes[provider.scope]
except KeyError:
raise UnknownScopeError(provider.scope)
if scope is not None and specification in scope:
return scope[specification]
dependencies = six.iteritems(provider.dependencies)
for argument, dependency_specification in dependencies:
if argument not in realized_dependencies:
if isinstance(dependency_specification, Factory):
realized_dependencies[argument] = self.FactoryProxy(
self,
dependency_specification.specification
)
else:
realized_dependencies[argument] = self.acquire(
dependency_specification
)
args = []
kwargs = {}
for argument, value in six.iteritems(realized_dependencies):
if isinstance(argument, six.integer_types):
# Integer keys are for positional arguments.
if len(args) <= argument:
args.extend([None] * (argument + 1 - len(args)))
args[argument] = value
elif isinstance(argument, six.string_types):
# String keys are for keyword arguments.
kwargs[argument] = value
else:
raise TypeError(
"{} is not a valid argument key".format(repr(argument))
)
instance = provider(*args, **kwargs)
if scope is not None:
scope[specification] = instance
return instance
def get(self, specification, *args, **kwargs):
"""
A more convenient version of :py:meth:`acquire()` for when you can
provide positional arguments in a right order.
"""
arguments = dict(enumerate(args))
arguments.update(kwargs)
return self.acquire(specification, arguments=arguments)
def register_provider(self, specification, provider):
"""
Registers a :term:`provider` (a :py:class:`wiring.providers.Provider`
instance) to be called when an object specified by
:term:`specification` is needed. If there was already a provider for
this specification it is overridden.
"""
if provider.scope is not None and provider.scope not in self.scopes:
raise UnknownScopeError(provider.scope)
self.providers[specification] = provider
def unregister_provider(self, specification):
"""
Removes :term:`provider` for given `specification` from the graph.
"""
del self.providers[specification]
def register_factory(self, specification, factory, scope=None):
"""
Shortcut for creating and registering
a :py:class:`wiring.providers.FactoryProvider`.
"""
self.register_provider(
specification,
FactoryProvider(factory, scope=scope)
)
def register_instance(self, specification, instance):
"""
Registers given `instance` to be used as-is when an object specified by
given :term:`specification` is needed. If there was already a provider
for this specification it is overridden.
"""
self.register_provider(specification, InstanceProvider(instance))
def register_scope(self, scope_type, instance):
"""
Register instance of a :term:`scope` for given scope type. This scope
may be later referred to by providers using this type.
"""
self.scopes[scope_type] = instance
def unregister_scope(self, scope_type):
"""
Removes a :term:`scope` type from the graph.
"""
del self.scopes[scope_type]
def validate(self):
"""
Asserts that every declared :term:`specification` can actually be
realized, meaning that all of its :term:`dependencies <dependency>` are
present and there are no self-dependencies or :term:`dependency cycles
<dependency cycle>`. If such a problem is found, a proper exception
(deriving from :py:class:`GraphValidationError`) is raised.
:raises:
:py:exc:`MissingDependencyError`,
:py:exc:`SelfDependencyError`,
:py:exc:`DependencyCycleError`
"""
# This method uses Tarjan's strongly connected components algorithm
# with added self-dependency check to find dependency cycles.
# Index is just an integer, it's wrapped in a list as a workaround for
# Python 2's lack of `nonlocal` keyword, so the nested
# `strongconnect()` may modify it.
index = [0]
indices = {}
lowlinks = {}
stack = []
def strongconnect(specification):
# Set the depth index for the node to the smallest unused index.
indices[specification] = index[0]
lowlinks[specification] = index[0]
index[0] += 1
stack.append(specification)
provider = self.providers[specification]
dependencies = six.itervalues(provider.dependencies)
for dependency in dependencies:
if isinstance(dependency, Factory):
dependency = dependency.specification
if dependency not in self.providers:
raise MissingDependencyError(specification, dependency)
if dependency == specification:
raise SelfDependencyError(specification)
if dependency not in indices:
# Dependency has not yet been visited; recurse on it.
strongconnect(dependency)
lowlinks[specification] = min(
lowlinks[specification],
lowlinks[dependency]
)
elif dependency in stack:
# Dependency is in stack and hence in the current strongly
# connected component.
lowlinks[specification] = min(
lowlinks[specification],
indices[dependency]
)
if lowlinks[specification] == indices[specification]:
component = []
while True:
component.append(stack.pop())
if component[-1] == specification:
break
if len(component) > 1:
raise DependencyCycleError(reversed(component))
for specification, provider in six.iteritems(self.providers):
if specification not in indices:
strongconnect(specification)
|
msiedlarek/wiring
|
wiring/graph.py
|
Graph.validate
|
python
|
def validate(self):
# This method uses Tarjan's strongly connected components algorithm
# with added self-dependency check to find dependency cycles.
# Index is just an integer, it's wrapped in a list as a workaround for
# Python 2's lack of `nonlocal` keyword, so the nested
# `strongconnect()` may modify it.
index = [0]
indices = {}
lowlinks = {}
stack = []
def strongconnect(specification):
# Set the depth index for the node to the smallest unused index.
indices[specification] = index[0]
lowlinks[specification] = index[0]
index[0] += 1
stack.append(specification)
provider = self.providers[specification]
dependencies = six.itervalues(provider.dependencies)
for dependency in dependencies:
if isinstance(dependency, Factory):
dependency = dependency.specification
if dependency not in self.providers:
raise MissingDependencyError(specification, dependency)
if dependency == specification:
raise SelfDependencyError(specification)
if dependency not in indices:
# Dependency has not yet been visited; recurse on it.
strongconnect(dependency)
lowlinks[specification] = min(
lowlinks[specification],
lowlinks[dependency]
)
elif dependency in stack:
# Dependency is in stack and hence in the current strongly
# connected component.
lowlinks[specification] = min(
lowlinks[specification],
indices[dependency]
)
if lowlinks[specification] == indices[specification]:
component = []
while True:
component.append(stack.pop())
if component[-1] == specification:
break
if len(component) > 1:
raise DependencyCycleError(reversed(component))
for specification, provider in six.iteritems(self.providers):
if specification not in indices:
strongconnect(specification)
|
Asserts that every declared :term:`specification` can actually be
realized, meaning that all of its :term:`dependencies <dependency>` are
present and there are no self-dependencies or :term:`dependency cycles
<dependency cycle>`. If such a problem is found, a proper exception
(deriving from :py:class:`GraphValidationError`) is raised.
:raises:
:py:exc:`MissingDependencyError`,
:py:exc:`SelfDependencyError`,
:py:exc:`DependencyCycleError`
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/graph.py#L291-L356
|
[
"def strongconnect(specification):\n # Set the depth index for the node to the smallest unused index.\n indices[specification] = index[0]\n lowlinks[specification] = index[0]\n index[0] += 1\n stack.append(specification)\n provider = self.providers[specification]\n dependencies = six.itervalues(provider.dependencies)\n for dependency in dependencies:\n if isinstance(dependency, Factory):\n dependency = dependency.specification\n if dependency not in self.providers:\n raise MissingDependencyError(specification, dependency)\n if dependency == specification:\n raise SelfDependencyError(specification)\n if dependency not in indices:\n # Dependency has not yet been visited; recurse on it.\n strongconnect(dependency)\n lowlinks[specification] = min(\n lowlinks[specification],\n lowlinks[dependency]\n )\n elif dependency in stack:\n # Dependency is in stack and hence in the current strongly\n # connected component.\n lowlinks[specification] = min(\n lowlinks[specification],\n indices[dependency]\n )\n if lowlinks[specification] == indices[specification]:\n component = []\n while True:\n component.append(stack.pop())\n if component[-1] == specification:\n break\n if len(component) > 1:\n raise DependencyCycleError(reversed(component))\n"
] |
class Graph(object):
"""
Represents an :term:`object graph`. Contains registered scopes and
providers, and can be used to validate and resolve provider dependencies
and creating provided objects.
"""
class FactoryProxy(object):
"""
A proxy object injected when `Factory(<specification>)` is requested as
a dependency.
"""
def __init__(self, graph, specification):
self.graph = graph
self.specification = specification
def __call__(self, *args, **kwargs):
return self.graph.get(self.specification, *args, **kwargs)
def __init__(self):
self.providers = {}
"""
Dictionary mapping :term:`specifications <specification>` to
:py:interface:`wiring.providers.IProvider` implementers that can
provide the specified object.
"""
self.scopes = {}
"""
Dictionary mapping :term:`scope` types to their instances. Scope
instances must conform to :py:interface:`wiring.scopes.IScope`
interface.
"""
self.register_scope(SingletonScope, SingletonScope())
self.register_scope(ProcessScope, ProcessScope())
self.register_scope(ThreadScope, ThreadScope())
def acquire(self, specification, arguments=None):
"""
Returns an object for `specification` injecting its provider
with a mix of its :term:`dependencies <dependency>` and given
`arguments`. If there is a conflict between the injectable
dependencies and `arguments`, the value from `arguments` is
used.
When one of `arguments` keys is neither an integer nor a string
a `TypeError` is raised.
:param specification:
An object :term:`specification`.
:param arguments:
A dictionary of arguments given to the object :term:`provider`,
overriding those that would be injected or filling in for those
that wouldn't. Positional arguments should be stored under 0-based
integer keys.
:raises:
TypeError
"""
if arguments is None:
realized_dependencies = {}
else:
realized_dependencies = copy.copy(arguments)
provider = self.providers[specification]
scope = None
if provider.scope is not None:
try:
scope = self.scopes[provider.scope]
except KeyError:
raise UnknownScopeError(provider.scope)
if scope is not None and specification in scope:
return scope[specification]
dependencies = six.iteritems(provider.dependencies)
for argument, dependency_specification in dependencies:
if argument not in realized_dependencies:
if isinstance(dependency_specification, Factory):
realized_dependencies[argument] = self.FactoryProxy(
self,
dependency_specification.specification
)
else:
realized_dependencies[argument] = self.acquire(
dependency_specification
)
args = []
kwargs = {}
for argument, value in six.iteritems(realized_dependencies):
if isinstance(argument, six.integer_types):
# Integer keys are for positional arguments.
if len(args) <= argument:
args.extend([None] * (argument + 1 - len(args)))
args[argument] = value
elif isinstance(argument, six.string_types):
# String keys are for keyword arguments.
kwargs[argument] = value
else:
raise TypeError(
"{} is not a valid argument key".format(repr(argument))
)
instance = provider(*args, **kwargs)
if scope is not None:
scope[specification] = instance
return instance
def get(self, specification, *args, **kwargs):
"""
A more convenient version of :py:meth:`acquire()` for when you can
provide positional arguments in a right order.
"""
arguments = dict(enumerate(args))
arguments.update(kwargs)
return self.acquire(specification, arguments=arguments)
def register_provider(self, specification, provider):
"""
Registers a :term:`provider` (a :py:class:`wiring.providers.Provider`
instance) to be called when an object specified by
:term:`specification` is needed. If there was already a provider for
this specification it is overridden.
"""
if provider.scope is not None and provider.scope not in self.scopes:
raise UnknownScopeError(provider.scope)
self.providers[specification] = provider
def unregister_provider(self, specification):
"""
Removes :term:`provider` for given `specification` from the graph.
"""
del self.providers[specification]
def register_factory(self, specification, factory, scope=None):
"""
Shortcut for creating and registering
a :py:class:`wiring.providers.FactoryProvider`.
"""
self.register_provider(
specification,
FactoryProvider(factory, scope=scope)
)
def register_function(self, specification, function, scope=None):
    """
    Shortcut for creating and registering
    a :py:class:`wiring.providers.FunctionProvider`.

    :param specification: the :term:`specification` to register under
    :param function: the function the provider will wrap
    :param scope: optional scope type for the provider
    """
    self.register_provider(
        specification,
        FunctionProvider(function, scope=scope)
    )
def register_instance(self, specification, instance):
    """
    Registers given `instance` to be used as-is when an object specified by
    given :term:`specification` is needed. If there was already a provider
    for this specification it is overridden.
    """
    # Wrap the object so it shares the common provider interface.
    self.register_provider(specification, InstanceProvider(instance))
def register_scope(self, scope_type, instance):
    """
    Registers `instance` as the :term:`scope` object for `scope_type`, so
    that providers declaring this scope type may later refer to it.
    """
    self.scopes.update({scope_type: instance})
def unregister_scope(self, scope_type):
    """
    Removes the :term:`scope` registered for `scope_type` from the graph.

    :raises: KeyError when no scope is registered for `scope_type`.
    """
    self.scopes.pop(scope_type)
|
msiedlarek/wiring
|
wiring/scanning/register.py
|
register
|
python
|
def register(provider_factory, *args, **kwargs):
    """
    Return a decorator that registers its target for Venusian scanning so
    it can later be picked up by :py:func:`wiring.scanning.scan.scan`.

    :param provider_factory:
        callable invoked at scan time with the decorated object and
        `kwargs` as arguments; it must return a :term:`provider`.
    :param args:
        elements of the :term:`specification` to register under; a single
        element is used directly, several form a tuple, and none means the
        decorated object itself is used as the specification.
    """
    def decorator(target):
        def callback(scanner, name, target):
            # Build the specification from the decorator's positional
            # arguments (see the docstring for the exact rules).
            if not args:
                specification = target
            elif len(args) == 1:
                specification = args[0]
            else:
                specification = tuple(args)
            scanner.callback(
                specification,
                provider_factory(target, **kwargs)
            )
        # Defer actual registration until a venusian scan reaches target.
        venusian.attach(target, callback, category=WIRING_VENUSIAN_CATEGORY)
        return target
    return decorator
|
Returns a decorator that registers its arguments for scanning, so it can be
picked up by :py:func:`wiring.scanning.scan.scan`.
First argument - `provider_factory` - is a callable that is invoked during
scanning with decorated argument and `kwargs` as arguments, and it should
return a :term:`provider` to be registered.
Rest of the positional arguments (`args`) are used to build the
:term:`specification` for registration. If there is only one - it is used
directly as a specification. If there are more - a tuple of them is used as
a specification. If there are none - the decorated object itself is used as
a specification.
Example::
@register(FactoryProvider)
class MyClass:
pass
graph = Graph()
scan_to_graph([__package__], graph)
assert isinstance(graph.get(MyClass), MyClass)
Another example::
@register(FactoryProvider, 'my_factory')
class MyClass:
pass
graph = Graph()
scan_to_graph([__package__], graph)
assert isinstance(graph.get('my_factory'), MyClass)
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/scanning/register.py#L14-L63
| null |
import venusian
from wiring import FactoryProvider, FunctionProvider, InstanceProvider
WIRING_VENUSIAN_CATEGORY = 'wiring'
"""
A `Venusian`_ category under which all Wiring callbacks are registered.
.. _Venusian: https://pypi.python.org/pypi/venusian
"""
def factory(*args, **kwargs):
    """
    A shortcut for using :py:func:`register` with
    :py:class:`wiring.providers.FactoryProvider`.

    Positional arguments form the :term:`specification`; keyword arguments
    are forwarded to the provider factory.

    Example::

        from wiring.scanning import register

        @register.factory()
        class MyClass:
            pass
    """
    return register(FactoryProvider, *args, **kwargs)
def function(*args, **kwargs):
    """
    A shortcut for using :py:func:`register` with
    :py:class:`wiring.providers.FunctionProvider`.

    Positional arguments form the :term:`specification`; keyword arguments
    are forwarded to the provider factory.

    Example::

        from wiring.scanning import register

        @register.function()
        def my_function():
            pass
    """
    return register(FunctionProvider, *args, **kwargs)
def instance(*args, **kwargs):
    """
    A shortcut for using :py:func:`register` with
    :py:class:`wiring.providers.InstanceProvider`.

    Example::

        from wiring.scanning import register

        class MyGlobal:
            pass

        my_global = MyGlobal()
        register.instance('my_global')(my_global)
    """
    return register(InstanceProvider, *args, **kwargs)
|
msiedlarek/wiring
|
wiring/dependency.py
|
get_dependencies
|
python
|
def get_dependencies(factory):
    """
    Inspect `factory` to find its arguments marked for injection — via the
    :py:func:`inject()` decorator, :py:class:`UnrealizedInjection` default
    values, or Python 3 function annotations. If `factory` is a class, its
    constructor is inspected.

    Returns a mapping of argument index (positional) or name (keyword) to
    the :term:`specification` to inject.

    :raises: TypeError when `factory` is neither a class nor a function.
    """
    if inspect.isclass(factory):
        # If factory is a class we want to check constructor dependencies.
        if six.PY3:
            init_check = inspect.isfunction
        else:
            init_check = inspect.ismethod
        dependencies = {}
        if hasattr(factory, '__init__') and init_check(factory.__init__):
            dependencies.update(get_dependencies(factory.__init__))
        if hasattr(factory, '__new__') and inspect.isfunction(factory.__new__):
            dependencies.update(get_dependencies(factory.__new__))
        return dependencies
    elif inspect.isfunction(factory) or inspect.ismethod(factory):
        function = factory
    else:
        raise TypeError("`factory` must be a class or a function.")
    if hasattr(function, '__injection__'):
        # Function has precollected dependencies (happens when using the
        # `inject()` decorator). Nothing to do here.
        return function.__injection__
    dependencies = {}
    def process_dependency_tuples(tuples):
        # Only values explicitly wrapped in UnrealizedInjection count.
        for key, value in tuples:
            if isinstance(value, UnrealizedInjection):
                dependencies[key] = value.specification
    if six.PY3:
        argument_specification = inspect.getfullargspec(function)
        if argument_specification.kwonlydefaults:
            process_dependency_tuples(
                six.iteritems(argument_specification.kwonlydefaults)
            )
        if argument_specification.annotations:
            dependencies.update(argument_specification.annotations)
    else:
        argument_specification = inspect.getargspec(function)
        if argument_specification.defaults:
            # Defaults align with the trailing arguments, hence reversed.
            process_dependency_tuples(zip(
                reversed(argument_specification.args),
                reversed(argument_specification.defaults)
            ))
    return dependencies
|
This function inspects a function to find its arguments marked for
injection, either with :py:func:`inject()` decorator,
:py:class:`UnrealizedInjection` class, or through Python 3 function
annotations. If `factory` is a class, then its constructor is inspected.
Returned dictionary is a mapping of::
[argument index/name] -> [specification]
For example, dependencies for function::
@inject(ILogger, db=(IDBConnection, 'archive'))
def foo(log, db=None):
pass
would be::
{
0: ILogger,
'db': (IDBConnection, 'archive'),
}
`Old-style classes`_ (from before Python 2.2) are not supported.
.. _Old-style classes:
https://docs.python.org/2/reference/datamodel.html#new-style-and-classic-classes
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/dependency.py#L177-L249
| null |
import inspect
import six
__all__ = (
'Factory',
'UnrealizedInjection',
'get_dependencies',
'inject',
'injected',
)
class Factory(tuple):
    """
    Wraps a :term:`specification` to declare that a factory callable for
    the specified object — rather than the object itself — should be
    injected. The injected callable accepts additional arguments that are
    merged with the injected ones, just like in
    :py:meth:`wiring.graph.Graph.get`.

    For example::

        class DBConnection(object):
            @injected('db.url')
            def __init__(self, url, read_only=False):
                # ....

        @inject(db_factory=Factory('db.connection'))
        def get_user(id, db_factory=None):
            db = db_factory(read_only=True)
            return db.get_model('user', id=id)

    Unless an instance for `db.connection` is cached in a scope, each call
    to `get_user()` creates a fresh database connection.

    This is particularly useful for objects from a narrower scope, e.g.
    a thread-scoped database connection held by an application singleton:
    saving a single connection in the constructor would share it across
    threads, defeating the thread scope. Injecting a factory instead lets
    each method obtain the connection for the current thread.
    """

    __slots__ = []

    def __new__(cls, *specification):
        """
        Construct from one or more :term:`specification` elements. A single
        element is used as-is; several are combined into a tuple, so
        ``Factory(IDBConnection, 'archive')`` declares the specification
        ``(IDBConnection, 'archive')``.

        :raises: ValueError when no specification elements are given.
        """
        if not specification:
            raise ValueError("No dependency specification given.")
        # One element stands alone; several form a composite tuple spec.
        spec = specification[0] if len(specification) == 1 else tuple(specification)
        return super(Factory, cls).__new__(
            cls,
            (spec,)
        )

    @property
    def specification(self):
        """The wrapped :term:`specification` whose factory is injected."""
        return self[0]

    def __repr__(self):
        spec = self.specification
        # Non-tuple specs are parenthesized to mirror the tuple rendering.
        rendered = spec if isinstance(spec, tuple) else '({})'.format(spec)
        return '<Factory{specification}>'.format(specification=rendered)
class UnrealizedInjection(tuple):
    """
    Placeholder used as a default argument value to mark that the argument
    should receive an injected :term:`dependency`, without using the
    :py:func:`inject()` decorator. For example::

        def __init__(self, db_connection=UnrealizedInjection(IDBConnection)):
            if not db_connection:
                raise ValueError()

    Instances always evaluate to `False` in boolean context, making it easy
    to detect dependencies that haven't been injected. They are immutable,
    `as any default argument value in Python should be
    <http://docs.python-guide.org/en/latest/writing/gotchas/#mutable-default-arguments>`_.

    The :py:data:`injected` name in this package is a shortcut for this
    class.
    """

    __slots__ = []

    def __new__(cls, *specification):
        """
        Construct from one or more :term:`specification` elements. A single
        element is used as-is; several are combined into a tuple, so
        ``UnrealizedInjection(IDBConnection, 'archive')`` declares the
        specification ``(IDBConnection, 'archive')``.

        :raises: ValueError when no specification elements are given.
        """
        if not specification:
            raise ValueError("No dependency specification given.")
        # One element stands alone; several form a composite tuple spec.
        spec = specification[0] if len(specification) == 1 else tuple(specification)
        return super(UnrealizedInjection, cls).__new__(
            cls,
            (spec,)
        )

    @property
    def specification(self):
        """The :term:`specification` to inject in place of this marker."""
        return self[0]

    def __repr__(self):
        spec = self.specification
        # Non-tuple specs are parenthesized to mirror the tuple rendering.
        rendered = spec if isinstance(spec, tuple) else '({})'.format(spec)
        return '<UnrealizedInjection{specification}>'.format(
            specification=rendered
        )

    def __bool__(self):
        # Always falsy so callers can detect a not-yet-injected argument.
        return False

    # Python 2 truth protocol; alias keeps both paths identical.
    __nonzero__ = __bool__
def inject(*positional_dependencies, **keyword_dependencies):
    """
    This decorator can be used to specify injection rules for decorated
    function arguments. Each argument to this decorator should be
    a :term:`specification` for injecting into the related argument of the
    decorated function. `None` can be given instead of a specification to
    prevent an argument from being injected. This is handy for positional
    arguments.

    Example::

        @inject(None, IDBConnection, logger=(ILogger, 'system'))
        def foo(noninjectable_argument, db_connection, logger=None):
            pass

    This decorator can be used multiple times and also with
    :py:class:`UnrealizedInjection` class. Specified dependencies are
    collected and when conflicting the outermost :term:`specification` is
    used::

        @inject(db=(IDBConnection, 'archive2'))
        @inject(db=IDBConnection)
        def foo(db=injected(IDBConnection, 'archive')):
            # In this example 'archive2' database connection will be
            # injected.
            pass
    """
    def decorator(function):
        # Start from dependencies already declared on the function (inner
        # @inject calls, UnrealizedInjection defaults, annotations).
        dependencies = get_dependencies(function)
        def process_dependency_tuples(tuples):
            # Normalize each entry: unwrap UnrealizedInjection markers and
            # treat None as "do not inject this argument".
            for key, dependency_description in tuples:
                if dependency_description is None:
                    specification = None
                elif isinstance(dependency_description, UnrealizedInjection):
                    specification = dependency_description.specification
                else:
                    specification = dependency_description
                if specification is None:
                    # Explicitly drop any previously collected dependency.
                    try:
                        del dependencies[key]
                    except KeyError:
                        pass
                else:
                    dependencies[key] = specification
        process_dependency_tuples(enumerate(positional_dependencies))
        process_dependency_tuples(six.iteritems(keyword_dependencies))
        # Cache the collected mapping where get_dependencies() looks first.
        function.__injection__ = dependencies
        return function
    return decorator
injected = UnrealizedInjection
"""
Shortcut for :py:class:`UnrealizedInjection` to be used in method definition
arguments.
"""
|
msiedlarek/wiring
|
wiring/dependency.py
|
inject
|
python
|
def inject(*positional_dependencies, **keyword_dependencies):
def decorator(function):
dependencies = get_dependencies(function)
def process_dependency_tuples(tuples):
for key, dependency_description in tuples:
if dependency_description is None:
specification = None
elif isinstance(dependency_description, UnrealizedInjection):
specification = dependency_description.specification
else:
specification = dependency_description
if specification is None:
try:
del dependencies[key]
except KeyError:
pass
else:
dependencies[key] = specification
process_dependency_tuples(enumerate(positional_dependencies))
process_dependency_tuples(six.iteritems(keyword_dependencies))
function.__injection__ = dependencies
return function
return decorator
|
This decorator can be used to specify injection rules for decorated
function arguments. Each argument to this decorator should be
a :term:`specification` for injecting into related argument of decorated
function. `None` can be given instead of a specification to prevent
argument from being injected. This is handy for positional arguments.
Example::
@inject(None, IDBConnection, logger=(ILogger, 'system'))
def foo(noninjectable_argument, db_connection, logger=None):
pass
This decorator can be used multiple times and also with
:py:class:`UnrealizedInjection` class. Specified dependencies are collected
and when conflicting the outermost :term:`specification` is used::
@inject(db=(IDBConnection, 'archive2'))
@inject(db=IDBConnection)
def foo(db=injected(IDBConnection, 'archive')):
# In this example 'archive2' database connection will be injected.
pass
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/dependency.py#L252-L299
| null |
import inspect
import six
__all__ = (
'Factory',
'UnrealizedInjection',
'get_dependencies',
'inject',
'injected',
)
class Factory(tuple):
"""
This class is a wrapper for a specification, declaring that instead of
a created object for the specification, a callable returning the object
should be injected. This callable accepts additional arguments that will be
merged with the injected ones, just like in
:py:meth:`wiring.graph.Graph.get` method.
For example::
class DBConnection(object):
@injected('db.url')
def __init__(self, url, read_only=False):
# ....
@inject(db_factory=Factory('db.connection')):
def get_user(id, db_factory=None):
db = db_factory(read_only=True)
return db.get_model('user', id=id)
Unless an instance for `db.connection` specification is cached in a scope,
each execution of `get_user()` will create a new database connection
object.
This feature is particularly useful when you need an object from a narrower
scope, like a thread-scoped database connection in an application
singleton. You cannot just get a connection object in the application
constructor and save it, because when one of its methods is called from
a different thread it will use the same connection object. That effectively
defeats the purpose of thread scope.
To prevent that you can inject and save in a constructor a factory of
database connections and call it in every method to obtain a connection
object for current thread.
"""
__slots__ = []
def __new__(cls, *specification):
"""
You construct this class by giving it :term:`specification` elements.
For example, if your specification is::
(IDBConnection, 'archive')
then you can declare the :term:`dependency` like this::
@inject(db=Factory(IDBConnection, 'archive'))
def foo(db=None):
pass
When no specification is given to the class constructor,
a `ValueError` is raised.
:raises:
ValueError
"""
if not specification:
raise ValueError("No dependency specification given.")
if len(specification) == 1:
specification = specification[0]
else:
specification = tuple(specification)
return super(Factory, cls).__new__(
cls,
(specification,)
)
@property
def specification(self):
"""
A :term:`specification` of an object of which a factory will be
injected.
"""
return self[0]
def __repr__(self):
specification = self.specification
if not isinstance(specification, tuple):
specification = '({})'.format(specification)
return '<Factory{specification}>'.format(
specification=specification
)
class UnrealizedInjection(tuple):
"""
Instances of this class are placeholders that can be used as default values
for arguments to mark that they should be provided with injected
:term:`dependency`, without using the :py:func:`inject()` decorator. For
example::
def __init__(self, db_connection=UnrealizedInjection(IDBConnection)):
if not db_connection:
raise ValueError()
Note that instances of this class always evaluate to `False` when converted
to boolean, to allow easy checking for dependencies that haven't been
injected.
Instances of this class are immutable, `as any default argument value in
Python should be
<http://docs.python-guide.org/en/latest/writing/gotchas/#mutable-default-arguments>`_.
There's also an :py:data:`injected` shortcut for this class in this
package.
"""
__slots__ = []
def __new__(cls, *specification):
"""
You construct this class by giving it :term:`specification` elements.
For example, if your specification is::
(IDBConnection, 'archive')
then you can declare the :term:`dependency` like this::
def foo(db=UnrealizedInjection(IDBConnection, 'archive')):
pass
When no specification is given to the class constructor,
a `ValueError` is raised.
:raises:
ValueError
"""
if not specification:
raise ValueError("No dependency specification given.")
if len(specification) == 1:
specification = specification[0]
else:
specification = tuple(specification)
return super(UnrealizedInjection, cls).__new__(
cls,
(specification,)
)
@property
def specification(self):
"""
A :term:`specification` of an object that should be injected in place
of this placholder.
"""
return self[0]
def __repr__(self):
specification = self.specification
if not isinstance(specification, tuple):
specification = '({})'.format(specification)
return '<UnrealizedInjection{specification}>'.format(
specification=specification
)
def __bool__(self):
return False
def __nonzero__(self):
return False
def get_dependencies(factory):
"""
This function inspects a function to find its arguments marked for
injection, either with :py:func:`inject()` decorator,
:py:class:`UnrealizedInjection` class, or through Python 3 function
annotations. If `factory` is a class, then its constructor is inspected.
Returned dictionary is a mapping of::
[argument index/name] -> [specification]
For example, dependencies for function::
@inject(ILogger, db=(IDBConnection, 'archive'))
def foo(log, db=None):
pass
would be::
{
0: ILogger,
'db': (IDBConnection, 'archive'),
}
`Old-style classes`_ (from before Python 2.2) are not supported.
.. _Old-style classes:
https://docs.python.org/2/reference/datamodel.html#new-style-and-classic-classes
"""
if inspect.isclass(factory):
# If factory is a class we want to check constructor depdendencies.
if six.PY3:
init_check = inspect.isfunction
else:
init_check = inspect.ismethod
dependencies = {}
if hasattr(factory, '__init__') and init_check(factory.__init__):
dependencies.update(get_dependencies(factory.__init__))
if hasattr(factory, '__new__') and inspect.isfunction(factory.__new__):
dependencies.update(get_dependencies(factory.__new__))
return dependencies
elif inspect.isfunction(factory) or inspect.ismethod(factory):
function = factory
else:
raise TypeError("`factory` must be a class or a function.")
if hasattr(function, '__injection__'):
# Function has precollected dependencies (happens when using `inject()`
# decorator. Nothing to do here.
return function.__injection__
dependencies = {}
def process_dependency_tuples(tuples):
for key, value in tuples:
if isinstance(value, UnrealizedInjection):
dependencies[key] = value.specification
if six.PY3:
argument_specification = inspect.getfullargspec(function)
if argument_specification.kwonlydefaults:
process_dependency_tuples(
six.iteritems(argument_specification.kwonlydefaults)
)
if argument_specification.annotations:
dependencies.update(argument_specification.annotations)
else:
argument_specification = inspect.getargspec(function)
if argument_specification.defaults:
process_dependency_tuples(zip(
reversed(argument_specification.args),
reversed(argument_specification.defaults)
))
return dependencies
injected = UnrealizedInjection
"""
Shortcut for :py:class:`UnrealizedInjection` to be used in method definition
arguments.
"""
|
msiedlarek/wiring
|
wiring/configuration.py
|
provides
|
python
|
def provides(*specification):
    """
    Decorator marking the wrapped :py:class:`Module` method as
    a :term:`provider` for the given :term:`specification`.

    A single positional argument is used directly as the specification;
    several are combined into a tuple.

    For example::

        class ApplicationModule(Module):
            @provides('db_connection')
            def provide_db_connection(self):
                return DBConnection(host='localhost')
    """
    spec = specification[0] if len(specification) == 1 else tuple(specification)

    def decorator(function):
        # Stamp the target so it can be discovered as a provider later.
        function.__provides__ = spec
        return function

    return decorator
|
Decorator marking wrapped :py:class:`Module` method as :term:`provider` for
given :term:`specification`.
For example::
class ApplicationModule(Module):
@provides('db_connection')
def provide_db_connection(self):
return DBConnection(host='localhost')
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/configuration.py#L249-L271
| null |
import collections
import inspect
import six
from wiring.providers import (
FactoryProvider,
FunctionProvider,
InstanceProvider
)
__all__ = (
'InvalidConfigurationError',
'Module',
'provides',
'scope',
)
class InvalidConfigurationError(Exception):
    """
    Raised when a :term:`module` class is misconfigured — for example when
    it defines more than one :term:`provider` for a single
    :term:`specification`.
    """

    def __init__(self, module, message):
        # module: the module class where the problem was found
        # message: human-readable description of the problem
        self.module = module
        self.message = message

    def __str__(self):
        dotted_name = '.'.join((self.module.__module__, self.module.__name__))
        return "Configuration error in module {module}: {message}".format(
            module=dotted_name,
            message=self.message
        )
class ModuleMetaclass(type):
    """
    This metaclass analyzes special attributes of new :term:`module`
    classes and generates a `providers` attribute, which is a mapping of
    :term:`specifications <specification>` and related :term:`providers
    <provider>`.

    Supported attributes are:

    `providers`
        A dictionary mapping specifications to provider objects,
        implementing :py:interface:`wiring.providers.IProvider` interface.
    `instances`
        A dictionary mapping specifications to objects that will be wrapped
        in :py:class:`wiring.providers.InstanceProvider`.
    `factories`
        A dictionary mapping specifications to callables that will be
        wrapped in :py:class:`wiring.providers.FactoryProvider`. If
        a dictionary value is a tuple, the first element is treated as the
        callable and the second as the scope type for this provider.
    `functions`
        A dictionary mapping specifications to callables that will be
        wrapped in :py:class:`wiring.providers.FunctionProvider`.

    The last three attributes are provided for convenience and are merged
    into the first one by this metaclass. For example this module::

        class SomeModule(Module):
            providers = {
                'foo': CustomProvider('foo'),
            }
            instances = {
                'db_url': 'sqlite://somedb',
            }
            factories = {
                'db_connection': (DatabaseConnection, ThreadScope),
                'bar': create_bar,
            }
            functions = {
                'foobarize': foobarize,
            }

            @provides('fizz')
            def provide_fizz(self, db_connection=injected('db_connection')):
                return db_connection.sql('SELECT fizz FROM buzz;')

    is an equivalent of::

        class SomeModule(Module):
            providers = {
                'foo': CustomProvider('foo'),
                'db_url': InstanceProvider('sqlite://somedb'),
                'db_connection': FactoryProvider(
                    DatabaseConnection,
                    scope=ThreadScope
                ),
                'bar': FactoryProvider(create_bar),
                'foobarize': FunctionProvider(foobarize),
            }

            @provides('fizz')
            def provide_fizz(self, db_connection=injected('db_connection')):
                return db_connection.sql('SELECT fizz FROM buzz;')

    Defined modules can later register their providers into an
    :term:`object graph` using :py:meth:`Module.add_to`.

    When there is more than one provider declared for a single
    specification, :py:exc:`InvalidConfigurationError` is raised.

    :raises:
        :py:exc:`InvalidConfigurationError`
    """

    def __new__(cls, module_name, bases, attributes):
        # Names handled specially below; they are stripped from the class
        # body and merged into the generated `providers` mapping instead.
        special_attributes = (
            'providers',
            'instances',
            'factories',
            'functions',
        )
        module = super(ModuleMetaclass, cls).__new__(
            cls,
            module_name,
            bases,
            {
                key: value for key, value in six.iteritems(attributes)
                if key not in special_attributes
            }
        )
        providers = {}
        # Walk the MRO from the most distant ancestor down so subclasses
        # override inherited providers.
        for ancestor in reversed(inspect.getmro(module)):
            if cls._is_module_class(ancestor):
                providers.update(ancestor.providers)
        already_provided = set()
        providers_attribute = attributes.get('providers', {})
        providers.update(providers_attribute)
        already_provided.update(six.iterkeys(providers_attribute))
        def check_specification(key):
            # Guard against two sources providing the same specification
            # within this class body.
            if key in already_provided:
                raise InvalidConfigurationError(
                    module,
                    "Multiple sources defined for specification {spec}".format(
                        spec=repr(key)
                    )
                )
            already_provided.add(key)
        for key, value in six.iteritems(attributes.get('instances', {})):
            check_specification(key)
            providers[key] = InstanceProvider(value)
        # `collections.Iterable` was removed in Python 3.10; fall back to
        # `collections.abc.Iterable` when available (Python 3), keeping
        # Python 2 compatibility via plain `collections`.
        iterable_type = getattr(collections, 'abc', collections).Iterable
        for key, value in six.iteritems(attributes.get('factories', {})):
            check_specification(key)
            # Accept either a bare callable or a (callable, scope) pair.
            if not isinstance(value, iterable_type):
                value = [value]
            if len(value) < 1 or len(value) > 2:
                raise InvalidConfigurationError(
                    module,
                    (
                        "Wrong number of arguments for {spec} in"
                        " `factories`."
                    ).format(
                        spec=repr(key)
                    )
                )
            providers[key] = FactoryProvider(
                value[0],
                scope=(value[1] if len(value) > 1 else None)
            )
        for key, value in six.iteritems(attributes.get('functions', {})):
            check_specification(key)
            providers[key] = FunctionProvider(value)
        for key, value in six.iteritems(attributes):
            if hasattr(value, '__provides__'):
                # Methods decorated with @provides are providers too; they
                # are registered later by Module.add_to(), but still must
                # not collide with other declarations.
                check_specification(value.__provides__)
        module.providers = providers
        return module

    @classmethod
    def _is_module_class(cls, other_class):
        # Only classes created by this metaclass — excluding the Module
        # base class itself — carry provider declarations to inherit.
        if not isinstance(other_class, cls):
            # Other class didn't come from this metaclass.
            return False
        if all(map(lambda b: not isinstance(b, cls), other_class.__bases__)):
            # Other class is the Module base class from this module.
            return False
        return True
@six.add_metaclass(ModuleMetaclass)
class Module(object):

    # The class docstring is composed at runtime from the metaclass
    # docstring so both stay in sync.
    __doc__ = """
    A base class for :term:`module` classes, using the
    :py:class:`ModuleMetaclass`.
    """ + ModuleMetaclass.__doc__

    providers = {}
    """
    A dictionary mapping specifications to provider objects, implementing
    :py:interface:`wiring.providers.IProvider` interface.
    """

    scan = []
    """
    A sequence of module references to recursively scan for providers
    registered with :py:mod:`wiring.scanning.register` module.

    If a string is given instead of a module reference, it will be used to
    import the module.
    """

    scan_ignore = []
    """
    A sequence of module paths to ignore when scanning modules in
    :py:attr:`scan`.
    """

    def __init__(self):
        # Perform the declarative scan (if any) at instantiation time, so
        # providers are ready before add_to() is called. The import is
        # local to avoid a circular dependency with wiring.scanning.
        if self.scan:
            from wiring.scanning import scan_to_module
            scan_to_module(self.scan, self, ignore=self.scan_ignore)

    def add_to(self, graph):
        """
        Register all of declared providers into a given :term:`object graph`.
        """
        for specification, provider in six.iteritems(self.providers):
            graph.register_provider(specification, provider)
        # Methods decorated with @provides register as factories, honoring
        # an optional @scope declaration.
        for name in dir(self):
            value = getattr(self, name)
            if hasattr(value, '__provides__'):
                graph.register_factory(
                    value.__provides__,
                    value,
                    scope=getattr(value, '__scope__', None)
                )
def scope(scope):
    """
    Decorator declaring a :term:`scope` for the wrapped :py:class:`Module`
    :term:`provider` method. `scope` should be a scope type that will later
    be registered in an :term:`object graph`.

    For example::

        class ApplicationModule(Module):
            @provides('db_connection')
            @scope(ThreadScope)
            def provide_db_connection(self):
                return DBConnection(host='localhost')
    """
    def decorator(function):
        # Stamp the scope type; Module.add_to() reads it via getattr.
        setattr(function, '__scope__', scope)
        return function
    return decorator
|
msiedlarek/wiring
|
wiring/scanning/scan.py
|
scan_to_module
|
python
|
def scan_to_module(python_modules, module, ignore=tuple()):
    """
    Scans `python_modules` with :py:func:`scan` and adds found providers
    to `module`'s :py:attr:`wiring.configuration.Module.providers`.

    The `ignore` argument is passed through to :py:func:`scan`.
    """
    def callback(specification, provider):
        # Later discoveries override earlier entries with the same key.
        module.providers[specification] = provider
    scan(python_modules, callback, ignore=ignore)
|
Scans `python_modules` with :py:func:`scan` and adds found providers
to `module`'s :py:attr:`wiring.configuration.Module.providers`.
`ignore` argument is passed through to :py:func:`scan`.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/scanning/scan.py#L16-L25
|
[
"def scan(python_modules, callback, ignore=tuple()):\n \"\"\"\n Recursively scans `python_modules` for providers registered with\n :py:mod:`wiring.scanning.register` module and for each one calls `callback`\n with :term:`specification` as the first argument, and the provider object\n as the second.\n\n Each element in `python_modules` may be a module reference or a string\n representing a path to a module.\n\n Module paths given in `ignore` are excluded from scanning.\n \"\"\"\n scanner = venusian.Scanner(callback=callback)\n for python_module in python_modules:\n if isinstance(python_module, six.string_types):\n python_module = importlib.import_module(python_module)\n scanner.scan(\n python_module,\n categories=[WIRING_VENUSIAN_CATEGORY],\n ignore=ignore\n )\n"
] |
import importlib
import six
import venusian
from wiring.scanning.register import WIRING_VENUSIAN_CATEGORY
__all__ = (
'scan_to_module',
'scan_to_graph',
'scan',
)
def scan_to_graph(python_modules, graph, ignore=tuple()):
    """
    Scans `python_modules` with :py:func:`scan` and registers found providers
    in `graph`.

    `ignore` argument is passed through to :py:func:`scan`.

    :param python_modules: module references or importable module paths
    :param graph: the object graph receiving the discovered providers
    """
    def callback(specification, provider):
        graph.register_provider(specification, provider)
    scan(python_modules, callback, ignore=ignore)
def scan(python_modules, callback, ignore=tuple()):
    """
    Recursively scans `python_modules` for providers registered with
    :py:mod:`wiring.scanning.register` module and for each one calls `callback`
    with :term:`specification` as the first argument, and the provider object
    as the second.

    Each element in `python_modules` may be a module reference or a string
    representing a path to a module.

    Module paths given in `ignore` are excluded from scanning.
    """
    scanner = venusian.Scanner(callback=callback)
    for python_module in python_modules:
        # Accept dotted-path strings as well as module objects.
        if isinstance(python_module, six.string_types):
            python_module = importlib.import_module(python_module)
        # Only callbacks attached under the wiring category are invoked.
        scanner.scan(
            python_module,
            categories=[WIRING_VENUSIAN_CATEGORY],
            ignore=ignore
        )
|
msiedlarek/wiring
|
wiring/scanning/scan.py
|
scan_to_graph
|
python
|
def scan_to_graph(python_modules, graph, ignore=tuple()):
def callback(specification, provider):
graph.register_provider(specification, provider)
scan(python_modules, callback, ignore=ignore)
|
Scans `python_modules` with :py:func:`scan` and registers found providers
in `graph`.
`ignore` argument is passed through to :py:func:`scan`.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/scanning/scan.py#L28-L37
|
[
"def scan(python_modules, callback, ignore=tuple()):\n \"\"\"\n Recursively scans `python_modules` for providers registered with\n :py:mod:`wiring.scanning.register` module and for each one calls `callback`\n with :term:`specification` as the first argument, and the provider object\n as the second.\n\n Each element in `python_modules` may be a module reference or a string\n representing a path to a module.\n\n Module paths given in `ignore` are excluded from scanning.\n \"\"\"\n scanner = venusian.Scanner(callback=callback)\n for python_module in python_modules:\n if isinstance(python_module, six.string_types):\n python_module = importlib.import_module(python_module)\n scanner.scan(\n python_module,\n categories=[WIRING_VENUSIAN_CATEGORY],\n ignore=ignore\n )\n"
] |
import importlib
import six
import venusian
from wiring.scanning.register import WIRING_VENUSIAN_CATEGORY
__all__ = (
'scan_to_module',
'scan_to_graph',
'scan',
)
def scan_to_module(python_modules, module, ignore=tuple()):
"""
Scans `python_modules` with :py:func:`scan` and adds found providers
to `module`'s :py:attr:`wiring.configuration.Module.providers`.
`ignore` argument is passed through to :py:func:`scan`.
"""
def callback(specification, provider):
module.providers[specification] = provider
scan(python_modules, callback, ignore=ignore)
def scan(python_modules, callback, ignore=tuple()):
"""
Recursively scans `python_modules` for providers registered with
:py:mod:`wiring.scanning.register` module and for each one calls `callback`
with :term:`specification` as the first argument, and the provider object
as the second.
Each element in `python_modules` may be a module reference or a string
representing a path to a module.
Module paths given in `ignore` are excluded from scanning.
"""
scanner = venusian.Scanner(callback=callback)
for python_module in python_modules:
if isinstance(python_module, six.string_types):
python_module = importlib.import_module(python_module)
scanner.scan(
python_module,
categories=[WIRING_VENUSIAN_CATEGORY],
ignore=ignore
)
|
msiedlarek/wiring
|
wiring/scanning/scan.py
|
scan
|
python
|
def scan(python_modules, callback, ignore=tuple()):
scanner = venusian.Scanner(callback=callback)
for python_module in python_modules:
if isinstance(python_module, six.string_types):
python_module = importlib.import_module(python_module)
scanner.scan(
python_module,
categories=[WIRING_VENUSIAN_CATEGORY],
ignore=ignore
)
|
Recursively scans `python_modules` for providers registered with
:py:mod:`wiring.scanning.register` module and for each one calls `callback`
with :term:`specification` as the first argument, and the provider object
as the second.
Each element in `python_modules` may be a module reference or a string
representing a path to a module.
Module paths given in `ignore` are excluded from scanning.
|
train
|
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/scanning/scan.py#L40-L60
| null |
import importlib
import six
import venusian
from wiring.scanning.register import WIRING_VENUSIAN_CATEGORY
__all__ = (
'scan_to_module',
'scan_to_graph',
'scan',
)
def scan_to_module(python_modules, module, ignore=tuple()):
"""
Scans `python_modules` with :py:func:`scan` and adds found providers
to `module`'s :py:attr:`wiring.configuration.Module.providers`.
`ignore` argument is passed through to :py:func:`scan`.
"""
def callback(specification, provider):
module.providers[specification] = provider
scan(python_modules, callback, ignore=ignore)
def scan_to_graph(python_modules, graph, ignore=tuple()):
"""
Scans `python_modules` with :py:func:`scan` and registers found providers
in `graph`.
`ignore` argument is passed through to :py:func:`scan`.
"""
def callback(specification, provider):
graph.register_provider(specification, provider)
scan(python_modules, callback, ignore=ignore)
|
invoice-x/invoice2data
|
src/invoice2data/input/tesseract4.py
|
to_text
|
python
|
def to_text(path, language='fra'):
import subprocess
from distutils import spawn
import tempfile
import time
# Check for dependencies. Needs Tesseract and Imagemagick installed.
if not spawn.find_executable('tesseract'):
raise EnvironmentError('tesseract not installed.')
if not spawn.find_executable('convert'):
raise EnvironmentError('imagemagick not installed.')
if not spawn.find_executable('gs'):
raise EnvironmentError('ghostscript not installed.')
with tempfile.NamedTemporaryFile(suffix='.tiff') as tf:
# Step 1: Convert to TIFF
gs_cmd = [
'gs',
'-q',
'-dNOPAUSE',
'-r600x600',
'-sDEVICE=tiff24nc',
'-sOutputFile=' + tf.name,
path,
'-c',
'quit',
]
subprocess.Popen(gs_cmd)
time.sleep(3)
# Step 2: Enhance TIFF
magick_cmd = [
'convert',
tf.name,
'-colorspace',
'gray',
'-type',
'grayscale',
'-contrast-stretch',
'0',
'-sharpen',
'0x1',
'tiff:-',
]
p1 = subprocess.Popen(magick_cmd, stdout=subprocess.PIPE)
tess_cmd = ['tesseract', '-l', language, '--oem', '1', '--psm', '3', 'stdin', 'stdout']
p2 = subprocess.Popen(tess_cmd, stdin=p1.stdout, stdout=subprocess.PIPE)
out, err = p2.communicate()
extracted_str = out
return extracted_str
|
Wraps Tesseract 4 OCR with custom language model.
Parameters
----------
path : str
path of electronic invoice in JPG or PNG format
Returns
-------
extracted_str : str
returns extracted text from image in JPG or PNG format
|
train
|
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/input/tesseract4.py#L2-L69
| null |
# -*- coding: utf-8 -*-
|
invoice-x/invoice2data
|
src/invoice2data/input/gvision.py
|
to_text
|
python
|
def to_text(path, bucket_name='cloud-vision-84893', language='fr'):
"""OCR with PDF/TIFF as source files on GCS"""
import os
from google.cloud import vision
from google.cloud import storage
from google.protobuf import json_format
# Supported mime_types are: 'application/pdf' and 'image/tiff'
mime_type = 'application/pdf'
path_dir, filename = os.path.split(path)
result_blob_basename = filename.replace('.pdf', '').replace('.PDF', '')
result_blob_name = result_blob_basename + '/output-1-to-1.json'
result_blob_uri = 'gs://{}/{}/'.format(bucket_name, result_blob_basename)
input_blob_uri = 'gs://{}/{}'.format(bucket_name, filename)
# Upload file to gcloud if it doesn't exist yet
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
if bucket.get_blob(filename) is None:
blob = bucket.blob(filename)
blob.upload_from_filename(path)
# See if result already exists
# TODO: upload as hash, not filename
result_blob = bucket.get_blob(result_blob_name)
if result_blob is None:
# How many pages should be grouped into each json output file.
batch_size = 10
client = vision.ImageAnnotatorClient()
feature = vision.types.Feature(type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
gcs_source = vision.types.GcsSource(uri=input_blob_uri)
input_config = vision.types.InputConfig(gcs_source=gcs_source, mime_type=mime_type)
gcs_destination = vision.types.GcsDestination(uri=result_blob_uri)
output_config = vision.types.OutputConfig(
gcs_destination=gcs_destination, batch_size=batch_size
)
async_request = vision.types.AsyncAnnotateFileRequest(
features=[feature], input_config=input_config, output_config=output_config
)
operation = client.async_batch_annotate_files(requests=[async_request])
print('Waiting for the operation to finish.')
operation.result(timeout=180)
# Get result after OCR is completed
result_blob = bucket.get_blob(result_blob_name)
json_string = result_blob.download_as_string()
response = json_format.Parse(json_string, vision.types.AnnotateFileResponse())
# The actual response for the first page of the input file.
first_page_response = response.responses[0]
annotation = first_page_response.full_text_annotation
return annotation.text.encode('utf-8')
|
Sends PDF files to Google Cloud Vision for OCR.
Before using invoice2data, make sure you have the auth json path set as
env var GOOGLE_APPLICATION_CREDENTIALS
Parameters
----------
path : str
path of electronic invoice in JPG or PNG format
bucket_name : str
name of bucket to use for file storage and results cache.
Returns
-------
extracted_str : str
returns extracted text from image in JPG or PNG format
|
train
|
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/input/gvision.py#L2-L83
| null |
# -*- coding: utf-8 -*-
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.