text stringlengths 81 112k |
|---|
Render an arbitrary markdown document.
:param str text: (required), the text of the document to render
:param str mode: (optional), 'markdown' or 'gfm'
:param str context: (optional), only important when using mode 'gfm',
this is the repository to use as the context for the rendering
:param bool raw: (optional), renders a document like a README.md, no gfm,
no context
:returns: str -- HTML formatted text
def markdown(text, mode='', context='', raw=False):
    """Render an arbitrary markdown document.

    :param str text: (required), the text of the document to render
    :param str mode: (optional), 'markdown' or 'gfm'
    :param str context: (optional), only important when using mode 'gfm',
        this is the repository to use as the context for the rendering
    :param bool raw: (optional), renders a document like a README.md, no gfm,
        no context
    :returns: str -- HTML formatted text
    """
    # Delegate to the module-level anonymous GitHub session ``gh``.
    return gh.markdown(text, mode, context, raw)
Find repositories via various criteria.
.. warning::
You will only be able to make 5 calls with this or other search
functions. To raise the rate-limit on this set of endpoints, create an
authenticated :class:`GitHub <github3.github.GitHub>` Session with
``login``.
The query can contain any combination of the following supported
qualifiers:
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the repository name, description,
readme, or any combination of these.
- ``size`` Finds repositories that match a certain size (in
kilobytes).
- ``forks`` Filters repositories based on the number of forks, and/or
whether forked repositories should be included in the results at
all.
- ``created`` or ``pushed`` Filters repositories based on times of
creation, or when they were last updated. Format: ``YYYY-MM-DD``.
Examples: ``created:<2011``, ``pushed:<2013-02``,
``pushed:>=2013-03-06``
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
- ``language`` Searches repositories based on the language they're
written in.
- ``stars`` Searches repositories based on the number of stars.
For more information about these qualifiers, see: http://git.io/4Z8AkA
:param str query: (required), a valid query as described above, e.g.,
``tetris language:assembly``
:param str sort: (optional), how the results should be sorted;
options: ``stars``, ``forks``, ``updated``; default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/4ct1eQ for more information
:param int number: (optional), number of repositories to return.
Default: -1, returns all available repositories
:param str etag: (optional), previous ETag header value
:return: generator of :class:`Repository <github3.repos.Repository>`
def search_repositories(query, sort=None, order=None, per_page=None,
                        text_match=False, number=-1, etag=None):
    """Find repositories via various criteria.

    .. warning::

        You will only be able to make 5 calls with this or other search
        functions. To raise the rate-limit on this set of endpoints, create an
        authenticated :class:`GitHub <github3.github.GitHub>` Session with
        ``login``.

    The query can contain any combination of the following supported
    qualifiers:

    - ``in`` Qualifies which fields are searched. With this qualifier you
      can restrict the search to just the repository name, description,
      readme, or any combination of these.
    - ``size`` Finds repositories that match a certain size (in
      kilobytes).
    - ``forks`` Filters repositories based on the number of forks, and/or
      whether forked repositories should be included in the results at
      all.
    - ``created`` or ``pushed`` Filters repositories based on times of
      creation, or when they were last updated. Format: ``YYYY-MM-DD``.
      Examples: ``created:<2011``, ``pushed:<2013-02``,
      ``pushed:>=2013-03-06``
    - ``user`` or ``repo`` Limits searches to a specific user or
      repository.
    - ``language`` Searches repositories based on the language they're
      written in.
    - ``stars`` Searches repositories based on the number of stars.

    For more information about these qualifiers, see: http://git.io/4Z8AkA

    :param str query: (required), a valid query as described above, e.g.,
        ``tetris language:assembly``
    :param str sort: (optional), how the results should be sorted;
        options: ``stars``, ``forks``, ``updated``; default: best match
    :param str order: (optional), the direction of the sorted results,
        options: ``asc``, ``desc``; default: ``desc``
    :param int per_page: (optional)
    :param bool text_match: (optional), if True, return matching search
        terms. See http://git.io/4ct1eQ for more information
    :param int number: (optional), number of repositories to return.
        Default: -1, returns all available repositories
    :param str etag: (optional), previous ETag header value
    :return: generator of :class:`Repository <github3.repos.Repository>`
    """
    # Delegate to the module-level anonymous GitHub session ``gh``.
    return gh.search_repositories(query, sort, order, per_page, text_match,
                                  number, etag)
Describe limits in effect on your AWS account. See also https://console.aws.amazon.com/ec2/v2/home#Limits:
def limits(args):
    """
    Describe limits in effect on your AWS account. See also https://console.aws.amazon.com/ec2/v2/home#Limits:
    """
    # https://aws.amazon.com/about-aws/whats-new/2014/06/19/amazon-ec2-service-limits-report-now-available/
    # Console-only APIs: getInstanceLimits, getAccountLimits, getAutoscalingLimits, getHostLimits
    # http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Client.describe_limits
    attribute_names = [
        "max-instances",
        "vpc-max-security-groups-per-interface",
        "vpc-max-elastic-ips",
    ]
    response = clients.ec2.describe_account_attributes(AttributeNames=attribute_names)
    page_output(tabulate(response["AccountAttributes"], args))
Add labels to this issue.
:param str args: (required), names of the labels you wish to add
:returns: list of :class:`Label`\ s
def add_labels(self, *args):
    """Add labels to this issue.

    :param str args: (required), names of the labels you wish to add
    :returns: list of :class:`Label`\ s
    """
    endpoint = self._build_url('labels', base_url=self._api)
    response = self._json(self._post(endpoint, data=args), 200)
    if not response:
        return []
    return [Label(item, self) for item in response]
Assigns user ``login`` to this issue. This is a short cut for
``issue.edit``.
:param str login: username of the person to assign this issue to
:returns: bool
def assign(self, login):
    """Assign user ``login`` to this issue (a shortcut for ``issue.edit``).

    :param str login: username of the person to assign this issue to
    :returns: bool
    """
    if not login:
        return False
    milestone_number = self.milestone.number if self.milestone else None
    label_names = [str(label) for label in self.labels]
    return self.edit(self.title, self.body, login, self.state,
                     milestone_number, label_names)
Get a single comment by its id.
The catch here is that id is NOT a simple number to obtain. If
you were to look at the comments on issue #15 in
sigmavirus24/Todo.txt-python, the first comment's id is 4150787.
:param int id_num: (required), comment id, see example above
:returns: :class:`IssueComment <github3.issues.comment.IssueComment>`
def comment(self, id_num):
    """Get a single comment by its id.

    The catch here is that the id is NOT a simple ordinal. If you look at
    the comments on issue #15 in sigmavirus24/Todo.txt-python, the first
    comment's id is 4150787.

    :param int id_num: (required), comment id, see example above
    :returns: :class:`IssueComment <github3.issues.comment.IssueComment>`
    """
    # Reject non-positive ids up front; the API has no such comments.
    if int(id_num) <= 0:
        return None
    owner, repo = self.repository
    url = self._build_url('repos', owner, repo, 'issues', 'comments',
                          str(id_num))
    json = self._json(self._get(url), 200)
    return IssueComment(json) if json else None
Create a comment on this issue.
:param str body: (required), comment body
:returns: :class:`IssueComment <github3.issues.comment.IssueComment>`
def create_comment(self, body):
    """Create a comment on this issue.

    :param str body: (required), comment body
    :returns: :class:`IssueComment <github3.issues.comment.IssueComment>`
    """
    # An empty body cannot be posted.
    if not body:
        return None
    url = self._build_url('comments', base_url=self._api)
    json = self._json(self._post(url, data={'body': body}), 201)
    return IssueComment(json, self) if json else None
Edit this issue.
:param str title: Title of the issue
:param str body: markdown formatted body (description) of the issue
:param str assignee: login name of user the issue should be assigned
to
:param str state: accepted values: ('open', 'closed')
:param int milestone: the NUMBER (not title) of the milestone to
assign this to [1]_, or 0 to remove the milestone
:param list labels: list of labels to apply this to
:returns: bool
.. [1] Milestone numbering starts at 1, i.e. the first milestone you
create is 1, the second is 2, etc.
def edit(self, title=None, body=None, assignee=None, state=None,
         milestone=None, labels=None):
    """Edit this issue.

    :param str title: Title of the issue
    :param str body: markdown formatted body (description) of the issue
    :param str assignee: login name of user the issue should be assigned
        to
    :param str state: accepted values: ('open', 'closed')
    :param int milestone: the NUMBER (not title) of the milestone to
        assign this to [1]_, or 0 to remove the milestone
    :param list labels: list of labels to apply this to
    :returns: bool

    .. [1] Milestone numbering starts at 1, i.e. the first milestone you
       create is 1, the second is 2, etc.
    """
    payload = {'title': title, 'body': body, 'assignee': assignee,
               'state': state, 'milestone': milestone, 'labels': labels}
    self._remove_none(payload)
    json = None
    if payload:
        # Sending milestone=0 clears the milestone on the issue.
        if payload.get('milestone') == 0:
            payload['milestone'] = None
        json = self._json(self._patch(self._api, data=dumps(payload)), 200)
    if json:
        self._update_(json)
        return True
    return False
Iterate over the comments on this issue.
:param int number: (optional), number of comments to iterate over
:returns: iterator of
:class:`IssueComment <github3.issues.comment.IssueComment>`\ s
def iter_comments(self, number=-1):
    """Iterate over the comments on this issue.

    :param int number: (optional), number of comments to iterate over
    :returns: iterator of
        :class:`IssueComment <github3.issues.comment.IssueComment>`\ s
    """
    comments_url = self._build_url('comments', base_url=self._api)
    return self._iter(int(number), comments_url, IssueComment)
Iterate over events associated with this issue only.
:param int number: (optional), number of events to return. Default: -1
returns all events available.
:returns: generator of
:class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
def iter_events(self, number=-1):
    """Iterate over events associated with this issue only.

    :param int number: (optional), number of events to return. Default: -1
        returns all events available.
    :returns: generator of
        :class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
    """
    events_url = self._build_url('events', base_url=self._api)
    return self._iter(int(number), events_url, IssueEvent)
Removes label ``name`` from this issue.
:param str name: (required), name of the label to remove
:returns: bool
def remove_label(self, name):
    """Remove label ``name`` from this issue.

    :param str name: (required), name of the label to remove
    :returns: bool
    """
    label_url = self._build_url('labels', name, base_url=self._api)
    # The API docs claim a list of strings is returned, but in practice
    # the endpoint answers with a bare 204/404, so treat it as a boolean.
    return self._boolean(self._delete(label_url), 204, 404)
Replace all labels on this issue with ``labels``.
:param list labels: label names
:returns: bool
def replace_labels(self, labels):
    """Replace all labels on this issue with ``labels``.

    :param list labels: label names
    :returns: bool
    """
    endpoint = self._build_url('labels', base_url=self._api)
    response = self._json(self._put(endpoint, data=dumps(labels)), 200)
    if not response:
        return []
    return [Label(item, self) for item in response]
Re-open a closed issue.
:returns: bool
def reopen(self):
    """Re-open a closed issue.

    :returns: bool
    """
    current_assignee = self.assignee.login if self.assignee else ''
    milestone_number = self.milestone.number if self.milestone else None
    label_names = [str(label) for label in self.labels]
    return self.edit(self.title, self.body, current_assignee, 'open',
                     milestone_number, label_names)
Convert an ISO 8601 formatted string in UTC into a
timezone-aware datetime object.
def _strptime(self, time_str):
"""Convert an ISO 8601 formatted string in UTC into a
timezone-aware datetime object."""
if time_str:
# Parse UTC string into naive datetime, then add timezone
dt = datetime.strptime(time_str, __timeformat__)
return dt.replace(tzinfo=UTC())
return None |
Generic iterator for this project.
:param int count: How many items to return.
:param int url: First URL to start with
:param class cls: cls to return an object of
:param params dict: (optional) Parameters for the request
:param str etag: (optional), ETag from the last call
def _iter(self, count, url, cls, params=None, etag=None):
    """Generic iterator for this project.

    :param int count: How many items to return.
    :param int url: First URL to start with
    :param class cls: cls to return an object of
    :param params dict: (optional) Parameters for the request
    :param str etag: (optional), ETag from the last call
    """
    # Imported lazily to avoid a circular import with .structs.
    from .structs import GitHubIterator
    return GitHubIterator(count, url, cls, self, params, etag)
Number of requests before GitHub imposes a ratelimit.
:returns: int
def ratelimit_remaining(self):
    """Number of requests before GitHub imposes a ratelimit.

    :returns: int
    """
    response = self._json(self._get(self._github_url + '/rate_limit'), 200)
    # Missing keys fall back to 0 remaining requests.
    core_limits = response.get('resources', {}).get('core', {})
    self._remaining = core_limits.get('remaining', 0)
    return self._remaining
Re-retrieve the information for this object and returns the
refreshed instance.
:param bool conditional: If True, then we will search for a stored
header ('Last-Modified', or 'ETag') on the object and send that
as described in the `Conditional Requests`_ section of the docs
:returns: self
The reasoning for the return value is the following example: ::
repos = [r.refresh() for r in g.iter_repos('kennethreitz')]
Without the return value, that would be an array of ``None``'s and you
would otherwise have to do: ::
repos = [r for i in g.iter_repos('kennethreitz')]
[r.refresh() for r in repos]
Which is really an anti-pattern.
.. versionchanged:: 0.5
.. _Conditional Requests:
http://developer.github.com/v3/#conditional-requests
def refresh(self, conditional=False):
    """Re-retrieve the information for this object and return the
    refreshed instance.

    :param bool conditional: If True, look for a stored header
        ('Last-Modified' or 'ETag') on the object and send it as described
        in the `Conditional Requests`_ section of the docs
    :returns: self

    Returning ``self`` allows, e.g.::

        repos = [r.refresh() for r in g.iter_repos('kennethreitz')]

    instead of refreshing each element of the list in a second pass,
    which would be an anti-pattern.

    .. versionchanged:: 0.5

    .. _Conditional Requests:
        http://developer.github.com/v3/#conditional-requests
    """
    headers = None
    if conditional:
        if self.last_modified:
            headers = {'If-Modified-Since': self.last_modified}
        elif self.etag:
            headers = {'If-None-Match': self.etag}
    json = self._json(self._get(self._api, headers=headers), 200)
    if json is not None:
        # Re-run __init__ with the fresh payload to update in place.
        self.__init__(json, self._session)
    return self
Edit this comment.
:param str body: (required), new body of the comment, Markdown
formatted
:returns: bool
def edit(self, body):
    """Edit this comment.

    :param str body: (required), new body of the comment, Markdown
        formatted
    :returns: bool
    """
    if not body:
        return False
    json = self._json(self._patch(self._api, data=dumps({'body': body})),
                      200)
    if not json:
        return False
    self._update_(json)
    return True
To plot formatter
def toplot(ts,
           filename=None,
           grid=True,
           legend=True,
           pargs=(),
           **kwargs):
    '''To plot formatter'''
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.plot(list(ts.dates()), ts.values(), *pargs)
    axes.grid(grid)
    # Rotate and right-align the x labels, and move the bottom of the
    # axes up to make room for them.
    figure.autofmt_xdate()
    # A single series gets a title; multiple series get a legend.
    names = ts.name.split('__')
    if len(names) == 1:
        axes.set_title(names[0],
                       fontweight=kwargs.get('title_fontweight', 'bold'))
    elif legend:
        axes.legend(names,
                    loc=kwargs.get('legend_location', 'best'),
                    ncol=kwargs.get('legend_ncol', 2))
    return plt
Ensure the table is valid for converting to grid table.
* The table must be a list of lists
* Each row must contain the same number of columns
* The table must not be empty
Parameters
----------
table : list of lists of str
The list of rows of strings to convert to a grid table
Returns
-------
message : str
If no problems are found, this message is empty, otherwise it
tries to describe the problem that was found.
def check_table(table):
    """
    Ensure the table is valid for converting to grid table.

    * The table must be a list of lists
    * Each row must contain the same number of columns
    * The table must not be empty

    Parameters
    ----------
    table : list of lists of str
        The list of rows of strings to convert to a grid table

    Returns
    -------
    message : str
        If no problems are found, this message is empty, otherwise it
        tries to describe the problem that was found.
    """
    if not type(table) is list:
        return "Table must be a list of lists"
    if len(table) == 0:
        return "Table must contain at least one row and one column"
    for row in table:
        if not type(row) is list:
            return "Table must be a list of lists"
        # BUG FIX: the original computed this message but never returned
        # it, so ragged tables passed validation.
        if not len(row) == len(table[0]):
            return "Each row must have the same number of columns"
    # Enforce the documented "at least ... one column" requirement.
    if len(table[0]) == 0:
        return "Table must contain at least one row and one column"
    return ""
Gets the span containing the [row, column] pair
Parameters
----------
spans : list of lists of lists
A list containing spans, which are lists of [row, column] pairs
that define where a span is inside a table.
Returns
-------
span : list of lists
A span containing the [row, column] pair
def get_span(spans, row, column):
    """
    Get the span containing the [row, column] pair.

    Parameters
    ----------
    spans : list of lists of lists
        A list containing spans, which are lists of [row, column] pairs
        that define where a span is inside a table.

    Returns
    -------
    span : list of lists
        The span containing the [row, column] pair, or None when the
        pair is not part of any span.
    """
    target = [row, column]
    for span in spans:
        if target in span:
            return span
    return None
Search through a table and return the first [row, column] pair
whose value is None.
Parameters
----------
table : list of lists of str
Returns
-------
list of int
The row column pair of the None type cell
def find_unassigned_table_cell(table):
    """
    Search through a table and return the first [row, column] pair
    whose value is None.

    Parameters
    ----------
    table : list of lists of str

    Returns
    -------
    list of int
        The row/column pair of the first None cell; if every cell is
        assigned, the coordinates of the last visited cell are returned.
    """
    for row, cells in enumerate(table):
        for column, value in enumerate(cells):
            if value is None:
                return row, column
    # Fallback: no unassigned cell found -- return the last coordinates.
    return row, column
insert *values* at date *dte*.
def insert(self, dte, values):
    '''insert *values* at date *dte*.'''
    # Empty *values* is a no-op.
    if len(values):
        dte = self.dateconvert(dte)
        if not self:
            # First insertion: initialize the backing numpy arrays.
            self._date = np.array([dte])
            self._data = np.array([values])
        else:
            # search for the date in the skiplist index
            index = self._skl.rank(dte)
            if index < 0:
                # date not available; a negative rank encodes the
                # insertion point as -1-index.
                N = len(self._data)
                index = -1-index
                # Grow both arrays in place by one row.
                self._date.resize((N+1,))
                self._data.resize((N+1, self.count()))
                if index < N:
                    # Shift the tail one slot right to make room.
                    self._date[index+1:] = self._date[index:-1]
                    self._data[index+1:] = self._data[index:-1]
            # Write the row (overwriting when the date already existed)
            # and register the date in the skiplist.
            # NOTE(review): _skl.insert is reached even for dates that
            # were already present -- confirm the skiplist tolerates
            # duplicate insertions.
            self._date[index] = dte
            self._data[index] = values
            self._skl.insert(dte)
Convert node names into node instances...
def _translate_nodes(root, *nodes):
"""
Convert node names into node instances...
"""
#name2node = {[n, None] for n in nodes if type(n) is str}
name2node = dict([[n, None] for n in nodes if type(n) is str])
for n in root.traverse():
if n.name in name2node:
if name2node[n.name] is not None:
raise TreeError("Ambiguous node name: {}".format(str(n.name)))
else:
name2node[n.name] = n
if None in list(name2node.values()):
notfound = [key for key, value in six.iteritems(name2node) if value is None]
raise ValueError("Node names not found: "+str(notfound))
valid_nodes = []
for n in nodes:
if type(n) is not str:
if type(n) is not root.__class__:
raise TreeError("Invalid target node: "+str(n))
else:
valid_nodes.append(n)
valid_nodes.extend(list(name2node.values()))
if len(valid_nodes) == 1:
return valid_nodes[0]
else:
return valid_nodes |
Add or update a node's feature.
def add_feature(self, pr_name, pr_value):
    """Add or update a node's feature.

    :param pr_name: attribute name under which the value is stored
    :param pr_value: value assigned to the feature
    """
    setattr(self, pr_name, pr_value)
    # Record the name in the node's feature registry set.
    self.features.add(pr_name)
Add or update several features.
def add_features(self, **features):
    """Add or update several features at once.

    Each keyword becomes an attribute on the node and its name is
    recorded in the node's feature registry set.
    """
    for fname, fvalue in six.iteritems(features):
        setattr(self, fname, fvalue)
        self.features.add(fname)
Permanently deletes a node's feature.
def del_feature(self, pr_name):
    """Permanently delete a node's feature; a no-op when absent."""
    if not hasattr(self, pr_name):
        return
    delattr(self, pr_name)
    self.features.remove(pr_name)
Adds a new child to this node. If child node is not supplied
as an argument, a new node instance will be created.
Parameters
----------
child:
the node instance to be added as a child.
name:
the name that will be given to the child.
dist:
the distance from the node to the child.
support':
the support value of child partition.
Returns:
--------
The child node instance
def add_child(self, child=None, name=None, dist=None, support=None):
    """
    Add a new child to this node. If no child node is supplied, a new
    node instance is created.

    Parameters
    ----------
    child:
        the node instance to be added as a child.
    name:
        the name that will be given to the child.
    dist:
        the distance from the node to the child.
    support:
        the support value of child partition.

    Returns
    -------
    The child node instance
    """
    child = self.__class__() if child is None else child
    if name is not None:
        child.name = name
    if dist is not None:
        child.dist = dist
    if support is not None:
        child.support = support
    self.children.append(child)
    child.up = self
    return child
Removes a child from this node (parent and child
nodes still exist but are no longer connected).
def remove_child(self, child):
    """
    Remove a child from this node (parent and child nodes still exist
    but are no longer connected).
    """
    try:
        self.children.remove(child)
    except ValueError:
        raise TreeError("child not found")
    # Only reached when the removal succeeded.
    child.up = None
    return child
Adds a sister to this node. If sister node is not supplied
as an argument, a new TreeNode instance will be created and
returned.
def add_sister(self, sister=None, name=None, dist=None):
    """
    Add a sister to this node. If no sister node is supplied, a new
    TreeNode instance is created and returned.
    """
    if self.up == None:
        raise TreeError("A parent node is required to add a sister")
    return self.up.add_child(child=sister, name=name, dist=dist)
Removes a sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
:argument sister: A node instance
:return: The node removed
def remove_sister(self, sister=None):
    """
    Remove a sister node. It has the same effect as
    **`TreeNode.up.remove_child(sister)`**

    If a sister node is not supplied, the first sister will be deleted
    and returned.

    :argument sister: A node instance
    :return: The node removed, or None when this node has no sisters.
    """
    sisters = self.get_sisters()
    if not sisters:
        return None
    target = sisters.pop(0) if sister is None else sister
    return self.up.remove_child(target)
Deletes node from the tree structure. Notice that this method
makes 'disappear' the node from the tree structure. This means
that children from the deleted node are transferred to the
next available parent.
Parameters:
-----------
prevent_nondicotomic:
When True (default), delete
function will be execute recursively to prevent single-child
nodes.
preserve_branch_length:
If True, branch lengths of the deleted nodes are transferred
(summed up) to its parent's branch, thus keeping original
distances among nodes.
**Example:**
/ C
root-|
| / B
\--- H |
\ A
> H.delete() will produce this structure:
/ C
|
root-|--B
|
\ A
def delete(self, prevent_nondicotomic=True, preserve_branch_length=False):
    """
    Deletes node from the tree structure. Notice that this method
    makes 'disappear' the node from the tree structure. This means
    that children from the deleted node are transferred to the
    next available parent.

    Parameters:
    -----------
    prevent_nondicotomic:
        When True (default), delete
        function will be executed recursively to prevent single-child
        nodes.
    preserve_branch_length:
        If True, branch lengths of the deleted nodes are transferred
        (summed up) to its parent's branch, thus keeping original
        distances among nodes.

    **Example:**

              / C
        root-|
             |        / B
              \--- H |
                      \ A

        > H.delete() will produce this structure:

              / C
             |
        root-|--B
             |
              \ A
    """
    parent = self.up
    if parent:
        if preserve_branch_length:
            # A single child inherits this node's branch length; with
            # several children the length is pushed up to the parent.
            if len(self.children) == 1:
                self.children[0].dist += self.dist
            elif len(self.children) > 1:
                parent.dist += self.dist
        # Re-attach all children to the parent before disconnecting.
        for ch in self.children:
            parent.add_child(ch)
        parent.remove_child(self)
    # Avoids parents with only one child
    if prevent_nondicotomic and parent and\
       len(parent.children) < 2:
        parent.delete(prevent_nondicotomic=False,
                      preserve_branch_length=preserve_branch_length)
Detachs this node (and all its descendants) from its parent
and returns the referent to itself.
Detached node conserves all its structure of descendants, and can
be attached to another node through the 'add_child' function. This
mechanism can be seen as a cut and paste.
def detach(self):
    """
    Detach this node (and all its descendants) from its parent and
    return a reference to itself.

    The detached node keeps its whole structure of descendants and can
    be attached to another node through 'add_child' -- effectively a
    cut and paste.
    """
    parent = self.up
    if parent:
        parent.children.remove(self)
        self.up = None
    return self
Prunes the topology of a node to conserve only a selected list of leaf
internal nodes. The minimum number of nodes that conserve the
topological relationships among the requested nodes will be
retained. Root node is always conserved.
Parameters:
-----------
nodes:
a list of node names or node objects that should be retained
preserve_branch_length:
If True, branch lengths of the deleted nodes are transferred
(summed up) to its parent's branch, thus keeping original distances
among nodes.
**Examples:**
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E
# | | /-A
#-root \-G -root
# | \-B
# | /-I
# \K|
# \-J
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B', 'C'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E
# | | /-A
#-root \-G -root- C|
# | \-B
# | /-I
# \K|
# \-J
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B', 'I'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E /-I
# | | -root
#-root \-G | /-A
# | \C|
# | /-I \-B
# \K|
# \-J
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B', 'F', 'H'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E
# | | /-A
#-root \-G -root-H /F|
# | \-B
# | /-I
# \K|
# \-J
def prune(self, nodes, preserve_branch_length=False):
    """
    Prunes the topology of a node to conserve only a selected list of leaf
    internal nodes. The minimum number of nodes that conserve the
    topological relationships among the requested nodes will be
    retained. Root node is always conserved.

    Parameters:
    -----------
    nodes:
        a list of node names or node objects that should be retained
    preserve_branch_length:
        If True, branch lengths of the deleted nodes are transferred
        (summed up) to its parent's branch, thus keeping original distances
        among nodes.

    **Examples:**

      t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
      t1.prune(['A', 'B'])

      #                /-A
      #          /D /C|
      #       /F|      \-B
      #      |  |
      #    /H|   \-E
      #   |  |                        /-A
      #-root  \-G                 -root
      #   |                           \-B
      #   |   /-I
      #    \K|
      #       \-J

      t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
      t1.prune(['A', 'B', 'C'])

      #                /-A
      #          /D /C|
      #       /F|      \-B
      #      |  |
      #    /H|   \-E
      #   |  |                              /-A
      #-root  \-G                  -root- C|
      #   |                                 \-B
      #   |   /-I
      #    \K|
      #       \-J

      t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
      t1.prune(['A', 'B', 'I'])

      #                /-A
      #          /D /C|
      #       /F|      \-B
      #      |  |
      #    /H|   \-E                    /-I
      #   |  |                      -root
      #-root  \-G                      |   /-A
      #   |                             \C|
      #   |   /-I                          \-B
      #    \K|
      #       \-J

      t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
      t1.prune(['A', 'B', 'F', 'H'])

      #                /-A
      #          /D /C|
      #       /F|      \-B
      #      |  |
      #    /H|   \-E
      #   |  |                              /-A
      #-root  \-G                -root-H /F|
      #   |                                 \-B
      #   |   /-I
      #    \K|
      #       \-J
    """
    def cmp_nodes(x, y):
        # if several nodes are in the same path of two kept nodes,
        # only one should be maintained. This prioritize internal
        # nodes that are already in the to_keep list and then
        # deeper nodes (closer to the leaves).
        if n2depth[x] > n2depth[y]:
            return -1
        elif n2depth[x] < n2depth[y]:
            return 1
        else:
            return 0

    # Resolve names to node instances; the root is always preserved.
    to_keep = set(_translate_nodes(self, *nodes))
    start, node2path = self.get_common_ancestor(to_keep, get_path=True)
    to_keep.add(self)

    # Calculate which kept nodes are visiting the same nodes in
    # their path to the common ancestor.
    n2count = {}
    n2depth = {}
    for seed, path in six.iteritems(node2path):
        for visited_node in path:
            if visited_node not in n2depth:
                depth = visited_node.get_distance(start, topology_only=True)
                n2depth[visited_node] = depth
            if visited_node is not seed:
                n2count.setdefault(visited_node, set()).add(seed)

    # if several internal nodes are in the path of exactly the same kept
    # nodes, only one (the deepest) should be maintain.
    visitors2nodes = {}
    for node, visitors in six.iteritems(n2count):
        # keep nodes connection at least two other nodes
        if len(visitors)>1:
            visitor_key = frozenset(visitors)
            visitors2nodes.setdefault(visitor_key, set()).add(node)

    for visitors, nodes in six.iteritems(visitors2nodes):
        if not (to_keep & nodes):
            # The deepest candidate (per cmp_nodes) is retained.
            sorted_nodes = sorted(nodes, key=cmp_to_key(cmp_nodes))
            to_keep.add(sorted_nodes[0])

    # Delete everything not marked for keeping, bottom-up (postorder),
    # optionally folding branch lengths into the surviving edges.
    for n in self.get_descendants('postorder'):
        if n not in to_keep:
            if preserve_branch_length:
                if len(n.children) == 1:
                    n.children[0].dist += n.dist
                elif len(n.children) > 1 and n.up:
                    n.up.dist += n.dist
            n.delete(prevent_nondicotomic=False)
Returns an independent list of sister nodes.
def get_sisters(self):
    """Return an independent list of sister nodes (the other children
    of this node's parent); a parentless node has no sisters."""
    parent = self.up
    if parent != None:
        return [sibling for sibling in parent.children if sibling != self]
    return []
Returns an iterator over the leaves under this node.
def iter_leaves(self, is_leaf_fn=None):
    """Return an iterator over the leaves under this node.

    ``is_leaf_fn``, when given, replaces ``is_leaf`` as the test for
    terminal nodes.
    """
    leaf_test = is_leaf_fn if is_leaf_fn else (lambda node: node.is_leaf())
    for node in self.traverse(strategy="preorder", is_leaf_fn=is_leaf_fn):
        if leaf_test(node):
            yield node
Returns an iterator over the leaf names under this node.
def iter_leaf_names(self, is_leaf_fn=None):
    """Return an iterator over the names of the leaves under this node."""
    for leaf in self.iter_leaves(is_leaf_fn=is_leaf_fn):
        yield leaf.name
Returns an iterator over all descendant nodes.
def iter_descendants(self, strategy="levelorder", is_leaf_fn=None):
    """Return an iterator over all descendant nodes (self excluded)."""
    for node in self.traverse(strategy=strategy, is_leaf_fn=is_leaf_fn):
        if node is self:
            continue
        yield node
Returns a list of all (leaves and internal) descendant nodes.
def get_descendants(self, strategy="levelorder", is_leaf_fn=None):
    """Return a list of all (leaf and internal) descendant nodes."""
    return list(self.iter_descendants(strategy=strategy,
                                      is_leaf_fn=is_leaf_fn))
Returns an iterator to traverse tree under this node.
Parameters:
-----------
strategy:
set the way in which tree will be traversed. Possible
values are: "preorder" (first parent and then children)
'postorder' (first children and the parent) and
"levelorder" (nodes are visited in order from root to leaves)
is_leaf_fn:
If supplied, ``is_leaf_fn`` function will be used to
interrogate nodes about if they are terminal or internal.
``is_leaf_fn`` function should receive a node instance as first
argument and return True or False. Use this argument to
traverse a tree by dynamically collapsing internal nodes matching
``is_leaf_fn``.
def traverse(self, strategy="levelorder", is_leaf_fn=None):
    """
    Return an iterator to traverse the tree under this node.

    Parameters:
    -----------
    strategy:
        traversal order: "preorder" (parent before children),
        "postorder" (children before parent) or "levelorder"
        (breadth-first, from root towards the leaves).
    is_leaf_fn:
        optional function receiving a node instance and returning
        True/False; when supplied it decides which nodes are treated
        as terminal, allowing internal nodes matching it to be
        dynamically collapsed during the traversal.
    """
    walkers = {
        "preorder": self._iter_descendants_preorder,
        "levelorder": self._iter_descendants_levelorder,
        "postorder": self._iter_descendants_postorder,
    }
    walker = walkers.get(strategy)
    # Unknown strategies yield None, matching the original behavior.
    if walker is not None:
        return walker(is_leaf_fn=is_leaf_fn)
Iterate over all nodes in a tree yielding every node in both
pre and post order. Each iteration returns a postorder flag
(True if node is being visited in postorder) and a node
instance.
def iter_prepostorder(self, is_leaf_fn=None):
    """
    Iterate over all nodes in the tree, yielding every node in both
    pre and post order. Each iteration returns a ``(postorder, node)``
    tuple where ``postorder`` is True when the node is being visited
    in postorder.
    """
    stack = [self]
    leaf_check = is_leaf_fn if is_leaf_fn is not None else self.__class__.is_leaf
    while stack:
        item = stack.pop()
        try:
            # Postorder markers are [1, node] lists; real nodes raise
            # TypeError on indexing.
            current = item[1]
        except TypeError:
            # PREORDER visit
            yield (False, item)
            if not leaf_check(item):
                # Push children plus a postorder marker for this node
                stack.extend(reversed(item.children + [[1, item]]))
        else:
            # POSTORDER visit
            yield (True, current)
Iterate over all descendant nodes.
def _iter_descendants_levelorder(self, is_leaf_fn=None):
""" Iterate over all desdecendant nodes."""
tovisit = deque([self])
while len(tovisit) > 0:
node = tovisit.popleft()
yield node
if not is_leaf_fn or not is_leaf_fn(node):
tovisit.extend(node.children) |
Iterator over all descendant nodes.
def _iter_descendants_preorder(self, is_leaf_fn=None):
""" Iterator over all descendant nodes. """
to_visit = deque()
node = self
while node is not None:
yield node
if not is_leaf_fn or not is_leaf_fn(node):
to_visit.extendleft(reversed(node.children))
try:
node = to_visit.popleft()
except:
node = None |
Iterates over the list of all ancestor nodes from
current node to the current tree root.
def iter_ancestors(self):
    """Yield every ancestor node, from this node's parent up to the
    current tree root."""
    ancestor = self.up
    while ancestor is not None:
        yield ancestor
        ancestor = ancestor.up
Returns the newick representation of current node. Several
arguments control the way in which extra data is shown for
every node:
Parameters:
-----------
features:
a list of feature names to be exported using the Extended Newick
Format (i.e. features=["name", "dist"]). Use an empty list to
export all available features in each node (features=[])
outfile:
writes the output to a given file
format:
defines the newick standard used to encode the tree.
format_root_node:
If True, it allows features and branch information from root node
to be exported as a part of the newick text string. For newick
compatibility reasons, this is False by default.
is_leaf_fn:
See :func:`TreeNode.traverse` for documentation.
**Example:**
t.get_newick(features=["species","name"], format=1)
def write(self,
          features=None,
          outfile=None,
          format=0,
          is_leaf_fn=None,
          format_root_node=False,
          dist_formatter=None,
          support_formatter=None,
          name_formatter=None):
    """
    Return the newick representation of the current node.

    Parameters:
    -----------
    features:
        list of feature names exported using the Extended Newick
        Format (i.e. features=["name", "dist"]). An empty list exports
        all available features of each node.
    outfile:
        if given, the newick string is written to this path and None
        is returned instead.
    format:
        newick standard used to encode the tree.
    format_root_node:
        if True, features and branch information of the root node are
        included in the newick string. Off by default for newick
        compatibility.
    is_leaf_fn:
        See :func:`TreeNode.traverse` for documentation.
    **Example:**
    t.get_newick(features=["species","name"], format=1)
    """
    newick = write_newick(self, features=features,
                          format=format,
                          is_leaf_fn=is_leaf_fn,
                          format_root_node=format_root_node,
                          dist_formatter=dist_formatter,
                          support_formatter=support_formatter,
                          name_formatter=name_formatter)
    if outfile is None:
        return newick
    with open(outfile, "w") as handle:
        handle.write(newick)
Returns the absolute root node of current tree structure.
def get_tree_root(self):
    """Return the absolute root node of the current tree structure."""
    node = self
    while node.up is not None:
        node = node.up
    return node
Returns the first common ancestor between this node and a given
list of 'target_nodes'.
**Examples:**
t = tree.Tree("(((A:0.1, B:0.01):0.001, C:0.0001):1.0[&&NHX:name=common], (D:0.00001):0.000001):2.0[&&NHX:name=root];")
A = t.get_descendants_by_name("A")[0]
C = t.get_descendants_by_name("C")[0]
common = A.get_common_ancestor(C)
print common.name
def get_common_ancestor(self, *target_nodes, **kargs):
    """
    Returns the first common ancestor between this node and a given
    list of 'target_nodes'.

    ``target_nodes`` may be given as several arguments or as a single
    set/tuple/list/frozenset. Supported keyword argument:
    ``get_path`` — when True, also return the dict mapping each target
    node to the set of nodes on its path to the root.

    Raises TreeError if the targets do not share any ancestor
    (i.e. they belong to disconnected structures).

    **Examples:**
    t = tree.Tree("(((A:0.1, B:0.01):0.001, C:0.0001):1.0[&&NHX:name=common], (D:0.00001):0.000001):2.0[&&NHX:name=root];")
    A = t.get_descendants_by_name("A")[0]
    C = t.get_descendants_by_name("C")[0]
    common = A.get_common_ancestor(C)
    print common.name
    """
    get_path = kargs.get("get_path", False)
    # Accept a single container of targets in place of *args
    if len(target_nodes) == 1 and type(target_nodes[0]) \
           in set([set, tuple, list, frozenset]):
        target_nodes = target_nodes[0]
    # Convert node names into node instances
    target_nodes = _translate_nodes(self, *target_nodes)
    # If only one node is provided, use self as the second target
    if type(target_nodes) != list:
        target_nodes = [target_nodes, self]
    n2path = {}     # target node -> set of its ancestors (incl. itself)
    reference = []  # root-ward path of the first target, in order
    ref_node = None
    for n in target_nodes:
        current = n
        while current:
            n2path.setdefault(n, set()).add(current)
            if not ref_node:
                reference.append(current)
            current = current.up
        if not ref_node:
            ref_node = n
    # Walk the reference path from the first target towards the root;
    # the first node contained in every other target's path is the
    # most recent common ancestor.
    common = None
    for n in reference:
        broken = False
        for node, path in six.iteritems(n2path):
            if node is not ref_node and n not in path:
                broken = True
                break
        if not broken:
            common = n
            break
    if not common:
        raise TreeError("Nodes are not connected!")
    if get_path:
        return common, n2path
    else:
        return common
Search nodes in an iterative way. Matches are yielded as
they are found, which avoids scanning the full tree
topology before returning the first matches. Useful when
dealing with huge trees.
def iter_search_nodes(self, **conditions):
    """
    Search nodes iteratively, yielding each match as soon as it is
    found instead of scanning the whole topology first. Useful when
    dealing with huge trees. A node matches when every given
    attribute=value condition holds for it.
    """
    for node in self.traverse():
        if all(hasattr(node, key) and getattr(node, key) == value
               for key, value in six.iteritems(conditions)):
            yield node
Returns the list of nodes matching a given set of conditions.
**Example:**
tree.search_nodes(dist=0.0, name="human")
def search_nodes(self, **conditions):
    """
    Return the list of nodes matching a given set of conditions.
    **Example:**
    tree.search_nodes(dist=0.0, name="human")
    """
    return list(self.iter_search_nodes(**conditions))
Returns the distance between two nodes. If only one target is
specified, it returns the distance between the target and the
current node.
Parameters:
-----------
target:
a node within the same tree structure.
target2:
a node within the same tree structure. If not specified,
current node is used as target2.
topology_only:
If set to True, distance will refer to the number of nodes
between target and target2.
Returns:
--------
branch length distance between target and target2. If topology_only
flag is True, returns the number of nodes between target and target2.
def get_distance(self, target, target2=None, topology_only=False):
    """
    Returns the distance between two nodes. If only one target is
    specified, it returns the distance between the target and the
    current node.
    Parameters:
    -----------
    target:
        a node within the same tree structure.
    target2:
        a node within the same tree structure. If not specified,
        current node is used as target2.
    topology_only:
        If set to True, distance will refer to the number of nodes
        between target and target2.
    Returns:
    --------
    branch length distance between target and target2. If topology_only
    flag is True, returns the number of nodes between target and target2.
    """
    if target2 is None:
        # Single-target mode: measure from target to self, searching
        # from the absolute root so both are guaranteed to be reachable
        target2 = self
        root = self.get_tree_root()
    else:
        # is target node under current node?
        root = self
    target, target2 = _translate_nodes(root, target, target2)
    ancestor = root.get_common_ancestor(target, target2)
    dist = 0.0
    # Sum branch lengths on both paths up to the common ancestor
    for n in [target2, target]:
        current = n
        while current != ancestor:
            if topology_only:
                # Count intermediate nodes; 'target' itself is excluded
                if current!=target:
                    dist += 1
            else:
                dist += current.dist
            current = current.up
    return dist
Returns the node's farthest descendant or ancestor node, and the
distance to it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the farthest node referred to the
current node and the distance to it.
def get_farthest_node(self, topology_only=False):
    """
    Returns the node's farthest descendant or ancestor node, and the
    distance to it.
    :argument False topology_only: If set to True, distance
    between nodes will be referred to the number of nodes
    between them. In other words, topological distance will be
    used instead of branch length distances.
    :return: A tuple containing the farthest node referred to the
    current node and the distance to it.
    """
    # Init farthest node to current farthest leaf
    farthest_node, farthest_dist = self.get_farthest_leaf(
        topology_only=topology_only)
    # Then climb towards the root; at each ancestor, check whether a
    # deeper leaf exists in one of its *other* subtrees.
    prev = self
    cdist = 0.0 if topology_only else prev.dist  # distance climbed so far
    current = prev.up
    while current is not None:
        for ch in current.children:
            if ch != prev:  # skip the subtree we climbed out of
                if not ch.is_leaf():
                    fnode, fdist = ch.get_farthest_leaf(
                        topology_only=topology_only)
                else:
                    fnode = ch
                    fdist = 0
                # account for the branch connecting the sibling subtree
                if topology_only:
                    fdist += 1.0
                else:
                    fdist += ch.dist
                if cdist+fdist > farthest_dist:
                    farthest_dist = cdist + fdist
                    farthest_node = fnode
        prev = current
        if topology_only:
            cdist += 1
        else:
            cdist += prev.dist
        current = prev.up
    return farthest_node, farthest_dist
Returns node's farthest descendant node (which is always a leaf), and the
distance to it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the farthest leaf referred to the
current node and the distance to it.
def get_farthest_leaf(self, topology_only=False, is_leaf_fn=None):
    """
    Return this node's farthest descendant leaf and the distance to
    it, as a ``(node, distance)`` tuple.
    :argument False topology_only: If set to True, the distance is the
    number of nodes between them instead of the branch length sum.
    """
    # Helper returns (closest_node, closest_dist, farthest_node,
    # farthest_dist); only the farthest pair is needed here.
    extremes = self._get_farthest_and_closest_leaves(
        topology_only=topology_only, is_leaf_fn=is_leaf_fn)
    return extremes[2], extremes[3]
Returns the node that divides the current tree into two
distance-balanced partitions.
def get_midpoint_outgroup(self):
    """
    Return the node that divides the current tree into two
    distance-balanced partitions (midpoint rooting candidate).

    Finds the two most distant leaves of the tree and returns the
    node closest to the midpoint of the path between them.
    """
    root = self.get_tree_root()
    # One endpoint of the tree "diameter": the leaf farthest from root
    node_a, _ = root.get_farthest_leaf()
    # The other endpoint and the full diameter length
    _, a2b_dist = node_a.get_farthest_node()
    half_dist = a2b_dist / 2.0
    walked = 0
    current = node_a
    while current is not None:
        walked += current.dist
        # Stop climbing once we have passed the midpoint
        if walked > half_dist:
            break
        current = current.up
    return current
Generates a random topology by populating current node.
:argument None names_library: If provided, names library
(list, set, dict, etc.) will be used to name nodes.
:argument False reuse_names: If True, node names will not be
necessarily unique, which makes the process a bit more
efficient.
:argument False random_branches: If True, branch distances and support
values will be randomized.
:argument (0,1) branch_range: If random_branches is True, this
range of values will be used to generate random distances.
:argument (0,1) support_range: If random_branches is True,
this range of values will be used to generate random branch
support values.
def populate(self,
             size,
             names_library=None,
             reuse_names=False,
             random_branches=False,
             branch_range=(0, 1),
             support_range=(0, 1)):
    """
    Generates a random topology by populating current node.
    :argument size: number of leaf nodes to add to the current topology.
    :argument None names_library: If provided, names library
    (list, set, dict, etc.) will be used to name nodes.
    :argument False reuse_names: If True, node names will not be
    necessarily unique, which makes the process a bit more
    efficient.
    :argument False random_branches: If True, branch distances and support
    values will be randomized.
    :argument (0,1) branch_range: If random_branches is True, this
    range of values will be used to generate random distances.
    :argument (0,1) support_range: If random_branches is True,
    this range of values will be used to generate random branch
    support values.
    """
    NewNode = self.__class__
    if len(self.children) > 1:
        # Node is already resolved: regroup the existing children
        # under a connector node so the random subtree can grow from
        # a fresh sibling without disturbing them.
        connector = NewNode()
        for ch in self.get_children():
            ch.detach()
            connector.add_child(child = ch)
        root = NewNode()
        self.add_child(child = connector)
        self.add_child(child = root)
    else:
        root = self
    next_deq = deque([root])
    for i in range(size-1):
        # Grow from either end of the pending-leaf queue at random to
        # produce a mixture of balanced and unbalanced shapes.
        if random.randint(0, 1):
            p = next_deq.pop()
        else:
            p = next_deq.popleft()
        c1 = p.add_child()
        c2 = p.add_child()
        next_deq.extend([c1, c2])
        if random_branches:
            c1.dist = random.uniform(*branch_range)
            c2.dist = random.uniform(*branch_range)
            # BUGFIX: support values are drawn from support_range;
            # they were previously drawn from branch_range by mistake.
            c1.support = random.uniform(*support_range)
            c2.support = random.uniform(*support_range)
        else:
            c1.dist = 1.0
            c2.dist = 1.0
            c1.support = 1.0
            c2.support = 1.0
    # next_deq now contains only leaf nodes: assign their names
    charset = "abcdefghijklmnopqrstuvwxyz"
    if names_library:
        names_library = deque(names_library)
    else:
        avail_names = itertools.combinations_with_replacement(charset, 10)
    for n in next_deq:
        if names_library:
            if reuse_names:
                tname = random.sample(names_library, 1)[0]
            else:
                tname = names_library.pop()
        else:
            tname = ''.join(next(avail_names))
        n.name = tname
Sets a descendant node as the outgroup of a tree. This function
can be used to root a tree or even an internal node.
Parameters:
-----------
outgroup:
a node instance within the same tree structure that will be
used as a basal node.
def set_outgroup(self, outgroup):
    """
    Sets a descendant node as the outgroup of a tree. This function
    can be used to root a tree or even an internal node.
    Parameters:
    -----------
    outgroup:
        a node instance (or node name) within the same tree structure
        that will be used as a basal node.

    Raises TreeError when called with this node itself as outgroup.
    """
    outgroup = _translate_nodes(self, outgroup)
    if self == outgroup:
        ##return
        ## why raise an error for this?
        raise TreeError("Cannot set myself as outgroup")
    parent_outgroup = outgroup.up
    # Detects (sub)tree root
    n = outgroup
    while n.up is not self:
        n = n.up
    # If outgroup is a child from root, but with more than one
    # sister nodes, creates a new node to group them
    self.children.remove(n)
    if len(self.children) != 1:
        down_branch_connector = self.__class__()
        down_branch_connector.dist = 0.0
        down_branch_connector.support = n.support
        for ch in self.get_children():
            down_branch_connector.children.append(ch)
            ch.up = down_branch_connector
            self.children.remove(ch)
    else:
        down_branch_connector = self.children[0]
    # Connects down branch to myself or to outgroup
    # NOTE(review): the Spanish identifiers below read:
    #   quien_va_ser_padre = "who will become the parent"
    #   quien_va_ser_hijo  = "who will become the child"
    #   quien_fue_padre    = "who was the parent"
    quien_va_ser_padre = parent_outgroup
    if quien_va_ser_padre is not self:
        # Parent-child swapping: walk from the outgroup's parent up to
        # self, reversing each parent/child link while shifting the
        # buffered dist/support values one step, so every branch keeps
        # its original meaning after re-rooting.
        quien_va_ser_hijo = quien_va_ser_padre.up
        quien_fue_padre = None
        buffered_dist = quien_va_ser_padre.dist
        buffered_support = quien_va_ser_padre.support
        while quien_va_ser_hijo is not self:
            quien_va_ser_padre.children.append(quien_va_ser_hijo)
            quien_va_ser_hijo.children.remove(quien_va_ser_padre)
            buffered_dist2 = quien_va_ser_hijo.dist
            buffered_support2 = quien_va_ser_hijo.support
            quien_va_ser_hijo.dist = buffered_dist
            quien_va_ser_hijo.support = buffered_support
            buffered_dist = buffered_dist2
            buffered_support = buffered_support2
            quien_va_ser_padre.up = quien_fue_padre
            quien_fue_padre = quien_va_ser_padre
            quien_va_ser_padre = quien_va_ser_hijo
            quien_va_ser_hijo = quien_va_ser_padre.up
        quien_va_ser_padre.children.append(down_branch_connector)
        down_branch_connector.up = quien_va_ser_padre
        quien_va_ser_padre.up = quien_fue_padre
        down_branch_connector.dist += buffered_dist
        outgroup2 = parent_outgroup
        parent_outgroup.children.remove(outgroup)
        outgroup2.dist = 0
    else:
        outgroup2 = down_branch_connector
    outgroup.up = self
    outgroup2.up = self
    # outgroup is always the first children. Some function my
    # trust on this fact, so do no change this.
    self.children = [outgroup,outgroup2]
    # Split the former root branch evenly between the two new children
    middist = (outgroup2.dist + outgroup.dist)/2
    outgroup.dist = middist
    outgroup2.dist = middist
    outgroup2.support = outgroup.support
Unroots current node. This function is expected to be used on
the absolute tree root node, but it can be also be applied to
any other internal node. It will convert a split into a
multifurcation.
def unroot(self):
    """
    Unroot the current node. This is expected to be used on the
    absolute tree root node, but it can also be applied to any other
    internal node: a bifurcation is converted into a multifurcation.
    A node that is not a bifurcation is left untouched.
    """
    if len(self.children) != 2:
        return
    first, second = self.children
    if not first.is_leaf():
        first.delete()
    elif not second.is_leaf():
        second.delete()
    else:
        raise TreeError("Cannot unroot a tree with only two leaves")
Returns the ASCII representation of the tree.
Code based on the PyCogent GPL project.
def _asciiArt(self, char1='-', show_internal=True, compact=False, attributes=None):
    """
    Returns the ASCII representation of the tree.

    Returns a ``(lines, mid)`` tuple where ``lines`` is the list of
    text rows drawing this subtree and ``mid`` is the index of the
    row where the branch to this node attaches.

    Code based on the PyCogent GPL project.
    """
    if not attributes:
        attributes = ["name"]
    # toytree edit:
    # removed six dependency for map with comprehension
    # node_name = ', '.join(map(str, [getattr(self, v) for v in attributes if hasattr(self, v)]))
    _attrlist = [getattr(self, v) for v in attributes if hasattr(self, v)]
    node_name = ", ".join([str(i) for i in _attrlist])
    # LEN: horizontal width reserved for this node's branch segment
    LEN = max(3, len(node_name) if not self.children or show_internal else 3)
    PAD = ' ' * LEN
    PA = ' ' * (LEN-1)
    if not self.is_leaf():
        mids = []    # attachment row of each child, offset into result
        result = []  # accumulated rows of all child subtrees
        for c in self.children:
            # Connector glyph depends on the child's position
            if len(self.children) == 1:
                char2 = '/'
            elif c is self.children[0]:
                char2 = '/'
            elif c is self.children[-1]:
                char2 = '\\'
            else:
                char2 = '-'
            (clines, mid) = c._asciiArt(char2, show_internal, compact, attributes)
            mids.append(mid+len(result))
            result.extend(clines)
            if not compact:
                result.append('')
        if not compact:
            result.pop()
        # Vertical bar spans between first and last child attachments
        (lo, hi, end) = (mids[0], mids[-1], len(result))
        prefixes = [PAD] * (lo+1) + [PA+'|'] * (hi-lo-1) + [PAD] * (end-hi)
        mid = int((lo + hi) / 2)
        prefixes[mid] = char1 + '-'*(LEN-2) + prefixes[mid][-1]
        result = [p+l for (p,l) in zip(prefixes, result)]
        if show_internal:
            stem = result[mid]
            # Overwrite part of the stem row with this node's label
            result[mid] = stem[0] + node_name + stem[len(node_name)+1:]
        return (result, mid)
    else:
        return ([char1 + '-' + node_name], 0)
Returns a string containing an ascii drawing of the tree.
Parameters:
-----------
show_internal:
include internal edge names.
compact:
use exactly one line per tip.
attributes:
A list of node attributes to shown in the ASCII representation.
def get_ascii(self, show_internal=True, compact=False, attributes=None):
    """
    Return a string containing an ascii drawing of the tree.
    Parameters:
    -----------
    show_internal:
        include internal edge names.
    compact:
        use exactly one line per tip.
    attributes:
        list of node attributes shown in the ASCII representation.
    """
    rows, _ = self._asciiArt(show_internal=show_internal,
                             compact=compact,
                             attributes=attributes)
    return '\n' + '\n'.join(rows)
Sort the branches of a given tree (swapping children nodes)
according to the size of each partition.
def ladderize(self, direction=0):
    """
    Sort the branches of the tree in place (swapping children)
    according to the size of each partition. Returns the number of
    leaves under this node.
    """
    if self.is_leaf():
        return 1
    subtree_sizes = {}
    for child in self.get_children():
        subtree_sizes[child] = child.ladderize(direction=direction)
    self.children.sort(key=lambda node: subtree_sizes[node])
    if direction == 1:
        self.children.reverse()
    return sum(subtree_sizes.values())
This function sort the branches of a given tree by
considerening node names. After the tree is sorted, nodes are
labeled using ascendent numbers. This can be used to ensure
that nodes in a tree with the same node names are always
labeled in the same way. Note that if duplicated names are
present, extra criteria should be added to sort nodes.
Unique id is stored as a node._nid attribute
def sort_descendants(self, attr="name"):
    """
    Sort the branches of the tree by comparing node names (or the
    given attribute). After sorting, trees with the same leaf labels
    always order their children identically. Note that if duplicated
    names are present, extra criteria should be added to sort nodes.
    """
    cached = self.get_cached_content(store_attr=attr, container_type=list)
    for node in self.traverse():
        if node.is_leaf():
            continue
        node.children.sort(key=lambda child: str(sorted(cached[child])))
Returns a dictionary pointing to the preloaded content of each
internal node under this tree. Such a dictionary is intended
to work as a cache for operations that require many traversal
operations.
Parameters:
-----------
store_attr:
Specifies the node attribute that should be cached (i.e. name,
distance, etc.). When none, the whole node instance is cached.
_store: (internal use)
def get_cached_content(self, store_attr=None, container_type=set, _store=None):
    """
    Returns a dictionary pointing to the preloaded content of each
    internal node under this tree. Such a dictionary is intended
    to work as a cache for operations that require many traversal
    operations.
    Parameters:
    -----------
    store_attr:
        Specifies the node attribute that should be cached (i.e. name,
        distance, etc.). When none, the whole node instance is cached.
    container_type:
        container used to aggregate leaf content (set or list).
    _store: (internal use) accumulator dict shared across the recursion.
    Returns:
    --------
    dict mapping each node under this one to a container holding the
    cached content of all its leaves.
    """
    if _store is None:
        _store = {}
    # Post-order: fill the cache for every child subtree first
    for ch in self.children:
        ch.get_cached_content(store_attr=store_attr,
                              container_type=container_type,
                              _store=_store)
    if self.children:
        # Internal node: merge the already-cached child containers.
        # isinstance (instead of the previous type()==) also accepts
        # list/set subclasses as container_type.
        val = container_type()
        for ch in self.children:
            if isinstance(val, list):
                val.extend(_store[ch])
            elif isinstance(val, set):
                val.update(_store[ch])
        _store[self] = val
    else:
        # Leaf: cache the node itself or the requested attribute
        if store_attr is None:
            val = self
        else:
            val = getattr(self, store_attr)
        _store[self] = container_type([val])
    return _store
Returns the Robinson-Foulds symmetric distance between current
tree and a different tree instance.
Parameters:
-----------
t2:
reference tree
attr_t1:
Compare trees using a custom node attribute as a node name.
attr_t2:
Compare trees using a custom node attribute as a node name in target tree.
attr_t2:
If True, consider trees as unrooted.
False expand_polytomies:
If True, all polytomies in the reference and target tree will be
expanded into all possible binary trees. Robinson-foulds distance
will be calculated between all tree combinations and the minimum
value will be returned.
See also, :func:`NodeTree.expand_polytomy`.
Returns:
--------
(rf, rf_max, common_attrs, names, edges_t1, edges_t2,
discarded_edges_t1, discarded_edges_t2)
def robinson_foulds(self,
                    t2,
                    attr_t1="name",
                    attr_t2="name",
                    unrooted_trees=False,
                    expand_polytomies=False,
                    polytomy_size_limit=5,
                    skip_large_polytomies=False,
                    correct_by_polytomy_size=False,
                    min_support_t1=0.0,
                    min_support_t2=0.0):
    """
    Returns the Robinson-Foulds symmetric distance between current
    tree and a different tree instance.
    Parameters:
    -----------
    t2:
        reference tree
    attr_t1:
        Compare trees using a custom node attribute as a node name.
    attr_t2:
        Compare trees using a custom node attribute as a node name in target tree.
    unrooted_trees:
        If True, consider trees as unrooted.
    expand_polytomies:
        If True, all polytomies in the reference and target tree will be
        expanded into all possible binary trees. Robinson-foulds distance
        will be calculated between all tree combinations and the minimum
        value will be returned.
        See also, :func:`NodeTree.expand_polytomy`.
    min_support_t1 / min_support_t2:
        branches below this support value are discarded from the
        comparison in the corresponding tree.
    Returns:
    --------
    (rf, rf_max, common_attrs, names, edges_t1, edges_t2,
    discarded_edges_t1, discarded_edges_t2)
    """
    ref_t = self
    target_t = t2
    # Option sanity checks
    if not unrooted_trees and (len(ref_t.children) > 2 or len(target_t.children) > 2):
        raise TreeError("Unrooted tree found! You may want to activate the unrooted_trees flag.")
    if expand_polytomies and correct_by_polytomy_size:
        raise TreeError("expand_polytomies and correct_by_polytomy_size are mutually exclusive.")
    if expand_polytomies and unrooted_trees:
        raise TreeError("expand_polytomies and unrooted_trees arguments cannot be enabled at the same time")
    # Restrict the comparison to leaf labels present in both trees
    attrs_t1 = set([getattr(n, attr_t1) for n in ref_t.iter_leaves() if hasattr(n, attr_t1)])
    attrs_t2 = set([getattr(n, attr_t2) for n in target_t.iter_leaves() if hasattr(n, attr_t2)])
    common_attrs = attrs_t1 & attrs_t2
    # release mem
    attrs_t1, attrs_t2 = None, None
    # Check for duplicated items (is it necessary? can we optimize? what's the impact in performance?')
    size1 = len([True for n in ref_t.iter_leaves() if getattr(n, attr_t1, None) in common_attrs])
    size2 = len([True for n in target_t.iter_leaves() if getattr(n, attr_t2, None) in common_attrs])
    if size1 > len(common_attrs):
        raise TreeError('Duplicated items found in source tree')
    if size2 > len(common_attrs):
        raise TreeError('Duplicated items found in reference tree')
    if expand_polytomies:
        # Compare every binary resolution of each polytomy and keep
        # the minimum RF over all combinations.
        ref_trees = [
            TreeNode(nw) for nw in
            ref_t.expand_polytomies(
                map_attr=attr_t1,
                polytomy_size_limit=polytomy_size_limit,
                skip_large_polytomies=skip_large_polytomies
            )
        ]
        target_trees = [
            TreeNode(nw) for nw in
            target_t.expand_polytomies(
                map_attr=attr_t2,
                polytomy_size_limit=polytomy_size_limit,
                skip_large_polytomies=skip_large_polytomies,
            )
        ]
        attr_t1, attr_t2 = "name", "name"
    else:
        ref_trees = [ref_t]
        target_trees = [target_t]
    polytomy_correction = 0
    if correct_by_polytomy_size:
        # Each polytomy of n children hides (n-2) unresolved partitions
        corr1 = sum([0]+[len(n.children) - 2 for n in ref_t.traverse() if len(n.children) > 2])
        corr2 = sum([0]+[len(n.children) - 2 for n in target_t.traverse() if len(n.children) > 2])
        if corr1 and corr2:
            raise TreeError("Both trees contain polytomies! Try expand_polytomies=True instead")
        else:
            polytomy_correction = max([corr1, corr2])
    min_comparison = None
    for t1 in ref_trees:
        # Encode each edge as the sorted tuple of leaf labels on one
        # side of it (both sides when comparing unrooted trees).
        t1_content = t1.get_cached_content()
        t1_leaves = t1_content[t1]
        if unrooted_trees:
            edges1 = set([
                tuple(sorted([tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs])),
                              tuple(sorted([getattr(n, attr_t1) for n in t1_leaves-content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs]))]))
                for content in six.itervalues(t1_content)])
            edges1.discard(((),()))
        else:
            edges1 = set([
                tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs]))
                for content in six.itervalues(t1_content)])
            edges1.discard(())
        if min_support_t1:
            support_t1 = dict([
                (tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs])), branch.support)
                for branch, content in six.iteritems(t1_content)])
        for t2 in target_trees:
            t2_content = t2.get_cached_content()
            t2_leaves = t2_content[t2]
            if unrooted_trees:
                edges2 = set([
                    tuple(sorted([
                        tuple(sorted([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs])),
                        tuple(sorted([getattr(n, attr_t2) for n in t2_leaves-content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))]))
                    for content in six.itervalues(t2_content)])
                edges2.discard(((),()))
            else:
                edges2 = set([
                    tuple(sorted([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))
                    for content in six.itervalues(t2_content)])
                edges2.discard(())
            if min_support_t2:
                support_t2 = dict([
                    (tuple(sorted(([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))), branch.support)
                    for branch, content in six.iteritems(t2_content)])
            # if a support value is passed as a constraint, discard lowly supported branches from the analysis
            discard_t1, discard_t2 = set(), set()
            if min_support_t1 and unrooted_trees:
                discard_t1 = set([p for p in edges1 if support_t1.get(p[0], support_t1.get(p[1], 999999999)) < min_support_t1])
            elif min_support_t1:
                discard_t1 = set([p for p in edges1 if support_t1[p] < min_support_t1])
            if min_support_t2 and unrooted_trees:
                discard_t2 = set([p for p in edges2 if support_t2.get(p[0], support_t2.get(p[1], 999999999)) < min_support_t2])
            elif min_support_t2:
                discard_t2 = set([p for p in edges2 if support_t2[p] < min_support_t2])
            #rf = len(edges1 ^ edges2) - (len(discard_t1) + len(discard_t2)) - polytomy_correction # poly_corr is 0 if the flag is not enabled
            #rf = len((edges1-discard_t1) ^ (edges2-discard_t2)) - polytomy_correction
            # the two root edges are never counted here, as they are always
            # present in both trees because of the common attr filters
            rf = len(((edges1 ^ edges2) - discard_t2) - discard_t1) - polytomy_correction
            if unrooted_trees:
                # thought this may work, but it does not, still I don't see why
                #max_parts = (len(common_attrs)*2) - 6 - len(discard_t1) - len(discard_t2)
                max_parts = (len([p for p in edges1 - discard_t1 if len(p[0])>1 and len(p[1])>1]) +
                             len([p for p in edges2 - discard_t2 if len(p[0])>1 and len(p[1])>1]))
            else:
                # thought this may work, but it does not, still I don't see why
                #max_parts = (len(common_attrs)*2) - 4 - len(discard_t1) - len(discard_t2)
                # Otherwise we need to count the actual number of valid
                # partitions in each tree -2 is to avoid counting the root
                # partition of the two trees (only needed in rooted trees)
                max_parts = (len([p for p in edges1 - discard_t1 if len(p)>1]) +
                             len([p for p in edges2 - discard_t2 if len(p)>1])) - 2
            # print max_parts
            if not min_comparison or min_comparison[0] > rf:
                min_comparison = [rf, max_parts, common_attrs, edges1, edges2, discard_t1, discard_t2]
    return min_comparison
Iterate over the list of edges of a tree. Each edge is represented as a
tuple of two elements, each containing the list of nodes separated by
the edge.
def iter_edges(self, cached_content=None):
    """
    Iterate over the list of edges of a tree. Each edge is represented
    as a tuple of two elements, each containing the set of leaf nodes
    on one side of the edge.

    :param cached_content: optional precomputed result of
        :func:`get_cached_content`; computed on the fly when omitted.
    """
    if not cached_content:
        cached_content = self.get_cached_content()
    all_leaves = cached_content[self]
    # Only the leaf sets are needed; the node keys were previously
    # unpacked and ignored.
    for side1 in six.itervalues(cached_content):
        yield (side1, all_leaves - side1)
Returns the unique ID representing the topology of the current tree.
Two trees with the same topology will produce the same id. If trees are
unrooted, make sure that the root node is not binary or use the
tree.unroot() function before generating the topology id.
This is useful to detect the number of unique topologies over a bunch
of trees, without requiring full distance methods.
The id is, by default, calculated based on the terminal node's names.
Any other node attribute could be used instead.
def get_topology_id(self, attr="name"):
"""
Returns the unique ID representing the topology of the current tree.
Two trees with the same topology will produce the same id. If trees are
unrooted, make sure that the root node is not binary or use the
tree.unroot() function before generating the topology id.
This is useful to detect the number of unique topologies over a bunch
of trees, without requiring full distance methods.
The id is, by default, calculated based on the terminal node's names.
Any other node attribute could be used instead.
"""
edge_keys = []
for s1, s2 in self.get_edges():
k1 = sorted([getattr(e, attr) for e in s1])
k2 = sorted([getattr(e, attr) for e in s2])
edge_keys.append(sorted([k1, k2]))
return md5(str(sorted(edge_keys)).encode('utf-8')).hexdigest() |
Returns True if a given target attribute is monophyletic under
this node for the provided set of values.
If not all values are represented in the current tree
structure, a ValueError exception will be raised to warn that
strict monophyly could never be reached (this behaviour can be
avoided by enabling the `ignore_missing` flag).
Parameters:
-----------
values:
a set of values for which monophyly is expected.
target_attr:
node attribute being used to check monophyly (i.e. species for
species trees, names for gene family trees, or any custom feature
present in the tree).
ignore_missing:
Avoid raising an Exception when missing attributes are found.
unrooted:
If True, tree will be treated as unrooted, thus allowing to find
monophyly even when current outgroup is splitting a monophyletic group.
Returns:
--------
the following tuple
IsMonophyletic (boolean),
clade type ('monophyletic', 'paraphyletic' or 'polyphyletic'),
leaves breaking the monophyly (set)
def check_monophyly(self,
                    values,
                    target_attr,
                    ignore_missing=False,
                    unrooted=False):
    """
    Returns True if a given target attribute is monophyletic under
    this node for the provided set of values.
    If not all values are represented in the current tree
    structure, a ValueError exception will be raised to warn that
    strict monophyly could never be reached (this behaviour can be
    avoided by enabling the `ignore_missing` flag).
    Parameters:
    -----------
    values:
        a set of values for which monophyly is expected.
    target_attr:
        node attribute being used to check monophyly (i.e. species for
        species trees, names for gene family trees, or any custom feature
        present in the tree).
    ignore_missing:
        Avoid raising an Exception when missing attributes are found.
    unrooted:
        If True, tree will be treated as unrooted, thus allowing to find
        monophyly even when current outgroup is splitting a monophyletic group.
    Returns:
    --------
    the following tuple
    IsMonophyletic (boolean),
    clade type ('monophyletic', 'paraphyletic' or 'polyphyletic'),
    leaves breaking the monophyly (set)
    """
    if type(values) != set:
        values = set(values)
    # This is the only time I traverse the tree, then I use cached
    # leaf content
    n2leaves = self.get_cached_content()
    # Raise an error if requested attribute values are not even present
    if ignore_missing:
        # Restrict the check to values actually present in the tree.
        # NOTE(review): missing_values is computed but never used here;
        # presumably kept for debugging -- confirm before removing.
        found_values = set([getattr(n, target_attr) for n in n2leaves[self]])
        missing_values = values - found_values
        values = values & found_values
    # Locate leaves matching requested attribute values
    targets = set([leaf for leaf in n2leaves[self]
               if getattr(leaf, target_attr) in values])
    if not ignore_missing:
        if values - set([getattr(leaf, target_attr) for leaf in targets]):
            raise ValueError('The monophyly of the provided values could never be reached, as not all of them exist in the tree.'
                             ' Please check your target attribute and values, or set the ignore_missing flag to True')
    if unrooted:
        # In the unrooted case, look for the smallest edge partition
        # that contains every target leaf.  The root's own partition
        # always contains all leaves, so `smallest` cannot stay None.
        smallest = None
        for side1, side2 in self.iter_edges(cached_content=n2leaves):
            if targets.issubset(side1) and (not smallest or len(side1) < len(smallest)):
                smallest = side1
            elif targets.issubset(side2) and (not smallest or len(side2) < len(smallest)):
                smallest = side2
            # A partition exactly the size of the target set cannot be
            # improved upon, so stop early.
            if smallest is not None and len(smallest) == len(targets):
                break
        foreign_leaves = smallest - targets
    else:
        # Check monophyly with get_common_ancestor. Note that this
        # step does not require traversing the tree again because
        # targets are node instances instead of node names, and
        # get_common_ancestor function is smart enough to detect it
        # and avoid unnecessary traversing.
        common = self.get_common_ancestor(targets)
        observed = n2leaves[common]
        foreign_leaves = set([leaf for leaf in observed
                          if getattr(leaf, target_attr) not in values])
    if not foreign_leaves:
        return True, "monophyletic", foreign_leaves
    else:
        # if the requested attribute is not monophyletic in this
        # node, let's differentiate between poly and paraphyly.
        poly_common = self.get_common_ancestor(foreign_leaves)
        # if the common ancestor of all foreign leaves is self
        # contained, we have a paraphyly. Otherwise, polyphyly.
        polyphyletic = [leaf for leaf in poly_common if
                        getattr(leaf, target_attr) in values]
        if polyphyletic:
            return False, "polyphyletic", foreign_leaves
        else:
            return False, "paraphyletic", foreign_leaves
Returns a list of nodes matching the provided monophyly
criteria. For a node to be considered a match, all
`target_attr` values within a node, and exclusively them,
should be grouped.
:param values: a set of values for which monophyly is
expected.
:param target_attr: node attribute being used to check
monophyly (i.e. species for species trees, names for gene
family trees).
def get_monophyletic(self, values, target_attr):
    """
    Yield the nodes matching the provided monophyly criteria.

    For a node to be considered a match, all `target_attr` values
    within the node, and exclusively them, should be grouped.

    :param values: a set of values for which monophyly is expected.
    :param target_attr: node attribute being used to check monophyly
        (i.e. species for species trees, names for gene family trees).
    """
    if type(values) != set:
        values = set(values)
    # Map each node to the set of `target_attr` values found below it.
    node_values = self.get_cached_content(store_attr=target_attr)

    def _is_match(node):
        return node_values[node] == values

    # Using the predicate as is_leaf_fn prunes the traversal below any
    # matching node, so matches are maximal (not nested).
    for candidate in self.iter_leaves(is_leaf_fn=_is_match):
        if _is_match(candidate):
            yield candidate
Given a tree with one or more polytomies, this functions returns the
list of all trees (in newick format) resulting from the combination of
all possible solutions of the multifurcated nodes.
.. warning:
Please note that the number of possible binary trees grows
exponentially with the number and size of polytomies. Using this
function with large multifurcations is not feasible:
polytomy size: 3 number of binary trees: 3
polytomy size: 4 number of binary trees: 15
polytomy size: 5 number of binary trees: 105
polytomy size: 6 number of binary trees: 945
polytomy size: 7 number of binary trees: 10395
polytomy size: 8 number of binary trees: 135135
polytomy size: 9 number of binary trees: 2027025
http://ajmonline.org/2010/darwin.php
def expand_polytomies(self,
                      map_attr="name",
                      polytomy_size_limit=5,
                      skip_large_polytomies=False):
    """
    Given a tree with one or more polytomies, this function returns the
    list of all trees (in newick format) resulting from the combination of
    all possible solutions of the multifurcated nodes.
    .. warning:
        Please note that the number of possible binary trees grows
        exponentially with the number and size of polytomies. Using this
        function with large multifurcations is not feasible:
        polytomy size: 3   number of binary trees: 3
        polytomy size: 4   number of binary trees: 15
        polytomy size: 5   number of binary trees: 105
        polytomy size: 6   number of binary trees: 945
        polytomy size: 7   number of binary trees: 10395
        polytomy size: 8   number of binary trees: 135135
        polytomy size: 9   number of binary trees: 2027025
        http://ajmonline.org/2010/darwin.php
    """
    # Marker subclass: a TipTuple is an already-resolved subtree that
    # add_leaf() must treat as an opaque leaf (no further descent).
    class TipTuple(tuple):
        pass

    # Yield every tree obtained by attaching `label` at each possible
    # position of (binary, nested-tuple) `tree`.
    def add_leaf(tree, label):
        yield (label, tree)
        if not isinstance(tree, TipTuple) and isinstance(tree, tuple):
            for left in add_leaf(tree[0], label):
                yield (left, tree[1])
            for right in add_leaf(tree[1], label):
                yield (tree[0], right)

    # Enumerate all distinct unordered binary trees over `labels` by
    # recursively inserting the first label into every smaller tree.
    def enum_unordered(labels):
        if len(labels) == 1:
            yield labels[0]
        else:
            for tree in enum_unordered(labels[1:]):
                for new_tree in add_leaf(tree, labels[0]):
                    yield new_tree

    # Bottom-up: for each node, collect every binary resolution of its
    # subtree as a nested tuple; the cross product of the children's
    # alternatives feeds the enumeration at the parent.
    n2subtrees = {}
    for n in self.traverse("postorder"):
        if n.is_leaf():
            subtrees = [getattr(n, map_attr)]
        else:
            subtrees = []
            if len(n.children) > polytomy_size_limit:
                if skip_large_polytomies:
                    # Keep the oversized polytomy unresolved: wrap each
                    # combination as an opaque TipTuple instead of
                    # enumerating its (huge) set of resolutions.
                    for childtrees in itertools.product(*[n2subtrees[ch] for ch in n.children]):
                        subtrees.append(TipTuple(childtrees))
                else:
                    raise TreeError("Found polytomy larger than current limit: %s" %n)
            else:
                for childtrees in itertools.product(*[n2subtrees[ch] for ch in n.children]):
                    subtrees.extend([TipTuple(subtree) for subtree in enum_unordered(childtrees)])
        n2subtrees[n] = subtrees
    # The str() of a nested tuple is already newick-like; just add ';'.
    return ["%s;"%str(nw) for nw in n2subtrees[self]]
Resolve all polytomies under current node by creating an
arbitrary dichotomic structure among the affected nodes. This
function randomly modifies current tree topology and should
only be used for compatibility reasons (i.e. programs
rejecting multifurcated node in the newick representation).
:param 0.0 default_dist: artificial branch distance of new
nodes.
:param 0.0 default_support: artificial branch support of new
nodes.
:param True recursive: Resolve any polytomy under this
node. When False, only current node will be checked and fixed.
def resolve_polytomy(self,
                     default_dist=0.0,
                     default_support=0.0,
                     recursive=True):
    """
    Resolve all polytomies under current node by creating an
    arbitrary dichotomic structure among the affected nodes. This
    function randomly modifies current tree topology and should
    only be used for compatibility reasons (i.e. programs
    rejecting multifurcated node in the newick representation).
    :param 0.0 default_dist: artificial branch distance of new
        nodes.
    :param 0.0 default_support: artificial branch support of new
        nodes.
    :param True recursive: Resolve any polytomy under this
        node. When False, only current node will be checked and fixed.
    """
    def _resolve(node):
        # Turn one multifurcation into a "caterpillar" chain of
        # binary nodes.
        if len(node.children) > 2:
            children = list(node.children)
            node.children = []
            # Create one new internal node per extra child.  Each new
            # node is added as the FIRST child of the previous one, so
            # the chain can later be walked via children[0].
            next_node = root = node
            for i in range(len(children) - 2):
                next_node = next_node.add_child()
                next_node.dist = default_dist
                next_node.support = default_support
            # Re-attach the original children along the chain, one per
            # level; descending stops at the second-to-last child so
            # the last two children end up as siblings on the deepest
            # new node.
            next_node = root
            for ch in children:
                next_node.add_child(ch)
                if ch != children[-2]:
                    next_node = next_node.children[0]
    target = [self]
    if recursive:
        target.extend([n for n in self.get_descendants()])
    for n in target:
        _resolve(n)
Removes all empty lines from above and below the text.
We can't just use text.strip() because that would remove the leading
space for the table.
Parameters
----------
lines : list of str
Returns
-------
lines : list of str
The text lines without empty lines above or below
def truncate_empty_lines(lines):
    """
    Removes all empty lines from above and below the text, in place.

    We can't just use text.strip() because that would remove the leading
    space for the table.

    Parameters
    ----------
    lines : list of str

    Returns
    -------
    lines : list of str
        The same list object, without empty lines above or below.
        An empty (or all-blank) input yields an empty list instead of
        raising IndexError.
    """
    # Guard with `lines and` so an empty or all-blank list does not
    # raise IndexError (the previous version crashed on such input).
    while lines and lines[0].rstrip() == '':
        lines.pop(0)
    while lines and lines[-1].rstrip() == '':
        lines.pop(-1)
    return lines
Convert a date or datetime object into a JavaScript timestamp
def jstimestamp_slow(dte):
    '''Convert a date or datetime object into a JavaScript timestamp
    (milliseconds since the Unix epoch).'''
    year, month, day, hour, minute, second = dte.timetuple()[:6]
    # Whole days since the epoch, via the proleptic ordinal.
    day_count = date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
    total_seconds = ((day_count * 24 + hour) * 60 + minute) * 60 + second
    if isinstance(dte, datetime):
        # Microseconds contribute fractional milliseconds.
        return 1000 * total_seconds + 0.001 * dte.microsecond
    return 1000 * total_seconds
Convert a date or datetime object into a JavaScript timestamp.
def jstimestamp(dte):
    '''Convert a date or datetime object into a JavaScript timestamp
    (milliseconds since the Unix epoch).'''
    # Whole days since the epoch, via the proleptic ordinal.
    day_count = date(dte.year, dte.month, 1).toordinal() - _EPOCH_ORD + dte.day - 1
    hour_count = day_count * 24
    if not isinstance(dte, datetime):
        # Plain dates carry no time component: hours -> milliseconds.
        return 3600000 * hour_count
    hour_count += dte.hour
    total_seconds = (hour_count * 60 + dte.minute) * 60 + dte.second
    # Truncate microseconds to whole milliseconds.
    return 1000 * total_seconds + int(0.001 * dte.microsecond)
Convert a string or html file to an rst table string.
Parameters
----------
html_string : str
Either the html string, or the filepath to the html
force_headers : bool
Make the first row become headers, whether or not they are
headers in the html file.
center_cells : bool
Whether or not to center the contents of the cells
center_headers : bool
Whether or not to center the contents of the header cells
Returns
-------
str
The html table converted to an rst grid table
Notes
-----
This function **requires** BeautifulSoup_ to work.
Example
-------
>>> html_text = '''
... <table>
... <tr>
... <th>
... Header 1
... </th>
... <th>
... Header 2
... </th>
... <th>
... Header 3
... </th>
... <tr>
... <td>
... <p>This is a paragraph</p>
... </td>
... <td>
... <ul>
... <li>List item 1</li>
... <li>List item 2</li>
... </ul>
... </td>
... <td>
... <ol>
... <li>Ordered 1</li>
... <li>Ordered 2</li>
... </ol>
... </td>
... </tr>
... </table>
... '''
>>> import dashtable
>>> print(dashtable.html2rst(html_text))
+---------------------+----------------+--------------+
| Header 1 | Header 2 | Header 3 |
+=====================+================+==============+
| This is a paragraph | - List item 1 | #. Ordered 1 |
| | - List item 2 | #. Ordered 2 |
+---------------------+----------------+--------------+
.. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/
def html2rst(html_string, force_headers=False, center_cells=False,
             center_headers=False):
    """
    Convert a string or html file to an rst table string.

    Parameters
    ----------
    html_string : str
        Either the html string, or the filepath to the html
    force_headers : bool
        Make the first row become headers, whether or not they are
        headers in the html file.
    center_cells : bool
        Whether or not to center the contents of the cells
    center_headers : bool
        Whether or not to center the contents of the header cells

    Returns
    -------
    str
        The html table converted to an rst grid table

    Notes
    -----
    This function **requires** BeautifulSoup_ to work.

    Example
    -------
    >>> html_text = '''
    ... <table>
    ...     <tr>
    ...         <th>
    ...             Header 1
    ...         </th>
    ...         <th>
    ...             Header 2
    ...         </th>
    ...         <th>
    ...             Header 3
    ...         </th>
    ...     <tr>
    ...         <td>
    ...             <p>This is a paragraph</p>
    ...         </td>
    ...         <td>
    ...             <ul>
    ...                 <li>List item 1</li>
    ...                 <li>List item 2</li>
    ...             </ul>
    ...         </td>
    ...         <td>
    ...             <ol>
    ...                 <li>Ordered 1</li>
    ...                 <li>Ordered 2</li>
    ...             </ol>
    ...         </td>
    ...     </tr>
    ... </table>
    ... '''
    >>> import dashtable
    >>> print(dashtable.html2rst(html_text))
    +---------------------+----------------+--------------+
    | Header 1            | Header 2       | Header 3     |
    +=====================+================+==============+
    | This is a paragraph | - List item 1  | #. Ordered 1 |
    |                     | - List item 2  | #. Ordered 2 |
    +---------------------+----------------+--------------+

    .. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/
    """
    if os.path.isfile(html_string):
        # Use a context manager so the file handle is closed even on a
        # read error (the previous version leaked it), and avoid
        # shadowing the `file` builtin.
        with open(html_string, 'r', encoding='utf-8') as infile:
            html_string = infile.read()
    table_data, spans, use_headers = html2data(html_string)
    # html2data returns '' when no <table> element is present.
    if table_data == '':
        return ''
    if force_headers:
        use_headers = True
    return data2rst(table_data, spans, use_headers, center_cells, center_headers)
Create a list of rows and columns that will make up a span
Parameters
----------
row : int
The row of the first cell in the span
column : int
The column of the first cell in the span
extra_rows : int
The number of rows that make up the span
extra_columns : int
The number of columns that make up the span
Returns
-------
span : list of lists of int
A span is a list of [row, column] pairs that make up a span
def make_span(row, column, extra_rows, extra_columns):
    """
    Create a list of rows and columns that will make up a span.

    Parameters
    ----------
    row : int
        The row of the first cell in the span
    column : int
        The column of the first cell in the span
    extra_rows : int
        The number of extra rows that make up the span
    extra_columns : int
        The number of extra columns that make up the span

    Returns
    -------
    span : list of lists of int
        The [row, column] pairs that make up the span.  Some pairs
        appear more than once; consumers rely only on the order and
        the set of cells covered.
    """
    last_row = row + extra_rows
    last_column = column + extra_columns
    span = [[row, column]]
    # Walk down the first column, then across the first row, then close
    # with the bottom-right corner of the span.
    span.extend([r, column] for r in range(row, last_row + 1))
    span.extend([row, c] for c in range(column, last_column + 1))
    span.append([last_row, last_column])
    return span
Convert the contents of a span of the table to a grid table cell
Parameters
----------
table : list of lists of str
The table of rows containg strings to convert to a grid table
span : list of lists of int
list of [row, column] pairs that make up a span in the table
widths : list of int
list of the column widths of the table
heights : list of int
list of the heights of each row in the table
use_headers : bool
Whether or not to use headers in the table
Returns
-------
cell : dashtable.data2rst.Cell
def make_cell(table, span, widths, heights, use_headers):
    """
    Convert the contents of a span of the table to a grid table cell.

    Parameters
    ----------
    table : list of lists of str
        The table of rows containing strings to convert to a grid table
    span : list of lists of int
        list of [row, column] pairs that make up a span in the table
    widths : list of int
        list of the column widths of the table
    heights : list of int
        list of the heights of each row in the table
    use_headers : bool
        Whether or not to use headers in the table

    Returns
    -------
    cell : dashtable.data2rst.Cell
    """
    width = get_span_char_width(span, widths)
    height = get_span_char_height(span, heights)
    start_row, start_column = span[0][0], span[0][1]
    lines = table[start_row][start_column].split("\n")
    # Pad each line out to the cell's full character width (negative
    # padding collapses to the empty string, leaving long lines as-is).
    lines = [line + " " * (width - len(line)) for line in lines]
    # Pad the cell out to its full character height with blank lines.
    lines.extend([" " * width] * (height - len(lines)))
    # Header cells (top row when headers are enabled) are closed with
    # '=' instead of '-'.
    divider = "=" if use_headers and start_row == 0 else "-"
    parts = ["+" + width * "-" + "+"]
    parts.extend("|" + lines[i] + "|" for i in range(height))
    parts.append("+" + width * divider + "+")
    cell_text = "\n".join(parts)
    return Cell(cell_text, start_row, start_column,
                get_span_row_count(span), get_span_column_count(span))
Initialize application object.
def init_app(self, app, **kwargs):
    """Initialize application object."""
    self.init_db(app, **kwargs)
    # Built-in alembic scripts shipped with invenio_db.
    script_location = pkg_resources.resource_filename('invenio_db', 'alembic')
    # Additional migration directories contributed via entry points.
    version_locations = [
        (base_entry.name, pkg_resources.resource_filename(
            base_entry.module_name, os.path.join(*base_entry.attrs)
        )) for base_entry in pkg_resources.iter_entry_points(
            'invenio_db.alembic'
        )
    ]
    app.config.setdefault('ALEMBIC', {
        'script_location': script_location,
        'version_locations': version_locations,
    })
    self.alembic.init_app(app)
    app.extensions['invenio-db'] = self
    app.cli.add_command(db_cmd)
Initialize Flask-SQLAlchemy extension.
def init_db(self, app, entry_point_group='invenio_db.models', **kwargs):
    """Initialize Flask-SQLAlchemy extension.

    :param app: the Flask application.
    :param entry_point_group: entry point group from which model bases
        are loaded; pass a falsy value to skip entry-point loading.
    :param kwargs: may contain ``db`` (a Flask-SQLAlchemy instance to
        use instead of the module default) and ``versioning_manager``.
    """
    # Setup SQLAlchemy
    app.config.setdefault(
        'SQLALCHEMY_DATABASE_URI',
        'sqlite:///' + os.path.join(app.instance_path, app.name + '.db')
    )
    app.config.setdefault('SQLALCHEMY_ECHO', False)
    # Initialize Flask-SQLAlchemy extension.
    database = kwargs.get('db', db)
    database.init_app(app)
    # Initialize versioning support.
    self.init_versioning(app, database, kwargs.get('versioning_manager'))
    # Initialize model bases
    if entry_point_group:
        # Loading an entry point imports its module, which registers
        # the models on the shared metadata as a side effect.
        for base_entry in pkg_resources.iter_entry_points(
                entry_point_group):
            base_entry.load()
    # All models should be loaded by now.
    sa.orm.configure_mappers()
    # Ensure that versioning classes have been built.
    if app.config['DB_VERSIONING']:
        manager = self.versioning_manager
        if manager.pending_classes:
            if not versioning_models_registered(manager, database.Model):
                manager.builder.configure_versioned_classes()
        elif 'transaction' not in database.metadata.tables:
            # No versioned models pending, but SQLAlchemy-Continuum
            # still needs its transaction table to exist.
            manager.declarative_base = database.Model
            manager.create_transaction_model()
            manager.plugins.after_build_tx_class(manager)
Initialize the versioning support using SQLAlchemy-Continuum.
def init_versioning(self, app, database, versioning_manager=None):
    """Initialize the versioning support using SQLAlchemy-Continuum.

    :param app: the Flask application.
    :param database: the Flask-SQLAlchemy instance.
    :param versioning_manager: optional SQLAlchemy-Continuum manager;
        the library default is used when not provided.
    """
    # Versioning defaults to enabled only when SQLAlchemy-Continuum is
    # installed.
    try:
        pkg_resources.get_distribution('sqlalchemy_continuum')
    except pkg_resources.DistributionNotFound:  # pragma: no cover
        default_versioning = False
    else:
        default_versioning = True
    app.config.setdefault('DB_VERSIONING', default_versioning)
    if not app.config['DB_VERSIONING']:
        return
    # Versioning was requested explicitly but the dependency is absent.
    if not default_versioning:  # pragma: no cover
        raise RuntimeError(
            'Please install extra versioning support first by running '
            'pip install invenio-db[versioning].'
        )
    # Now we can import SQLAlchemy-Continuum.
    from sqlalchemy_continuum import make_versioned
    from sqlalchemy_continuum import versioning_manager as default_vm
    from sqlalchemy_continuum.plugins import FlaskPlugin
    # Try to guess user model class:
    if 'DB_VERSIONING_USER_MODEL' not in app.config:  # pragma: no cover
        try:
            pkg_resources.get_distribution('invenio_accounts')
        except pkg_resources.DistributionNotFound:
            user_cls = None
        else:
            user_cls = 'User'
    else:
        user_cls = app.config.get('DB_VERSIONING_USER_MODEL')
    # The FlaskPlugin records the acting user; it only makes sense when
    # a user model is available.
    plugins = [FlaskPlugin()] if user_cls else []
    # Call make_versioned() before your models are defined.
    self.versioning_manager = versioning_manager or default_vm
    make_versioned(
        user_cls=user_cls,
        manager=self.versioning_manager,
        plugins=plugins,
    )
    # Register models that have been loaded beforehand.
    builder = self.versioning_manager.builder
    for tbl in database.metadata.tables.values():
        builder.instrument_versioned_classes(
            database.mapper, get_class_by_table(database.Model, tbl)
        )
Convert an html string to data table
Parameters
----------
html_string : str
row_count : int
column_count : int
Returns
-------
data_table : list of lists of str
def extract_table(html_string, row_count, column_count):
    """
    Convert an html string to data table.

    Parameters
    ----------
    html_string : str
    row_count : int
    column_count : int

    Returns
    -------
    data_table : list of lists of str
        Returns '' when no <table> element is found, and [['']] when
        the table has no rows.
    """
    try:
        from bs4 import BeautifulSoup
        from bs4.element import Tag
    except ImportError:
        print("ERROR: You must have BeautifulSoup to use html2data")
        return
    #html_string = convertRichText(html_string)
    # Pre-size the grid with None as the "unassigned" marker; rowspans
    # and colspans claim cells ahead of later <td> elements.
    data_table = []
    for row in range(0, row_count):
        data_table.append([])
        for column in range(0, column_count):
            data_table[-1].append(None)
    soup = BeautifulSoup(html_string, 'html.parser')
    table = soup.find('table')
    if not table:
        return ''
    trs = table.findAll('tr')
    if len(trs) == 0:
        return [['']]
    for tr in range(len(trs)):
        # Header cells take precedence over data cells within a row.
        ths = trs[tr].findAll('th')
        if len(ths) == 0:
            tds = trs[tr].findAll('td')
        else:
            tds = ths
        if len(tds) == 0:
            # Row with no cells at all: fill it with empty placeholder
            # tags so the grid stays rectangular.
            # NOTE(review): Tag("", name="") relies on loose Tag
            # constructor behaviour -- confirm against the installed
            # BeautifulSoup version.
            tds = []
            for i in range(0, column_count):
                tds.append(Tag("", name=""))
        for i in range(len(tds)):
            td = tds[i]
            # NOTE: this rebinds `row`/`column`, shadowing the earlier
            # grid-building loop variables (harmless here).
            row, column = find_unassigned_table_cell(data_table)
            r_span_count = 1
            c_span_count = 1
            if td.has_attr('rowspan'):
                r_span_count = int(td['rowspan'])
            if td.has_attr('colspan'):
                c_span_count = int(td['colspan'])
            # The anchor cell receives the converted text; every other
            # cell covered by the span is blanked out.
            for row_prime in range(row, row + r_span_count):
                for column_prime in range(column, column + c_span_count):
                    if row_prime == row and column_prime == column:
                        items = []
                        for item in td.contents:
                            items.append(str(item))
                        string = ''.join(items).strip()
                        text = restructify(string).rstrip()
                        data_table[row_prime][column_prime] = text
                    else:
                        data_table[row_prime][column_prime] = ""
            # Short row: blank out the remaining unassigned cells.
            if i + 1 < column_count and i == len(tds) - 1:
                for x in range(len(tds), column_count):
                    if data_table[row][x] is None:
                        data_table[row][x] = ""
    # Normalize any leftover None/falsy cells to empty strings.
    for row in range(len(data_table)):
        for column in range(len(data_table[row])):
            if not data_table[row][column]:
                data_table[row][column] = ""
    return data_table
Ensure SQLite checks foreign key constraints.
For further details see "Foreign key support" sections on
https://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support
def do_sqlite_connect(dbapi_connection, connection_record):
    """Ensure SQLite checks foreign key constraints.

    SQLite ships with foreign key enforcement disabled per connection,
    so it must be switched on for every new DBAPI connection.

    For further details see "Foreign key support" sections on
    https://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support
    """
    fk_cursor = dbapi_connection.cursor()
    fk_cursor.execute('PRAGMA foreign_keys=ON')
    fk_cursor.close()
Call before engine creation.
def apply_driver_hacks(self, app, info, options):
    """Call before engine creation.

    Applies per-driver workarounds, mainly so werkzeug ``LocalProxy``
    values can be passed to the DB drivers transparently.
    """
    # Don't forget to apply hacks defined on parent object.
    super(SQLAlchemy, self).apply_driver_hacks(app, info, options)
    if info.drivername == 'sqlite':
        connect_args = options.setdefault('connect_args', {})
        if 'isolation_level' not in connect_args:
            # disable pysqlite's emitting of the BEGIN statement entirely.
            # also stops it from emitting COMMIT before any DDL.
            connect_args['isolation_level'] = None
        # The listeners are registered on the global Engine class, so
        # guard against registering them twice.
        if not event.contains(Engine, 'connect', do_sqlite_connect):
            event.listen(Engine, 'connect', do_sqlite_connect)
        if not event.contains(Engine, 'begin', do_sqlite_begin):
            event.listen(Engine, 'begin', do_sqlite_begin)
        from sqlite3 import register_adapter

        def adapt_proxy(proxy):
            """Get current object and try to adapt it again."""
            return proxy._get_current_object()

        register_adapter(LocalProxy, adapt_proxy)
    elif info.drivername == 'postgresql+psycopg2':  # pragma: no cover
        from psycopg2.extensions import adapt, register_adapter

        def adapt_proxy(proxy):
            """Get current object and try to adapt it again."""
            return adapt(proxy._get_current_object())

        register_adapter(LocalProxy, adapt_proxy)
    elif info.drivername == 'mysql+pymysql':  # pragma: no cover
        from pymysql import converters

        def escape_local_proxy(val, mapping):
            """Get current object and try to adapt it again."""
            return converters.escape_item(
                val._get_current_object(),
                self.engine.dialect.encoding,
                mapping=mapping,
            )

        # pymysql uses lookup tables rather than an adapter registry.
        converters.conversions[LocalProxy] = escape_local_proxy
        converters.encoders[LocalProxy] = escape_local_proxy
Create tables.
def create(verbose):
    """Create tables."""
    click.secho('Creating all tables!', fg='yellow', bold=True)
    # sorted_tables is ordered by dependency, so parents come first.
    with click.progressbar(_db.metadata.sorted_tables) as progress:
        for tbl in progress:
            if verbose:
                click.echo(' Creating table {0}'.format(tbl))
            tbl.create(bind=_db.engine, checkfirst=True)
    create_alembic_version_table()
    click.secho('Created all tables!', fg='green')
Drop tables.
def drop(verbose):
    """Drop tables."""
    click.secho('Dropping all tables!', fg='red', bold=True)
    # Drop in reverse dependency order so children go before parents.
    with click.progressbar(reversed(_db.metadata.sorted_tables)) as progress:
        for tbl in progress:
            if verbose:
                click.echo(' Dropping table {0}'.format(tbl))
            tbl.drop(bind=_db.engine, checkfirst=True)
    drop_alembic_version_table()
    click.secho('Dropped all tables!', fg='green')
Create database.
def init():
    """Create database."""
    click.secho('Creating database {0}'.format(_db.engine.url),
                fg='green')
    db_url = str(_db.engine.url)
    if not database_exists(db_url):
        create_database(db_url)
Drop database.
def destroy():
    """Drop database."""
    click.secho('Destroying database {0}'.format(_db.engine.url),
                fg='red', bold=True)
    if _db.engine.name != 'sqlite':
        drop_database(_db.engine.url)
    else:
        # SQLite "databases" are plain files; dropping one that was
        # never created raises FileNotFoundError.
        try:
            drop_database(_db.engine.url)
        except FileNotFoundError:
            click.secho('Sqlite database has not been initialised',
                        fg='red', bold=True)
Fast rolling operation with O(log n) updates where n is the
window size
def rolling(self, op):
    """Fast rolling operation with O(log n) updates where n is the
    window size.

    Yields ``op(ol, missing)`` for each window position, where ``ol``
    is a skiplist holding the non-missing elements of the window.
    """
    # Bind frequently used attributes to locals for speed in the loop.
    missing = self.missing
    ismissing = self.ismissing
    window = self.window
    it = iter(self.iterable)
    # Prime the window with the first `window` elements.
    queue = deque(islice(it, window))
    # `e == e` is False for NaN, so this filters missing float values.
    # NOTE(review): the initial fill uses `e == e` while the update
    # loop uses `ismissing` -- presumably equivalent; confirm.
    ol = self.skiplist((e for e in queue if e == e))
    yield op(ol,missing)
    for newelem in it:
        # Slide the window: drop the oldest element, add the newest,
        # keeping the skiplist in sync with the non-missing members.
        oldelem = queue.popleft()
        if not ismissing(oldelem):
            ol.remove(oldelem)
        queue.append(newelem)
        if not ismissing(newelem):
            ol.insert(newelem)
        yield op(ol, missing)
Find the length of a colspan.
Parameters
----------
span : list of lists of int
The [row, column] pairs that make up the span
Returns
-------
columns : int
The number of columns included in the span
Example
-------
Consider this table::
+------+------------------+
| foo | bar |
+------+--------+---------+
| spam | goblet | berries |
+------+--------+---------+
::
>>> span = [[0, 1], [0, 2]]
>>> print(get_span_column_count(span))
2
def get_span_column_count(span):
    """
    Find the length of a colspan.

    Parameters
    ----------
    span : list of lists of int
        The [row, column] pairs that make up the span

    Returns
    -------
    columns : int
        The number of columns included in the span

    Example
    -------
    Consider this table::

        +------+------------------+
        | foo  | bar              |
        +------+--------+---------+
        | spam | goblet | berries |
        +------+--------+---------+

    ::

        >>> span = [[0, 1], [0, 2]]
        >>> print(get_span_column_count(span))
        2
    """
    # Count strictly increasing column indices; spans produced by
    # make_span list columns in non-decreasing order, so this equals
    # the number of distinct columns.
    count = 1
    rightmost = span[0][1]
    for pair in span:
        if pair[1] > rightmost:
            count += 1
            rightmost = pair[1]
    return count
returns self as a dictionary with _underscore subdicts corrected.
def to_dict(self):
    """Return the instance attributes as a dict, stripping one leading
    underscore from attribute names."""
    return {(key[1:] if key[0] == "_" else key): value
            for key, value in self.__dict__.items()}
Sum the widths of the columns that make up the span, plus the extra.
Parameters
----------
span : list of lists of int
list of [row, column] pairs that make up the span
column_widths : list of int
The widths of the columns that make up the table
Returns
-------
total_width : int
The total width of the span
def get_span_char_width(span, column_widths):
    """
    Sum the widths of the columns that make up the span, plus the extra
    separator characters between adjacent columns.

    Parameters
    ----------
    span : list of lists of int
        list of [row, column] pairs that make up the span
    column_widths : list of int
        The widths of the columns that make up the table

    Returns
    -------
    total_width : int
        The total character width of the span
    """
    first_column = span[0][1]
    column_count = get_span_column_count(span)
    body_width = sum(column_widths[col]
                     for col in range(first_column,
                                      first_column + column_count))
    # One border character sits between each pair of merged columns.
    return body_width + (column_count - 1)
Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.
:param old_key: old SECRET_KEY.
:param model: the affected db model.
:param properties: list of properties to rebuild.
def rebuild_encrypted_properties(old_key, model, properties):
    """Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.

    Reads the affected rows while the app temporarily uses the old key,
    then rewrites them under the new key.

    :param old_key: old SECRET_KEY.
    :param model: the affected db model.
    :param properties: list of properties to rebuild.
    """
    # NOTE(review): Inspector.get_primary_keys is deprecated in newer
    # SQLAlchemy in favour of get_pk_constraint -- confirm the pinned
    # SQLAlchemy version still provides it.
    inspector = reflection.Inspector.from_engine(db.engine)
    primary_key_names = inspector.get_primary_keys(model.__tablename__)
    new_secret_key = current_app.secret_key
    db.session.expunge_all()
    try:
        with db.session.begin_nested():
            # Temporarily switch back to the old key so EncryptedType
            # columns decrypt correctly while reading.
            current_app.secret_key = old_key
            db_columns = []
            for primary_key in primary_key_names:
                db_columns.append(getattr(model, primary_key))
            for prop in properties:
                db_columns.append(getattr(model, prop))
            old_rows = db.session.query(*db_columns).all()
    except Exception as e:
        current_app.logger.error(
            'Exception occurred while reading encrypted properties. '
            'Try again before starting the server with the new secret key.')
        raise e
    finally:
        # Always restore the new key, even if reading failed.
        current_app.secret_key = new_secret_key
        db.session.expunge_all()
    # Re-write each row; values are re-encrypted with the new key on
    # update.
    for old_row in old_rows:
        primary_keys, old_entries = old_row[:len(primary_key_names)], \
            old_row[len(primary_key_names):]
        primary_key_fields = dict(zip(primary_key_names, primary_keys))
        update_values = dict(zip(properties, old_entries))
        model.query.filter_by(**primary_key_fields).\
            update(update_values)
    db.session.commit()
Create alembic_version table.
def create_alembic_version_table():
    """Create alembic_version table."""
    alembic = current_app.extensions['invenio-db'].alembic
    # Only stamp the database when no version table exists yet, leaving
    # an already-stamped database untouched.
    # NOTE(review): relies on private alembic APIs
    # (_has_version_table, _ensure_version_table, _real_heads) -- may
    # break across alembic releases.
    if not alembic.migration_context._has_version_table():
        alembic.migration_context._ensure_version_table()
        # Stamp every current head so future upgrades start from here.
        for head in alembic.script_directory.revision_map._real_heads:
            alembic.migration_context.stamp(alembic.script_directory, head)
Drop alembic_version table.
def drop_alembic_version_table():
    """Drop alembic_version table."""
    engine = _db.engine
    if engine.dialect.has_table(engine, 'alembic_version'):
        # Reflect the existing table so SQLAlchemy can emit the DROP.
        version_table = _db.Table('alembic_version', _db.metadata,
                                  autoload_with=engine)
        version_table.drop(bind=engine)
Get the name of the versioned model class.
def versioning_model_classname(manager, model):
    """Get the name of the versioned model class.

    When the manager's ``use_module_name`` option is enabled (the default),
    the title-cased module path is prefixed to the class name.
    """
    if not manager.options.get('use_module_name', True):
        return '{0}Version'.format(model.__name__)
    module_prefix = model.__module__.title().replace('.', '')
    return '{0}{1}Version'.format(module_prefix, model.__name__)
Return True if all versioning models have been registered.
def versioning_models_registered(manager, base):
    """Return True if all versioning models have been registered."""
    registered_names = base._decl_class_registry.keys()
    for pending in manager.pending_classes:
        if versioning_model_classname(manager, pending) not in registered_names:
            return False
    return True
Convert an iterable into a symmetric matrix.
def vector_to_symmetric(v):
    '''Convert an iterable into a symmetric matrix.

    :param v: sized iterable holding the upper/lower triangle of a
        symmetric matrix in row-major order (N*(N+1)//2 entries).
    :return: an N x N symmetric ``ndarray``.
    :raises ValueError: if ``len(v)`` is not a triangular number.
    '''
    # Renamed from ``np`` to avoid shadowing the conventional numpy alias.
    m = len(v)
    # Invert m = N*(N+1)/2 to recover the matrix dimension N.
    N = (int(sqrt(1 + 8*m)) - 1)//2
    if N*(N+1)//2 != m:
        raise ValueError('Cannot convert vector to symmetric matrix')
    sym = ndarray((N, N))
    iterable = iter(v)
    for r in range(N):
        for c in range(r+1):
            # ``next(iterable)`` replaces the Python 2-only ``.next()``.
            sym[r, c] = sym[c, r] = next(iterable)
    return sym
The covariance matrix from the aggregate sample. It accepts an
optional parameter for the degree of freedoms.
:parameter ddof: If not ``None`` normalization is by (N - ddof), where N is
the number of observations; this overrides the value implied by bias.
The default value is None.
def cov(self, ddof=None, bias=0):
    '''The covariance matrix from the aggregate sample. It accepts an
    optional parameter for the degree of freedoms.

    :parameter ddof: If not ``None`` normalization is by (N - ddof), where N is
        the number of observations; this overrides the value implied by bias.
        The default value is None.
    '''
    n_obs = self.n
    # ``ddof`` overrides ``bias``; otherwise bias selects N vs N-1.
    if ddof is not None:
        denom = n_obs - ddof
    elif bias:
        denom = n_obs
    else:
        denom = n_obs - 1
    return (self.sxx - outer(self.sx, self.sx)/n_obs)/denom
The correlation matrix
def corr(self):
    '''The correlation matrix'''
    covariance = self.cov()
    dim = covariance.shape[0]
    result = ndarray((dim, dim))
    for i in range(dim):
        # Diagonal is 1 by definition of correlation.
        result[i, i] = 1.
        for j in range(i):
            value = covariance[i, j]/sqrt(covariance[i, i]*covariance[j, j])
            result[i, j] = result[j, i] = value
    return result
Calculate the Calmar ratio for a Weiner process
@param sharpe: Annualized Sharpe ratio
@param T: Time interval in years
def calmar(sharpe, T = 1.0):
    '''
    Calculate the Calmar ratio for a Weiner process

    @param sharpe: Annualized Sharpe ratio
    @param T: Time interval in years
    '''
    half_var = 0.5*T*sharpe*sharpe
    return half_var/qp(half_var)
Multiplicator for normalizing calmar ratio to period tau
def calmarnorm(sharpe, T, tau = 1.0):
    '''
    Multiplicator for normalizing calmar ratio to period tau
    '''
    ratio_at_tau = calmar(sharpe, tau)
    ratio_at_T = calmar(sharpe, T)
    return ratio_at_tau/ratio_at_T
Upgrade database.
def upgrade():
    """Upgrade database.

    Renames existing indexes, unique constraints and foreign keys so their
    names match ``NAMING_CONVENTION``, by comparing each reflected
    constraint/index against the one declared in the target metadata and
    dropping/recreating it when the names differ.
    """
    op.execute('COMMIT')  # See https://bitbucket.org/zzzeek/alembic/issue/123
    ctx = op.get_context()
    metadata = ctx.opts['target_metadata']
    metadata.naming_convention = NAMING_CONVENTION
    metadata.bind = ctx.connection.engine
    insp = Inspector.from_engine(ctx.connection.engine)
    for table_name in insp.get_table_names():
        # Only touch tables that the application metadata knows about.
        if table_name not in metadata.tables:
            continue
        table = metadata.tables[table_name]
        # Index reflected constraints by their column tuples so each declared
        # constraint can be matched to its live counterpart by columns,
        # independent of its current (old-convention) name.
        ixs = {}
        uqs = {}
        fks = {}
        for ix in insp.get_indexes(table_name):
            ixs[tuple(ix['column_names'])] = ix
        for uq in insp.get_unique_constraints(table_name):
            uqs[tuple(uq['column_names'])] = uq
        for fk in insp.get_foreign_keys(table_name):
            fks[(tuple(fk['constrained_columns']), fk['referred_table'])] = fk
        with op.batch_alter_table(
                table_name, naming_convention=NAMING_CONVENTION) as batch_op:
            for c in list(table.constraints) + list(table.indexes):
                key = None
                if isinstance(c, sa.schema.ForeignKeyConstraint):
                    # FKs are keyed by (columns, referred table) since several
                    # FKs can share the same column set.
                    key = (tuple(c.column_keys), c.referred_table.name)
                    fk = fks.get(key)
                    if fk and c.name != fk['name']:
                        batch_op.drop_constraint(
                            fk['name'], type_='foreignkey')
                        batch_op.create_foreign_key(
                            op.f(c.name), fk['referred_table'],
                            fk['constrained_columns'],
                            fk['referred_columns'],
                            **fk['options']
                        )
                elif isinstance(c, sa.schema.UniqueConstraint):
                    key = tuple(c.columns.keys())
                    uq = uqs.get(key)
                    if uq and c.name != uq['name']:
                        batch_op.drop_constraint(uq['name'], type_='unique')
                        batch_op.create_unique_constraint(
                            op.f(c.name), uq['column_names'])
                elif isinstance(c, sa.schema.CheckConstraint):
                    # CHECK constraints can't be renamed reliably here.
                    util.warn('Update {0.table.name} CHECK {0.name} '
                              'manually'.format(c))
                elif isinstance(c, sa.schema.Index):
                    key = tuple(c.columns.keys())
                    ix = ixs.get(key)
                    if ix and c.name != ix['name']:
                        batch_op.drop_index(ix['name'])
                        batch_op.create_index(
                            op.f(c.name), ix['column_names'],
                            unique=ix['unique'],
                        )
                elif isinstance(c, sa.schema.PrimaryKeyConstraint) or \
                        c.name == '_unnamed_':
                    # NOTE we don't care about primary keys since they have
                    # specific syntax.
                    pass
                else:
                    raise RuntimeError('Missing {0!r}'.format(c))
Convert table data to a simple rst table
Parameters
----------
table : list of lists of str
A table of strings.
spans : list of lists of lists of int
A list of spans. A span is a list of [Row, Column] pairs of
table cells that are joined together.
use_headers : bool, optional
Whether or not to include headers in the table. A header is
a cell that is underlined with "="
headers_row : int
The row that will be the headers. In a simple rst table, the
headers do not need to be at the top.
Returns
-------
str
The simple rst table
Example
-------
>>> table = [
... ["Inputs", "", "Output"],
... ["A", "B", "A or B"],
... ["False", "False", "False"],
... ["True", "False", "True"],
... ["False", "True", "True"],
... ["True", "True", "True"],
... ]
>>> spans = [
... [ [0, 0], [0, 1] ]
... ]
>>> print(data2simplerst(table, spans, headers_row=1))
====== ===== ======
Inputs Output
------------- ------
A B A or B
====== ===== ======
False False False
True False True
False True True
True True True
====== ===== ======
def data2simplerst(table, spans=[[[0, 0]]], use_headers=True, headers_row=0):
    """
    Convert table data to a simple rst table

    Parameters
    ----------
    table : list of lists of str
        A table of strings.
    spans : list of lists of lists of int
        A list of spans. A span is a list of [Row, Column] pairs of
        table cells that are joined together.
    use_headers : bool, optional
        Whether or not to include headers in the table. A header is
        a cell that is underlined with "="
    headers_row : int
        The row that will be the headers. In a simple rst table, the
        headers do not need to be at the top.

    Returns
    -------
    str
        The simple rst table

    Example
    -------
    >>> table = [
    ...     ["Inputs", "", "Output"],
    ...     ["A", "B", "A or B"],
    ...     ["False", "False", "False"],
    ...     ["True", "False", "True"],
    ...     ["False", "True", "True"],
    ...     ["True", "True", "True"],
    ... ]
    >>> spans = [
    ...     [ [0, 0], [0, 1] ]
    ... ]
    >>> print(data2simplerst(table, spans, headers_row=1))
    ====== ===== ======
    Inputs        Output
    ------------- ------
    A      B     A or B
    ====== ===== ======
    False  False  False
    True   False  True
    False  True   True
    True   True   True
    ====== ===== ======
    """
    # NOTE(review): the mutable default ``spans`` appears to be read-only in
    # this function (only compared and passed to helpers), so sharing it
    # across calls looks safe — confirm the helpers do not mutate it.
    table = copy.deepcopy(table)  # never mutate the caller's table
    # check_table / check_span return "" on success, else an error message.
    table_ok = check_table(table)
    if not table_ok == "":
        return "ERROR: " + table_ok
    if not spans == [[[0, 0]]]:
        for span in spans:
            span_ok = check_span(span, table)
            if not span_ok == "":
                return "ERROR: " + span_ok
    table = ensure_table_strings(table)
    table = multis_2_mono(table)
    output = []
    # Column width = widest cell in each column.
    column_widths = []
    for col in table[0]:
        column_widths.append(0)
    for row in range(len(table)):
        for column in range(len(table[row])):
            if len(table[row][column]) > column_widths[column]:
                column_widths[column] = len(table[row][column])
    # Build the "==== ==== ====" rule used at the top, after the header row,
    # and at the bottom of the table.
    underline = ''
    for col in column_widths:
        underline = ''.join([underline + col * '=', ' '])
    output.append(underline)
    for row in range(len(table)):
        string = ''
        column = 0
        while column < len(table[row]):
            span = get_span(spans, row, column)
            if (span and span[0] == [row, column] and
                    not table[row][column] == ''):
                # First cell of a span: center the text over the combined
                # width of all spanned columns (plus the gaps between them).
                span_col_count = get_span_column_count(span)
                end_col = column + span_col_count
                width = sum(column_widths[column:end_col])
                width += 2 * (span_col_count - 1)
                string += center_line(width, table[row][column]) + ' '
            elif table[row][column] == '':
                # Continuation cell of a span: already covered above.
                pass
            else:
                string += center_line(
                    column_widths[column], table[row][column]) + ' '
            column += 1
        output.append(string)
        if row == headers_row and use_headers:
            output.append(underline)
        else:
            # Rows containing spans get a "---- ----" rule beneath them so
            # the joined cells are visible in the rendered table.
            if row_includes_spans(table, row, spans):
                new_underline = ''
                column = 0
                while column < len(table[row]):
                    span = get_span(spans, row, column)
                    if (span and span[0] == [row, column] and
                            not table[row][column] == ''):
                        span_col_count = get_span_column_count(span)
                        end_column = column + span_col_count
                        width = sum(column_widths[column:end_column])
                        width += 2 * (span_col_count - 1)
                        new_underline += (width * '-') + ' '
                    elif table[row][column] == '':
                        pass
                    else:
                        new_underline += (column_widths[column] * '-') + ' '
                    column += 1
                output.append(new_underline)
    # Trailing spaces would be invalid rst; strip them from every line.
    for i in range(len(output)):
        output[i] = output[i].rstrip()
    output.append(underline)
    return '\n'.join(output)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.